From 47833b1f08d0f58b8bbefec42d788b662c0d4b1a Mon Sep 17 00:00:00 2001
From: KKkai <1640576073@qq.com>
Date: Thu, 22 Jan 2026 13:54:28 +0800
Subject: [PATCH 01/14] [Codex] After prompt #0
---
"Icon\r" | 0
.../qwen2_5omni/python_src_code/LICENSE | 201 +
.../qwen2_5omni/python_src_code/README.md | 982 +
.../qwen2_5omni/python_src_code/__init__.py | 28 +
.../python_src_code/added_tokens.json | 24 +
.../python_src_code/audio_process.py | 94 +
.../python_src_code/chat_template.json | 3 +
.../qwen2_5omni/python_src_code/config.json | 495 +
.../python_src_code/configuration.json | 1 +
.../configuration_qwen2_5_omni.py | 1018 +
.../python_src_code/generation_config.json | 4 +
.../qwen2_5omni/python_src_code/merges.txt | 151388 +++
.../model.safetensors.index.json | 2455 +
.../python_src_code/modeling_qwen2_5_omni.py | 4126 +
.../python_src_code/modular_qwen2_5_omni.py | 4289 +
.../qwen2_5omni/python_src_code/name.py | 75 +
.../python_src_code/preprocessor_config.json | 31 +
.../processing_qwen2_5_omni.py | 405 +
.../python_src_code/special_tokens_map.json | 38 +
.../qwen2_5omni/python_src_code/spk_dict.pt | Bin 0 -> 259544 bytes
.../python_src_code/tokenizer.json | 757444 +++++++++++++++
.../python_src_code/tokenizer_config.json | 223 +
.../python_src_code/vision_process.py | 498 +
.../qwen2_5omni/python_src_code/vocab.json | 1 +
24 files changed, 923823 insertions(+)
create mode 100644 "Icon\r"
create mode 100644 mllm/models/qwen2_5omni/python_src_code/LICENSE
create mode 100644 mllm/models/qwen2_5omni/python_src_code/README.md
create mode 100644 mllm/models/qwen2_5omni/python_src_code/__init__.py
create mode 100644 mllm/models/qwen2_5omni/python_src_code/added_tokens.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/audio_process.py
create mode 100644 mllm/models/qwen2_5omni/python_src_code/chat_template.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/config.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/configuration.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/configuration_qwen2_5_omni.py
create mode 100644 mllm/models/qwen2_5omni/python_src_code/generation_config.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/merges.txt
create mode 100644 mllm/models/qwen2_5omni/python_src_code/model.safetensors.index.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/modeling_qwen2_5_omni.py
create mode 100644 mllm/models/qwen2_5omni/python_src_code/modular_qwen2_5_omni.py
create mode 100644 mllm/models/qwen2_5omni/python_src_code/name.py
create mode 100644 mllm/models/qwen2_5omni/python_src_code/preprocessor_config.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/processing_qwen2_5_omni.py
create mode 100644 mllm/models/qwen2_5omni/python_src_code/special_tokens_map.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/spk_dict.pt
create mode 100644 mllm/models/qwen2_5omni/python_src_code/tokenizer.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/tokenizer_config.json
create mode 100644 mllm/models/qwen2_5omni/python_src_code/vision_process.py
create mode 100644 mllm/models/qwen2_5omni/python_src_code/vocab.json
diff --git "a/Icon\r" "b/Icon\r"
new file mode 100644
index 000000000..e69de29bb
diff --git a/mllm/models/qwen2_5omni/python_src_code/LICENSE b/mllm/models/qwen2_5omni/python_src_code/LICENSE
new file mode 100644
index 000000000..9b1dd4944
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2025 Alibaba Cloud
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/mllm/models/qwen2_5omni/python_src_code/README.md b/mllm/models/qwen2_5omni/python_src_code/README.md
new file mode 100644
index 000000000..9eb4a8e90
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/README.md
@@ -0,0 +1,982 @@
+---
+license: other
+license_name: apache-2.0
+license_link: https://huggingface.co/Qwen/Qwen2.5-Omni-7B/blob/main/LICENSE
+language:
+- en
+tags:
+- multimodal
+library_name: transformers
+pipeline_tag: any-to-any
+---
+
+# Qwen2.5-Omni
+
+
+
+
+
+## Overview
+### Introduction
+Qwen2.5-Omni is an end-to-end multimodal model designed to perceive diverse modalities, including text, images, audio, and video, while simultaneously generating text and natural speech responses in a streaming manner.
+
+
+
+
+
+### Key Features
+
+* **Omni and Novel Architecture**: We propose the Thinker-Talker architecture, an end-to-end multimodal model designed to perceive diverse modalities, including text, images, audio, and video, while simultaneously generating text and natural speech responses in a streaming manner. We also propose a novel position embedding, named TMRoPE (Time-aligned Multimodal RoPE), to synchronize the timestamps of video inputs with audio.
+
+* **Real-Time Voice and Video Chat**: An architecture designed for fully real-time interaction, supporting chunked input and immediate output.
+
+* **Natural and Robust Speech Generation**: Surpasses many existing streaming and non-streaming alternatives, demonstrating superior robustness and naturalness in speech generation.
+
+* **Strong Performance Across Modalities**: Exhibits exceptional performance across all modalities when benchmarked against similarly sized single-modality models. Qwen2.5-Omni outperforms the similarly sized Qwen2-Audio in audio capabilities and achieves performance comparable to Qwen2.5-VL-7B.
+
+* **Excellent End-to-End Speech Instruction Following**: Qwen2.5-Omni shows performance in end-to-end speech instruction following that rivals its effectiveness with text inputs, as evidenced by benchmarks such as MMLU and GSM8K.
+
+### Model Architecture
+
+*(figure: Thinker-Talker architecture diagram)*
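+
+The TMRoPE idea above can be pictured with a small sketch: every audio and visual token gets a position index derived from its timestamp, so streams that share a timeline receive aligned rotary positions. The sketch below is illustrative only; it does not reproduce the actual TMRoPE implementation in `modeling_qwen2_5_omni.py`, and the tick size and frame rates are assumptions.
+
+```python
+# Illustrative toy version of time-aligned positions (not the real TMRoPE code).
+def time_aligned_positions(n_video, video_fps, n_audio, audio_fps, tick=0.025):
+    """Map each frame of two synchronized streams onto a shared integer timeline."""
+    video_pos = [round(i / video_fps / tick) for i in range(n_video)]
+    audio_pos = [round(i / audio_fps / tick) for i in range(n_audio)]
+    return video_pos, audio_pos
+
+v, a = time_aligned_positions(n_video=4, video_fps=2.0, n_audio=8, audio_fps=4.0)
+print(v)  # [0, 20, 40, 60]            -- video frame i lands at t = i / fps
+print(a)  # [0, 10, 20, ..., 70]       -- audio frames interleave on the same timeline
+```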
+### Performance
+
+We conducted a comprehensive evaluation of Qwen2.5-Omni, which demonstrates strong performance across all modalities when compared to similarly sized single-modality models (such as Qwen2.5-VL-7B and Qwen2-Audio) and to closed-source models such as Gemini-1.5-Pro. In tasks that require integrating multiple modalities, such as OmniBench, Qwen2.5-Omni achieves state-of-the-art performance. In single-modality tasks, it excels in speech recognition (Common Voice), translation (CoVoST2), audio understanding (MMAU), image reasoning (MMMU, MMStar), video understanding (MVBench), and speech generation (Seed-tts-eval and subjective naturalness).
+
+*(figure: performance overview across modalities)*
+
+#### Multimodality -> Text
+
+**OmniBench**
+
+| Model | Speech | Sound Event | Music | Avg |
+|---|---|---|---|---|
+| Gemini-1.5-Pro | 42.67% | 42.26% | 46.23% | 42.91% |
+| MIO-Instruct | 36.96% | 33.58% | 11.32% | 33.80% |
+| AnyGPT (7B) | 17.77% | 20.75% | 13.21% | 18.04% |
+| video-SALMONN | 34.11% | 31.70% | 56.60% | 35.64% |
+| UnifiedIO2-xlarge | 39.56% | 36.98% | 29.25% | 38.00% |
+| UnifiedIO2-xxlarge | 34.24% | 36.98% | 24.53% | 33.98% |
+| MiniCPM-o | - | - | - | 40.50% |
+| Baichuan-Omni-1.5 | - | - | - | 42.90% |
+| Qwen2.5-Omni-3B | 52.14% | 52.08% | 52.83% | 52.19% |
+| Qwen2.5-Omni-7B | 55.25% | 60.00% | 52.83% | 56.13% |
+
+#### Audio -> Text
+
+**ASR**
+
+Librispeech:
+
+| Model | dev-clean | dev-other | test-clean | test-other |
+|---|---|---|---|---|
+| SALMONN | - | - | 2.1 | 4.9 |
+| SpeechVerse | - | - | 2.1 | 4.4 |
+| Whisper-large-v3 | - | - | 1.8 | 3.6 |
+| Llama-3-8B | - | - | - | 3.4 |
+| Llama-3-70B | - | - | - | 3.1 |
+| Seed-ASR-Multilingual | - | - | 1.6 | 2.8 |
+| MiniCPM-o | - | - | 1.7 | - |
+| MinMo | - | - | 1.7 | 3.9 |
+| Qwen-Audio | 1.8 | 4.0 | 2.0 | 4.2 |
+| Qwen2-Audio | 1.3 | 3.4 | 1.6 | 3.6 |
+| Qwen2.5-Omni-3B | 2.0 | 4.1 | 2.2 | 4.5 |
+| Qwen2.5-Omni-7B | 1.6 | 3.5 | 1.8 | 3.4 |
+
+Common Voice 15:
+
+| Model | en | zh | yue | fr |
+|---|---|---|---|---|
+| Whisper-large-v3 | 9.3 | 12.8 | 10.9 | 10.8 |
+| MinMo | 7.9 | 6.3 | 6.4 | 8.5 |
+| Qwen2-Audio | 8.6 | 6.9 | 5.9 | 9.6 |
+| Qwen2.5-Omni-3B | 9.1 | 6.0 | 11.6 | 9.6 |
+| Qwen2.5-Omni-7B | 7.6 | 5.2 | 7.3 | 7.5 |
+
+Fleurs:
+
+| Model | zh | en |
+|---|---|---|
+| Whisper-large-v3 | 7.7 | 4.1 |
+| Seed-ASR-Multilingual | - | 3.4 |
+| Megrez-3B-Omni | 10.8 | - |
+| MiniCPM-o | 4.4 | - |
+| MinMo | 3.0 | 3.8 |
+| Qwen2-Audio | 7.5 | - |
+| Qwen2.5-Omni-3B | 3.2 | 5.4 |
+| Qwen2.5-Omni-7B | 3.0 | 4.1 |
+
+Wenetspeech:
+
+| Model | test-net | test-meeting |
+|---|---|---|
+| Seed-ASR-Chinese | 4.7 | 5.7 |
+| Megrez-3B-Omni | - | 16.4 |
+| MiniCPM-o | 6.9 | - |
+| MinMo | 6.8 | 7.4 |
+| Qwen2.5-Omni-3B | 6.3 | 8.1 |
+| Qwen2.5-Omni-7B | 5.9 | 7.7 |
+
+Voxpopuli-V1.0-en:
+
+| Model | Performance |
+|---|---|
+| Llama-3-8B | 6.2 |
+| Llama-3-70B | 5.7 |
+| Qwen2.5-Omni-3B | 6.6 |
+| Qwen2.5-Omni-7B | 5.8 |
+
+**S2TT**
+
+CoVoST2:
+
+| Model | en-de | de-en | en-zh | zh-en |
+|---|---|---|---|---|
+| SALMONN | 18.6 | - | 33.1 | - |
+| SpeechLLaMA | - | 27.1 | - | 12.3 |
+| BLSP | 14.1 | - | - | - |
+| MiniCPM-o | - | - | 48.2 | 27.2 |
+| MinMo | - | 39.9 | 46.7 | 26.0 |
+| Qwen-Audio | 25.1 | 33.9 | 41.5 | 15.7 |
+| Qwen2-Audio | 29.9 | 35.2 | 45.2 | 24.4 |
+| Qwen2.5-Omni-3B | 28.3 | 38.1 | 41.4 | 26.6 |
+| Qwen2.5-Omni-7B | 30.2 | 37.7 | 41.4 | 29.4 |
+
+**SER**
+
+Meld:
+
+| Model | Performance |
+|---|---|
+| WavLM-large | 0.542 |
+| MiniCPM-o | 0.524 |
+| Qwen-Audio | 0.557 |
+| Qwen2-Audio | 0.553 |
+| Qwen2.5-Omni-3B | 0.558 |
+| Qwen2.5-Omni-7B | 0.570 |
+
+**VSC**
+
+VocalSound:
+
+| Model | Performance |
+|---|---|
+| CLAP | 0.495 |
+| Pengi | 0.604 |
+| Qwen-Audio | 0.929 |
+| Qwen2-Audio | 0.939 |
+| Qwen2.5-Omni-3B | 0.936 |
+| Qwen2.5-Omni-7B | 0.939 |
+
+**Music**
+
+GiantSteps Tempo:
+
+| Model | Performance |
+|---|---|
+| Llark-7B | 0.86 |
+| Qwen2.5-Omni-3B | 0.88 |
+| Qwen2.5-Omni-7B | 0.88 |
+
+MusicCaps:
+
+| Model | Performance |
+|---|---|
+| LP-MusicCaps | 0.291 / 0.149 / 0.089 / 0.061 / 0.129 / 0.130 |
+| Qwen2.5-Omni-3B | 0.325 / 0.163 / 0.093 / 0.057 / 0.132 / 0.229 |
+| Qwen2.5-Omni-7B | 0.328 / 0.162 / 0.090 / 0.055 / 0.127 / 0.225 |
+
+**Audio Reasoning**
+
+MMAU:
+
+| Model | Sound | Music | Speech | Avg |
+|---|---|---|---|---|
+| Gemini-Pro-V1.5 | 56.75 | 49.40 | 58.55 | 54.90 |
+| Qwen2-Audio | 54.95 | 50.98 | 42.04 | 49.20 |
+| Qwen2.5-Omni-3B | 70.27 | 60.48 | 59.16 | 63.30 |
+| Qwen2.5-Omni-7B | 67.87 | 69.16 | 59.76 | 65.60 |
+
+**Voice Chatting**
+
+VoiceBench:
+
+| Model | AlpacaEval | CommonEval | SD-QA | MMSU |
+|---|---|---|---|---|
+| Ultravox-v0.4.1-LLaMA-3.1-8B | 4.55 | 3.90 | 53.35 | 47.17 |
+| MERaLiON | 4.50 | 3.77 | 55.06 | 34.95 |
+| Megrez-3B-Omni | 3.50 | 2.95 | 25.95 | 27.03 |
+| Lyra-Base | 3.85 | 3.50 | 38.25 | 49.74 |
+| MiniCPM-o | 4.42 | 4.15 | 50.72 | 54.78 |
+| Baichuan-Omni-1.5 | 4.50 | 4.05 | 43.40 | 57.25 |
+| Qwen2-Audio | 3.74 | 3.43 | 35.71 | 35.72 |
+| Qwen2.5-Omni-3B | 4.32 | 4.00 | 49.37 | 50.23 |
+| Qwen2.5-Omni-7B | 4.49 | 3.93 | 55.71 | 61.32 |
+
+| Model | OpenBookQA | IFEval | AdvBench | Avg |
+|---|---|---|---|---|
+| Ultravox-v0.4.1-LLaMA-3.1-8B | 65.27 | 66.88 | 98.46 | 71.45 |
+| MERaLiON | 27.23 | 62.93 | 94.81 | 62.91 |
+| Megrez-3B-Omni | 28.35 | 25.71 | 87.69 | 46.25 |
+| Lyra-Base | 72.75 | 36.28 | 59.62 | 57.66 |
+| MiniCPM-o | 78.02 | 49.25 | 97.69 | 71.69 |
+| Baichuan-Omni-1.5 | 74.51 | 54.54 | 97.31 | 71.14 |
+| Qwen2-Audio | 49.45 | 26.33 | 96.73 | 55.35 |
+| Qwen2.5-Omni-3B | 74.73 | 42.10 | 98.85 | 68.81 |
+| Qwen2.5-Omni-7B | 81.10 | 52.87 | 99.42 | 74.12 |
+
+#### Image -> Text
+
+| Dataset | Qwen2.5-Omni-7B | Qwen2.5-Omni-3B | Other Best | Qwen2.5-VL-7B | GPT-4o-mini |
+|--------------------------------|--------------|------------|------------|---------------|-------------|
+| MMMU (val) | 59.2 | 53.1 | 53.9 | 58.6 | **60.0** |
+| MMMU-Pro (overall) | 36.6 | 29.7 | - | **38.3** | 37.6 |
+| MathVista (testmini) | 67.9 | 59.4 | **71.9** | 68.2 | 52.5 |
+| MathVision (full) | 25.0 | 20.8 | 23.1 | **25.1** | - |
+| MMBench-V1.1-EN (test) | 81.8 | 77.8 | 80.5 | **82.6** | 76.0 |
+| MMVet (turbo) | 66.8 | 62.1 | **67.5** | 67.1 | 66.9 |
+| MMStar | **64.0** | 55.7 | **64.0** | 63.9 | 54.8 |
+| MME (sum) | 2340 | 2117 | **2372** | 2347 | 2003 |
+| MuirBench | 59.2 | 48.0 | - | **59.2** | - |
+| CRPE (relation) | **76.5** | 73.7 | - | 76.4 | - |
+| RealWorldQA (avg) | 70.3 | 62.6 | **71.9** | 68.5 | - |
+| MME-RealWorld (en) | **61.6** | 55.6 | - | 57.4 | - |
+| MM-MT-Bench | 6.0 | 5.0 | - | **6.3** | - |
+| AI2D | 83.2 | 79.5 | **85.8** | 83.9 | - |
+| TextVQA (val) | 84.4 | 79.8 | 83.2 | **84.9** | - |
+| DocVQA (test) | 95.2 | 93.3 | 93.5 | **95.7** | - |
+| ChartQA (test avg) | 85.3 | 82.8 | 84.9 | **87.3** | - |
+| OCRBench_V2 (en) | **57.8** | 51.7 | - | 56.3 | - |
+
+
+| Dataset | Qwen2.5-Omni-7B | Qwen2.5-Omni-3B | Qwen2.5-VL-7B | Grounding DINO | Gemini 1.5 Pro |
+|--------------------------|--------------|---------------|---------------|----------------|----------------|
+| Refcoco (val) | 90.5 | 88.7 | 90.0 | **90.6** | 73.2 |
+| Refcoco (textA) | **93.5** | 91.8 | 92.5 | 93.2 | 72.9 |
+| Refcoco (textB) | 86.6 | 84.0 | 85.4 | **88.2** | 74.6 |
+| Refcoco+ (val) | 85.4 | 81.1 | 84.2 | **88.2** | 62.5 |
+| Refcoco+ (textA) | **91.0** | 87.5 | 89.1 | 89.0 | 63.9 |
+| Refcoco+ (textB) | **79.3** | 73.2 | 76.9 | 75.9 | 65.0 |
+| Refcocog+ (val) | **87.4** | 85.0 | 87.2 | 86.1 | 75.2 |
+| Refcocog+ (test) | **87.9** | 85.1 | 87.2 | 87.0 | 76.2 |
+| ODinW | 42.4 | 39.2 | 37.3 | **55.0** | 36.7 |
+| PointGrounding | 66.5 | 46.2 | **67.3** | - | - |
+
+#### Video (without audio) -> Text
+
+| Dataset | Qwen2.5-Omni-7B | Qwen2.5-Omni-3B | Other Best | Qwen2.5-VL-7B | GPT-4o-mini |
+|-----------------------------|--------------|------------|------------|---------------|-------------|
+| Video-MME (w/o sub) | 64.3 | 62.0 | 63.9 | **65.1** | 64.8 |
+| Video-MME (w sub) | **72.4** | 68.6 | 67.9 | 71.6 | - |
+| MVBench | **70.3** | 68.7 | 67.2 | 69.6 | - |
+| EgoSchema (test) | **68.6** | 61.4 | 63.2 | 65.0 | - |
+
+#### Zero-shot Speech Generation
+
+**Content Consistency** (SEED)
+
+| Model | test-zh | test-en | test-hard |
+|---|---|---|---|
+| Seed-TTS_ICL | 1.11 | 2.24 | 7.58 |
+| Seed-TTS_RL | 1.00 | 1.94 | 6.42 |
+| MaskGCT | 2.27 | 2.62 | 10.27 |
+| E2_TTS | 1.97 | 2.19 | - |
+| F5-TTS | 1.56 | 1.83 | 8.67 |
+| CosyVoice 2 | 1.45 | 2.57 | 6.83 |
+| CosyVoice 2-S | 1.45 | 2.38 | 8.08 |
+| Qwen2.5-Omni-3B_ICL | 1.95 | 2.87 | 9.92 |
+| Qwen2.5-Omni-3B_RL | 1.58 | 2.51 | 7.86 |
+| Qwen2.5-Omni-7B_ICL | 1.70 | 2.72 | 7.97 |
+| Qwen2.5-Omni-7B_RL | 1.42 | 2.32 | 6.54 |
+
+**Speaker Similarity** (SEED)
+
+| Model | test-zh | test-en | test-hard |
+|---|---|---|---|
+| Seed-TTS_ICL | 0.796 | 0.762 | 0.776 |
+| Seed-TTS_RL | 0.801 | 0.766 | 0.782 |
+| MaskGCT | 0.774 | 0.714 | 0.748 |
+| E2_TTS | 0.730 | 0.710 | - |
+| F5-TTS | 0.741 | 0.647 | 0.713 |
+| CosyVoice 2 | 0.748 | 0.652 | 0.724 |
+| CosyVoice 2-S | 0.753 | 0.654 | 0.732 |
+| Qwen2.5-Omni-3B_ICL | 0.741 | 0.635 | 0.748 |
+| Qwen2.5-Omni-3B_RL | 0.744 | 0.635 | 0.746 |
+| Qwen2.5-Omni-7B_ICL | 0.752 | 0.632 | 0.747 |
+| Qwen2.5-Omni-7B_RL | 0.754 | 0.641 | 0.752 |
+
+#### Text -> Text
+
+| Dataset | Qwen2.5-Omni-7B | Qwen2.5-Omni-3B | Qwen2.5-7B | Qwen2.5-3B | Qwen2-7B | Llama3.1-8B | Gemma2-9B |
+|-----------------------------------|-----------|------------|------------|------------|------------|-------------|-----------|
+| MMLU-Pro | 47.0 | 40.4 | **56.3** | 43.7 | 44.1 | 48.3 | 52.1 |
+| MMLU-redux | 71.0 | 60.9 | **75.4** | 64.4 | 67.3 | 67.2 | 72.8 |
+| LiveBench (0831) | 29.6 | 22.3 | **35.9** | 26.8 | 29.2 | 26.7 | 30.6 |
+| GPQA | 30.8 | 34.3 | **36.4** | 30.3 | 34.3 | 32.8 | 32.8 |
+| MATH | 71.5 | 63.6 | **75.5** | 65.9 | 52.9 | 51.9 | 44.3 |
+| GSM8K | 88.7 | 82.6 | **91.6** | 86.7 | 85.7 | 84.5 | 76.7 |
+| HumanEval | 78.7 | 70.7 | **84.8** | 74.4 | 79.9 | 72.6 | 68.9 |
+| MBPP | 73.2 | 70.4 | **79.2** | 72.7 | 67.2 | 69.6 | 74.9 |
+| MultiPL-E | 65.8 | 57.6 | **70.4** | 60.2 | 59.1 | 50.7 | 53.4 |
+| LiveCodeBench (2305-2409) | 24.6 | 16.5 | **28.7** | 19.9 | 23.9 | 8.3 | 18.9 |
+
+#### Minimum GPU memory requirements
+
+| Model | Precision | 15 s video | 30 s video | 60 s video |
+|-----------------|-----------|-------------|-----------------|-----------------|
+| Qwen2.5-Omni-3B | FP32 | 89.10 GB | Not recommended | Not recommended |
+| Qwen2.5-Omni-3B | BF16 | 18.38 GB | 22.43 GB | 28.22 GB |
+| Qwen2.5-Omni-7B | FP32 | 93.56 GB | Not recommended | Not recommended |
+| Qwen2.5-Omni-7B | BF16 | 31.11 GB | 41.85 GB | 60.19 GB |
+
+Note: The table above presents the theoretical minimum memory requirements for inference with `transformers`; the `BF16` numbers were measured with `attn_implementation="flash_attention_2"`. In practice, actual memory usage is typically at least 1.2 times higher. For more information, see the model size estimator [here](https://huggingface.co/docs/accelerate/main/en/usage_guides/model_size_estimator).
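+
+As a rough cross-check of these numbers, weight memory can be estimated directly from the parameter count. The sketch below is a back-of-the-envelope calculation, not the estimator linked above; the 1.2x overhead factor comes from the note, and everything beyond the weights (KV cache, activations, multimodal features) is ignored, which is why the table's video-input numbers are higher.
+
+```python
+def estimate_weight_memory_gb(num_params: float, bytes_per_param: int, overhead: float = 1.2) -> float:
+    """Lower-bound memory for the weights alone, with a flat runtime overhead factor."""
+    return num_params * bytes_per_param * overhead / 1024**3
+
+print(f"{estimate_weight_memory_gb(7e9, 2):.1f} GB")  # ~15.6 GB: 7B model in BF16 (2 bytes/param)
+print(f"{estimate_weight_memory_gb(7e9, 4):.1f} GB")  # ~31.3 GB: 7B model in FP32 (4 bytes/param)
+```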
+
+#### Video URL resource usage
+
+Video URL compatibility largely depends on the version of the third-party video-reading library, as detailed in the table below. If you prefer not to use the default backend, switch it by setting `FORCE_QWENVL_VIDEO_READER=torchvision` or `FORCE_QWENVL_VIDEO_READER=decord` (see the snippet after the table).
+
+| Backend | HTTP | HTTPS |
+|-------------|------|-------|
+| torchvision >= 0.19.0 | ✅ | ✅ |
+| torchvision < 0.19.0 | ❌ | ❌ |
+| decord | ✅ | ❌ |
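+
+A minimal way to pin the backend is to set the environment variable before the video utilities are imported (the variable name comes from the note above; that it is read at import time is an assumption about the loader):
+
+```python
+import os
+
+# Select the video backend before importing the processing utilities.
+os.environ["FORCE_QWENVL_VIDEO_READER"] = "torchvision"  # or "decord"
+```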
+
+#### Batch inference
+
+When `return_audio=False` is set, the model can batch inputs of mixed types (text, images, audio, and video) in a single call. Here is an example.
+
+```python
+from transformers import Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniProcessor
+from qwen_omni_utils import process_mm_info
+
+# Load the model and processor. The class names below follow the Transformers
+# integration of Qwen2.5-Omni; adjust them if your local checkout differs.
+model = Qwen2_5OmniForConditionalGeneration.from_pretrained(
+    "Qwen/Qwen2.5-Omni-7B", torch_dtype="auto", device_map="auto"
+)
+processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")
+
+# Sample messages for batch inference
+
+# Conversation with video only
+conversation1 = [
+ {
+ "role": "system",
+ "content": [
+ {"type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech."}
+ ],
+ },
+ {
+ "role": "user",
+ "content": [
+ {"type": "video", "video": "/path/to/video.mp4"},
+ ]
+ }
+]
+
+# Conversation with audio only
+conversation2 = [
+ {
+ "role": "system",
+ "content": [
+ {"type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech."}
+ ],
+ },
+ {
+ "role": "user",
+ "content": [
+ {"type": "audio", "audio": "/path/to/audio.wav"},
+ ]
+ }
+]
+
+# Conversation with pure text
+conversation3 = [
+ {
+ "role": "system",
+ "content": [
+ {"type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech."}
+ ],
+ },
+ {
+ "role": "user",
+ "content": "who are you?"
+ }
+]
+
+
+# Conversation with mixed media
+conversation4 = [
+ {
+ "role": "system",
+ "content": [
+ {"type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech."}
+ ],
+ },
+ {
+ "role": "user",
+ "content": [
+ {"type": "image", "image": "/path/to/image.jpg"},
+ {"type": "video", "video": "/path/to/video.mp4"},
+ {"type": "audio", "audio": "/path/to/audio.wav"},
+                {"type": "text", "text": "What elements can you see and hear in these media?"},
+ ],
+ }
+]
+
+# Combine messages for batch processing
+conversations = [conversation1, conversation2, conversation3, conversation4]
+
+# set use audio in video
+USE_AUDIO_IN_VIDEO = True
+
+# Preparation for batch inference
+text = processor.apply_chat_template(conversations, add_generation_prompt=True, tokenize=False)
+audios, images, videos = process_mm_info(conversations, use_audio_in_video=USE_AUDIO_IN_VIDEO)
+
+inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors="pt", padding=True, use_audio_in_video=USE_AUDIO_IN_VIDEO)
+inputs = inputs.to(model.device).to(model.dtype)
+
+# Batch Inference
+text_ids = model.generate(**inputs, use_audio_in_video=USE_AUDIO_IN_VIDEO, return_audio=False)
+text = processor.batch_decode(text_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
+print(text)
+```
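+
+For a single conversation (e.g. `conversations = [conversation1]`) you can also have the model speak: when audio output is not disabled, `generate` returns the audio waveform alongside the text ids. The snippet below follows the usage pattern published for Qwen2.5-Omni; treat the exact return signature as an assumption if your local code differs.
+
+```python
+import soundfile as sf
+
+# Single-sample generation with speech output (do not pass return_audio=False here).
+text_ids, audio = model.generate(**inputs, use_audio_in_video=USE_AUDIO_IN_VIDEO)
+sf.write(
+    "output.wav",
+    audio.reshape(-1).detach().cpu().numpy(),
+    samplerate=24000,  # Qwen2.5-Omni's talker outputs 24 kHz audio
+)
+print(processor.batch_decode(text_ids, skip_special_tokens=True))
+```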
+
diff --git a/mllm/models/qwen2_5omni/python_src_code/__init__.py b/mllm/models/qwen2_5omni/python_src_code/__init__.py
new file mode 100644
index 000000000..0d7ddae0d
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+from ...utils.import_utils import define_import_structure
+
+
+if TYPE_CHECKING:
+ from .configuration_qwen2_5_omni import *
+ from .modeling_qwen2_5_omni import *
+ from .processing_qwen2_5_omni import *
+else:
+ import sys
+
+ _file = globals()["__file__"]
+ sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__)
diff --git a/mllm/models/qwen2_5omni/python_src_code/added_tokens.json b/mllm/models/qwen2_5omni/python_src_code/added_tokens.json
new file mode 100644
index 000000000..456225635
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/added_tokens.json
@@ -0,0 +1,24 @@
+{
+  "</tool_call>": 151658,
+  "
diff --git a/mllm/models/qwen2_5omni/python_src_code/merges.txt b/mllm/models/qwen2_5omni/python_src_code/merges.txt
new file mode 100644
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/merges.txt
@@ -0,0 +1,151388 @@
+ s
+
+Ġع ÙĦÙī
+Ġm á»Ļt
+Ġv Ỽi
+Ġng ưá»Ŀi
+ĠØ¥ ÙĦÙī
+Ġnh ững
+Ġth á»ĥ
+Ġ×IJ ×ķ
+Ġ×¢ ×Ŀ
+ا Ùĭ
+Ġ à¹ģละ
+ĠÙĦ ا
+Ġnh ư
+ĠاÙĦت ÙĬ
+Ġ×Ķ ×ķ×IJ
+ĠÄij ến
+ĠØ£ ÙĪ
+Ġv á»ģ
+ĠlÃł m
+Ġs ẽ
+Ġc Å©ng
+Ġ ợ
+ĠÄij ó
+Ġnhi á»ģu
+Ġt ại
+Ġtr ên
+Ġ×Ĵ ×Ŀ
+Ġnh Ãł
+Ġ׼ ×Ļ
+Ġs á»±
+ĠÄij ầu
+Ġb á»ĭ
+ĠÙĩ ذا
+Ġnh ất
+Ġph ải
+Ġhi á»ĩn
+Ġdụ ng
+ĠÄij á»Ļng
+ĠاÙĦÙĦ Ùĩ
+ĠØ Į
+ĠÙĥ ÙĦ
+Ġvi á»ĩc
+Ġn Äĥm
+Ġth ì
+Ġh á»įc
+ĠÙĪ Øª
+t é
+Ġا ÙĨ
+Ġt ôi
+Ġ×IJ ׳×Ļ
+Ġ׾ ×Ļ
+Ġ×ŀ ×ķ
+Ġng Ãły
+Ġn Æ°á»Ľc
+Ġ×Ķ ×Ļ×IJ
+Ġ×IJ ×Ļ
+Ġh Æ¡n
+ĠÙĩ ذÙĩ
+ĠÙĪ ÙĬ
+ĠاÙĦ ذÙĬ
+Ġ×ķ ×ŀ
+Ġgi á
+Ġnh ân
+Ġch ÃŃnh
+Ġm ình
+ĠÐĿ а
+Ġth ế
+Ġ×Ļ ×ķתר
+Ġ×IJ ×Ŀ
+Ġn ên
+Ġh ợ
+Ġhợ p
+Ġc òn
+ĠÙĩ ÙĪ
+Ġc Æ¡
+Ġr ất
+ĠVi á»ĩt
+Ġب عد
+Ġש ×Ļ
+Ġth á»Ŀi
+Ġc ách
+ĠÄij á»ĵng
+Ġн о
+Ġtr ưá»Ŀng
+Ø Ł
+ĠÄij á»ĭnh
+ĠÄiji á»ģu
+×Ļ ×Ļ×Ŀ
+Ġth á»±c
+n ın
+Ġh ình
+Ġn ói
+Ġc ùng
+Ġ×Ķ ×Ķ
+ĠØ¥ ÙĨ
+Ġ×IJ ×ij׾
+Ġnh ưng
+Ġbi ết
+Ġж е
+Ġch úng
+ĠÄij ang
+Ġذ ÙĦÙĥ
+Ġl ên
+Ġkh ách
+Ġn Ãło
+Ġs á»Ń
+Ġkh ác
+Ġë° ı
+Ġl ý
+×Ļ ×Ļ
+ĠÄij ây
+Ġ׾ ×ŀ
+Ġc ần
+Ġtr ình
+Ġph át
+ãģ« ãĤĤ
+п о
+Ġn Äĥng
+Ġb á»Ļ
+Ġv ụ
+ĠÄij á»Ļ
+Ñĩ е
+Ġnh áºŃn
+Ġtr Æ°á»Ľc
+Ġ×¢ ×ĵ
+Ġh Ãłnh
+ĠØ® ÙĦاÙĦ
+Ġl ượng
+Ġc ấp
+Ġtá» ±
+Ġv ì
+Ġt ư
+Ġch ất
+Ġ׼ ×ŀ×ķ
+Ġg ì
+Ġש ׳
+Ġt ế
+ת ×ķ
+Ġnghi á»ĩp
+Ġm ặt
+ĠÙĥ Ùħا
+Ġ×ij ×Ļף
+Ġר ×§
+Ġth ấy
+Ġmá y
+ĠÙģ Ùī
+Ġd ân
+Ġ×IJ ×Ĺ×ĵ
+Ġt âm
+Ġ׼ ×ļ
+Ġ׾ ×ķ
+в о
+Ġt ác
+Ġto Ãłn
+ĠÙĪ Ùħ
+Ġk ết
+Ġ หรืà¸Ń
+ĠÙĪØ§ÙĦ Ùħ
+ĠÄiji á»ĥm
+Ġ×ĸ ×ķ
+Ġ×ij ×ķ
+׼ ×ķת
+Ġh á»Ļi
+Ġb ằng
+ت Ùĩا
+Ġ׼ ×ĵ×Ļ
+Ġ×Ķ ×Ŀ
+Ġxu ất
+ĠÙĤ د
+Ġb ảo
+Ġt á»ijt
+Ġt ình
+ĠÙĩ ÙĬ
+ĠÄij á»iji
+Ġthi ết
+Ġhi á»ĩu
+Ġti ếp
+Ġt ạo
+ת ×Ķ
+Ġch á»§
+o ÅĽÄĩ
+Ġgi ú
+Ġgiú p
+ĠÃ ½
+Ġqu ả
+Ġlo ại
+Ġc ô
+ĠÃ ´
+Ġô ng
+Ġ×Ķ ×ķ
+ĠاÙĦÙĬ ÙĪÙħ
+ĠtÃŃ nh
+г а
+Ġph òng
+Ġ Äĥn
+Ġع اÙħ
+Ġv á»ĭ
+lar ını
+r ÃŃa
+Ġt Ỽi
+ĠÄij ưá»Ŀng
+Ġgi Ỽi
+Ġb ản
+Ġc ầu
+Ġnhi ên
+Ġb á»ĩnh
+Ġth ưá»Ŀng
+Ġ×IJ ×Ļף
+ĠÄij á»ģ
+Ġh á»ĩ
+Ġ×Ļש ר×IJ׾
+Ġqu á
+ĠÐĹ Ð°
+ãģ® ãģ§ãģĻãģĮ
+ĠÐŁ ÑĢи
+Ġph ần
+ĠÙĪ ÙĦا
+ĠlỼ n
+Ġtr á»ĭ
+Ġcả m
+Ġм о
+Ġd ùng
+ĠاÙĦ Ùī
+ĠعÙĦÙĬ Ùĩ
+ĠìŀĪ ìĬµëĭĪëĭ¤
+ÙĬ ÙĤ
+ĠÙĤ بÙĦ
+Ġho ặc
+ĠØŃ ÙĬØ«
+Ġ à¸Ĺีà¹Ī
+Ġغ ÙĬر
+ĠÄij ại
+Ġsá»ij ng
+нÑĭ ми
+Ġth ức
+Ġפ ×Ļ
+ĠÄiji á»ĩn
+ãģª ãģĭãģ£ãģŁ
+Ġgi ải
+Ġv ẫn
+Ġи Ñħ
+Ġö nce
+Ġv áºŃy
+Ġmu á»ijn
+Ġ ảnh
+à¹ĥà¸Ļ à¸ģาร
+ĠQu á»ijc
+Ġk ế
+׳ ×IJ
+Ġס ×Ļ
+Ġy êu
+ãģ® ãģĭ
+ĠÄij ẹ
+ĠÄijẹ p
+Ġch ức
+Ġy ıl
+ĠTür kiye
+d é
+ĠÙĤ اÙĦ
+Ġd á»ĭch
+ĠolduÄŁ u
+Ġch á»įn
+Ġت Ùħ
+หà¸Ļ ึà¹Īà¸ĩ
+ãģķãĤĮ ãģŁ
+Ġph áp
+ìĽ Ķ
+Ġti á»ģn
+ãģĹ ãģ¾ãģĹãģŁ
+Ġש ׾×IJ
+ÙĦ Ø©
+Ġ׾פ ׳×Ļ
+Ġ×ij ×Ļת
+ĠH Ãł
+ĠØŃ ت
+ĠØŃت Ùī
+Ġ×¢ ×ķ×ĵ
+Ġn ó
+Ġth áng
+à¹Ģลืà¸Ń à¸ģ
+ר ×Ķ
+Ġt Äĥng
+Ġcá i
+Ġtri á»ĥn
+Ġ×IJ×ķת ×ķ
+ìłģ ìĿ¸
+ĠC ông
+Ġ׾×Ķ ×Ļ×ķת
+Ġг ода
+и Ñİ
+Ġب عض
+Ġ à¸ģาร
+èī¯ ãģĦ
+ÙĪ Øª
+Ġli ên
+ĠÐĿ о
+ĠÐĿ е
+çļĦ ãģª
+ĠÙħ ت
+ĠÑĤак же
+ĠкоÑĤоÑĢ Ñĭе
+Ġ×Ļ ×ĵ×Ļ
+Ġtr á»įng
+ãĤµ ãĤ¤ãĥĪ
+ìłģ ìľ¼ë¡ľ
+Ġt áºŃp
+Ġש ׾×Ļ
+íķĺ ê²Į
+Ġt Ãłi
+ĠÐ ¯
+Ġr á»ĵi
+ا Ùĥ
+Ġth ương
+Ġ×Ķ ×ĸ×Ķ
+ĠÙĪ ÙħÙĨ
+à¸Ĺีà¹Ī มี
+Ġcu á»Ļc
+Ġbü yük
+ãģ¨ ãģĭ
+Ġ×ij ×Ļ×ķתר
+Ġl ần
+Ġgö re
+Ġtr ợ
+Ġ×ĺ ×ķ×ij
+ÑĤÑĮ ÑģÑı
+Ġth á»ijng
+Ġ׼ ש
+Ġti êu
+Ġ×ŀ×IJ ×ķ×ĵ
+Ø Ľ
+k Äħ
+Ġ à¹ĥà¸Ļ
+Ġv ấn
+Ġש ׾×ķ
+ĠÄij á»ģu
+Ùģ Øª
+Ġê²ĥ ìĿ´
+Ġh óa
+ĠاÙĦع اÙħ
+ĠÙĬ ÙĪÙħ
+к ой
+Ġbi á»ĩt
+ÑģÑĤ о
+Ġ×Ķ ×Ļ×ķ
+à¸Ĺีà¹Ī à¸Īะ
+Ġ×ĵ ×Ļ
+Ġ×IJ ×ļ
+Ġá n
+ص ÙĪØ±
+Ġtr ÃŃ
+ĠÐŁÑĢ Ð¾
+Ġl á»±c
+ãģĹãģ¦ ãģĦãģ¾ãģĻ
+Ġb Ãłi
+Ġ×ĸ ×IJת
+Ġb áo
+à¸ļ à¸Ļ
+ĠëĮĢ íķľ
+Ġti ế
+Ġtiế ng
+Ġb ên
+ãģķãĤĮ ãĤĭ
+s ión
+Ġt ìm
+×¢ ×ķ
+m é
+ни Ñı
+ãģ» ãģ©
+Ġà¹Ģà¸ŀ ราะ
+ب ة
+Ġë¶ Ħ
+Ġ×IJ ×ĸ
+à¸Ĺ à¹Īาà¸Ļ
+ת ×Ŀ
+Ġth êm
+Ġho ạt
+y ı
+×ĸ ×ķ
+Ġgi á»Ŀ
+Ġb án
+à¸Ĥ าย
+Ñĩ а
+Ġ à¹Ĩ
+ĠاÙĦÙħ ت
+ĠоÑĩ енÑĮ
+Ġb ất
+Ġtr ẻ
+ÑĤ ÑĢ
+ĠØ£ ÙĨÙĩ
+ĠØ« Ùħ
+Ġ׼ ×ŀ×Ķ
+Ġkh ó
+Ġr ằng
+ĠÙĪ ÙģÙĬ
+ни й
+Ġho Ãłn
+t ó
+Ġ×IJ שר
+ĠìĥĿ ê°ģ
+Ñģ а
+Ġ׼ ×ijר
+ĠÑįÑĤ ом
+lar ının
+Ġch ưa
+з и
+Ġd ẫn
+ĠÐļ ак
+ج ÙĪ
+ĠбÑĭ ло
+ĠÙĬ ت
+n ı
+ÅĤ am
+ĠÙĪÙĩ ÙĪ
+×ij ×ķ
+п и
+ר ת
+Ġqu á»ijc
+ж д
+ĠÄij Æ¡n
+Ùĥت ب
+Ġm ắt
+ระ à¸ļ
+ระà¸ļ à¸ļ
+ĠÙĥ اÙĨت
+Ġth ân
+สิà¸Ļ à¸Ħà¹īา
+×Ĵ ×Ļ
+Ġph ương
+à¹Ħมà¹Ī à¹Ħà¸Ķà¹ī
+ĠìĦ ±
+ĠC ác
+Ġ×Ķ×ŀ ×ķ
+ĠÑĤ ем
+Ġ×ĵ ×ķ
+à¸Ńะ à¹Ħร
+Ġv Äĥn
+ãģª ãģ®ãģ§
+ĠN á»Ļi
+Ġ×¢ ×ķ
+ãĤīãĤĮ ãĤĭ
+Ġs áng
+Ġgö ster
+ãģĵãģ¨ ãĤĴ
+Ġtaraf ından
+Ġм а
+ĠпоÑģл е
+Ġ׳ ×Ļת
+Ġ׳×Ļת ף
+Ġл еÑĤ
+Ġ׾ ׳×ķ
+Ñģ Ñģ
+Ġ×Ļ ×ķ
+п е
+ĠÙĪ ÙĦÙĥ
+ĠÙĪÙĦÙĥ ÙĨ
+Ġngo Ãłi
+ĠÄij á»ĭa
+r zÄħd
+dz iaÅĤ
+ĠÙħ ر
+иÑĤÑĮ ÑģÑı
+Ġ×IJ×Ĺר ×Ļ
+Ġ׾ ׼׾
+à¸Ĥ à¹īà¸Ńม
+à¸Ĥà¹īà¸Ńม ูล
+Ġб ол
+Ġбол ее
+جÙħ ع
+л еÑĤ
+Ġl á»ĭch
+ĠÙħ Ø«ÙĦ
+Ġ그리 ê³ł
+Ġth ứ
+ĠdeÄŁ il
+ÙĪ ØŃ
+Ġש׾ ×ļ
+ĠÙħ ØŃÙħد
+Ġn ếu
+ĠÄij á»ķi
+Ġv ừa
+Ġm á»įi
+Ġо ни
+Ġl úc
+ĠÙĬ ÙĥÙĪÙĨ
+ì§ Ī
+Ġש׾ ׳×ķ
+ĠÐĶ Ð¾
+Ġש ׳×Ļ
+ล ิ
+×IJ פשר
+Ġs ức
+ê¶ Į
+Ġ ứng
+à¹Ħมà¹Ī มี
+Ø·ÙĦ ب
+ĠÑĩ ем
+Ġch uyên
+Ġth ÃŃch
+Ġ×ķ ×Ļ
+íķ ©
+ĠÙħ صر
+д о
+ĠÄij ất
+Ġch ế
+à¸Ĭ ืà¹Īà¸Ń
+Ġìĭ ł
+ĠØ¥ ذا
+Ġر ئÙĬس
+Ġש ×Ļש
+Ġgiả m
+Ñģ ка
+lar ında
+Ġs ợ
+ĠtÃŃ ch
+ĠÙĦ ÙĥÙĨ
+Ġب Ùħ
+×¢ ×ķ×ij
+×¢×ķ×ij ×ĵ
+ÅĤÄħ cz
+ları na
+Ġש ×Ŀ
+ĠÙĦ ت
+Ġש×Ķ ×ķ×IJ
+t ów
+Ġëĭ¤ 른
+ĠØ£ Ùĥثر
+ãģ® ãģ§ãģĻ
+׼ ×Ļ×Ŀ
+ĠolduÄŁ unu
+ãģĭ ãģª
+ãĤĤ ãģĨ
+ÙĬ ØŃ
+Ġnh ìn
+Ġngh á»ĩ
+ãģ«ãģª ãģ£ãģ¦
+п а
+Ġquy ết
+ÙĦ ÙĤ
+t á
+Ġlu ôn
+ĠÄij ặc
+Ġ×IJ ר
+Ġtu á»ķi
+s ão
+ìĻ ¸
+ر د
+ĠبÙĩ ا
+Ġ×Ķ×Ļ ×ķ×Ŀ
+×ķ ×ķ×Ļ
+ãģ§ãģĻ ãģŃ
+ĠÑĤ ого
+Ġth á»§
+ãģĹãģŁ ãģĦ
+ر ÙĤ
+Ġb ắt
+г Ñĥ
+Ġtá» Ń
+ÑĪ Ð°
+Ġ à¸Ľà¸µ
+Ġ×Ķ×IJ ×Ŀ
+íı ¬
+ż a
+Ġ×IJת ×Ķ
+Ġn á»Ļi
+Ġph ÃŃ
+ĠÅŁek ilde
+Ġl á»Ŀi
+d ıģı
+Ġ׼×IJ ף
+Ġt üm
+Ġm ạnh
+ĠM ỹ
+ãģĿ ãĤĵãģª
+Ġnh á»ı
+ãģª ãģĮãĤī
+Ġb ình
+ı p
+à¸ŀ า
+ĠÄij ánh
+ĠÙĪ ÙĦ
+ר ×ķת
+Ġ×IJ ×Ļ×ļ
+Ġch uyá»ĥn
+Ùĥ ا
+ãĤĮ ãĤĭ
+à¹ģม à¹Ī
+ãĤĪ ãģı
+ĠÙĪ ÙĤد
+íĸ Īëĭ¤
+Ġn Æ¡i
+ãģ«ãĤĪ ãģ£ãģ¦
+Ġvi ết
+Ġà¹Ģà¸ŀ ืà¹Īà¸Ń
+ëIJĺ ëĬĶ
+اد ÙĬ
+ĠÙģ Ø¥ÙĨ
+ì¦ Ŀ
+ĠÄij ặt
+Ġh Æ°á»Ľng
+Ġx ã
+Ġönem li
+ãģł ãģ¨
+Ġm ẹ
+Ġ×ij ×Ļ
+Ġ×ĵ ×ijר
+Ġv áºŃt
+ĠÄij ạo
+Ġdá»± ng
+ĠÑĤ ом
+ĠÙģÙĬ Ùĩا
+Ġج ÙħÙĬع
+Ġthu áºŃt
+st ÄĻp
+Ġti ết
+Ø´ ÙĬ
+Ġе Ñīе
+ãģĻãĤĭ ãģ¨
+ĠmÃł u
+ĠÑįÑĤ ого
+Ġv ô
+ĠÐŃ ÑĤо
+Ġth áºŃt
+Ġn ữa
+Ġbi ến
+Ġn ữ
+Ġ׾ ׼×Ŀ
+×Ļ ×Ļף
+Ġس ت
+ĠÐŀ ÑĤ
+Ġph ụ
+ê¹Į ì§Ģ
+Ġ׾ ×ļ
+Ġk ỳ
+à¹ĥ à¸Ħร
+Ġg ây
+ĠÙĦ ÙĦÙħ
+Ġtụ c
+ت ÙĬÙĨ
+Ġtr ợ
+Ġ׾ פ×Ļ
+Ġb á»ij
+ĠÐļ а
+ĠÄij ình
+ow Äħ
+s ında
+Ġkhi ến
+s ız
+Ġк огда
+ס ׾
+ĠбÑĭ л
+à¸Ļ à¹īà¸Ńย
+обÑĢаР·
+Ġê²ĥ ìĿ´ëĭ¤
+ëĵ¤ ìĿĢ
+ãģ¸ ãģ®
+Ġà¹Ģม ืà¹Īà¸Ń
+Ġph ục
+Ġ׊׾ק
+Ġh ết
+ĠÄij a
+à¹Ģà¸Ķà¹ĩ à¸ģ
+íĺ ķ
+l ÃŃ
+ê¸ ī
+Ġع دد
+ĠÄij á»ĵ
+Ġg ần
+Ġ×Ļ ×ķ×Ŀ
+Ġs Ä©
+ÑĢ Ñıд
+Ġquy á»ģn
+Ġ×IJ ׾×IJ
+Ùĩ Ùħا
+׳ ×Ļ×Ķ
+׾ ×ķת
+Ġ×Ķר ×ij×Ķ
+Ġti ên
+Ġal ın
+Ġd á»ħ
+人 ãģĮ
+но Ñģ
+л ÑģÑı
+ĠÄij ưa
+ส าว
+иÑĢов ан
+Ġ×ŀס פר
+×Ĵ ף
+Ġki ến
+ĠÐ ¨
+p é
+б Ñĥ
+ов ой
+б а
+ĠØ¥ ÙĦا
+×IJ ׾×Ļ
+Ġx ây
+Ġb ợi
+Ġש ×ķ
+人 ãģ®
+×§ ×Ļ×Ŀ
+à¹Ģà¸Ķ ืà¸Ńà¸Ļ
+Ġkh á
+Ġ×ķ ׾×Ķ
+×ĵ ×ķת
+Ġ×¢ ×ij×ķר
+Ġبش ÙĥÙĦ
+ĠÙĩÙĨا Ùĥ
+ÑĤ ÑĢа
+Ġ íķĺëĬĶ
+ร à¸Ńà¸ļ
+owa ÅĤ
+h é
+Ġdi á»ħn
+Ġ×Ķ ×Ľ×ľ
+ĠØ£ س
+Ġch uyá»ĩn
+ระ à¸Ķัà¸ļ
+ĠNh ững
+Ġ×IJ ×Ĺת
+ĠØŃ ÙĪÙĦ
+л ов
+׳ ר
+Ġ×ķ ׳
+Ġch Æ¡i
+Ġiç inde
+ÑģÑĤв Ñĥ
+Ġph á»ij
+ĠÑģ Ñĥ
+ç§ģ ãģ¯
+Ġch ứng
+Ġv á»±c
+à¹ģ à¸Ń
+Ġl áºŃp
+Ġtừ ng
+å°ij ãģĹ
+ĠNg uy
+ĠNguy á»ħn
+ĠÙģÙĬ Ùĩ
+Ġб а
+×Ļ ×Ļת
+Ġ×ľ×¢ ש×ķת
+Ġ×ŀ ׼
+Ġnghi á»ĩm
+Ġм ного
+Ġе е
+ëIJĺ ìĸ´
+Ġl ợi
+Ġ׾ ׾×IJ
+Ġ׼ ף
+Ġch ÃŃ
+ãģ§ ãģ®
+×Ĺ ×ķ
+ש ×ķ×Ŀ
+Ġ×ŀ ר
+ĠÐĶ Ð»Ñı
+Å ģ
+Ġ׼×IJ שר
+ĠM á»Ļt
+ĠÙĪØ§ÙĦ ت
+ĠìĿ´ 룰
+ÅŁ a
+Ġchi ến
+Ġaras ında
+Ġ×ij ×IJתר
+ãģķãĤĮ ãģ¦ãģĦãĤĭ
+Ø´ ÙĥÙĦ
+Ġt ượng
+Ġت ت
+ĠC ó
+Ġb á»ı
+Ġtá»ī nh
+Ġkh ÃŃ
+ĠпÑĢ Ð¾ÑģÑĤ
+ĠпÑĢоÑģÑĤ о
+ĠÙĪ ÙĤاÙĦ
+Ġgi áo
+ĠN ếu
+×IJ ×ŀר
+×¢×ł×Ļ ×Ļף
+íİ ¸
+Ùĩد Ùģ
+ĠB á»Ļ
+Ġb Ãłn
+Ġng uyên
+Ġgü zel
+ส าย
+ì² ľ
+×ŀ ×ķר
+Ġph ân
+ס פק
+×§ ×ij׾
+ĠاÙĦÙħ تØŃ
+ĠاÙĦÙħتØŃ دة
+ائ د
+Ġ×IJ ×ŀר
+Ġki ÅŁi
+ì¤ Ģ
+Ġtr uyá»ģn
+ĠÙĦ Ùĩا
+ĠÐľ а
+à¸ļริ ษ
+à¸ļริษ ั
+à¸ļริษั à¸Ĺ
+Ġש ׳×Ļ×Ŀ
+Ġмен Ñı
+ÅŁ e
+Ġdi á»ĩn
+Ġ×IJ׳ ×Ĺ׳×ķ
+k ü
+Ġc á»ķ
+Ġm á»Ĺi
+w ä
+Ùħ ÙĬ
+Ġhi á»ĥu
+ëĭ ¬
+Ġ×Ķ ×Ĺ׾
+Ġt ên
+Ġki á»ĩn
+ÙĨ ÙĤÙĦ
+Ġv á»ĩ
+×ĵ ת
+ĠÐłÐ¾ÑģÑģ ии
+л Ñĥ
+ĠاÙĦع ربÙĬØ©
+ĠØ· رÙĬÙĤ
+Ġ×Ķ×ij ×Ļת
+Ñģ еÑĢ
+Ġм не
+ä u
+Ġtri á»ĩu
+ĠÄij á»§
+Ġר ×ij
+ت ÙĩÙħ
+à¸ĭ ี
+Ġì§Ģ ê¸Ī
+li ÅĽmy
+د عÙħ
+ãģł ãĤįãģĨ
+Ñģки е
+Ġh á»ıi
+Ġ×§ ×ķ
+ÑĢÑĥ Ñģ
+ÙĨ ظر
+ãģ® ãĤĤ
+Ġ×Ķ ×Ľ×Ļ
+ĠìĽ IJ
+ÙĪ Ùĩ
+ĠÙĪ Ùİ
+ĠB ạn
+п лаÑĤ
+Ġ×ŀ ×ŀש
+лÑİ Ð±
+ĠнÑĥж но
+Ġth ư
+ãģ µ
+ãģı ãĤīãģĦ
+ر ش
+ר ×ķ×Ĺ
+ĠÙĬ تÙħ
+Ġצר ×Ļ×ļ
+Ġph á
+ม à¸Ńà¸ĩ
+Ġ×ij×IJ ×ķפף
+Ġcả nh
+Ġíķľ ëĭ¤
+Ġ×Ķ×ŀ ת
+à¸ķà¹Īาà¸ĩ à¹Ĩ
+มี à¸ģาร
+Ñģки Ñħ
+ĠÐĴ Ñģе
+Ġا ÙĪ
+ج ÙĬ
+ãģĵãģ¨ ãģ¯
+Ġd Ãłi
+Ġh á»ĵ
+èĩªåĪĨ ãģ®
+à¹Ħ หà¸Ļ
+ëĵ¤ ìĿĦ
+ĠV Äĥn
+Ġд аж
+Ġдаж е
+Ñĭ ми
+лаÑģ ÑĮ
+ÙĬ ÙĪÙĨ
+ÙĨ ÙĪ
+c ó
+ãģĹãģ¦ ãģĦãģŁ
+ãģł ãģĭãĤī
+طاÙĦ ب
+Ġc á»Ńa
+п ÑĢоÑģ
+ãģªãģ© ãģ®
+รุ à¹Īà¸Ļ
+Ġchi ếc
+л Ñĭ
+ĠÑıвлÑı еÑĤÑģÑı
+Ġn á»ķi
+ãģ® ãģĬ
+Ġ×IJת ×Ŀ
+ĠëķĮ문 ìĹIJ
+à¸ģล าà¸ĩ
+ĠbaÅŁ ka
+ìĦ Ŀ
+ĠÑĨ ел
+Ùģ ÙĤ
+ãģ«ãĤĪ ãĤĭ
+ÙĤ ا
+Ġçı kar
+Ġcứ u
+ط ا
+Ġש ת
+à¹Ĥ à¸Ħ
+Ġ×ŀ ׾
+Ġ×Ķ ×¤×¨
+Ġг де
+ĠØ® Ø·
+åīį ãģ«
+c jÄĻ
+Ġ׊ש×ķ×ij
+ר×Ĵ ×¢
+Ġkho ảng
+ĠÄij á»Ŀi
+ĠÐł е
+Ġо на
+Ġ×IJ ׳×ķ
+ãģ® ãģ«
+ĠاÙĦذ ÙĬÙĨ
+кÑĥ п
+ãĤµ ãĥ¼ãĥ
+ãĤµãĥ¼ãĥ ĵ
+ãĤµãĥ¼ãĥĵ ãĤ¹
+в ал
+г е
+Ġgi ữa
+ĠKh ông
+ĠâĹ ĭ
+à¸ģล ุà¹Īม
+ĠÙħÙĨ ذ
+à¸Ń à¹Īาà¸Ļ
+ĠÑģп оÑģоб
+ĠÄij á»Ļi
+Ġdi ÄŁer
+Ġ à¸ĸà¹īา
+Ùħ Ø«ÙĦ
+Ġ×Ķ×IJ ×Ļ
+Ġد ÙĪÙĨ
+ÙĬر اÙĨ
+Ñī и
+بÙĨ اء
+ĠØ¢ خر
+ظ Ùĩر
+Ġ×ij ׼
+ĠاÙĦÙħ ع
+ãĥ Ĵ
+Ġt ất
+Ġm ục
+ĠdoÄŁ ru
+ãģŁ ãĤī
+Ġס ×ķ
+Ġx ác
+ร à¸Ń
+ĠcÄĥ n
+Ġон л
+Ġонл айн
+Ġk ý
+Ġch ân
+Ġ à¹Ħมà¹Ī
+اØŃ Ø©
+r án
+׳×Ļ ×Ļ×Ŀ
+Ġ×ij ף
+ĠÐ ĸ
+à¸ķร à¸ĩ
+д Ñĭ
+Ġs ắc
+ÙĦ ت
+ãĥŃ ãĥ¼
+ĠÙĦ ÙĨ
+Ġר ×ķ
+Ġd Æ°á»Ľi
+à¹Ģ à¸ĺ
+à¹Ģà¸ĺ à¸Ń
+e ÄŁi
+Ġ×ķ ש
+ĠÙĦ Ø£
+Ġg ặp
+Ġc á»ij
+ãģ¨ ãģ¦ãĤĤ
+رÙĪ Ø³
+Ġ׾×Ķ ×Ļ
+Ġë³ ¸
+ä¸Ĭ ãģĴ
+Ġm ức
+Ñħ а
+Ġìŀ ¬
+à¸ī ัà¸Ļ
+ÑĢÑĥ ж
+Ġaç ık
+ÙĪ Ø§ÙĦ
+Ġ×ĸ ×ŀף
+人 ãģ¯
+ع ÙĬÙĨ
+Ñı Ñħ
+Ġ×Ĵ×ĵ ×ķ׾
+ר ×ķ×ij
+g ó
+ëĿ¼ ê³ł
+Ġark adaÅŁ
+ÙĨ شر
+Ġгод Ñĥ
+ĠболÑĮ ÑĪе
+ãģ¡ãĤĩ ãģ£ãģ¨
+Ġcâ u
+Ġs át
+íĶ ¼
+Ġti ến
+íķ´ ìķ¼
+ĠÙĪ Ø£ÙĨ
+à¸Ļ าà¸Ļ
+Ġ×ij×IJ×ŀ צע
+Ġ×ij×IJ×ŀצע ×ķת
+Ġ׾ ר
+Ġqu ản
+ĠÙĪØ§ÙĦ Ø£
+Ġ×IJ×ķת ×Ķ
+Ġìĸ´ëĸ ¤
+Ġê²ĥ ìĿĢ
+ØŃس ÙĨ
+Ġm ất
+à¸Ħ ูà¹Ī
+ãĥ¬ ãĥ¼
+ĠÐĶ Ð°
+Ġol ması
+Ġthu á»Ļc
+׳ ×Ĺ
+íĨ ł
+Ġsö yle
+ãģĿãģĨ ãģ§ãģĻ
+Ġت ÙĥÙĪÙĨ
+л ÑĥÑĩ
+׾ ×Ļ×ļ
+ĠØ£ ØŃد
+ли ÑģÑĮ
+ĠвÑģ его
+Ġ×Ķר ×ij
+Ġëª »
+o ÄŁ
+oÄŁ lu
+ĠìĦ ł
+Ġк аÑĢ
+à¸łà¸² à¸Ħ
+e ÅĦ
+Ġ à¸ģà¹ĩ
+Ġa ynı
+Ġb Ãł
+ãģªãĤĵ ãģ¦
+Ġ모 ëĵł
+ÙĤر ار
+ãģĹãģª ãģĦ
+ĠÐĴ о
+ĠÙĪÙĩ ÙĬ
+ни ки
+ãĤĮ ãģŁ
+Ġchu ẩn
+ר ע
+Ùģ Ø±ÙĬÙĤ
+ãĤĴ åıĹãģij
+ĠÄij úng
+б е
+׼ ×ķ×Ĺ
+п Ñĥ
+Ġ×ķ ×Ĵ×Ŀ
+×ŀ ׳×Ļ
+íĸ ¥
+צ ×Ļ×Ŀ
+à¸ĭ ิ
+Ùĩ ÙĨ
+н ем
+Ġ×ij×ij ×Ļת
+ر ع
+Ġ ส
+ĠÄIJ Ãł
+íķĺ ëĭ¤
+Ġ ấy
+×Ĺ ×ķ×ĵ
+×Ĺ×ķ×ĵ ש
+ĠÑĩеÑĢ ÐµÐ·
+Ñĥ л
+ĠB ình
+Ġê²ĥ ìĿĦ
+Ġ×Ĵ ר
+ä»ĺ ãģij
+×Ĺ׾ ×§
+Ġت ÙĦÙĥ
+à¹ĥส à¹Ī
+sz Äħ
+ÙĤ اÙħ
+د ÙĪØ±
+ĠÙģ ÙĤØ·
+Ġh ữu
+Ġмог ÑĥÑĤ
+Ġg á»įi
+Ġ×§ ר
+à¸Īะ มี
+ت ÙĤدÙħ
+Ġع بر
+Ġ׾×Ķ ×Ŀ
+ĠÑģам о
+ס ×ĵר
+Ġc Ãłng
+r ÃŃ
+Ġìŀ ¥
+ëĵ¤ ìĿĺ
+ĠÙĦ Ùĥ
+п оÑĢÑĤ
+Ġkh ả
+ĠÑģеб Ñı
+׳ ף
+Ġد ÙĪØ±
+Ġm ợ
+Ġcâ y
+Ġf ark
+Ġfark lı
+а ÑİÑĤ
+Ġtr á»±c
+wiÄĻks z
+Ġthu á»ijc
+Ġت ØŃت
+ت ÙĦ
+ов Ñĭе
+ëĤ ł
+Ġв ам
+بÙĦ غ
+Ġê°Ļ ìĿĢ
+íĮ IJ
+ÙĦ ب
+Ġnas ıl
+Ġод ин
+м ан
+ĠعÙĦÙĬ Ùĩا
+б и
+Ġפ ש×ķ×ĺ
+×ijר ×Ļ
+Ġש ׳×Ķ
+Ġëı Ħ
+ĠÄIJ ại
+Ġ×IJ×ķת ×Ŀ
+ĠاÙĦØŃ ر
+Ġб о
+à¸Ī ุà¸Ķ
+Ġr õ
+ĠdeÄŁi ÅŁ
+Ġëĭ ¨
+ĠÑģлÑĥÑĩ а
+ĠÑģлÑĥÑĩа е
+Ġ×IJ׳ ש×Ļ×Ŀ
+×ĵ ×£
+ש×ij ת
+Ġש׾ ׼×Ŀ
+Ġch ú
+nik ów
+Ġtan ı
+Ġcá o
+ĠÄij á
+Ġ×IJ ×ĵ×Ŀ
+Ġê° ķ
+Ġnhi á»ĩm
+Ġ׾ ס
+Ġ×Ľ×ª ×ij
+Ġ×Ķס פר
+ĠÄij Äĥng
+Ġë ijIJ
+à¸ľ ิ
+à¸ľà¸´ ว
+ج ا
+Ġê° IJ
+ر أ
+ست خدÙħ
+ãģ«ãģªãĤĬ ãģ¾ãģĻ
+Ġtá» ·
+×ĺ ×ķר
+г овоÑĢ
+Ġв оÑģ
+ĠÙħÙĨ Ùĩا
+иÑĢов аÑĤÑĮ
+ĠÄij ầy
+׳ ×Ĵ
+ĠÙħ ÙĪ
+ĠÙħ ÙĪÙĤع
+ר׼ ×Ļ
+ت Ùı
+ëª ¨
+Ġת ×ķ
+ÙĬا Ùĭ
+à¹ĥ à¸Ķ
+ãĤĬ ãģ¾ãģĻ
+à¸Ńยูà¹Ī à¹ĥà¸Ļ
+ĠØ£ ÙĪÙĦ
+ĠØ£ خرÙī
+Ġc ư
+ص ار
+×ŀ׊ש×ij
+б ÑĢа
+ÅĦ ski
+б ÑĢ
+ĠÙĬ Ùı
+à¸ģ ิà¸Ļ
+Ġch á»ijng
+Ùħ Ùı
+Ġ à¸Ħืà¸Ń
+Ġت ÙĨ
+t ÃŃ
+y Äĩ
+Ġm ạng
+Ùģ ÙĪ
+Ġdü nya
+×§ ר×IJ
+Ġ×§ ׾
+ĠØŃ اÙĦ
+c ÃŃa
+Ġà¹Ģ รา
+Ġר ×ķצ×Ķ
+Ġá p
+ë° ķ
+ا ÙĤØ©
+ни Ñİ
+Ġ×IJ ׾×ķ
+Ġ×ŀס ×ķ
+ãģ§ãģ¯ ãģªãģı
+Ġtr ả
+Ġ×§ שר
+mi ÅŁtir
+Ġl ưu
+Ġh á»Ĺ
+ĠбÑĭ ли
+Ġl ấy
+عÙĦ Ùħ
+Ġö zel
+æ°Ĺ ãģĮ
+Ġ×ĵ ר×ļ
+Ùħ د
+s ını
+׳ ×ķש×IJ
+r ów
+Ñĩ еÑĢ
+êµIJ ìľ¡
+ĠÐľ о
+л ег
+ĠV Ỽi
+วัà¸Ļ à¸Ļีà¹ī
+ÑİÑī ие
+ãģĬ ãģĻ
+ãģĬãģĻ ãģĻ
+ãģĬãģĻãģĻ ãĤģ
+ëı ħ
+Ġ×Ļ×Ķ ×Ļ×Ķ
+×ŀ ×ĺר
+Ñı ми
+Ġl á»±a
+ĠÄij ấu
+à¹Ģส ียà¸ĩ
+Ġt ương
+ëĵ ±
+ĠÑģÑĤ аÑĢ
+à¹ĥ à¸ļ
+ว ัà¸Ķ
+Ġİ stanbul
+Ġ à¸Īะ
+à¸ķ ลาà¸Ķ
+Ġب ÙĬ
+à¹ģà¸Ļ ะ
+à¹ģà¸Ļะ à¸Ļำ
+س اعد
+Ġب Ø£
+Ġki á»ĥm
+ØŃ سب
+à¸Ĭั à¹īà¸Ļ
+Ġ×ķ ×¢×ķ×ĵ
+ов ÑĭÑħ
+оÑģ нов
+Ġtr Æ°á»Łng
+צ ×ij×¢
+ĠÃŃ t
+Ġk ỹ
+cr é
+Ñı м
+êµ °
+ãģĮ ãģªãģĦ
+ÙĬÙĦ Ø©
+ãĥķ ãĤ£
+ر Ùī
+ĠÙĬ جب
+Ġ×IJ ×£
+Ġc á»±c
+ãĤīãĤĮ ãģŁ
+Ġ à¸ľà¸¹à¹ī
+Ġ à¸Ń
+lar ımız
+Ġkad ın
+Ġê·¸ ëŀĺ
+Ġê·¸ëŀĺ ìĦľ
+ĠëĺIJ ëĬĶ
+ĠÄij ả
+ĠÄijả m
+Ġ×IJ ×ķ×ŀר
+Ġy ếu
+ci Äħ
+ciÄħ g
+Ġt á»ij
+Ġש×IJ ׳×Ļ
+Ġdz iaÅĤa
+Ñī а
+ĠÄij Ãłn
+s ına
+ãģĵãĤĮ ãģ¯
+Ġ×ij ׾×Ļ
+Ġ×ij ×Ļשר×IJ׾
+л оÑģÑĮ
+Ġgi ữ
+ê° IJ
+ÑĢ Ð¾Ð½
+تج ار
+г лав
+в ин
+Ġh ạn
+Ġyapı lan
+ب س
+Ġ à¸ŀรà¹īà¸Ńม
+ê´Ģ 리
+mÄ±ÅŁ tır
+b ü
+r ück
+ĠBaÅŁkan ı
+ĠÙĦ ÙĬس
+Ġs Æ¡
+à¸Īัà¸ĩ หว
+à¸Īัà¸ĩหว ัà¸Ķ
+د اء
+Ġ×Ķ ×Ľ
+v ÃŃ
+ש ×IJר
+Ġh Æ°á»Łng
+Ġb óng
+ĠCh ÃŃnh
+Äħ c
+à¹Ģà¸ģีà¹Īยว à¸ģัà¸ļ
+Ġtá» ©
+Ġtứ c
+ĠÑĨ веÑĤ
+Ġt á»iji
+ĠnghÄ© a
+ÙĦا عب
+د ÙĦ
+Ġפע ×Ŀ
+h ör
+à¸Ĭ ุà¸Ķ
+à¸ŀ ู
+à¸ŀู à¸Ķ
+п аÑģ
+ĠÅŁ u
+Ġt Æ°á»Łng
+خار ج
+Ġâ m
+ĠинÑĤеÑĢ ÐµÑģ
+ен нÑĭÑħ
+×IJ ׳×Ļ
+بد أ
+ëĿ¼ ëĬĶ
+ì¹ ´
+æĸ¹ ãģĮ
+ли в
+Ġ à¸Ħà¸Ļ
+ער ×ļ
+à¸Ĥà¸Ńà¸ĩ à¸Ħุà¸ĵ
+п ад
+Ġc ạnh
+ĠëĤ ¨
+ĠÄij âu
+Ġbi á»ĥu
+ãĤĤ ãģĤãĤĭ
+׾ ×Ĵ
+Ġ สำหรัà¸ļ
+Ġxu á»ijng
+ס ×ķ
+Ġذ ات
+ĠÐľ е
+ع اÙĦÙħ
+×IJ ס
+ب ÙĬØ©
+ش ا
+и ем
+ĠNg ưá»Ŀi
+íĺ ij
+Ñģл ов
+Ġп а
+Ġm ẫu
+ĠпÑĢоÑĨ еÑģÑģ
+ĠNh Ãł
+пÑĢо из
+пÑĢоиз вод
+à¸łà¸²à¸¢ à¹ĥà¸Ļ
+Ġ à¸ļาà¸Ĺ
+×ŀ ׳×ķ
+ĠоÑĢг ан
+רצ ×ķ
+×ķ×ŀ ×Ļ×Ŀ
+Ġyaz ı
+Ġd ù
+ãĥ¬ ãĥ³
+ÙĪÙĦ ÙĬ
+ย ู
+Ġtr ò
+à¹Ģà¸ŀ ลà¸ĩ
+Ġ×ŀ ׾×IJ
+à¸ķ ล
+à¸ķล à¸Ńà¸Ķ
+ĠÄij ạt
+Ġ×Ĺ×ĵ ש
+p óÅĤ
+Ġ×ŀ ×ĵ×Ļ
+ujÄħ c
+×ŀ׳×Ķ ×ľ
+Ġש×ij ×ķ
+Ġ×Ķ×ŀש פ×ĺ
+Ġ×IJ ׾×Ķ
+ĠÙĪ Ø°ÙĦÙĥ
+à¹Ģà¸ŀ ราะ
+ĠÄijo Ãłn
+Ġíķ¨ ê»ĺ
+Ġd ục
+ش ت
+Ġ ula
+Ġula ÅŁ
+Ġqu ý
+Ġ×Ķ ×Ĵ×ĵ×ķ׾
+à¸ķัà¹īà¸ĩ à¹ģà¸ķà¹Ī
+Ġש ר
+Ø´ Ùĩد
+׳ ש×Ļ×Ŀ
+à¸ŀ ล
+رÙĪ Ø§
+ãĤĮ ãģ¦
+Ġн иÑħ
+Ġдел а
+ãģ§ãģį ãģªãģĦ
+ÅĤo ż
+×IJ ×Ĺר
+ì ½Ķ
+ãĤ¢ ãĥĥãĥĹ
+د Ù쨹
+Ġti á»ĩn
+Ġkh á»ı
+Ġkhá»ı e
+ĠاÙĦع اÙħØ©
+ãģ« ãģĤãĤĭ
+ĠÄij á»Ļc
+ì¡ ±
+Ġc ụ
+й ÑĤе
+Ġзак он
+ĠпÑĢо екÑĤ
+ìĸ ¸
+ÙĦ ØŃ
+ĠçalÄ±ÅŁ ma
+ãĤĴ ãģĻãĤĭ
+Ñħ и
+ع اد
+Ġ׳ ×ŀצ×IJ
+Ġר ×Ļ
+à¸Ńà¸Ńà¸ģ มา
+ĠT ôi
+Ġth ần
+ĠÙĬ ا
+ล าย
+Ġав ÑĤо
+Ġsı ra
+ĠÙĥ Ø«ÙĬر
+Ùħ ÙĬز
+ĠاÙĦع ÙĦÙħ
+æĸ¹ ãģ¯
+×ķ×¢ ×ĵ
+Ġобла ÑģÑĤи
+×Ļ׾ ×Ļ×Ŀ
+ãģĮ åĩº
+à¸ĺ ุ
+à¸ĺุ ร
+à¸ĺุร à¸ģิà¸Ī
+ÙĤت ÙĦ
+ר×IJ ×ķ
+Ġng u
+Ġngu á»ĵn
+Ġ มา
+Ġпл ан
+t ório
+Ġcu á»iji
+Ñģк ом
+ĠاÙĦÙħ اض
+ĠاÙĦÙħاض ÙĬ
+Ġ×ij×¢ ׾
+Ġר ×ij×Ļ×Ŀ
+Ġlu áºŃn
+Ùĥ ÙĪ
+à¸Ĺัà¹īà¸ĩ หมà¸Ķ
+в ан
+Ġtho ại
+à¹Ħ à¸Ń
+б иÑĢ
+ĠاÙĦ ض
+ت ا
+ĠÑĢ Ð¾Ð´
+ĠV Ãł
+×ŀ ×Ļף
+ĠбÑĭ ла
+к ами
+ĠÐĶ Ðµ
+t ık
+קר ×Ļ
+ĠeÄŁ itim
+ĠÙĥ بÙĬر
+ب Ùĥ
+ĠÙĦ ÙĪ
+в ой
+Ġ ãģĵãģ®
+ĠÑĤ ÑĢÑĥд
+my ÅĽl
+Ġs ư
+à¸ŀ ีà¹Ī
+Ġ à¹ģลà¹īว
+×¢ ×§
+Ġ×Ĺ×ijר ת
+ระ หว
+ระหว à¹Īาà¸ĩ
+×Ļ ×Ļ×Ķ
+ĠاÙĦÙĨ اس
+ün ü
+Ġ׾ ×ŀ×Ķ
+Ġch ương
+ĠH á»ĵ
+ار ت
+ãĤĪãģĨ ãģ§ãģĻ
+l á
+×§×Ļ ×Ļ×Ŀ
+æľ¬ å½ĵ
+æľ¬å½ĵ ãģ«
+ãģĵãĤĵ ãģª
+Ñģ ов
+Ġ×ķ ×Ĺ
+à¹Ģà¸ģ à¹ĩà¸ļ
+Ġк ÑĤо
+à¹Ĥร à¸Ħ
+ĠØ´ رÙĥØ©
+ع زÙĬ
+عزÙĬ ز
+Ø·ÙĦ ÙĤ
+п ÑĥÑģÑĤ
+Ùģ ØªØŃ
+ëŀ Ģ
+Ġhã y
+ض Ùħ
+ë¦ °
+åł´åIJĪ ãģ¯
+ãĤª ãĥ¼
+Ġh ắn
+Ġ×IJ ×ij×Ļ×ij
+Ġש׾×Ķ ×Ŀ
+Ġ×Ķ×Ļ ×Ļת×Ķ
+ĠاÙĦد ÙĪÙĦØ©
+ĠاÙĦ ÙĪÙĤ
+ĠاÙĦÙĪÙĤ ت
+ãģĤ ãģ¾ãĤĬ
+Ġta ÅŁÄ±
+İ N
+ע סק
+ãģ¦ ãģĦãģŁ
+Ġtá»ķ ng
+ĠاÙĦØ¥ ÙĨس
+ĠاÙĦØ¥ÙĨس اÙĨ
+ÑĢ ÐµÑĪ
+Ġg ái
+ĠÑĨ ен
+ĠÙģ ÙĤد
+Ùħ ات
+ãģķãĤĵ ãģ®
+Ġph ù
+×ĺ ×Ķ
+ĠÙĪØ§ÙĦ تÙĬ
+Ġب Ùĥ
+ìĿ´ ëĤĺ
+к Ñģ
+Ùħ ÙĬر
+Ġv ùng
+ĠاÙĦØ´ عب
+ĠNh ưng
+ãĥĢ ãĥ¼
+Ġ×Ĺ×Ļ ×Ļ×Ŀ
+ĠØ´ خص
+×§ ×ķ×ĵ
+ê² Ģ
+ע ש
+×¢ ×ķ׾×Ŀ
+צ ×ķר
+ع ÙĤد
+ĠiÅŁ lem
+Ġ×Ķ×ij ×IJ
+Ġd ưỡng
+à¸Ł รี
+Ġph ÃŃa
+ãģ®ä¸Ń ãģ§
+Ġп и
+Ġng Ãłnh
+ним а
+ĠÙĩ ÙĦ
+Ġ×ķ ×IJת
+ĠÄij áng
+é quipe
+ĠÑįÑĤ оÑĤ
+Ġgö rev
+ë§ ¤
+Ġqu ân
+å¼ķ ãģį
+æĻĤ ãģ«
+Ġب Ùħا
+×ŀ ×Ļת
+Ġü lke
+Ġ×ŀ×§ ×ķ×Ŀ
+×ij ף
+æ°Ĺ æĮģãģ¡
+Ġë§İ ìĿĢ
+Ġyük sek
+ÑĨ енÑĤÑĢ
+ĠÙħ جÙĦس
+ç§ģ ãģ®
+ÙĤد ر
+Ġë¶Ģ ë¶Ħ
+Ġì° ¨
+خر ج
+ãģĭ ãģªãĤĬ
+ë³´ ëĭ¤
+Ġ×ŀ ×Ļ×ĵ×¢
+peÅĤ ni
+Ġx á»Ń
+ìĹIJìĦľ ëĬĶ
+ĠباÙĦ Ùħ
+ĠÙĪ Ùħا
+ĠÑįÑĤ ой
+ب ÙĬÙĨ
+n ü
+ØŃ ز
+ØŃز ب
+ĠÑĢабоÑĤ а
+ĠNh áºŃt
+ÙĦ اء
+Ġëĵ ¤
+Ġëĵ¤ ìĸ´
+ãĤĦãģĻ ãģĦ
+×Ĺ×ĸ ×§
+Ġ×Ķ×Ĺ ×ijר×Ķ
+п иÑĤ
+ãģĭãĤī ãģ®
+Ġë§IJ ìĶĢ
+Ġפ ×ķ
+ÙĦ Ùİ
+à¹Ģà¸ķà¹ĩ ม
+ĠÐļ о
+Ġm ówi
+Ġt ÃŃn
+ר×Ĵ ש
+פר ק
+Ġtr ạng
+ĠÐŀ н
+×Ĺ ×ķ×¥
+ĠعÙĨد Ùħا
+Ġب ر
+使 ãģĦ
+Ġr á»Ļng
+ëĮĢ ë¡ľ
+íĪ ¬
+Ġktóry ch
+в ид
+ลูà¸ģ à¸Ħà¹īา
+Ġmog Äħ
+Ġש ×Ĺ
+×ij ×Ĺר
+ãĥĸ ãĥŃãĤ°
+ĠTh Ãłnh
+Ġ×Ķ ×¨×Ļ
+ĠÑģÑĤ аÑĤÑĮ
+ĠH á»Ļi
+à¸ļ à¹īาà¸ĩ
+çī¹ ãģ«
+ĠÄIJ ức
+èĢħ ãģ®
+×¢ ×ŀ×ķ×ĵ
+×ĺר ×Ķ
+Ð ¥
+ĠÙħ Ùħا
+Ġe ÅŁ
+ĠнеобÑħодим о
+ник ов
+Ġüzer inde
+a ÅĤa
+Ġchá»ĭ u
+ĠاÙĦ دÙĬÙĨ
+أخ بار
+ĠÄij au
+ãģĮ å¤ļãģĦ
+jÄħ cych
+د Ø®ÙĦ
+ları nd
+larınd an
+Ġs ẻ
+à¸ŀิ à¹Ģศ
+à¸ŀิà¹Ģศ ษ
+ת ף
+t ıģı
+Ġlu áºŃt
+ĠÅŀ e
+ãĤ« ãĥ¼
+ãģ® ãģĤãĤĭ
+Ġ×Ķ×IJ תר
+ĠاÙĦØ¢ ÙĨ
+ıld ı
+Ġá o
+ĠнаÑĩ ал
+Ġvi á»ĩn
+Ġ×ij×¢ ×ķ׾×Ŀ
+з наÑĩ
+×Ļ×ĺ ×Ķ
+к ам
+ĠÐĺ з
+à¹Ģà¸Ĥ ียà¸Ļ
+à¸Ļ à¹īà¸Ńà¸ĩ
+ÑĤ ÑĢо
+à¹Ģ à¸Ł
+Ġжиз ни
+Ġ สà¹Īวà¸Ļ
+Ġv áºŃn
+Ġê´Ģ 볨
+Ġl âu
+ס ×ĺר
+ק ש
+س ÙĬر
+Ġ×IJ×ķת ×Ļ
+Ġm ôi
+ائ ب
+Ġо ÑģÑĤа
+Ġm ón
+Ġ×ij ×ŀ×§×ķ×Ŀ
+Ġد اخÙĦ
+Ġ×IJ ×ķר
+Ġв аÑģ
+Ùĥ Ø´Ùģ
+ìĺ ¨
+à¸ĸ à¹Īาย
+Ġkullan ıl
+Ġt ô
+ãģ« ãĤĪãĤĬ
+ĠëĺIJ íķľ
+Ġ×¢×ij×ķ×ĵ ×Ķ
+Ġri ê
+Ġriê ng
+Ġyak ın
+ز ا
+Å »
+×IJ ×ķ׼׾
+شار Ùĥ
+Ġб еÑģ
+× ´
+Ġا بÙĨ
+ĠTá»ķ ng
+ÙĨ ظ
+ÅĽwi ad
+ãĤµ ãĥ¼
+ห าย
+ĠG ün
+Ġhakk ında
+à¹Ģà¸Ĥà¹īา มา
+ز ÙĨ
+ĠÐł о
+Ġbi á»ĥn
+ãģ© ãģĵ
+Ùģ Ø¹ÙĦ
+ز ع
+פר ×ĺ
+Ġ×Ķ ×Ł
+Ø£ ÙĩÙĦ
+Ġth ất
+ØŃ ÙħÙĦ
+Ñĩ Ñĥ
+ĠìĤ¬ ìĭ¤
+ì° ¸
+ĠìľĦ íķ´
+ÙĪ Ø¸
+ĠÐŁ од
+Ġkho ản
+ÑĤ ен
+ĠÙģ Ø§ÙĦ
+Ñģ ад
+à¸Ļ à¸Ńà¸Ļ
+ĠاÙĦسعÙĪØ¯ ÙĬØ©
+" ØĮ
+ĠاÙĦ ÙĴ
+ãĤī ãģļ
+Ġto án
+Ġch ắc
+׼ ×Ļר
+m éd
+méd ia
+ز ÙĪ
+Ġyan ı
+פ ׳×Ļ×Ŀ
+ØŃ ظ
+Ġб еÑģп
+ĠбеÑģп лаÑĤ
+ĠбеÑģплаÑĤ но
+ĠØ£ ÙħاÙħ
+à¸Ń าย
+à¸Ńาย ุ
+ר שת
+Ġg á»ĵ
+Ġgá»ĵ m
+Ġu á»ijng
+ص ب
+k ır
+ãĥij ãĥ¼
+Ġ׾×ĵ עת
+Ġк ÑĥпиÑĤÑĮ
+׾ ×ķ×Ĺ
+ÙĪØ¶ ع
+ÙĤÙĬ Ùħ
+à¸Ľ า
+ж ив
+à¸Ķ ิà¸Ļ
+×IJ ×ķפ
+à¹Ģล à¹ĩà¸ģ
+ãĥĥ ãĥī
+иÑĩеÑģки Ñħ
+ĠCh á»§
+кÑĢ Ð°Ñģ
+ÙĪ ØµÙĦ
+p ÅĤat
+м оÑĢ
+Ġ×Ķ×IJ ×ķ
+à¸Ń ิà¸Ļ
+Ġíķľ êµŃ
+гÑĢ Ðµ
+Ġìłľ ê³µ
+ì° ½
+Ġê°ľìĿ¸ ìłķë³´
+Ġngh á»ĭ
+à¸ĭ า
+ØŃس اب
+Ġby ÅĤa
+ÙħÙĦ Ùĥ
+иÑĩеÑģки е
+Ġb ác
+ض ØŃ
+ê¸ ¸
+ש ×ŀ×¢
+Ġìĸ´ëĸ »
+Ġìĸ´ëĸ» ê²Į
+ìĽ Į
+ات Ùĩ
+à¹Ĥรà¸ĩ à¹ģ
+à¹Ĥรà¸ĩà¹ģ รม
+خد ÙħØ©
+ĠÐł а
+׼×ķ׾ ×Ŀ
+×ŀש ×Ĺ×§
+ĠÙĪ ÙĥاÙĨ
+ס ×ķ×£
+ĠاÙĦØŃÙĥÙĪÙħ Ø©
+Ġ×ij ×ĺ
+Ġtr áºŃn
+Ġ×Ķ×¢ ×ķ׾×Ŀ
+ĠÃŃ ch
+t Äħ
+ש×ŀ ×ķ
+Ġ×Ķר×IJש ×ķף
+Ġíķĺ ê³ł
+ãģķ ãĤī
+ãģķãĤī ãģ«
+ãģ« ãģĹãģ¦
+Ġ à¸ľà¸¡
+ãģ® ãĤĪãģĨãģª
+ĠÙĪ ÙĤت
+ãĥį ãĥĥãĥĪ
+ÙĦ عب
+ÙĪ Ø´
+ìĺ ¬
+Ġ หาà¸ģ
+Ġm iaÅĤ
+à¸Ĺ à¸Ńà¸ĩ
+иÑĤ а
+ا صر
+ил ÑģÑı
+з е
+à¸Ľà¸£à¸° มาà¸ĵ
+ãģĿãĤĮ ãģ¯
+Ġb ır
+Ġbır ak
+صÙĨ اع
+Ð ®
+ش عر
+Ġ׳ ×Ĵ×ĵ
+Ġب سبب
+ãĥĿ ãĤ¤
+ãĥĿãĤ¤ ãĥ³ãĥĪ
+ĠاÙĦج ÙĪ
+ĠнеÑģк олÑĮко
+Ġki ếm
+Ùģ Ùİ
+Ġض د
+×ij×Ļ×ĺ ×ķ×Ĺ
+تاب ع
+ÙĨ ز
+ĠB ản
+Ġaç ıkl
+Ġaçıkl ama
+Ġ à¸Ħุà¸ĵ
+à¸Ĺ า
+ÅĤ ów
+ط ب
+ÙĨ ØŃÙĨ
+Ġ×ŀ×§ ×ķר
+Ġİ s
+Ġдом а
+Ġ วัà¸Ļ
+Ġd Ãłnh
+Ñı н
+ми ÑĢ
+Ġm ô
+ĠvÃł ng
+ص اب
+s ının
+à¸Ħ ืà¸Ļ
+خ بر
+×ĸ׼ ×ķ
+Ġ×ŀ ש×Ķ×ķ
+m ü
+Ġкомпани и
+Ġ×Ķ×¢ ×Ļר
+ĠÙĥ ÙĪ
+ÙĤÙĦ ب
+ĠlỼ p
+и ки
+׳ ×ij
+à¹Ĥ à¸Ħร
+à¹Ĥà¸Ħร à¸ĩ
+à¹Ĥà¸Ħรà¸ĩ à¸ģาร
+×ŀ×ķ×¢ ×ĵ
+ÑıÑĤ ÑģÑı
+หลัà¸ĩ à¸Īาà¸ģ
+ени Ñİ
+Ġש ×¢
+Ġb Æ°á»Ľc
+ãĥ¡ ãĥ¼ãĥ«
+ãĤĦ ãĤĬ
+Ġ×Ļ×ķ×ĵ ×¢
+Ġê´Ģ íķľ
+ĠاÙĦØ£ Ùħر
+Ġböl ge
+ĠÑģв ой
+ÙĦ س
+Ġ×ŀ×Ļ ×ķ×Ĺ×ĵ
+ĠëĤ´ ìļ©
+ĠØ£ جÙĦ
+ĠÄIJ ông
+Ġ×ŀ ×ł×ª
+Ġìĭľ ê°Ħ
+Ùĥ Ùİ
+ãģ¨ãģĦãģĨ ãģ®ãģ¯
+Ġnale ży
+تÙĨظ ÙĬÙħ
+ĠÑģозд а
+Ġph é
+Ġphé p
+ãģ§ãģį ãģ¾ãģĻ
+Ġع ÙĦÙħ
+大ãģį ãģª
+ãĤ² ãĥ¼ãĥł
+í ħĮ
+Ġ׼×ķ׾ ׾
+ĠинÑĤеÑĢ Ð½ÐµÑĤ
+ĠT ừ
+ãģ¨ ãģªãĤĭ
+ز اÙĦ
+Ġktóry m
+Ġnh é
+ìĪ ľ
+н ев
+д еÑĢ
+ãĤ¢ ãĥĹãĥª
+i á»ĩu
+×ij ×Ļ׾
+Ġت س
+ĠÄIJ ây
+ĠاÙĦØ® اصة
+Ġà¹Ģ à¸Ĭ
+Ġà¹Ģà¸Ĭ à¹Īà¸Ļ
+ص اد
+Ġd ạng
+س عر
+Ġש ×Ļ×ŀ×ķש
+×Ĵ ×Ļ×Ŀ
+ãģĮãģĤ ãģ£ãģŁ
+п ÑĢов
+пÑĢов од
+Ġ×IJ ×Ļ׳×ķ
+Ġ׾ ר×IJ
+Ġ׾ר×IJ ×ķת
+ĠØ£ Ù쨶ÙĦ
+ĠØŃ ÙĦ
+ĠØ£ بÙĪ
+ê° ķ
+Ġì§ ij
+ãģ® ãĤĪãģĨãģ«
+Ġפ ׳×Ļ
+ס ×Ļ×Ŀ
+ĠÙĪÙĩ ذا
+Ġka ç
+Ġé én
+Ġê± ´
+ë° Ķ
+Ñĥ з
+à¸Ĥà¸Ńà¸ĩ à¹Ģรา
+i ÅĤ
+ĠÐľ Ñĭ
+Ġch ết
+ĠاÙĦØ« اÙĨÙĬ
+×IJ ×§
+Ġ×ķ ×¢×ľ
+ĠاÙĦØ· ب
+×ij×ĺ ×Ĺ
+Ġج دÙĬدة
+Ġع دÙħ
+ع ز
+สิà¹Īà¸ĩ à¸Ĺีà¹Ī
+ãģĻ ãĤĮãģ°
+ĠÄij ô
+ì£ ł
+د ÙĤ
+н омÑĥ
+Ġk á»ĥ
+ãĤ¢ ãĥ³
+å¤ļãģı ãģ®
+à¸Ľà¸£à¸° à¸ģ
+à¸Ľà¸£à¸°à¸ģ à¸Ńà¸ļ
+פע×Ļ׾ ×ķת
+ĠÑģÑĤ ол
+may ı
+ãģ¤ ãģĦ
+Ġyılı nda
+Ġ à¸Īึà¸ĩ
+koÅĦ cz
+ĠTh ông
+Ġак ÑĤив
+н ÑģÑĤ
+нÑģÑĤ ÑĢÑĥ
+ĠÃĸ z
+Ġת ×ŀ×Ļ×ĵ
+ĠÙĥ ÙĨت
+Ñģ иÑģÑĤем
+pr és
+prés ent
+Ġn â
+Ġnâ ng
+gÅĤ os
+ĠÙĪØ² ÙĬر
+ØŃ صÙĦ
+Ġиме еÑĤ
+ØŃ رÙĥØ©
+à¸ŀ à¹Īà¸Ń
+ãĤĴ ãģĬ
+Ġاست خداÙħ
+×IJ×Ļר ×ķ×¢
+ä»ĸ ãģ®
+Ġש×Ķ ×Ŀ
+ãģĹãģŁ ãĤī
+ש×ŀ ×Ļ
+Ñģ ла
+m ı
+Ġbaz ı
+Ġíķĺ ì§Ģë§Į
+×ĵ ׾
+Ġyapt ıģı
+ãĥĬ ãĥ¼
+׾ ×Ļ׾×Ķ
+ãģ¨ãģĦ ãģ£ãģŁ
+änd ig
+ĠÅŁ a
+ĠÙģÙĬ Ùħا
+иÑĤ елÑı
+×ŀ ×ķש
+à¸Ĥ à¸Ńà¸ļ
+l ük
+Ġh á»ĵi
+Ġëª ħ
+ĠاÙĦÙĥ Ø«ÙĬر
+צ ×IJ
+Ġhaz ır
+طر Ùģ
+ا ÙĬا
+ĠÄij ôi
+ен д
+ÙĦ غ
+×Ĺ ×ĸ×ķר
+ĠвÑģ ег
+ĠвÑģег да
+ëIJĺ ê³ł
+×ĵ ×ķ×ĵ
+ан а
+د ÙĪÙĦØ©
+Ġho ạch
+ع ÙĦا
+عÙĦا ج
+Ġ×ķ ×¢×ĵ
+×Ķ ×Ŀ
+ки й
+ÙĦ ÙIJ
+Ġ×¢ ׾×Ļ×ķ
+ÑİÑī ий
+Ġng á»§
+صÙĨ ع
+ĠاÙĦع راÙĤ
+à¸ķà¹Īà¸Ń à¹Ħà¸Ľ
+ãģŁãģı ãģķãĤĵ
+Ġph ạm
+ÙĦ اÙĨ
+ات Ùĩا
+Ġbö yle
+تÙĨ ÙģÙĬ
+تÙĨÙģÙĬ ذ
+Ġש×Ķ ×Ļ×IJ
+Ñģ Ñĥ
+ย าว
+Ġש ×ķ׳×Ļ×Ŀ
+Ġ×ŀ ×ķ׾
+ĠÑģ ил
+Ġ×IJ×Ĺר ×Ļ×Ŀ
+Ġph á»§
+ÙĤØ· ع
+ĠTh á»§
+à¸Ľà¸£à¸°à¹Ģà¸Ĺศ à¹Ħà¸Ĺย
+ÙĨ ÙĤ
+ĠÄijo ạn
+Ġب Ø¥
+п ÑĢедел
+×ķת ×ķ
+Ġy arı
+пÑĢ Ðµ
+ĠczÄĻ ÅĽci
+ØŃ ÙĥÙħ
+×ķ׳ ×Ļת
+פע ׾
+ãĤĴ ãģĹãģ¦
+Ġktó rzy
+׾ ×Ŀ
+ĠÄIJi á»ģu
+ĠкоÑĤоÑĢ Ð°Ñı
+ĠìĿ´ ìĥģ
+ãģĤ ãģ£ãģŁ
+Ġ×ŀ×ĵ ×ķ×ijר
+פ ×ķ×¢×ľ
+d ım
+éĢļ ãĤĬ
+ĠбÑĥд ÑĥÑĤ
+à¹Ģวà¹ĩà¸ļ à¹Ħà¸ĭ
+à¹Ģวà¹ĩà¸ļà¹Ħà¸ĭ à¸ķà¹Į
+ا خر
+×Ĺ ×Ļ׾
+Ġ×Ļ ×ľ
+Ġ×Ļ׾ ×ĵ×Ļ×Ŀ
+×Ĺ ×Ļפ
+×Ĺ×Ļפ ×ķש
+Ġd òng
+Ġש ×ĸ×Ķ
+ÑĮ е
+ãģĤ ãģ¨
+ìŀIJ ê°Ģ
+×IJ ×ĵ
+Ġü z
+Ġüz ere
+ظ ÙĦ
+Ġ×IJ ×ķ׾×Ļ
+Ġ×ij ×Ļ×ķ×Ŀ
+ÙĦ ات
+Ġm ê
+ì¹ ¨
+تØŃ د
+تØŃد Ø«
+ĠØ® اصة
+Ġب رÙĨ
+ĠبرÙĨ اÙħج
+ĠH Ãłn
+×Ĺ ×¡
+ĠÙĪ ÙĦÙħ
+×¢ ×Ŀ
+Ġm ı
+à¸Ł ัà¸ĩ
+ש ×¢×Ķ
+ÙĪÙģ ÙĤ
+ס ×ij×Ļר
+алÑĮ нÑĭй
+×Ĺש ×ķ×ij
+Ġn Ãłng
+ë³ ¼
+ĠкоÑĤоÑĢ ÑĭÑħ
+Ġ×Ĺ ×ķ×§
+t ör
+ĠлÑĥÑĩ ÑĪе
+ãĥij ãĥ³
+ลà¹Īา สุà¸Ķ
+Ġج دÙĬد
+ÙĬد Ø©
+à¸Ĺ รà¸ĩ
+ãĤĪãĤĬ ãĤĤ
+ÙĦ ÙĦ
+ãĤĤ ãģ£ãģ¨
+ש×ĺ ×Ĺ
+Ġ×ķ ×IJ×Ļ
+Ġgi á»ijng
+Ø¥ ضاÙģ
+ק ת
+ë§ Ŀ
+Ġzosta ÅĤ
+ÑĢ Ð¾Ð·
+×Ļפ ×Ļ×Ŀ
+Ġ׼׾ ׾
+ת×ķ׼ ף
+dıģ ını
+ÙĤ سÙħ
+ĠÑģ ÑĩиÑĤ
+ĠÑģÑĩиÑĤ а
+×ĺ ×ķת
+Ġ ưu
+ĠØ¢ ÙĦ
+Ġм ом
+Ġмом енÑĤ
+ĠاÙĦتع ÙĦÙĬÙħ
+×¢×ľ ×ķת
+Ġch ữa
+Ġy ön
+Ġtr Ãł
+ĠØŃ ÙĬÙĨ
+à¸ĭ ั
+ĠC á
+×¢ ×ĸ
+ĠاÙĦØ£ ÙħÙĨ
+c ÃŃ
+Ġv á»ijn
+Ġ à¸Ļาย
+об ÑĢа
+×§ ×IJ
+Ġthi ếu
+ãĥŀ ãĥ¼
+ส วà¸Ļ
+Ġg á»Ń
+Ġgá»Ń i
+Ġê ¹
+Ġê¹ Ģ
+Ġthi á»ĩn
+ÙĤ ع
+w ÄĻ
+Ġн ам
+ÑĤ ол
+Ġs ân
+ס ×ķ×Ĵ
+Ġgeç ir
+ÑĤ он
+ев а
+ĠÙĪ Ø¶Ø¹
+Ġع شر
+Ñģ ло
+à¸Ī ัà¸ļ
+ãĤ· ãĥ¼
+ãĤĤ ãģĤãĤĬãģ¾ãģĻ
+Ġv ẻ
+ĠÄIJ á»ĥ
+ر Ù쨹
+ĠاÙĦØ£ÙĪÙĦ Ùī
+ÑĤ аÑĢ
+ãģªãģı ãģ¦
+Ùħ Ùİ
+qu ÃŃ
+×¢×ł×Ļ ×Ļ׳
+г ен
+Ġh ôm
+à¸Ī า
+Ġnh Ỽ
+ĠاÙĦع ربÙĬ
+×IJ ף
+Ġl á»Ļ
+Ġje ÅĽli
+à¹Ģà¸Ĺà¹Īา à¸Ļัà¹īà¸Ļ
+ĠØ£ÙĨ Ùĩا
+Ġt uy
+Ġtuy á»ĩt
+Ġت ص
+Ġتص ÙĨÙĬ
+ĠتصÙĨÙĬ Ùģ
+Ġê·¸ëŁ¬ ëĤĺ
+о ÑĨен
+à¸ģิà¸Ī à¸ģรรม
+ãĤĦ ãģ£ãģ¦
+Ġkh á»ıi
+Ġl á»ĩ
+ĠاÙĦÙħج تÙħع
+à¸Ńาà¸Ī à¸Īะ
+à¸Īะ à¹Ģà¸Ľà¹ĩà¸Ļ
+ов Ñĭй
+ר ×Ŀ
+ร à¹īà¸Ńà¸Ļ
+ש ×ŀש
+人 ãģ«
+Ġüzer ine
+פר ×Ļ
+du ÄŁu
+Ñĩ ик
+Ġmù a
+Ġ×ŀת ×ķ×ļ
+Ġc áºŃp
+Ġت ارÙĬØ®
+×ij׾ ת×Ļ
+Ġì¢ Ģ
+ÙĦ ع
+ب اÙĨ
+Ġch út
+Ġ×Ķ×ĸ ×ŀף
+n ée
+ĠLi ên
+ĠÙĦÙĦ Ø£
+ØŃد ÙĪØ¯
+Ġ×¢ ׼ש×Ļ×ķ
+в оз
+Ġyapt ı
+Ġоб о
+à¹ĥหà¹ī à¸ģัà¸ļ
+Ġ×ij×Ķ ×Ŀ
+ãģı ãģ¦
+ر أس
+ĠÑģÑĢед ÑģÑĤв
+ĠB Ãłi
+ãģĵãģ¨ ãģ«
+ĠìĤ¬ íļĮ
+Ġ모 ëijIJ
+×ij ×IJ
+Ġtr ắng
+ĠاÙĦبÙĦ د
+ĠHo Ãłng
+ли бо
+ĠдÑĢÑĥг иÑħ
+İ R
+Ñĥм а
+ĠJe ÅĽli
+ãĤĤ ãģĹ
+Ġv òng
+Ġ×IJתר ×Ļ×Ŀ
+ĠÄij á»įc
+Ġв оÑĤ
+ãģł ãģĮ
+ë° °
+à¸Ķู à¹ģล
+Ġ×ŀ ׼׾
+ìĹIJ ëıĦ
+г аз
+Ġ׳×ķס פ×Ļ×Ŀ
+ãģĵãģ¨ ãģ§
+Ġت ÙĪ
+ãģ§ ãģĤãĤĬ
+à¸Ļั à¹Īà¸ĩ
+ĠможеÑĤ е
+sz ÄĻ
+ãģ® ãģł
+ĠÙħÙĨ Ùĩ
+Ġb á»ķ
+Ġb üt
+Ġbüt ün
+ë³´ ê³ł
+Ġch á»ĵng
+à¹ģà¸Ī à¹īà¸ĩ
+ĠV ì
+ĠØŃ ر
+Ġgi ản
+ĠÙħ دÙĬÙĨØ©
+تط بÙĬÙĤ
+à¸Ī ิ
+æĹ¥ ãģ®
+б ил
+à¸ģ à¸Ńà¸ĩ
+ê³ ³
+ĠØ£ Ùħا
+ìĨ IJ
+Ġtr ái
+ĠвÑģ ем
+Ġس ÙĨØ©
+ĠÑģай ÑĤ
+Ġг оÑĤов
+п Ñĭ
+ĠëIJ ł
+ĠاÙĦØ® Ø·
+ĠاÙĦرئÙĬس ÙĬØ©
+Ġíķ ©ëĭĪëĭ¤
+ĠìķĦëĭĪ ëĿ¼
+ĠìĿ´ ëłĩ
+ĠìĿ´ëłĩ ê²Į
+) ØĮ
+h ält
+ĠØ£ Ùħر
+Ġع Ùħر
+à¸ģà¹ĩ à¸Īะ
+Ġ à¸Ĺำà¹ĥหà¹ī
+Ġc ân
+Ġ×ij ׾
+Ġ×ij׾ ×ij×ĵ
+פ סק
+ĠÙĬ ÙĤÙĪÙĦ
+н ÑĥÑĤÑĮ
+à¹ģ à¸Ħ
+Ġ×§ צת
+Ġn ằm
+Ġh òa
+bilit Ãł
+ĠìĹĨ ëĭ¤
+Ġ׼ פ×Ļ
+ÑĢ Ð¾Ð¶
+лаг а
+Ġ×Ķש ×Ļ
+ĠNgo Ãłi
+ĠÙĪ Ø¬
+ĠÙĪØ¬ ÙĪØ¯
+ĠìľĦ íķľ
+Ġus ÅĤug
+Ġtu ần
+d ź
+×ŀ ×ķף
+ĠاÙĦع دÙĬد
+Ġch ẳng
+สุà¸Ĥ à¸łà¸²à¸ŀ
+Ġ×ij ×ĵר×ļ
+ĠÑģеб е
+ĠìŀĪ ìĿĦ
+ĠاÙĦØŃ اÙĦ
+Ġd á
+Ġc ưá»Ŀi
+Ġnghi ên
+ie ÅĦ
+ĠD ương
+ï¼ ħ
+ش د
+ãģĦãģ¤ ãĤĤ
+ĠвÑĭб оÑĢ
+Ġc á»Ļng
+ש ×Ļ׳×ķ×Ļ
+Ġch ạy
+Ġ×ij×¢ ׾×Ļ
+اخ بار
+íķĺ ë©°
+ż Äħ
+ج از
+Ġ׳ ר×IJ×Ķ
+ศ ู
+ศู à¸Ļ
+ศูà¸Ļ ยà¹Į
+×Ĵ ×¢
+Ġ×¢ ×ĵ×Ļ
+Ġ×¢×ĵ×Ļ ×Ļף
+بر ا
+ÑĨи й
+ĠÄIJ á»ĵng
+ÙĤ اÙĨÙĪÙĨ
+ĠÄij ứng
+ãģĹãģŁ ãĤĬ
+Ġ×Ĺ×Ļ ×Ļ
+Ġë IJľ
+ĠëIJľ ëĭ¤
+Ġм еждÑĥ
+à¸ŀวà¸ģ à¹Ģà¸Ĥา
+ĠB ắc
+ล ำ
+ë° ±
+ĠíĻ ķ
+มาà¸ģ ม
+มาà¸ģม าย
+бан к
+à¸Ńา à¸ģาร
+Ġh Ãł
+Ġ׾ ׳
+à¸Ń à¸Ń
+Ġë°Ķ ë¡ľ
+л ом
+m ática
+ĠØŃ د
+اب ت
+à¸Ĺีà¹Ī à¸Ļีà¹Ī
+Ġco ÅĽ
+ÙģÙĬ دÙĬ
+ÙģÙĬدÙĬ ÙĪ
+ĠмеÑģÑĤ о
+Ġph út
+มาà¸ģ à¸ģวà¹Īา
+×IJ פ
+ب ÙIJ
+ĠPh ú
+ì± Ħ
+ĠÙĪ Ø³ÙĦÙħ
+à¸Īี à¸Ļ
+поÑĤ ÑĢеб
+Ġ×Ĺ×ĵ ש×ķת
+Ø´ ÙĪ
+Ġעצ ×ŀ×ķ
+ĠعÙħÙĦ ÙĬØ©
+à¸Ħุà¸ĵ à¸łà¸²à¸ŀ
+ãģ¾ãģĻ ãģĮ
+دع ÙĪ
+طر ÙĤ
+à¹Ħมà¹Ī à¸ķà¹īà¸Ńà¸ĩ
+ë² Ķ
+ìĬ ¹
+Ġk ÃŃch
+ĠìĹĨ ëĬĶ
+ĠÑĤ ам
+ĠÙĨ ØŃÙĪ
+ĠاÙĦÙĤ اÙĨÙĪÙĨ
+×Ĺ ×ķ×Ŀ
+Ġk ız
+Ġ×ĵ ×Ļף
+ĠвÑĢем ени
+ãģ£ãģŁ ãĤĬ
+ĠØ´ Ùĩر
+ĠìĦľ ë¹ĦìĬ¤
+×¢ ש×Ķ
+Ġgi ác
+ĠاÙĦسÙĦ اÙħ
+Ġ×IJ ש
+ĠполÑĥÑĩ а
+à¸Īัà¸Ķ à¸ģาร
+к оÑĢ
+Ġ×Ķ×ĺ ×ķ×ij
+ราย à¸ģาร
+주 ìĿĺ
+à¹ģà¸ķà¹Ī ละ
+Ġê·¸ëŁ° ëį°
+à¸Ĺีà¹Ī à¹Ģà¸Ľà¹ĩà¸Ļ
+Ġת ×ķ×ļ
+بÙĬ اÙĨ
+Ð Ļ
+oÅĽci Äħ
+ÑĤ ок
+ĠÃ Ķ
+ĠÃĶ ng
+à¹Ħมà¹Ī à¹ĥà¸Ĭà¹Ī
+ãģ¿ ãģ¦
+ÐŁ о
+ĠЧ ÑĤо
+íĻ ©
+×ĺ ×ij×¢
+меÑĤ ÑĢ
+Ġ×ij ×ŀ×Ķ
+Ġ×ij×ŀ×Ķ ×ľ
+Ġ×ij×ŀ×Ķ׾ ×ļ
+Ñĩ ÑĮ
+×§ ש×Ķ
+з нак
+знак ом
+uj ÄĻ
+×Ļצ ר
+ĠاÙĦÙħ ÙĦÙĥ
+ı yla
+×IJ×ŀ ת
+à¸Ľ ิà¸Ķ
+×IJ ×Ĺ×ĵ
+ر اد
+Ġm áºŃt
+ëĭ¤ ëĬĶ
+Ġl ạnh
+ש׾ ×ķש
+ØŃ دÙĬØ«
+ت ز
+å¹´ ãģ®
+Ġк ваÑĢ
+ĠкваÑĢ ÑĤиÑĢ
+ä½ľ ãĤĬ
+رÙĪ Ø¨
+ов ан
+ĠТ е
+à¸Īำ à¸ģ
+à¸Īำà¸ģ ัà¸Ķ
+ب اط
+×Ĵ ת
+Ġм аÑĪ
+ĠмаÑĪ Ð¸Ð½
+×Ļצ ×Ķ
+ãģ» ãģ¨
+ãģ»ãģ¨ ãĤĵãģ©
+ÃŃ do
+ĠÑı зÑĭк
+à¸ļ ิà¸Ļ
+สà¸ĸาà¸Ļ à¸Ĺีà¹Ī
+ĠìĹ ´
+ãĤ¦ ãĤ§
+Ġc Ãł
+п ан
+åı£ ãĤ³ãĥŁ
+Ġر د
+اÙĤ ت
+ĠÙĥ ب
+ĠÙĥب ÙĬرة
+ÑģÑĤ ал
+ש×ŀ ×Ĺ
+pos ición
+ĠÙħÙĦÙĬ ÙĪÙĨ
+ĠìĿ´ ìķ¼
+ĠìĿ´ìķ¼ ê¸°
+Ġh út
+ĠÅĽw iat
+Ġë°© ë²ķ
+ĠÑģв еÑĤ
+Ġвиде о
+ĠاÙĦÙĨ ظاÙħ
+Ġtr á»Ŀi
+ĠëĮĢ íķ´ìĦľ
+ר ×ŀת
+ت داÙĪÙĦ
+×ķר ×ĵ
+ת ×ŀ
+ת×ŀ ×ķ׳×ķת
+Ġ×ŀ ף
+Ġдв а
+Ġ×Ķ×§ ×ķ
+æĹ¥ ãģ«
+Ġ×Ķ×Ĵ ×Ļ×¢
+à¹Ģà¸ŀิà¹Īม à¹Ģà¸ķิม
+Ùħار س
+Ġê²ĥ ìŀħëĭĪëĭ¤
+ãģªãģĦ ãģ¨
+Ġnhi á»ĩt
+ëIJ ©ëĭĪëĭ¤
+Ġ×ij׳ ×ķש×IJ
+Ġê°Ģ ìŀ¥
+Ġv ợ
+ĠÄij óng
+צ×Ļ׾ ×ķ×Ŀ
+ê´Ģ ê³Ħ
+в аÑı
+×IJ ×Ļ×ĸ
+×IJ×Ļ×ĸ ×Ķ
+ĠÙĨ ظاÙħ
+ÙħØŃ اÙ쨏
+Ġt ải
+기 ëıĦ
+à¸Ľà¸±à¸Ī à¸Īุ
+à¸Ľà¸±à¸Īà¸Īุ à¸ļัà¸Ļ
+׼ ×ĵ×ķר
+ĠìķĦ ìĿ´
+׼׳ ×Ļס
+à¹Ģ à¸ķร
+à¹Ģà¸ķร ียม
+Ġngo ại
+ĠدÙĪÙĦ ار
+Ġr ẻ
+Ġkh Äĥn
+عد د
+ش عب
+czy Äĩ
+ĠاÙĦ Ùĥر
+ĠÑĩеловек а
+ĠÙĪ Ø¥ÙĨ
+×IJ ×ĺ
+Ġth Æ¡
+ĠاÙĦ رÙĬاض
+оп ÑĢедел
+опÑĢедел ен
+×Ķ ×ŀש×ļ
+ĠÐĿ ово
+з Ñĭва
+ĠاÙĦدÙĪÙĦ ÙĬ
+ĠÄij áp
+Ġк ÑĢед
+ĠкÑĢед иÑĤ
+ов ого
+Ġm ôn
+à¸Ľà¸£à¸° à¹Ĥย
+à¸Ľà¸£à¸°à¹Ĥย à¸Ĭà¸Ļ
+à¸Ľà¸£à¸°à¹Ĥยà¸Ĭà¸Ļ à¹Į
+ÑģÑĤ е
+ĠTh á»ĭ
+د ÙĬØ©
+×ŀצ ×ķ
+Ùģ Ø§Øª
+×§ ×ĵ×Ŀ
+ìĿ´ëĿ¼ ê³ł
+ÙĪ Ø®
+Ġ×Ĺ ×ĸ
+ĠÑĦоÑĤ о
+׾ ×Ļת
+ت Ùİ
+ÙĪ Ø¨Ø±
+й ÑĤи
+ĠÃ¶ÄŁ ren
+Ġ×Ķ×ĸ ×ķ
+Ġv á»įng
+ÙĤÙĪ Ø©
+ĠT ây
+ĠÐĿ и
+Ġש ×ķ×ij
+ãģ¨è¨Ģ ãĤıãĤĮ
+ãģ© ãĤĵãģª
+׊צ×Ļ
+ï½ ľ
+Ġ×ķ×Ķ ×ķ×IJ
+ä¸Ģ ãģ¤
+ĠÑģÑĤо иÑĤ
+ni Äħ
+×ĺר ×Ļ
+ĠдеÑĤ ей
+нÑı ÑĤÑĮ
+ĠÑģдел аÑĤÑĮ
+Ġë§İ ìĿ´
+ä½ķ ãģĭ
+ãģĽ ãĤĭ
+à¹Ħ หม
+à¸ķิà¸Ķ à¸ķà¹Īà¸Ń
+Ġ×ij ת×Ĺ
+Ġ×ijת×Ĺ ×ķ×Ŀ
+ìĻ Ħ
+ì§Ģ ëĬĶ
+ÑģÑĤ аÑĤ
+ÑıÑģ н
+ü b
+Ġth ả
+Ġ×ij×IJ×ŀ ת
+Ġt uyến
+×ĵ ×Ļר×Ķ
+Ġ×IJ ×Ļש×Ļ
+×ĸ׼ ר
+ãģ° ãģĭãĤĬ
+Ġx ét
+׼ ×Ļ×ķ
+׼×Ļ×ķ ×ķף
+diÄŁ ini
+ĠاÙĦÙħ ÙĪØ¶ÙĪØ¹
+Ġh áºŃu
+à¸Īาà¸ģ à¸ģาร
+×ijס ×Ļס
+Ġ×ŀ×Ĵ ×Ļ×¢
+×ij ×Ļ×¢
+ĠÙĪ Ø¬Ùĩ
+à¹ģà¸Ķ à¸ĩ
+à¸Ļ าà¸ĩ
+ĠÅŀ a
+ì ¡´
+ë¡ Ģ
+à¸ķ ะ
+Ġ×Ķ×Ĺ×Ļ ×Ļ×Ŀ
+Ùģ ÙĬد
+ãģ§ãģĻ ãģĭãĤī
+ê· ľ
+ź ni
+ĠлÑİ Ð´ÐµÐ¹
+Ġyüz de
+ıy orum
+ĠاÙĦ بØŃر
+e ño
+п аÑĢ
+ÙĬ ÙĤØ©
+об ÑĢ
+ר ×ķ×ļ
+ت ÙĪÙĤع
+ĠاÙĦØ´ ÙĬØ®
+åĪĿ ãĤģãģ¦
+ĠÑĤ елеÑĦ
+ĠÑĤелеÑĦ он
+Ġth ôi
+Ġ×Ļ׼×ķ׾ ×Ļ×Ŀ
+ĠÅŁ irk
+ĠÅŁirk et
+Ġìļ°ë¦¬ ê°Ģ
+ĠÄij ông
+Ġת ×ķ×ĵ×Ķ
+ÑģмоÑĤÑĢ ÐµÑĤÑĮ
+ĠÙĦ ÙĩÙħ
+Ġ׾ ׼
+ĠN ó
+ĠØŃ اÙĦØ©
+ãģĦ ãģij
+קר ×ķ
+az ı
+ãĤ³ ãĥ¼
+ĠÙĦÙĦ ت
+s ınız
+ĠH ải
+기 ìĪł
+ยัà¸ĩ à¹Ħมà¹Ī
+ëĭ¤ ê³ł
+פ ×Ĺ
+Ġ׾×Ĵ ×ij×Ļ
+Ġع ÙĨÙĩ
+Ġк аз
+Ġказ ино
+ب ÙĪØ±
+ÑĦ еÑĢ
+Ġê°Ļ ìĿ´
+تس جÙĬÙĦ
+ĠاÙĦÙħ رÙĥز
+ĠTh ái
+д аÑĤÑĮ
+×ŀ×Ļ ×Ļ׾
+Ġpay laÅŁ
+ãģ¤ ãģ®
+à¹Ģร ืà¸Ń
+n ça
+׳ ×ķ×Ĺ
+Ġ×IJ פ×Ļ׾×ķ
+ãģ¨ èĢĥãģĪ
+ãģ¨ãģĹãģ¦ ãģ¯
+à¹Ģà¸Ī à¸Ń
+×ŀ פ
+Ġg iriÅŁ
+л иÑĤ
+ÑĤ елÑı
+Ñij н
+æ°Ĺ ãģ«
+Ġg ó
+Ġgó p
+åĪĩ ãĤĬ
+Ġ×Ķ ×Ĺ×ĵש
+ж ал
+Ġ×ĵ עת
+éģķ ãģĨ
+à¹Ģà¸Ĥà¹īา à¹Ħà¸Ľ
+Ġס ר×ĺ
+e ña
+æĸ° ãģĹãģĦ
+ر Ùİ
+ĠÐIJ ÑĢ
+Ġph ản
+à¸Īะ à¹Ħà¸Ķà¹ī
+Ġ×ijצ ×ķר×Ķ
+Ø´ اÙĩ
+شاÙĩ د
+ÙĪØ± د
+à¹Ģà¸Ļืà¹Īà¸Ńà¸ĩ à¸Īาà¸ģ
+или ÑģÑĮ
+à¹ģละ à¸ģาร
+Ġ×Ķ ×ĸ׼
+Ġ×Ķ×ĸ׼ ×ķ×Ļ×ķת
+ei ÃŁ
+ãĥ ¨
+ìĥ Ī
+ĠÃĩ a
+Æ ¯
+ש ×Ĵ
+ÙĬÙĨ Ø©
+ร à¹īà¸Ńà¸ĩ
+ãĤµ ãĥ³
+ÑĢоÑģÑģ ий
+ÑĢоÑģÑģий Ñģк
+a ÄŁa
+ĠнаÑĩ ина
+Ġص ÙĦÙī
+à¸Ĺุà¸ģ à¸Ħà¸Ļ
+íļĮ ìĤ¬
+Ġли ÑĨ
+Ø´ ÙĬر
+ĠØ´ÙĬ Ø¡
+ÙĬÙĨ ا
+Ġפ ×Ĺ×ķת
+Ġiçer is
+Ġiçeris inde
+ĠØ£ ØŃÙħد
+Ġże by
+ì´ Ŀ
+Ġп оказ
+Ġи менно
+หà¸Ļัà¸ĩ ส
+หà¸Ļัà¸ĩส ืà¸Ń
+ĠÑĤÑĢ Ðµ
+สัà¸ĩ à¸Ħม
+Ø¥ ÙIJ
+ãģĮ å¿ħè¦ģ
+ÙĬÙij Ø©
+פ צ
+íĭ °
+ĠÙħ جاÙĦ
+׳ פש
+к ан
+×Ĺ ×ķפ
+×Ĺ×ķפ ש
+ì²ĺ ëŁ¼
+ов аÑı
+з ов
+Ġh ạ
+Ġdzi ÄĻki
+×Ļר ×ķ
+Ġ׾ ×ŀצ
+Ġ׾×ŀצ ×ķ×IJ
+×Ļ×ĵ ×ķ
+Ġs ợ
+Ġ׾×Ķ ×Ĵ×Ļ×¢
+×§ ×ij×¢
+Ġchi á»ģu
+ãĥŀ ãĤ¤
+Ġd Ãłng
+à¹ģà¸Ł à¸Ļ
+Ġü ye
+×Ļ׳ ×Ĵ
+à¹Ģรีย à¸ģ
+ç§ģ ãģĮ
+th é
+ĠÑĦ илÑĮ
+ĠÑĦилÑĮ м
+ĠNg Ãły
+Ġж ен
+Ġжен Ñīин
+ج ÙĬد
+n ç
+à¸Ľ รา
+×Ļ×ŀ ×ķ
+Ġn á»ģn
+×IJ ×ķ׾×Ŀ
+Ġвозмож ноÑģÑĤÑĮ
+Ġëĭ¤ ìĭľ
+è¦ĭ ãģŁ
+à¸ĸ à¸Ļ
+à¸ĸà¸Ļ à¸Ļ
+mız ı
+ĠÙħ جÙħÙĪØ¹Ø©
+c jÄħ
+ĠÐł Ф
+à¸ģำ หà¸Ļ
+à¸ģำหà¸Ļ à¸Ķ
+ĠìŬ 기
+land ı
+ни ÑĨ
+ÑģÑĤв е
+Ġ×ĵ ×ijר×Ļ×Ŀ
+Ġsk ÅĤad
+ãĤĬ ãģ¾ãģĹãģŁ
+ĠоÑĤ кÑĢÑĭÑĤ
+нÑı ÑĤ
+ĠÑģво ей
+à¸Ī ิà¸ķ
+ĠкаÑĩеÑģÑĤв е
+Ġet tiÄŁi
+ìĤ¬ íķŃ
+ĠاÙĦÙĬ ÙħÙĨ
+иÑĩеÑģки й
+ë¸ Į
+Ġ×ij×IJר ×¥
+Ġا سÙħ
+Ġиз веÑģÑĤ
+r ão
+Ġatt ivitÃł
+à¹Ģà¸Ľà¹ĩà¸Ļ à¸ģาร
+ĠاÙĦد Ùĥت
+ĠاÙĦدÙĥت ÙĪØ±
+ĠÙĪØ§ØŃد Ø©
+ĠÑģ ÑĩеÑĤ
+ĠпÑĢ Ð¸Ñĩ
+ĠпÑĢиÑĩ ин
+ĠÙĪØ² ارة
+Ġh uyá»ĩn
+ĠÙĥ تاب
+à¹ģà¸Ļ à¹Īà¸Ļ
+à¹ģà¸Ļà¹Īà¸Ļ à¸Ńà¸Ļ
+Ġgün ü
+г ÑĢÑĥз
+ĠاÙĦØ® اص
+Ġgör ül
+׾ ×ŀ×ĵ
+Ġìłķ ëıĦ
+×ķ×ij ×Ļ׾
+Ġ×ŀ×§ צ×ķ×¢×Ļ
+ĠоÑģоб енно
+à¸Ľà¸£à¸° à¸ģา
+à¸Ľà¸£à¸°à¸ģา ศ
+aca ģını
+ë¶ ģ
+à¸łà¸¹ มิ
+ĠÑį лекÑĤ
+ĠÑįлекÑĤ ÑĢо
+Ġ×§ ש×Ķ
+سÙĦ Ø·
+à¸Ĭà¸Ļ ะ
+×¢ ×Ļ׾
+ĠЧ е
+à¹ģà¸Ļ à¹Ī
+lı ģ
+lıģ ın
+Ġ×ŀ×¢ ×¨×Ľ×ª
+好ãģį ãģª
+มาà¸ģ à¸Ĥึà¹īà¸Ļ
+×ŀ×¢ ×ijר
+ĠاÙĦÙħ غرب
+ĠпеÑĢ Ð¸
+ĠпеÑĢи од
+Ġnh ạc
+ا ÙĪÙĬ
+ĠÙĪ Ø¹ÙĦÙī
+أخ ذ
+ĠC ô
+תר ×ij×ķת
+×Ĵ ×Ķ
+Ġktóre j
+×IJ ×Ļת
+×ij ×ķ×IJ
+д елÑĮ
+รี วิ
+รีวิ ว
+ж Ñĥ
+Ġ×ij×Ĺ ×ķ
+еÑĪ ÑĮ
+ĠØ£ ÙĦÙģ
+ĠاÙĦÙĪ Ø·ÙĨÙĬ
+ĠاÙĦÙħÙĨ Ø·ÙĤØ©
+nÄħ Äĩ
+Ġthi ên
+иÑĩеÑģк ой
+ĠاÙĦÙħ ÙĦ
+Ġع Ùħ
+ס פר
+Ġnh óm
+ÙĪØµ Ùģ
+ĠCh úng
+Ġر ÙĤÙħ
+ãģ¾ãģĹãģŁ ãģĮ
+al ité
+ล ม
+ĠëĤ´ ê°Ģ
+׾ק ×ķ×Ĺ
+ĠS Æ¡n
+pos ição
+mi ÄĻ
+Ġtr ánh
+ĠÄIJ á»Ļ
+׼ ×Ĺ
+ãģĤ ãģ£ãģ¦
+à¸Ńย à¹Īา
+Ġ×ŀ×Ĺ ×Ļר
+Ġ×Ķ ×Ļת×Ķ
+à¸Ľ à¹Īา
+à¸Ńืà¹Īà¸Ļ à¹Ĩ
+Ø´ ÙĤ
+×ł×¡ ×Ļ
+ë¦ ¼
+ãģ¦ãģĹãģ¾ ãģĨ
+Ġ×ŀ צ×ij
+ãģ« åĩº
+ÙħÙĪØ§ Ø·ÙĨ
+ยัà¸ĩ มี
+алÑĮ нÑĭе
+san ız
+Ø¥ سرائÙĬÙĦ
+ĠvÃł i
+ì¤ Ħ
+ã썿ĢĿ ãģ£ãģ¦
+×Ļ ×ķ׳×Ļ
+çĶŁ ãģį
+Ġs âu
+Ñĩ иÑģÑĤ
+Ġl á»ħ
+ĠGi á
+à¸Ńุ à¸Ľ
+à¸Ńà¸¸à¸Ľ à¸ģร
+à¸Ńà¸¸à¸Ľà¸ģร à¸ĵà¹Į
+Ġnh ẹ
+r ö
+ס ×ĺ×Ļ
+ãģķãĤĵ ãģĮ
+Ġd ầu
+ع Ùİ
+ت را
+×Ĵ×ĵ ׾
+Ġtécn ica
+׼ ׳×Ļ×Ŀ
+תק ש
+תקש ×ķרת
+Ġн его
+ét ait
+Ġm á»ģm
+Ñģ еÑĤ
+Ġnh áºŃt
+Ġ×ŀ ×¢×ľ
+Ġ×Ķ×¢ ×ij×ķ×ĵ
+Ġ×Ķ×¢×ij×ķ×ĵ ×Ķ
+Ġ×Ĵ ×Ļ׾
+ãģ¯ ãģªãģĦ
+ائ ØŃ
+Ġз деÑģÑĮ
+×IJ ×Ļ׳×ĺר
+Ùħ ÙIJ
+Ġ×Ļ ×Ĺ×ĵ
+ر اÙģ
+ì²ĺ 리
+×ĵ ×¢×ķת
+ì¹ ľ
+ĠТ о
+ĠTh ế
+ì¶ ©
+Ġ׳׼ ×ķף
+عÙĬ Ø´
+ни з
+Ġج اÙĨب
+×ŀ×§ צ×ķ×¢
+à¹Ĥ à¸ĭ
+Ñģ ÑĥÑĤ
+ìĸ´ ìļĶ
+ãĤĴè¦ĭ ãģ¦
+ار د
+Ġaç ıl
+ĠاÙĦØŃ ÙĬاة
+à¸ģà¹ĩ à¹Ħà¸Ķà¹ī
+ãģĿãĤĮ ãĤĴ
+عض ÙĪ
+Ġг ÑĢаж
+ĠгÑĢаж дан
+à¸Īะ à¸ķà¹īà¸Ńà¸ĩ
+ĠìĿ´ 룬
+ĠìĿ´ë٬ íķľ
+Ġtr ách
+ÙĨ Ùİ
+Ġkı sa
+Ã Ķ
+ÑĪ ÐºÐ°
+ãģ® äºº
+ĠÐŁ оÑģ
+ĠÐŁÐ¾Ñģ ле
+Ñĥ лÑĮ
+ÙĪØ§ جÙĩ
+ÙĤ رب
+à¸Ľà¸ıิ à¸ļัà¸ķิ
+ê° Ļ
+Ġ×ŀ ׳
+ĠÑģво и
+بر اÙħج
+Ġر ÙĪ
+пÑĢ Ð¾Ð´
+пÑĢод аж
+Ġby ÅĤy
+วั ย
+Ġgör ün
+ĠÃ Ī
+ÑİÑī им
+ĠÑĤак ой
+Ùģ ÙĪØ±
+ĠÙģ Ø¹ÙĦ
+Ġб ел
+ëIJ ł
+er ÃŃa
+ĠÑģво Ñİ
+Ġl ã
+Ġlã nh
+à¹Ģà¸ŀืà¹Īà¸Ń à¹ĥหà¹ī
+ÙĤ ÙĨ
+تط ÙĪÙĬر
+Ġsay ı
+ĠÑģ ейÑĩаÑģ
+Ġ×IJ×Ĺר ת
+×§ ×ķפ×Ķ
+×§×ķר ס
+Ġس Ùħ
+Ġ×ĺ ×Ļפ×ķ׾
+ìĿ´ëĿ¼ ëĬĶ
+دراس ة
+èµ· ãģĵ
+×Ĺ ×Ļ׳
+×Ĺ×Ļ׳ ×ķ×ļ
+×ĵ ×§
+Ġë§ ŀ
+Ġком анд
+ĠÐij о
+Ġиг ÑĢÑĭ
+à¸ļ ี
+ĠØ£ Ùİ
+в ен
+ĠاÙĦج دÙĬد
+ĠÙĦ Ø¥
+Ġ×ķ×IJ ׳×Ļ
+Ġ×Ķס ×Ļ
+иÑĩеÑģк ого
+رÙĪ ØŃ
+à¸ģาร ศึà¸ģษา
+ĠTr ưá»Ŀng
+иг ÑĢа
+ıl ması
+Ġм аÑģÑģ
+ãģ¨ãģį ãģ«
+à¸Ĺีà¹Ī à¸ľà¹Īาà¸Ļ
+à¸Ĺีà¹Īà¸ľà¹Īาà¸Ļ มา
+ĠاÙĦساب ÙĤ
+Ġ×ŀ×¢ ×ĺ
+в аÑĤÑĮ
+m Ã¼ÅŁ
+Ġ׾ ׼×ļ
+Ġt á»ĭch
+Ùģ ÙĩÙħ
+تد رÙĬب
+Ø´ Ùĥ
+Ġ×ij ×ŀ×Ļ
+Ġ×ij×ŀ×Ļ ×ķ×Ĺ×ĵ
+ÙĤØ· اع
+ãģª ãģĹ
+×ķצ ×Ļ×IJ
+ĠÙĪ Ø³ÙĬ
+з Ñĥ
+Ġy at
+Ġyat ırım
+ë§ İ
+Ġth ắng
+ãģĬ 客
+ãģĬ客 æ§ĺ
+ĠThi ên
+ãģ«å¯¾ ãģĹãģ¦
+ÑĢ Ð¸Ñģ
+ÙĨت ائ
+ÙĨتائ ج
+Ġ×ŀ שר
+Ġ×ŀשר ×ĵ
+Ġتع اÙĦ
+ĠتعاÙĦ Ùī
+ש ׳×Ļ
+Ùĩ اÙħ
+×IJ׳ ש×Ļ×Ŀ
+Ġżyc ia
+ĠÑĢÑĥб лей
+ÙĬ ض
+Ġkat ıl
+ĠÙħ ÙĪØ¶ÙĪØ¹
+Ġvard ır
+ĠÙħÙĨ Ø·ÙĤØ©
+ĠTr ần
+Ġв еÑģ
+ü p
+Ùħ ÙĪÙĨ
+ÑĪ Ð»Ð¸
+Ġn óng
+Ø® ÙĦÙģ
+ĠС ÑĤа
+Ġд оÑĢ
+ĠдоÑĢ Ð¾Ð³
+ĠwÅĤa ÅĽnie
+eÄŁ in
+Ġhi á»ĥm
+ĠС ам
+ê»ĺ ìĦľ
+ĠÑĦ а
+ãģ» ãģĨ
+ãģ»ãģĨ ãģĮ
+×ķפ ×Ļ×¢
+ê° Ī
+د ÙĪÙĦ
+Ġthu ê
+Ġch á»Ĺ
+Ġëĭ¹ ìĭł
+ãģij ãĤĮ
+ãģijãĤĮ ãģ©
+ë³´ íĺ¸
+ãģķãĤĮ ãģ¦ãģĦãģ¾ãģĻ
+Ġнад о
+ĠìĤ¬ëŀĮ ëĵ¤
+à¹Ģà¸Ĥ à¸ķ
+สม ัย
+z ÅĤ
+ت ÙĪØ±
+Ġש ת×Ļ
+v ê
+Ġ×ijת ×ķ×ļ
+à¸Ĭ ัย
+ãģĦ ãģ£ãģŁ
+ìĿ ij
+Ġt ầ
+Ġtầ ng
+ש ׼ר
+Ġê¸ Ģ
+Ġ×Ķש ׳×Ķ
+Ġا ÙĨÙĩ
+ç«ĭ ãģ¡
+r és
+füh ren
+ر ØŃÙħ
+ê· ¹
+ĠâĢ «
+Ġsu ất
+à¸Ł ิ
+ÙĬ Ùĩا
+ĠاÙĦ اتØŃاد
+Ġt uyá»ĥn
+ãģ¾ ãĤĭ
+Ġm ại
+Ġng ân
+ãĤ° ãĥ©
+欲 ãģĹãģĦ
+س ار
+ãĤĤãģ® ãģ§ãģĻ
+ки е
+Ġseç im
+åħ¥ ãĤĬ
+ãģªãģ© ãĤĴ
+ÑĤ ÑĢи
+ĠÑģп еÑĨ
+ĠØ£ د
+Ġод но
+ÑĪ ÐµÐ»
+ãĥĩ ãĥ¼ãĤ¿
+ãĤ· ãĤ¹ãĥĨ
+ãĤ·ãĤ¹ãĥĨ ãĥł
+è¡Į ãģį
+ã썿ĢĿ ãģ£ãģŁ
+à¹Ģà¸ģิà¸Ķ à¸Ĥึà¹īà¸Ļ
+ĠÑĤ ож
+ĠÑĤож е
+Ġs ạch
+ĠÑģ ÑĢок
+Ġкли енÑĤ
+ĠÙħØ´ رÙĪØ¹
+Ġalt ında
+Ġì ·¨
+ä¸Ń ãģ®
+ãģķãģĽ ãĤĭ
+ãģĻ ãģ¹
+ãģĻãģ¹ ãģ¦
+ê°ľ ë°ľ
+ĠÄij êm
+ãģªãģĦ ãģ®ãģ§
+ì² ł
+×¢ ×ij×ĵ
+Ġd ấu
+à¸Ħà¸Ļ à¸Ĺีà¹Ī
+ĠC ách
+تع ÙĦÙĬÙħ
+Ġh ại
+ãĤ» ãĥķãĥ¬
+ĠÙĨÙ쨳 Ùĩ
+ĠíĨµ íķ´
+ÑĪ Ð»Ð¾
+Ġнап ÑĢав
+ĠнапÑĢав лен
+ÑĢÑĥ Ñĩ
+íĶ Į
+Ġ×ijר ×Ļ×IJ
+ãģ® ãģ¿
+ãģ«ãģĬ ãģĦãģ¦
+×ij ׳ק
+ãĤ¨ ãĥ³
+Ø«ÙĦ اث
+Ġm ỹ
+ĠÑģай ÑĤе
+Ġе мÑĥ
+ت غÙĬ
+تغÙĬ ÙĬر
+خص ÙĪØµ
+ÑĤе ли
+Ġ×ķ׾ ׼ף
+פע ×Ŀ
+Ġпо ÑįÑĤомÑĥ
+ر اÙĨ
+иÑĤел ей
+пиÑģ ан
+×¢ ×¥
+ĠìĤ¬ ìĹħ
+Ùħ ز
+جÙħ ÙĬع
+ë©´ ìĦľ
+à¸ľà¸¥à¸´à¸ķ à¸łà¸±
+à¸ľà¸¥à¸´à¸ķà¸łà¸± à¸ĵ
+à¸ľà¸¥à¸´à¸ķà¸łà¸±à¸ĵ à¸ij
+à¸ľà¸¥à¸´à¸ķà¸łà¸±à¸ĵà¸ij à¹Į
+ĠпÑĢ Ð¸Ð¼ÐµÑĢ
+ãĤŃ ãĥ¼
+l â
+Ġch Äĥm
+缮 ãģ®
+ãģĦ ãģĭ
+ãģ¨è¨Ģ ãģĨ
+×ĸ ×ķ×Ĵ
+Ġ×ij ×ĵ×Ļ
+Ġ×ij×ĵ×Ļ ×ķ×§
+ãģĬ åºĹ
+à¸ķà¸Ńà¸Ļ à¸Ļีà¹ī
+Ġph á»iji
+п ÑĤ
+สà¸Ļ าม
+Ø· ÙĪ
+ص اØŃ
+صاØŃ ب
+ĠD ü
+ĠDü nya
+Ġп ока
+п ал
+ĠÄij ảo
+ĠاÙĦÙģ ÙĪØ±
+ĠاÙĦÙģÙĪØ± Ùĥس
+Ġmá u
+кÑĢ ÐµÐ¿
+ĠاÙĦس اعة
+ĠгоÑĢ Ð¾Ð´Ð°
+Ùģ ØµÙĦ
+ай ÑĤе
+Ġд ог
+Ġдог овоÑĢ
+ĠØ¥ ذ
+Ġ×ij׼׾ ׾
+ÙĬ تÙĩ
+×Ĵ ×ijר
+Ġbir ç
+Ġbirç ok
+문 íĻĶ
+ãģĿãģĨ ãģª
+را ØŃ
+ĠÙħ رة
+ĠденÑĮ ги
+f ä
+à¸Ĥà¹īา ว
+ĠÑģов ÑĢем
+ĠÑģовÑĢем енн
+׾×Ĺ ×¥
+èī¯ ãģı
+ĠÙģ Ø£
+Ġ×ķ ×ĸ×Ķ
+Ġз ани
+Ġзани ма
+Ġê°Ģì§Ģ ê³ł
+Ġh Æ¡i
+ãģªãģ® ãģĭ
+ãĥĨ ãĥ¬ãĥĵ
+Ġר ×ij×ķת
+à¸ķ ี
+Ġ×ijש ×ł×ª
+ĠT ại
+Ġthu áºŃn
+Ñģ ел
+Ñij м
+dzi Äĩ
+ĠÑģ ка
+ĠÑģка Ñĩ
+ĠÑģкаÑĩ аÑĤÑĮ
+×ķ×ŀ ×ķ
+г ла
+Ġмин ÑĥÑĤ
+åĩº ãģĻ
+Ġ×Ĺ×Ļ ×Ļ×ij
+Ġת ×Ĵ×ķ×ij×Ķ
+à¸£à¸¹à¸Ľ à¹ģà¸ļà¸ļ
+ни ÑĨа
+Ġİ n
+ĠØ£ ع
+Ġض ÙħÙĨ
+Ùħ ثاÙĦ
+ĠyaÅŁ an
+ĠìŰ 구
+ĠL ê
+ש׾ ×Ĺ
+ãģı ãģªãĤĭ
+ìĹĨ ìĿ´
+ĠÑĤ ÑĢи
+ĠÑĩаÑģÑĤ о
+Ġоб ÑĢаÑĤ
+п ло
+د خ
+دخ ÙĪÙĦ
+س Ùĩ
+à¸Ń าà¸ģ
+à¸Ńาà¸ģ าศ
+Ġ׼ ×ĸ×Ķ
+Ġ×Ķ×¢ סק
+ĠاÙĦØ£ ÙĨ
+å¹´ ãģ«
+×¢ ש×ķ
+Ġש ×¢×ķת
+Ġm Ãłn
+×IJר ×Ļ
+sı yla
+Ù쨱 ÙĤ
+ни Ñħ
+Ġت ست
+è¦ĭ ãģ¦
+ØŃا ÙĪÙĦ
+×IJ ×Ļ׼×ķת
+ĠbaÅŁ ladı
+st Äħ
+stÄħ pi
+à¸Ĺีà¹Ī à¹Ģรา
+ÙĤر ر
+ج اب
+Ġ×ijר ×ķר
+à¹Ģà¸Ĥà¹īา à¹ĥà¸Ī
+×ŀ׊קר
+al ım
+Ġס ×Ļפ×ķר
+ãģ§ãģĤ ãĤĮãģ°
+Ġש×ŀ ×ķר×ķת
+Ġ×ķ ×ŀ×Ķ
+ãģĵ ãģĿ
+id ée
+ä¸ĭ ãģķãģĦ
+تÙĨا ÙĪÙĦ
+Ġ ลà¹īาà¸Ļ
+Ġìļ°ë¦¬ ëĬĶ
+اÙĨ ا
+ÑģÑĤ ой
+б оÑĤ
+ĠyaÅŁ am
+kö y
+Ø¥ ÙĦ
+ÑĢ Ñĭв
+기 ìĹħ
+Ġ×Ķ×ŀ ×ĵ
+Ġ×Ķ×ŀ×ĵ ×Ļ׳×Ķ
+د ب
+×¢ ×Ļ׳×Ļ
+×ŀ ת×Ĺ
+Ġפ ר×Ļ
+ãĥĭ ãĥ¼
+اÙħ ÙĬ
+Ġnh ằm
+ãĤĮ ãģªãģĦ
+ت عرÙģ
+Ġë§Ī ìĿĮ
+ìĵ °
+Ġh ấp
+ר×Ĵ ×Ļ׾
+ب Ùİ
+Ġr Äĥng
+gl Äħd
+ĠÑģиÑģÑĤем Ñĭ
+Ġkh óa
+ãģ§ãģĻ ãĤĪãģŃ
+大ãģį ãģı
+기 를
+Ġké o
+ÙĪ Ø¡
+ج اÙħ
+جاÙħ ع
+Ġ×¢ ×Ļצ×ķ×ij
+t éri
+Ġת ש
+Ġ×IJ ×ij×Ļ
+ĠCh ương
+à¸ļริ à¹Ģว
+à¸ļริà¹Ģว à¸ĵ
+ãģ¤ ãģı
+Ġ×Ĺ ×ķ׾
+עת ×Ļ×ĵ
+ש ×Ļ×ŀ×Ķ
+ëĤ ¨
+Ġש×IJ ×Ļף
+ĠÙĪØ§ÙĦ Ø¥
+ÑĦ а
+Ġkh ám
+Ġ×ĺ ×ķ×ij×Ķ
+ĠвÑĭ Ñģ
+ĠвÑĭÑģ око
+ĠاÙĦØŃ دÙĬØ«
+人 ãĤĤ
+d Ã¼ÄŁÃ¼
+×Ļ×Ĺ ×ķ×ĵ
+تع ÙĦÙĬ
+تعÙĦÙĬ ÙĤ
+l ö
+تØŃ دÙĬد
+н его
+ĠÑĥд об
+Ġ׾ ×ŀ×Ļ
+Ġר ×ķצ×Ļ×Ŀ
+Ġج اء
+Ġ×ij ×ĸ×ŀף
+à¸Ľà¸ģ à¸ķิ
+é«ĺ ãģı
+à¸Ľà¸¥ า
+Ġart ık
+Ġbug ün
+×§ ׳×Ļ
+Ġkho á
+ĠÙħ رÙĥز
+ĠìŀIJ 기
+در جة
+×ŀש ר×ĵ
+Ġgi ấy
+Ġch óng
+ק פ
+ÙĬب Ø©
+ĠczÄĻ sto
+в али
+Ùĥ ب
+ìŁ ģ
+ส à¸ļาย
+à¸Ľà¸£à¸°à¸Ĭา à¸Ĭà¸Ļ
+×Ĵ ×ķ×£
+ëŁ ī
+ãģ® ãģĵãģ¨
+ล à¸Ń
+Ġngh á»ī
+åŃIJ ãģ©
+åŃIJãģ© ãĤĤ
+à¹Ħà¸Ķ à¹īà¸Ńย
+à¹Ħà¸Ķà¹īà¸Ńย à¹Īาà¸ĩ
+×ĵ ×¢
+ĠاÙĦت Ùī
+ĠÑģов еÑĤ
+Ġqual itÃł
+åĩº ãģĹ
+ĠÑĢÑĥк ов
+ĠÑĢÑĥков од
+ราย ละà¹Ģà¸Ńียà¸Ķ
+ãģªãģĭ ãģªãģĭ
+기 ê´Ģ
+Ġ×Ĺ ×ķש
+Ġ×Ĺ×ķש ×ij
+л оÑĤ
+à¸Ļะ à¸Ħรัà¸ļ
+×§×ij ×ķצ×Ķ
+Ġth ái
+Ġש ×ij×Ķ
+ĠÑĪ ÐºÐ¾Ð»
+ĠÙĦ ÙĥÙĦ
+à¹ĥà¸Ļ à¸Ĭà¹Īวà¸ĩ
+ĠÙħ ÙĥاÙĨ
+ë ķĮ
+Ġc ải
+ĠCh ÃŃ
+ÑĥÑĩ а
+ìĿ µ
+Ġx ảy
+à¸Ĭà¸Ļ ิà¸Ķ
+Ġc áºŃu
+к ÑĢов
+ss é
+ĠÙĨ ÙĪØ¹
+ĠТ а
+Ø® Ùħس
+פ×ķס ×ĺ
+Ġm ắc
+ĠÄij em
+à¸ģาร à¹ĥà¸Ĭà¹ī
+ר ×ķס
+ĠÐĽ е
+Ġth á»Ń
+รà¹Īาà¸ĩ à¸ģาย
+üz ü
+æĹ¥æľ¬ ãģ®
+ê³¼ ìłķ
+ש ×Ļ×IJ
+ĠìŀĪ ê³ł
+×ij ×ķ׾
+ìķ ħ
+ĠÙĪØ§ÙĦ ا
+ĠÐĽ и
+ĠвÑģ Ñij
+Ġużytk ow
+×Ĺ ×ķ׾
+ر Ù쨶
+Ġson uç
+ãģĦ ãģ¾ãģĽãĤĵ
+ìĤ¬ ìĹħ
+ëĪ Ħ
+ÑĤ ек
+Ġud ziaÅĤ
+л ез
+Ġ×Ķ×Ļ ×Ļת×Ļ
+ãĤīãĤĮ ãģ¦
+Ùħس ؤÙĪÙĦ
+ر ار
+ÑĤ ан
+ĠÄij Ãło
+Ġר ×ķ×ij
+Ġ×ijש×ij ×Ļ׾
+ä»ĬåĽŀ ãģ¯
+ãĤ¸ ãĥ¥
+Ġ×¢ ×ijר
+ãģĽ ãģ¦
+п олÑĮ
+ak lı
+Ġk ÃŃnh
+د ت
+лож ение
+ĠاÙĦÙħ ص
+ĠاÙĦÙħص رÙĬ
+à¸Īริà¸ĩ à¹Ĩ
+ĠاÙĦشر ÙĥØ©
+ĠÄij á»ı
+ãĥĽ ãĥĨ
+ãĥĽãĥĨ ãĥ«
+Ñį кон
+Ñįкон ом
+ĠÙĪ Ø¹ÙĨ
+Ġת ׳
+Ġ×ª×ł ×IJ×Ļ
+ĠاÙĦدÙĪÙĦ ÙĬØ©
+Ġì§Ģ ìĹŃ
+ãģ§ãģĻ ãģĭ
+Ġв аÑĢи
+ĠваÑĢи анÑĤ
+ĠاÙĦع رب
+ел а
+Ġt Æ°á»Ľng
+sk Äħ
+Ġm ặc
+ส ัà¸ģ
+ãĥĵ ãĥ¼
+Ġ×ij ×Ĵ׾
+Ġ×ij×Ĵ׾ ׾
+ãĥķãĤ¡ ãĥ³
+×ij ×Ļצ
+×ij×Ļצ ×ķ×¢
+ли ÑģÑĤ
+à¸Ł ุ
+à¸Łà¸¸ à¸ķ
+à¸Łà¸¸à¸ķ à¸ļà¸Ńล
+à¸Ŀ à¹Īาย
+ìŀIJ ìĿĺ
+Ġس ÙĪÙģ
+Ġש ×Ķת
+Ġê± ¸
+×¢ ×ij×ķ×ĵ
+ãģĻãĤĭ ãģĵãģ¨ãģĮ
+ĠÑĩа ÑģÑĤÑĮ
+ãĤ¢ ãĥ¡ãĥª
+ãĤ¢ãĥ¡ãĥª ãĤ«
+Ġtak ım
+Ġs Ỽ
+ĠsỼ m
+שר ×Ķ
+è¨Ģ ãģĨ
+л ан
+ì» ¤
+׼ ׳×Ķ
+ÙĪÙģ ÙĬ
+íĹ Ī
+lu ÄŁu
+ĠëĮĢ íķ´
+Ġ׾×ij ×Ļת
+Ġ×Ķר×IJש ×ķ׳×Ķ
+ص Ùħ
+Ġsö yled
+Ġsöyled i
+à¸Ľ าà¸ģ
+Ġard ından
+ãģĪ ãģŁ
+à¸Ĺัà¹Īว à¹Ħà¸Ľ
+Ġ׳×ķס ×£
+б олÑĮ
+ãĤĵãģ§ãģĻ ãģijãģ©
+ĠлиÑĪ ÑĮ
+Ġ×ij ×IJ×Ļ
+ĠбÑĭ ÑģÑĤÑĢо
+ส ัà¸Ļ
+Ġ×ij פ׳×Ļ
+л еÑĩ
+ĠاÙĦØ® بر
+Ġsó c
+Ġth ú
+Ġп ÑıÑĤ
+ãģĬ é¡ĺ
+ãģĬé¡ĺ ãģĦ
+ÑĤ ин
+ãģ«ãģ¤ãģĦãģ¦ ãģ¯
+פ ף
+Ġдв ÑĥÑħ
+à¸į ีà¹Ī
+à¸įีà¹Ī à¸Ľ
+à¸įีà¹Īà¸Ľ ุ
+à¸įีà¹Īà¸Ľà¸¸ à¹Īà¸Ļ
+оп еÑĢ
+ĠاÙĦب شر
+ĠاÙĦÙħ اÙĦ
+ıyor uz
+تØŃ ÙħÙĬÙĦ
+à¸ģ ะ
+éĸĵ ãģ«
+×Ĺ ×ķש
+ĠNg uyên
+ãģĦãģ¦ ãģĦãĤĭ
+дÑĥ ÑĪ
+ש פע
+ÑĪ Ñĥ
+å®Ł éļĽãģ«
+ĠÑĢай он
+ĠCh á»ī
+ÙĨ صر
+Ġìļ ´
+Ġìļ´ ìĺģ
+Ġ×Ķ×ĵ ×Ļף
+ØŃد د
+ر ز
+ĠاÙĦد Ùħ
+ĠPh áp
+ÑĤ ÑģÑı
+è¦ĭ ãģĪ
+Ġti á»ĥu
+Ġs á»Ńa
+а ÑİÑĤÑģÑı
+ĠB á
+Ġ×ķ ׼׾
+Ð ĸ
+ÑĪ Ð¸Ð¼
+ìĿ´ ëĬĶ
+л ев
+d ık
+Ġprés ente
+Ġara ç
+صد ÙĤ
+Ġпом ог
+ĠاÙĦشر ÙĤ
+ĠÙĪØ§ÙĦ ذÙĬ
+رÙĬ ا
+×ij ׳×ķת
+Ġng á»ĵi
+ר ×ķפ
+ר×ķפ ×IJ
+Ġth ấp
+ãĤĦ ãģ¯
+ãĤĦãģ¯ ãĤĬ
+ĠاÙĦج دÙĬدة
+éĿŀ常 ãģ«
+ÙĬÙĦ ÙĬ
+ìª ½
+تع اÙħÙĦ
+ãģł ã썿ĢĿãģĦãģ¾ãģĻ
+Ùħ Ùħ
+иÑĤе ли
+ãĤµãĤ¤ ãĤº
+اد ات
+ĠاÙĦÙħ اÙĦÙĬØ©
+Ùĥات ب
+к ли
+веÑĢ Ñħ
+ни Ñĩ
+Ġ×ľ×¢ ×ij×ķ×ĵ
+׾ ×Ļ×Ķ
+ØŃ Ùİ
+ãĤ¤ ãĥĻ
+ãĤ¤ãĥĻ ãĥ³ãĥĪ
+Ġת ×Ĵ×ķ×ij×ķת
+ÑĦ он
+ĠдÑĢÑĥг ие
+×IJ ×ĸ×ķר
+Ġper ò
+ìķ ŀ
+åĢŁ ãĤĬ
+ר צ×Ļ
+×IJ ×ĸ
+алÑĮ нÑĭÑħ
+Ġê²ĥ ìľ¼ë¡ľ
+ĠпÑĢав о
+ĠاÙĦØ£ رض
+à¹Ģà¸Ĺ à¸Ħ
+à¹Ģà¸Ĺà¸Ħ à¹Ĥà¸Ļ
+à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļ à¹Ĥล
+à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļà¹Ĥล ย
+à¹Ģà¸Ĺà¸Ħà¹Ĥà¸Ļà¹Ĥลย ี
+צ ר×Ļ
+ĠÐļ Ñĥ
+ıl ma
+決 ãĤģ
+ا ÙĪ
+Ġ×ĵ ×§×ķת
+à¸Ħร ู
+ĠÙħست ÙĪÙī
+à¸Ľ à¹īà¸Ńà¸ĩ
+à¸Ľà¹īà¸Ńà¸ĩ à¸ģัà¸Ļ
+×ĵ ×ķ×ŀ×Ķ
+ĠÑģ егоднÑı
+س ÙĪÙĤ
+ר×Ĺ ×ķ×ij
+ĠØ¥ دارة
+Ñħ ож
+éģİ ãģİ
+à¸Ħ à¸Ń
+нÑĥ л
+×ķ׼ ×Ķ
+ÙĪ Ø§ÙģÙĤ
+׼׾ ׾
+Ġ×Ķ ×ĵ×ķ
+Ġl Ä©nh
+Ġkh ảo
+×IJ×ŀ צע
+ë¨ ¸
+Ġ׼ ×Ļצ
+Ġ׼×Ļצ ×ĵ
+Ġдолж нÑĭ
+หว ัà¸ĩ
+ãĥĩ ãĤ¶
+ãĥĩãĤ¶ ãĤ¤ãĥ³
+Ġng á»Ŀ
+ä¸Ń ãģ«
+à¸ģลัà¸ļ มา
+جÙħ اÙĦ
+à¸Ķัà¸ĩ à¸ģลà¹Īาว
+س ÙĥÙĨ
+س ÙĨ
+Ġözellik le
+з еÑĢ
+rz ÄĻ
+×ŀ ×ķר×Ķ
+Ġl ạ
+×ŀ ×Ļ׳×Ļ
+ר ×Ļת
+ãģĿãĤĮ ãģĮ
+ãģĭ ãĤĮ
+ĠÙĬÙħÙĥÙĨ Ùĥ
+öff entlich
+г ан
+ĠاÙĦØŃ ÙĦ
+ĠmiÄĻd zy
+ĠÑĩа ÑģÑĤи
+ujÄħ cy
+ĠbaÄŁ lı
+ĠiliÅŁ ki
+Ùģ Ø§Ø¡
+ãĥª ãĥ³ãĤ°
+Ġhã ng
+ĠконÑĤ ÑĢ
+ĠконÑĤÑĢ Ð¾Ð»
+к оп
+ש ×Ļ×¢
+ש×Ļ×¢ ×ķר
+ĠÐĴ аÑĪ
+Ġ×Ķ ×ª×§
+ÙħÙĨ ع
+ĠpolÃŃt ico
+Ġг олов
+ĠØ¥ ÙĬ
+Ø¥ ÙĨتاج
+à¸ļ ิ
+Ġг овоÑĢ
+ĠговоÑĢ Ð¸ÑĤ
+Ġph á»ķ
+ĠÑģем ÑĮ
+ãģ¯ ãģĤãĤĬãģ¾ãģĽãĤĵ
+ĠÙĪ Ø§Ø³Øª
+×ŀש פ×ĺ
+з ем
+×ŀ×ĵ ×ijר
+Ġíģ °
+ĠìĿ´ ë²Ī
+ê°Ģ ëĬĶ
+Ġì§Ģ ìĽIJ
+Ġca ÅĤy
+Ġgeli ÅŁtir
+Ñģк ое
+pos é
+Ġkh ô
+à¸ķิà¸Ķ à¸ķาม
+miss ão
+Ġ׾ ×ŀר
+Ġ׾×ŀר ×ķת
+Ġb ó
+à¸ķรวà¸Ī สà¸Ńà¸ļ
+Ġngh á»ģ
+Ġб из
+Ġбиз неÑģ
+ÑģÑĤ еÑĢ
+ÙĪ Ùİ
+楽 ãģĹãģ
+楽ãģĹãģ ¿
+ãģĵãĤĮ ãģĭãĤī
+wiÄħ zan
+ส à¸Ńà¸Ļ
+Ùħ ÙĪØ±
+׳×ĵ ׾
+Ġ×Ķ×IJ ×ĵ×Ŀ
+Ġм олод
+ØŃ Ùħا
+ØŃÙħا ÙĬØ©
+ÑģÑĤ ÑĢан
+Ġbu á»ķi
+ת×Ļ ×Ļ×Ŀ
+abile ceÄŁi
+L İ
+à¹Ģย à¸Ńะ
+à¸Ī ร
+س ÙĥاÙĨ
+à¸Ļ ัà¸Ķ
+Ġm ấy
+ĠÐij а
+s ÅĤaw
+ĠÙģ ÙĦا
+ĠкоÑĤоÑĢ Ð¾Ð¹
+Ġпло Ñī
+ĠплоÑī ад
+ãĤĤ ãģĤãĤĬ
+sz czÄĻ
+×Ļפ ×ķ
+ש×ŀ ת
+owa ÅĤa
+Ġn ông
+צ×ij ×IJ
+ĠìŀĪ ìĹĪ
+ãģ¾ ãģ¨
+ãģ¾ãģ¨ ãĤģ
+ÙĤÙĪ Ø§Øª
+ãģ¿ ãĤĵãģª
+Ġ׼ ×ŀ×¢×ĺ
+Ġx úc
+ï¼ Ĩ
+r ÄĻ
+rÄĻ cz
+×ĵ ×ŀ×Ļ
+Ġt áºŃn
+à¸Ķ วà¸ĩ
+ê²½ ìłľ
+п ÑĥÑĤ
+أ ربع
+Ġ×ŀ שת×ŀש
+ãĤ¿ãĤ¤ ãĥĹ
+Ġìłľ ê°Ģ
+Ġ׾ ׼ף
+ĠобÑĢаз ом
+ÙĬÙĥ ا
+w ÅĤ
+wÅĤ asn
+ĠاÙĦÙĪØ·ÙĨ ÙĬØ©
+بÙĬ ب
+×ŀ ׾×Ļ
+к ÑĢаÑĤ
+기 ìĹIJ
+ÙĤ اد
+ĠÙĦ دÙī
+à¸Ħวาม รูà¹ī
+×ŀ×ĵ×Ļ׳ ×Ļ×ķת
+ê² ¨
+Ġíĺ Ħìŀ¬
+ש ת×Ļ
+м ол
+Ġmá i
+à¸ŀิ ม
+à¸ŀิม à¸ŀ
+à¸ŀิมà¸ŀ à¹Į
+หล วà¸ĩ
+Ġx uyên
+×Ĺ ×¡×¨
+رÙĪ ÙĨ
+ãģĿãģĨ ãģĦãģĨ
+ãģĿãĤĮ ãģŀ
+ãģĿãĤĮãģŀ ãĤĮ
+Ġ׼ ש×Ķ
+ÐŁ ÑĢав
+×ŀ×ij צע
+ع رب
+Ġbü yü
+פ×Ļת ×ķ×Ĺ
+à¸Ī à¸ļ
+ĠØ£ Ùĥبر
+שר ת
+×ŀ׼ ש×Ļר
+ĠÙĪ Ùħع
+ãģ® ãģŁãĤģãģ«
+à¸Ļ ัà¸ļ
+ì° °
+ãĥª ãĥķãĤ©
+ãĥªãĥķãĤ© ãĥ¼ãĥł
+Ġc ưá»Ŀng
+ĠìłĢ íĿ¬
+ÙħÙĨظ ÙħØ©
+Ġhiç bir
+ãģ§ãģ¯ ãģĤãĤĬãģ¾ãģĽãĤĵ
+ร à¸Ńย
+ëIJľ ëĭ¤
+ãģĻãģIJ ãģ«
+к ла
+Ġürün ler
+Ġki á»ĥu
+ĠëĤĺ ëĬĶ
+ÑĤ ки
+Ñģ им
+Ġchá»ī nh
+ãĤĤ ãģªãģĦ
+ศ รี
+æĽ¿ ãģĪ
+ta ÅŁ
+Ġب ÙĥÙĦ
+Ġ×ķ ×Ļש
+vis ão
+ä¼ Ŀ
+ä¼Ŀ ãģĪ
+ÙĦ د
+׾ ×Ļ×ŀ
+׾×Ļ×ŀ ×ķ×ĵ
+t ória
+د Ùij
+اÙħ ر
+Ġê·¸ëłĩ ê²Į
+Ġmateria ÅĤ
+à¸Ĺ รา
+à¸Ĺรา à¸ļ
+ã쮿ĸ¹ ãģĮ
+ãģ¦ ãģįãģŁ
+ض غ
+ضغ ط
+ĠÙĬ عÙĨÙĬ
+ел о
+×IJ×Ķ ×ij×Ķ
+×¢ ×ŀ
+ÅŁ ık
+ìŀIJ ëĬĶ
+ãĤ¿ ãĥ³
+Ġb áºŃt
+×ŀשפ ×Ĺ×Ķ
+к ÑĢи
+б ли
+สั à¸ķ
+สัà¸ķ วà¹Į
+ĠسÙĨ ÙĪØ§Øª
+ĠPh ương
+ãģ¦ãģĹãģ¾ ãģ£ãģŁ
+ãģª ãģľ
+Ġ×ij×IJ ×ķ
+Ġc án
+س جÙĦ
+Ġl ẽ
+ãĤ± ãĥ¼ãĤ¹
+Ġ×§ ×Ļ×ij׾
+à¸ļà¸Ĺ à¸Ħวาม
+Ġ×ķ ׼ף
+ĠпÑĢедÑģÑĤав лен
+Ġn á»iji
+Ġcoment ário
+ени ем
+Ġtá» ı
+l Ãł
+Ġש×Ķ ×Ļ×Ķ
+Ñģл ав
+ĠاÙĦ ÙĪÙĦا
+ĠاÙĦÙĪÙĦا ÙĬات
+ÙĦج ÙĨØ©
+×§×ķר ×IJ
+бÑĭ ÑĤ
+Ġì ¦
+Ġì¦ ī
+ãģ§ãģĻ ãģĹ
+หรืà¸Ń à¹Ħมà¹Ī
+за ÑīиÑĤ
+ÙģÙĦ سطÙĬÙĨ
+Ġmi á»ħn
+à¹Ģย à¹ĩà¸Ļ
+ĠçalÄ±ÅŁ an
+×Ļ×Ĵ ×Ķ
+ĠE ÄŁ
+ĠEÄŁ itim
+ãĥĥãĤ· ãĥ¥
+Ġоп Ñĭ
+ĠопÑĭ ÑĤ
+ر غ
+رغ ب
+ĠÑģво иÑħ
+à¸Ľà¸£à¸° à¸ķ
+à¸Ľà¸£à¸°à¸ķ ู
+Ġ×ŀ×IJ ×ĵ
+׼ ×ķ׳×Ļ×Ŀ
+à¸Ļ ี
+ĠвÑĭ Ñħод
+ãģ®ä¸Ń ãģ«
+פ ׾×IJ
+ĠÙĪ ÙĦÙĬس
+פ×ķר ס
+פ×ķרס ×Ŀ
+Ùħ سÙĦÙħ
+Ġng ôi
+×ĵ ×ŀ×ķת
+ãĤĴ使 ãģ£ãģ¦
+ĠпомоÑī ÑĮÑİ
+أ سر
+бл ок
+ÙĤ Ùĩ
+ãģĹãģ¾ ãģĦ
+ãģ¨ ãģĹãģŁ
+Ġп еÑģ
+ãĥī ãĥ«
+×Ĺ ×Ŀ
+ãģĹãģª ãģĮãĤī
+ĠÐŁ ÑĢед
+ãĥģãĤ§ ãĥĥãĤ¯
+å¼· ãģĦ
+ש ×Ļר×ķת
+д аеÑĤ
+×Ļ×ij ×ķ
+Ġgen ç
+ил аÑģ
+илаÑģ ÑĮ
+ĠبÙĦ د
+æĤ ª
+æĤª ãģĦ
+Ġ×ŀ שת
+æ§ĺ ãĢħ
+æ§ĺãĢħ ãģª
+à¸ĺรรม à¸Ĭาà¸ķิ
+ĠÙĥ اÙħÙĦ
+ĠاÙĦس Ùħ
+×ij×ĺ ×Ļ×Ĺ
+c á
+g ência
+ãĤ¹ãĤ¿ ãĥ¼
+à¸Ĺำ à¸ģาร
+×Ļ׾ ת
+Ġ×Ļ ×ķצ×IJ
+w ój
+à¸ļุ à¸Ħ
+à¸ļุà¸Ħ à¸Ħล
+ع تÙħ
+عتÙħ د
+ãģĿãĤĮ ãģ«
+ĠاÙĦت ارÙĬØ®
+ÙĤر اء
+Ġyönet im
+ק שר
+ĠÑģп оÑĢÑĤ
+Ġר×IJש ×ķף
+Ġseñ al
+Ġch ắn
+çĦ¡ ãģĦ
+ĠдоÑģÑĤ аÑĤ
+ĠдоÑģÑĤаÑĤ оÑĩно
+Ġá gua
+à¸ģร à¸ĵ
+à¸ģรà¸ĵ ี
+Ġ×ŀש ×ķ
+Ġtr ải
+ë² Į
+ujÄħ cych
+Ù쨱 د
+à¹ĥ à¸ģล
+à¹ĥà¸ģล à¹ī
+ãĤĭ ãģ®ãģ¯
+ר×ķ ×ķ×Ĺ
+ÙĨ Ùĥ
+ĠاÙĦÙĨ ÙĤ
+ãģ®ãģ§ ãģĹãĤĩãģĨ
+ãģ®ãģ§ãģĹãĤĩãģĨ ãģĭ
+Ùħ عرÙģ
+ÙħعرÙģ Ø©
+ÑĥÑī е
+Ġ×ij×¢ ×Ļקר
+ت صÙĦ
+Ġ×Ķ×IJ ר
+Ġ×Ķ×IJר ×¥
+ĠÅŀ i
+à¸Ĥา à¸Ķ
+íŀ ĺ
+ãģªãĤĵ ãģ¨
+ĠìĤ¬ëŀ ij
+l Ã¼ÄŁÃ¼
+ب اء
+ĠاÙĦØ¢ خر
+Ġfam ÃŃlia
+ĠTh áng
+Ñī ениÑı
+ãĤ¯ ãĥŃ
+ĠTh ứ
+æĽ¸ ãģį
+ен ной
+ìŀ ¡
+бл аг
+благ о
+п ов
+à¹ģ ว
+à¸ĩ à¸Ħà¹Į
+à¸Ńัà¸Ļ à¸Ķัà¸ļ
+ãģĤ ãģĴ
+ร à¹īาย
+ün ün
+Ġ×Ļ׼×ķ׾ ×Ķ
+з он
+ĠÐľ и
+маÑĤ еÑĢиал
+Ġë³´ ë©´
+ØŃÙģ Ø¸
+ê Ìģ
+ãģ« ãģĻãĤĭ
+Ġת ×IJ
+Ġ×Ķס ×ķ
+ĠÑģÑĤ оÑĢ
+ĠÑģÑĤоÑĢ Ð¾Ð½
+ãĥĪ ãĥĥãĥĹ
+ÅĤo ÅĽÄĩ
+ëħ ¼
+ëĵ Ŀ
+ĠÙĪØ§ÙĦ ع
+ì¶ Ķ
+Ġ×Ļצ ×IJ
+ĠÑĢаз дел
+алÑĮ наÑı
+×IJ׳ ש×Ļ
+spo ÅĤ
+spoÅĤ ec
+spoÅĤec zn
+Ø¥ عÙĦ
+إعÙĦ اÙĨ
+ÙĤÙĪ Ùī
+íķĺë©´ ìĦľ
+تط ÙĪØ±
+Ġsi êu
+Ỽ t
+д ви
+дви ж
+Ġqu ần
+k ıl
+ĠпÑĢи зна
+ĠH ã
+ĠHã y
+ĠباÙĦ ت
+man ın
+ãĤ« ãĥ«
+Ġk á»·
+×§ ׾×Ļ
+ëIJĺ ì§Ģ
+تعÙĦ Ùħ
+ìĭľ ìĦ¤
+ìĭ ¶
+íĺ ¼
+Ùĥ ÙĬÙģ
+売 ãĤĬ
+วิ à¸Ĭา
+б ал
+ĠØ£ ØŃ
+Ġдолж ен
+รา à¸ĩ
+ราà¸ĩ วั
+ราà¸ĩวั ล
+Ùħ اء
+ج ار
+Å ļ
+Ġ×ŀ×IJ ×ĸ
+ר ×ŀ×Ķ
+ãģĭãĤĤãģĹãĤĮ ãģªãģĦ
+ét ude
+czÄħ c
+Ġg ór
+×ł×¡ ×Ķ
+Ùħ ÙĬد
+ĠÐŁ еÑĢе
+أ خر
+ãģĿãģ® å¾Į
+à¹Ģà¸Ķียว à¸ģัà¸Ļ
+×ŀ ×Ĵ×ķ
+×ŀ×Ĵ×ķ ×ķף
+д ов
+mas ına
+×¢ ׳×Ķ
+ãĤ± ãĥĥãĥĪ
+ס ע
+סע ×Ļ×£
+ĠT ư
+Ġt óc
+íĻľ ëıĻ
+ĠÐŀ д
+ĠÐŀд нако
+Ġdol ayı
+ؤ Ùĥد
+ê³Ħ íļį
+׾ ר
+в еÑĩ
+Ġkh ợi
+Ġth á»§y
+×ĵ ף
+ร à¸ģ
+à¸ļั à¸ķร
+à¹Ģà¸ģ à¹Īา
+ĠاÙĦØ« اÙĦ
+ĠاÙĦثاÙĦ Ø«
+Ġpod rá
+ער ×Ļ
+ÙĨج اØŃ
+Ġkh ắc
+ì¸ ¡
+İ M
+ãĤ» ãĥĥãĥĪ
+ż enia
+Ġ׾×Ĺ ×ijר
+er Ãł
+ì ´Ī
+Ġkü ç
+Ġküç ük
+ات ÙĩÙħ
+à¸ĭ à¹Į
+Ùħشار ÙĥØ©
+ĠاÙĦ بط
+Ġd ây
+ен нÑĭм
+à¸Ĺีà¹Ī à¹Ħมà¹Ī
+ÙĤ Ùİ
+Ġv ượt
+Ġtr ì
+Ġwp ÅĤyw
+A Åŀ
+з о
+ĠاÙĦس ÙĬد
+à¸Ĺะ à¹Ģล
+ĠÑģодеÑĢж а
+ع Ø·ÙĬ
+ĠاÙĦع ÙĨ
+èĢħ ãģĮ
+à¹Ģ หà¸Ļ
+à¹Ģหà¸Ļ ืà¸Ń
+Ġb ÃŃ
+Ġüzer inden
+ĠV Å©
+Ġnu ôi
+ÙĨ Ùħ
+алÑĮ ного
+×¢ ×Ļף
+ØŃ ضر
+ĠоÑĤ дел
+ëª ĩ
+ìķ ¡
+ĠÙĦدÙĬ Ùĩ
+ìĻ ľ
+Ġse ktör
+Ġвозмож но
+ĠÐĶ Ð¶
+Ġh ô
+äºĭ ãģĮ
+иÑĢов ание
+алÑĮ ной
+Ġ미 êµŃ
+ر ØŃÙĦ
+ĠÑįк Ñģ
+пÑĢав лÑı
+Ġnh á»Ŀ
+ĠÄij ẩ
+ĠÄijẩ y
+Ùģ Ùĥر
+ĠÙĪØ£ ضاÙģ
+ãĥIJ ãĤ¹
+ת×ķ׼ ׳×Ļת
+ÑĤел ей
+ĠØ¥ÙĦÙĬ Ùĩ
+ãģ¨è¨Ģ ãģ£ãģ¦
+Ġдв е
+Ġch ấp
+ĠL ö
+à¸Ħล ิ
+à¸Ħลิ à¸Ľ
+Ġس ÙĪØ±
+ĠسÙĪØ± ÙĬا
+×ŀ×Ĺ ×ķ
+st ä
+д об
+Ġni á»ĩm
+ãģ® å¤§
+פר×ķ ×Ļ×§
+פר×ķ×Ļ×§ ×ĺ
+ĠCh âu
+Ġ×ŀ×Ķ ×Ŀ
+Ñģк им
+ĠполÑĥÑĩ иÑĤÑĮ
+ÙĬ ÙĪÙħ
+Ø« ÙĪØ±
+פ×ķ׾ ×Ļ×ĺ
+פ×ķ׾×Ļ×ĺ ×Ļ
+ĠмеÑģÑı ÑĨ
+åħ¨ ãģ¦
+ĠاÙĦÙħ جÙĦس
+ĠاÙĦت اÙĦÙĬ
+Ġ׊ר
+åIJij ãģij
+׼ ×ŀ×Ķ
+б ед
+أ عض
+أعض اء
+ÙĪÙĦ د
+วà¹Īา à¸Īะ
+Ġb ánh
+à¸Ļิ ย
+à¸Ļิย ม
+à¸Ľà¸£à¸° à¸ģัà¸Ļ
+ÑģÑĤав иÑĤÑĮ
+à¸ŀ à¸Ļัà¸Ļ
+ĠÑį ÑĦÑĦ
+ĠÑįÑĦÑĦ екÑĤив
+Ġав ÑĤоÑĢ
+ĠÄIJ Äĥng
+Ġth Æ°á»Łng
+ãĤĴ æĦŁãģĺ
+à¸ģัà¸ļ à¸ģาร
+å¾Į ãģ«
+Ġya ÄŁ
+ست اÙĨ
+Ġli á»ģn
+ãģĦ ãģ¾
+i êu
+à¹Ĥà¸Ķ à¸Ļ
+ĠÙĦ ذÙĦÙĥ
+à¹Ĥรà¸ĩ à¹Ģรียà¸Ļ
+צ ×Ļ×Ĵ
+ĠاÙĦÙħ عÙĦÙĪÙħات
+ç§ģ ãģŁãģ¡
+à¸Ĺีà¹Ī à¸Ħุà¸ĵ
+ãģ«ãģª ãģ£ãģ¦ãģĦãĤĭ
+×ŀ×ĵ ×Ļ׳×Ķ
+ס ׼×Ŀ
+Ġв не
+à¸ŀ à¸Ļัà¸ģà¸ĩาà¸Ļ
+ÑĢ ÐµÐ¹
+à¹Ģà¸Īà¹īา หà¸Ļà¹īาà¸Ĺีà¹Ī
+ĠHi á»ĩn
+Ġméd ico
+ĠتØŃ ÙĤÙĬÙĤ
+ÑĮ ÑĤе
+miÅŁ ti
+ÙĤÙĬ ادة
+ãĤı ãģĭãĤĬ
+มา à¸Īาà¸ģ
+ëħ Ģ
+ãģ«éĸ¢ ãģĻãĤĭ
+×IJר×Ĵ ×ķף
+m ètre
+Ġעצ ×ŀ×Ļ
+ĠCh úa
+รูà¹ī à¸Ī
+รูà¹īà¸Ī ัà¸ģ
+ì£ Ħ
+ëĭ µ
+à¹ģà¸Ĺ à¹ī
+Ġgeç en
+Ġlan ça
+ĠاÙĦ بØŃØ«
+×ĵ ×ŀ×ķ
+ãģ¯ ãģĺ
+ãģ¯ãģĺ ãĤģ
+Ġdön Ã¼ÅŁ
+è¿ij ãģı
+à¹Ģส ม
+à¹Ģสม à¸Ń
+ëĿ ½
+Ġü ç
+á» ŀ
+ÑĪ Ð°Ñı
+à¸Ĺ ร
+ØŃ ÙĤÙĬÙĤØ©
+à¸Ĥà¸Ńà¸ĩ à¸ģาร
+Ġ무 ìĹĩ
+Ġ×Ķ ×Ľ×¨
+ĠاÙĦص ÙĬÙĨ
+ĠлÑİ Ð´Ð¸
+à¸ķ าย
+ب ÙĪÙĦ
+Ġvi êm
+Ġthi á»ĩu
+à¸ģ à¸Ķ
+Ġ׾ ×ĵ×ijר
+פ ׳×Ķ
+×IJר ×ij×¢
+س Ùī
+ĠاÙĦسÙĬ اس
+ĠاÙĦسÙĬاس ÙĬØ©
+yd ı
+ÙĪØŃØ¯ Ø©
+ĠдеÑıÑĤелÑĮ ноÑģÑĤи
+Ġ×ķ×Ķ ×ŀ
+п еÑĩ
+пеÑĩ аÑĤ
+иÑĢов аниÑı
+ĠÑģ ог
+ĠÑģог лаÑģ
+Ġ׼ ×ĵ
+Ġ׼×ĵ ×IJ×Ļ
+ĠиÑģполÑĮзов аÑĤÑĮ
+ס פ×ķר×ĺ
+Ġil çe
+exp érience
+ĠTh á»Ŀi
+İ K
+à¹Ħà¸Ł à¸Łà¹īา
+ëĵ¤ ìĹIJê²Į
+à¸Ľà¸£à¸° à¹Ģà¸ł
+à¸Ľà¸£à¸°à¹Ģà¸ł à¸Ĺ
+Ġmü mk
+Ġmümk ün
+Ġ×IJ×ķת ׳×ķ
+ìĦ± ìĿĦ
+ĠìĿ´ ìľł
+زÙĬ ارة
+Ġolduk ça
+r ób
+ĠØ£ ÙĨا
+Ġ×Ķ ×ij×Ļ
+Ñģ ен
+×¢ ×Ļקר
+×Ļ×ĵ ×ķ×¢
+d zÄħ
+Ùħ عÙĦÙĪÙħات
+ش اب
+Ġpar ça
+à¸Ļะ à¸Ħะ
+ب اس
+ĠÑĤоÑĢ Ð³
+ĠÑĤоÑĢг ов
+Ġ×Ĺ ×ĵר
+׼ ר×ĺ
+׼ר×ĺ ×Ļס
+ĠA yrıca
+ÃªÌ £
+ìľ ¨
+ĠÑĤак ие
+Ġ×ŀצ ×ķ×Ļ
+ãĥ©ãĥ³ ãĤŃãĥ³ãĤ°
+ש×Ļ×ķ ×ķ×§
+åīį ãģ®
+ĠB ảo
+Ñī Ñĥ
+æĹ© ãģı
+ĠPh òng
+à¸ŀระ ราà¸Ĭ
+פ ×Ĺ×ķת
+Ġг л
+Ġгл аз
+à¸Ĺ à¹Īา
+Ġd ạy
+ÑĢ Ð¾ÑģÑĤ
+à¹Ĥà¸Ķย à¹Ģà¸īà¸ŀาะ
+Ġqu áºŃn
+Ġ×Ĺ×ijר ×ķת
+m ême
+mÄ±ÅŁ tı
+ĠاÙĦت داÙĪÙĦ
+Ġn ạn
+Ġ×Ķ ×ĵ×Ļ
+ĠاÙĦØ· رÙĬÙĤ
+×Ĵ ×ķת
+Ġ×Ķ ×ĵר×ļ
+ujÄħ ce
+Ġch ữ
+ãĤĤãģ® ãģ®
+ë° Ľ
+ãģķãĤĵ ãģ¯
+Ġyard ım
+ĠاÙĦع Ùħ
+Ġì§Ħ íĸī
+Ġ×Ļ ×Ĺ
+Ġ×Ļ×Ĺ ×¡×Ļ
+ĠاÙĦÙħ دÙĬÙĨØ©
+Ġc ú
+à¸ģี ฬ
+à¸ģีฬ า
+Ġni ên
+mis ión
+׳×Ļס ×Ļ
+׳×Ļס×Ļ ×ķף
+Ġвоз ÑĢаÑģÑĤ
+Ġ×¢×ķש ×Ķ
+ĠÙħ دÙĬر
+Ñı ÑģÑĮ
+ØŃ جÙħ
+íĻĺ ê²½
+ĠاÙĦØ£ خرÙī
+u ÃŁer
+ĠاÙĦعاÙĦÙħ ÙĬØ©
+ĠNg á»įc
+êµIJ íļĮ
+ä¸Ĭ ãģ§
+×Ļ×Ķ ×ķ×ĵ
+×Ļ×Ķ×ķ×ĵ ×Ļ×Ŀ
+Ùħس اعدة
+Ġжиз нÑĮ
+ĠпоÑĤ омÑĥ
+ĠاÙĦÙħ ÙħÙĦ
+ĠاÙĦÙħÙħÙĦ ÙĥØ©
+ĠG ör
+ر ÙIJ
+×ŀ×§ ×ķ×ŀ×ķת
+åĩºæĿ¥ ãĤĭ
+ÑĦ ÑĤ
+ĠìĿ´ ìłľ
+ĠÑĢ ÐµÐ¼
+ĠÑĢем онÑĤ
+ת ×ķ×ļ
+æĻĤ ãģ¯
+ãĤīãĤĮ ãģªãģĦ
+alt ı
+å®¶ ãģ®
+ĠاÙĦØ¥ عÙĦاÙħ
+리 ëĬĶ
+ãģĭãĤī ãģ¯
+ĠH ạ
+ãģĤ ãģ®
+×ĵ×Ļ ×ķף
+رÙĬ س
+Ġsoci etÃł
+ĠاÙĦÙĥ بÙĬر
+Ġ×ij ×ŀס
+Ġ×ij×ŀס ×Ĵר
+Ġ×ij×ŀס×Ĵר ת
+ĠìŀĪ ìľ¼ë©°
+Ġn ặng
+Ùĩ Ùī
+ĠB Ãł
+×ŀר ×ķ
+Ġj ÄĻ
+ĠjÄĻ zy
+ĠjÄĻzy k
+Ġ׼ ×ŀ×ķ×ijף
+×¢ ׾×Ķ
+à¸Ĺีà¹Ī à¹Ħà¸Ķà¹ī
+ãģ¾ ãģĹãĤĩãģĨ
+×ŀס פר
+Т Ðŀ
+سÙĬاس Ø©
+Ġкажд Ñĭй
+ë² ł
+t ım
+y á»ĩn
+ร ีà¹Ī
+ĠдеÑĤ Ñģк
+วิà¸ĺี à¸ģาร
+m ówi
+×ĺ×¢ ×Ŀ
+×Ķצ׾ ×Ĺ×Ķ
+ض ÙĬÙģ
+ĠÑħоÑĤ Ñı
+ãĤĵãģ§ ãģĦãĤĭ
+à¸Ħา à¸Ķ
+à¸Ħร à¸ļ
+Ġк ÑĥÑĢÑģ
+ĠbaÅŁ arı
+×ijר ×ķ
+ÙĬع Ø©
+ĠÐĿ Ñĥ
+à¸Ħวาม à¹Ģà¸Ľà¹ĩà¸Ļ
+Ġ׾ ×ŀש׾
+Ġì¢ĭ ìĿĢ
+Ùħؤس س
+Ùħؤسس ات
+Ġpréc is
+Ġth ảo
+à¸ģà¹ĩ à¸Ħืà¸Ń
+Ġש ׼׾
+führ ung
+ãģĦ ãģ§
+à¹ģละ มี
+à¸ģà¹ĩ มี
+Ġש ש
+м ел
+Ġкни г
+ĠباÙĦ ÙĨ
+ĠباÙĦÙĨ سبة
+Ġald ı
+ÑĤ ай
+Ġ×Ĺ×ĵ ש×Ļ×Ŀ
+å®Ł ãģ¯
+ع ÙĪØ§
+ĠìĿĺ 미
+из м
+ÑĢабоÑĤ аÑĤÑĮ
+Ùģ Øµ
+Ġ×ij׳ ×ķסף
+ãģ¨ãģĹãģ¦ ãĤĤ
+à¹Ģà¸Ľà¹ĩà¸Ļ à¸Ĺีà¹Ī
+ĠÑģлед ÑĥеÑĤ
+èĢĥãģĪ ãģ¦
+Ġ׼ ×Ļ×ķ×Ŀ
+ÑģÑĤ Ñĭ
+׼׾׼ ׾×Ļ
+æµģ ãĤĮ
+ãĤĴ ãģ¤ãģij
+Ñĩ аÑĤ
+×Ļ׼ ×ķף
+×Ļר ×Ļ
+ları yla
+ãĤ¤ ãĥ¡
+ãĤ¤ãĥ¡ ãĥ¼ãĤ¸
+׳×ĸ ×§
+Ġci ò
+Ġs ın
+Ġsın ır
+à¸Ļ à¸Ħร
+к аÑĤ
+Ġl á»Ĺi
+ëŀ Į
+تÙģ Ø§Øµ
+تÙģØ§Øµ ÙĬÙĦ
+ëĨ ĵ
+ĠÙħ ض
+il miÅŁ
+بار Ùĥ
+ÐĿ Ðĺ
+Ġth ẩm
+Ġ×IJ×ķת ×ļ
+ĠпÑĢин им
+ĠпÑĢиним а
+Ġyö nt
+Ġyönt em
+Ġ×ŀ×§ ×ij׾
+Ġktó rego
+ê· Ģ
+شر Ùģ
+د اÙħ
+ãģĦãĤį ãģĦãĤį
+ĠAl ém
+Ġgör ü
+Ġgörü nt
+Ġgörünt ü
+د س
+ÑĪ ÐºÐ¸
+г ÑĢад
+Ġl ạc
+Ġs ữa
+ãĤīãĤĮ ãģ¾ãģĻ
+o Ãłi
+Ñī ен
+ãģĭ ãģªãģĦ
+Ġп оп
+Ġпоп Ñĥ
+ĠпопÑĥ лÑıÑĢ
+ĠاÙĦÙħ ÙĪÙĤع
+rä g
+ï¼ ¡
+íķ Ħ
+ãĤĴè¦ĭ ãĤĭ
+اÙħ ا
+ĠاÙĦØŃ رب
+ĠÐŁ а
+Ġ׾ ×IJתר
+Ġt á»ijc
+×ij ׾×Ķ
+ر ئÙĬس
+в Ñĥ
+ÙĬ دÙĬ
+каз ан
+Ġ׊ש×ij×ķף
+h ôtel
+×¢ ×ķ׳×Ķ
+ب ÙĨÙĬ
+×ŀ ×ķ׾
+Ġд нÑı
+éĽ£ ãģĹãģĦ
+вед ениÑı
+Ġ×ķ ×ŀת
+н апÑĢимеÑĢ
+ÙĤ ابÙĦ
+Ġrésult at
+ĠÑĢазвиÑĤ иÑı
+ر Ùij
+ìłĦ 문
+ĠاÙĦÙħ زÙĬد
+ĠìľĦ íķ´ìĦľ
+ëĨ į
+íĻ ķ
+ĠThi ết
+íĮ ¨
+malı dır
+Ġcz ÅĤ
+ĠczÅĤ owie
+ĠczÅĤowie k
+ĠÙĦ بÙĨ
+ĠÙĦبÙĨ اÙĨ
+üs ü
+ãģªãĤĵ ãģł
+Ġżyc ie
+ĠÑħоÑĢоÑĪ Ð¾
+æĸ¹ ãģ«
+ëĭ¤ ë©´
+иÑĩеÑģ каÑı
+ער ×Ļ׼
+ער×Ļ׼ ת
+ãģ¾ãģĽãĤĵ ãģ§ãģĹãģŁ
+ĠÑģоб ой
+Ġg á»Ĺ
+Ġдел аÑĤÑĮ
+da Äĩ
+аÑĢ Ð°
+róż ni
+à¹Ģล ีà¹ī
+à¹Ģลีà¹ī ย
+à¹Ģลีà¹īย à¸ĩ
+à¸Ŀ าà¸ģ
+Ġت ÙĤ
+ĠتÙĤ دÙĬ
+ĠتÙĤدÙĬ Ùħ
+หà¸Ļ ุà¹Īม
+Ġmü cade
+Ġmücade le
+ì§Ģ 를
+ãĤ¤ ãĤ¹
+ĠØ£ ساس
+jÄħce go
+ĠÅŁ eh
+н ÑĤеÑĢ
+ÑĨи Ñİ
+ï» »
+ÑİÑī его
+à¹Ĥà¸Ľà¸£ à¹ģ
+à¹Ĥà¸Ľà¸£à¹ģ à¸ģรม
+Ġmie Äĩ
+ØŃÙĥÙĪÙħ Ø©
+ãģ§ãģĹãģŁ ãģĮ
+×Ļס ×Ķ
+ãĤĤãģ® ãĤĴ
+Ġ×ŀ ×IJת
+สุà¸Ķ à¸Ĺà¹īาย
+Ġc Å©
+ÙĨ سب
+ĠпÑĢ Ð¾Ñĩ
+Ġд ней
+ĠÑįÑĤи Ñħ
+׾ ×ŀת
+нÑı Ñı
+Ñį к
+Ġì§Ģ ëĤľ
+มหา วิà¸Ĺยา
+มหาวิà¸Ĺยา ล
+มหาวิà¸Ĺยาล ัย
+d ão
+ĠMá y
+ĠêµŃ ê°Ģ
+à¸ļุ รี
+×Ĵ ×Ļ׾
+ĠÑĤÑĭ ÑģÑı
+ĠÑĤÑĭÑģÑı Ñĩ
+Ùģ Ùĥ
+ĠÐĺ Ñģ
+è¡Į ãĤıãĤĮ
+פר ×ĵ
+ãģ¤ ãģį
+à¸Ħร à¸Ńà¸ļ
+à¸Ħรà¸Ńà¸ļ à¸Ħรัว
+à¸Ĥึà¹īà¸Ļ มา
+ä»ĬæĹ¥ ãģ¯
+ĠìĤ¬ëŀĮ ìĿ´
+עצ ×ŀ×Ķ
+п оÑĢ
+ĠK ỳ
+Ġ Æ¡n
+Ġth Äĥm
+Ùģ Ø§ÙĤ
+ãģļ ãģ«
+Ġ׾ קר
+Ġ׾קר ×ķ×IJ
+اÙģ ÙĬØ©
+Ùħ ÙİØ§
+г аÑĢ
+ص ÙĦا
+صÙĦا Ø©
+Ġ×ŀ ×ĸ×Ķ
+lı ģını
+Ġ×IJ ×Ļ׳×Ķ
+к ÑĢо
+Ġng ươi
+Ġв ним
+Ġвним ание
+jÄħ cy
+ÙĢÙĢÙĢÙĢ ÙĢ
+Ñģ Ñħод
+ãģªãĤĵ ãģĭ
+×ŀ ×Ļ׾
+Ġ×Ķ×IJ ×Ĺ
+ãĤı ãģªãģĦ
+ع سÙĥر
+ĠìĦ¸ ê³Ħ
+ĠÑĩ его
+ĠÑģÑĢед ÑģÑĤва
+ĠÐł аÑģ
+ãģª ãģģ
+ÙĨ Ù쨳
+ר×Ļ ×ķף
+Ñģ Ñĥд
+ĠìĿ¸ ê°Ħ
+ĠاÙĦÙħ ÙĤبÙĦ
+ÙĨ عÙħ
+تÙĪ Ù쨱
+ש ×ij×¢
+ı lm
+ılm Ä±ÅŁ
+Ġ×ľ×ª ת
+تص Ùģ
+×Ķפ ×ķ×ļ
+à¹ĥà¸Ļ à¸Ľà¸µ
+ìĿ´ ê³ł
+Ùģ ÙĪØ²
+à¸ľà¸¥ à¸ĩาà¸Ļ
+ĠGi áo
+à¸ļà¸Ńà¸ģ วà¹Īา
+Ġd Ä±ÅŁ
+ĠdÄ±ÅŁ ında
+ì£ ½
+Ġdzie ÅĦ
+к ÑĨии
+и ÑĨе
+ãģ® ä¸Ģ
+ع ش
+пÑĢ ÐµÑģÑģ
+หà¸Ļ à¹Īà¸Ńย
+ลัà¸ģษ à¸ĵะ
+Ġpossibilit Ãł
+à¹Ħà¸Ķà¹īรัà¸ļ à¸ģาร
+หย ุà¸Ķ
+Ġphi ên
+çĶŁ ãģ¾ãĤĮ
+Ø· ÙĪÙĦ
+ÑĦ ин
+f ür
+ØŃ ÙĬاة
+íĸ ĪìĬµëĭĪëĭ¤
+׼ ׳×ķת
+à¸Ľà¸£à¸° ส
+à¸Ľà¸£à¸°à¸ª à¸ļ
+à¸Ľà¸£à¸°à¸ªà¸ļ à¸ģารà¸ĵà¹Į
+ëIJĺ ìĹĪ
+Ġkaż dy
+Ġl uyá»ĩn
+ĠоÑĢганиз аÑĨии
+å°ij ãģªãģı
+ÑģÑĤÑĢо ен
+Ġtécn ico
+×§ ×Ķ׾
+Ġ×ķ×IJ ×Ĺ
+ĠعÙĦÙĬ Ùĥ
+Ñī ение
+Ġ×Ķ ×Ļ׾×ĵ×Ļ×Ŀ
+ÙĪØ³ ائÙĦ
+Ġ×ķ ×Ķת
+تÙħ ÙĬز
+ĠÑģ казал
+Ġпол и
+Ġ×Ķ×ŀ ס
+ÙĦÙij Ùİ
+Ùħؤس سة
+Ġ×ŀ ×Ļ×ĵ
+ãģ£ ãģ¡
+ĠëĦĪ ë¬´
+à¸ŀ ี
+Ġt ặng
+Ġt ấn
+ר ש×Ŀ
+Ġméd ica
+Ġ×¢ ×ķ×ŀ
+Ġ×¢×ķ×ŀ ×ĵ
+ÑĦ оÑĢ
+Ùħر Ø©
+Ġvat anda
+Ġvatanda ÅŁ
+Ġдел о
+à¸Ļ ม
+ãģ¨ åIJĮãģĺ
+Ùģ Ùī
+Ñģ оÑĢ
+Ġ×Ķס ר×ĺ
+Ġép oca
+ìłķ ì±ħ
+ĠÑģвÑıз ан
+ض رب
+ĠÙĦ ÙĨا
+Ġuży wa
+ĠاÙĦج ÙĬØ´
+Ñİ ÑĢ
+×ijס ×ķ×£
+Ġм Ñĥ
+ĠмÑĥ зÑĭк
+bilit é
+Ġma ç
+س Ùİ
+ت ÙĦÙĥ
+ãģ ¬
+ÙĬ ÙĦا
+ÑĪ Ð»Ð°
+ÙĢÙĢ ÙĢ
+Ġод ной
+зв ан
+ĠÑģ ÑĢаз
+ĠÑģÑĢаз Ñĥ
+ÙĨ ظÙħ
+را Ùĩ
+ĠÙĦÙĩ ذا
+׼ ×ķר
+Ġ×Ķש ×ij×ķ×¢
+Ġ×Ķש ת
+ĠQu ảng
+ãĥ« ãĥ¼
+ãģĪ ãģªãģĦ
+×ĺ ×IJ
+Ġmi á»ģn
+ĠPh áºŃt
+ĠاÙĦس ÙĪÙĤ
+Ä Ĥ
+ĠاÙĦج Ùħع
+ĠاÙĦجÙħع Ø©
+ÑİÑī ей
+a ÅĤem
+عت ÙĤد
+Ø£ ÙĦÙħ
+Ñģ ке
+ĠìĿ´ íķ´
+ÙĨس Ø®
+è¨Ģ ãģĦ
+д обав
+سب ÙĤ
+×¢×ķר ר
+ÑĤи п
+ãģĿãģĵ ãģ§
+vis ión
+عÙĪØ¯ Ø©
+ë¨ ¹
+×ŀ ×ĸר×Ĺ
+ĠØ¥ ØŃ
+Ġ׾×ij ×Ļף
+Ġ׾צ ×IJת
+Ġyard ı
+Ġyardı mc
+Ġyardımc ı
+İ Z
+×§ פ×Ķ
+tr é
+liÄŁ ini
+клÑİÑĩ а
+Ġüret im
+Ġa yrı
+ĠkiÅŁ iler
+à¸Ħ à¹īà¸Ļ
+à¸Ħà¹īà¸Ļ หา
+ĠS á»±
+Ġ׼ ס
+Ġ×Ľ×¡ ×£
+ĠÑĤак иÑħ
+ĠXu ân
+Ġл ег
+Ġлег ко
+Ø«ÙĤ اÙ쨩
+ÐĿ Ðŀ
+ãĤ¹ãĤ¿ ãĥĥ
+ãĤ¹ãĤ¿ãĥĥ ãĥķ
+åIJĪ ãģĦ
+Ġ×Ķש ×Ļ×ŀ×ķש
+man ız
+ĠÐĴ аÑģ
+g ün
+ìľĦìĽIJ íļĮ
+Ġwsp óln
+ĠÑģв ое
+í ĥģ
+à¹Ģà¸Ļ ีย
+ÙĪØ¨ Ø©
+в Ñıз
+ı dır
+ëIJĺ ìĹĪëĭ¤
+ĠdeÄŁi ÅŁtir
+ãĤĭ ãģĵãģ¨ãģĮ
+Ġ×Ĺ×ĵ ש×Ķ
+ãĤīãĤĮ ãģ¦ãģĦãĤĭ
+×Ĺ×Ļ ×Ļ×ij
+ĠÐļ аÑĢ
+׳×Ļת ×ķ×Ĺ
+Ġ×§×ĺ ף
+ר ×ĸ
+ÙĪ Øº
+èªŃ ãģ¿
+Ġت ÙĤÙĪÙħ
+ĠÙĥ اÙĦ
+à¸Ŀ ึà¸ģ
+Ġë°ľ ìĥĿ
+ológ ico
+ر اع
+à¹ģà¸ģà¹ī à¹Ħà¸Ĥ
+ĠÑĢабоÑĤ Ñĥ
+ÙĨÙij Ùİ
+à¸Ńยูà¹Ī à¸Ĺีà¹Ī
+ĠاÙĦØ« اÙĨÙĬØ©
+ĠNh ân
+Ñħ ваÑĤ
+ö ne
+Ġع دة
+à¹ģ สà¸ĩ
+ÑĤ оп
+пÑĥÑģ ка
+شر اء
+ĠÐļ ом
+Ġפע ×ķ׾×Ķ
+ìĤ¬ ìĿ´
+ìĤ¬ìĿ´ íĬ¸
+è¡Į ãģ£ãģ¦
+Ġ×Ķ ×Ķת
+ĠÑģÑĤ оÑĢо
+ĠÑģÑĤоÑĢо нÑĭ
+در س
+à¸ĭ ู
+à¸ķà¹Ī ำ
+ĠØ£ بÙĬ
+под об
+ãģ« ãģ¦
+ار تÙģØ§Ø¹
+ĠÙħ ؤ
+ик ов
+ge führt
+มืà¸Ń à¸ĸืà¸Ń
+ĠÙĦ ÙĤد
+ĠØ£ÙĨ Ùij
+سÙĬ طر
+ãģ¾ãģļ ãģ¯
+ס ×ĵ
+Ñģк олÑĮко
+ãģ¿ãģŁãģĦ ãģª
+×ĵר ×Ĵ
+×¢ ×Ļ×ĵ
+à¹ĥหà¹ī à¸ļริà¸ģาร
+ĠÐĶ Ð¸
+×ij×¢ ×Ļ×ķת
+Ġ×Ķ×Ĺ ×ķ
+пиÑģ ÑĮ
+ĠاÙĦØ® ÙĦ
+б ав
+Ġİ lk
+ĠاÙĦØ® Ùħ
+ĠاÙĦØ®Ùħ ÙĬس
+ĠÙĬ ÙĤÙĪÙħ
+æĻĤ ãģ®
+ĠsÅĤ ow
+ĠØ£ ÙĩÙħ
+Ø®ÙĦ ÙĤ
+ĠØ£ صبØŃ
+Ġchứ a
+Ġth ác
+Ùģ Ø§ÙĦ
+Ġch á»Ŀ
+ĠاÙĦØ® ار
+ĠاÙĦخار ج
+ĠاÙĦخارج ÙĬØ©
+ط ائر
+Ġt Ãł
+ĠtÃł u
+à¸ģล à¹īà¸Ńà¸ĩ
+ĠاÙĦÙħر Ø£
+ĠاÙĦÙħرأ Ø©
+åħ¨ ãģı
+ĠÃĸ n
+çļĦ ãģ«ãģ¯
+Ġpiè ce
+×Ĵ ×Ļ×ij
+ĠاÙĦ ÙĪØ§ÙĤع
+ä»Ĭ ãģ®
+ĠاÙĦÙħ ÙĤ
+cz nÄħ
+Ù쨹 اÙĦ
+ен ного
+ĠÑĦак ÑĤ
+ìĭł ì²Ń
+ĠÐŀ ни
+ĠاÙĦبÙĦ اد
+ов иÑĩ
+ëı Į
+ÑĦ ÑĥнкÑĨи
+Ġìĸ´ ëĬIJ
+ãĥķãĤ© ãĥ¼
+d ÃŃ
+ил оÑģÑĮ
+Ùħ Ùī
+ĠاÙĦØ£ÙħرÙĬ Ùĥ
+ĠاÙĦØ£ÙħرÙĬÙĥ ÙĬØ©
+×ĺ ×Ļפ×ķ׾
+íĶĦ ë¡ľê·¸
+íĶĦë¡ľê·¸ ëŀ¨
+Ġש ×ķ׳×ķת
+Ø´ ÙħÙĦ
+ĠпаÑĢ Ð°
+Ġ×Ķ×Ĺ ×ķ×§
+ÙĪØ² ارة
+ãģ¨ ãģĻãĤĭ
+Ġqu ảng
+ĠaÄŁ ır
+ĠاÙĦÙĦ ج
+ĠاÙĦÙĦج ÙĨØ©
+ê¸ ´
+ĠT ân
+ج ÙħÙĦ
+д ол
+à¹ģà¸ŀ à¸Ĺย
+à¹ģà¸ŀà¸Ĺย à¹Į
+Ġר×IJ ש×Ļ
+Ñī ей
+Ġçev re
+Ġкомп лекÑģ
+Ġ×ij ×ŀש×ļ
+Ġalt ın
+ĠØ£ عÙħاÙĦ
+ĠÑģво его
+ãĤĪ ãģĦ
+×Ĺ׾ ×Ļ×ĺ
+×ŀ׳ ×¢
+Ġר ×ij×Ķ
+ĠØ£ÙĬضا Ùĭ
+×ĸ ׾
+ĠاÙĦسÙĬ اسÙĬ
+æĢĿ ãģĨ
+קר ק
+קרק ע
+ĠاÙĦÙģ Ø±ÙĬÙĤ
+б иÑĤ
+×§ ׳×Ķ
+ĠØ¥ ÙĨÙĩ
+ĠÐĴ ам
+Ðł Ðŀ
+ãĥĪ ãĥª
+å¿ħè¦ģ ãģª
+Ġch âu
+ç¶ļ ãģij
+Ġçöz üm
+gÅĤ ow
+ع ÙĤÙĦ
+売 ãĤĭ
+i ết
+à¸Ĭิ à¹īà¸Ļ
+ĠØŃÙĤ ÙĪÙĤ
+Ø·ÙĦ ع
+ĠÄij en
+ĠÙĥ اÙ쨩
+ãģ® ãģĶ
+Ġë ¬
+Ġë¬ ¼
+Ġ물 ë¡ł
+Ġرس ÙĪÙĦ
+з ам
+зам ен
+Ġkullan ıcı
+×¢ ×ķ׾
+èī² ãĢħ
+ÑĪи ÑĢ
+Ġ׊ש
+Ġwy gl
+Ġwygl Äħda
+ש ×Ļ×ŀ×ķש
+å¿ĺ ãĤĮ
+×¢ ×Ļצ×ķ×ij
+ĠاÙĦس ÙĪØ±ÙĬ
+å°ij ãģªãģĦ
+Ġпо иÑģк
+สำ à¸Ļัà¸ģà¸ĩาà¸Ļ
+Ġ×ŀצ ×ĵ
+Ġmü ÅŁ
+ĠmÃ¼ÅŁ ter
+ĠmÃ¼ÅŁter i
+ĠÙħÙĨ ÙĩÙħ
+à¸ķำ à¹ģ
+à¸ķำà¹ģ หà¸Ļ
+à¸ķำà¹ģหà¸Ļ à¹Īà¸ĩ
+ÅĽ mie
+Ġש ×ł×ª
+Ġ×Ķ ×¤×Ļ
+פר ש
+×¢×ijר ×Ļת
+สà¸Ļ ัà¸ļ
+สà¸Ļัà¸ļ สà¸Ļุ
+สà¸Ļัà¸ļสà¸Ļุ à¸Ļ
+è¨Ģ ãģ£ãģ¦
+à¸ģาร à¸Īัà¸Ķ
+ĠMo że
+из аÑĨии
+ứ t
+ĠÙĪØ¨ عد
+ĠdeÄŁ ild
+ĠdeÄŁild ir
+Ġת ×ŀ
+Ġ×ŀ×ŀ ׳×ķ
+話 ãĤĴ
+ĠÑĨ ена
+Ġth úc
+×Ļ×ŀ ×ķף
+ĠB áo
+ãĤĴ åıĸãĤĬ
+å®ī ãģĦ
+Ġ×¢×ķש ×Ļ×Ŀ
+èĩªåĪĨ ãģĮ
+l ée
+ãĤĭ ãģ®ãģ§
+иÑĢÑĥ еÑĤ
+ãģ¦ ãĤĭ
+ست ر
+ĠاÙĦØŃ ÙĬ
+×Ļ׾ ×ķת
+Ġ×Ĺ ×ij
+ÙĤر Ø£
+تÙħ ÙĥÙĨ
+س ائÙĦ
+prü f
+ãģĭ ãģijãģ¦
+ĠÑģоб ÑģÑĤвенно
+ĠìľĦ íķĺìŬ
+׾ ×Ļ×ĺ
+ãģĮ å¤ļãģı
+ÙĬت Ùĩا
+ç«ĭ ãģ¦
+ม à¸Ńà¸ļ
+ìĭľ ìŀ¥
+оÑĢ Ð°
+Ġs avaÅŁ
+×ĺ×Ļ×ij ×Ļ
+×ij ׳×ķ
+Ùħا ذا
+기 ê°Ħ
+ãģªãģ© ãģ§
+Ġ×ŀ ת×Ĺ×Ļ׾
+Ġnhi á»ħ
+Ġnhiá»ħ m
+ка ÑĢ
+каÑĢ ÑĤ
+Ġ׾×Ķ ×©×ª×ŀש
+׳ ×Ļ×Ĺ
+اد ÙĬØ©
+ราย à¸ĩาà¸Ļ
+Ġprzy kÅĤad
+Ñī ий
+ØŃض ÙĪØ±
+Ġh ôn
+Ã Ŀ
+ת ×ķצ×IJ×ķת
+راب ط
+Ġb ếp
+ĠполÑĥÑĩ и
+åĩºä¼ļãģĦ ç³»
+à¸Ľà¸¥ à¹Īà¸Ńย
+ĠاÙĦØ´ باب
+اÙĩ ÙĦ
+ä»Ĭ ãģ¾ãģ§
+رج ع
+ãĤ¶ ãĥ¼
+ÙĤ Ùģ
+ĠGro ÃŁ
+ĠíļĮ ìĽIJ
+اج ر
+Ġ×ij×ŀ קר×Ķ
+Ġseg urança
+fü hl
+ãģ¦ ãģĦãģı
+หม à¸Ń
+ĠкоÑĤоÑĢ Ð¾Ð¼
+ĠN Äĥm
+ĠdÅĤ ugo
+ÙħÙĨ ØŃ
+ש×ķ ×ķ×Ļ
+ĠØ£ÙĬ اÙħ
+ส à¸łà¸²à¸ŀ
+r zÄħ
+شر Ùĥات
+ãĤĴ èĢĥãģĪ
+д аÑĢ
+à¸Ľà¸£à¸° à¸Ĭุม
+Ġ×ķ×IJ ×ĸ
+i á»ĩn
+Ġt ươi
+ש ×Ļ×Ĺ
+à¸Ń à¹Īà¸Ńà¸Ļ
+æĽ¸ ãģĦãģ¦
+Ġng ữ
+×ij×Ļ×ĺ ×Ĺ
+×ij×Ļ×ĺ×Ĺ ×ķף
+Ġs ẵ
+Ġsẵ n
+ì§Ģ ëıĦ
+ĠпÑĢ ÐµÐ¿
+ĠпÑĢеп аÑĢаÑĤ
+Ġна ÑĥÑĩ
+ĠÃľ nivers
+ĠÃľnivers ites
+ĠÃľniversites i
+Ġ×Ĵ×ĵ ×ķ׾×Ķ
+Ġ×Ķ ×ł×ª
+Ġ×Ķ×ł×ª ×ij×¢
+ãģ§ãģĤ ãģ£ãģŁ
+Ġmies iÄħ
+ĠmiesiÄħ c
+г ÑĢам
+гÑĢам м
+Ġبش Ø£ÙĨ
+ĠÑħ ÑĢ
+×§ ×Ļ×ĵ
+×§×Ļ×ĵ ×ķ×Ŀ
+Ø´ Ùĥر
+Ġ á»ķ
+Ġá»ķ n
+ãģĮãģĤ ãģ£ãģ¦
+ãģķãĤĮ ãģ¾ãģĻ
+Ġ×Ĺ ×ķ×ĵ
+Ġ×Ĺ×ķ×ĵ ש×Ļ×Ŀ
+ÙħÙĪØ§ جÙĩ
+ÙħÙĪØ§Ø¬Ùĩ Ø©
+أش خاص
+ب غ
+à¹Ģรียà¸Ļ รูà¹ī
+ãģĹãģ¦ ãģĦãģı
+Ġs ạn
+å¿ħ ãģļ
+׳ ×Ļ×Ĵ
+׳×Ļ×Ĵ ×ķ×ĵ
+باÙĦ غ
+׊ש×ŀ
+×Ĺש×ŀ ׾
+Ġnap raw
+Ġnapraw dÄĻ
+Ø´Ùĩ اد
+×IJ ×ķ×Ķ
+×IJ×ķ×Ķ ×ij
+и ÑĨÑĭ
+Ġ×Ķ ×¨×Ľ×ij
+ëŀ ij
+Ġת ×¢
+Ġ×Ķ ×Ļש
+Ġ×Ķ×Ļש ר×IJ
+Ġ×Ķ×Ļשר×IJ ׾×Ļ
+Ø£ ÙħÙĨ
+ÑİÑī аÑı
+sk ór
+LER İ
+Ġ×Ķ×IJ×Ĺר ×ķף
+×¢ ׳ק
+ĠÙĪ ÙĥÙĦ
+ãģĵãģĵ ãģ§
+Ġqu án
+liÄŁ in
+à¸ģà¸İ หมาย
+Ø· Ùħ
+Ø£ جÙĩ
+أجÙĩ زة
+ĠEr doÄŁan
+ãģ§ ãģĬ
+Ġв ÑĢа
+ĠвÑĢа Ñĩ
+ĠPh ó
+à¸Ĭั à¹Īว
+à¸Ĭัà¹Īว à¹Ĥม
+à¸Ĭัà¹Īวà¹Ĥม à¸ĩ
+Ġph úc
+×Ļפ ×ķת
+×¢×Ļ ×ķף
+Ġduż o
+ãĥģ ãĥ¼ãĥł
+ĠÙĬ Ùİ
+Ġзад аÑĩ
+Ġ×Ĵ×ij×ķ×Ķ ×Ķ
+Ġ׼ ׼׾
+лож ен
+ét at
+Ġng Äĥn
+èµ· ãģį
+ĠTi ến
+ص عب
+Ġexperi ência
+Ø® Ùħ
+à¸ģาร à¸Ĺำà¸ĩาà¸Ļ
+س ÙĬد
+ĠD á»±
+ĠкоÑĤоÑĢ Ð¾Ð³Ð¾
+lad ıģı
+Ġkh á»ķ
+Ġê³Ħ ìĨį
+Ñī ик
+สà¹Īวà¸Ļ à¸ķัว
+з оÑĢ
+ÙĨ Ùı
+Ġ à¸Ķัà¸ĩ
+Ġà¸Ķัà¸ĩ à¸Ļัà¹īà¸Ļ
+Ġc ấu
+ĠÄij á»ijc
+о ÑĦ
+ĠاÙĦØ£ عÙħاÙĦ
+ãģªãģı ãģ¦ãĤĤ
+×ķ׼ ×Ļ×Ŀ
+à¹ģ à¸Ľ
+ĠB ên
+ãĥ¯ ãĥ³
+Ġgi ám
+ĠÅŀ u
+Ġd áng
+ع ÙĦÙĬ
+à¹Ģà¸ģ ษ
+à¹Ģà¸ģษ à¸ķร
+ÙĪØ¬ ب
+н нÑĭе
+ÙĤ ضاء
+à¸Ħว à¸ļ
+à¸Ħวà¸ļ à¸Ħุ
+à¸Ħวà¸ļà¸Ħุ ม
+ãģ¤ ãģ¤
+ĠVi á»ĩc
+×ŀ×ij ×ĺ
+ש×Ļת ×ķ×£
+Ġв едÑĮ
+k aza
+kaza ÅĤ
+à¸ķำ รวà¸Ī
+ãĤ¿ ãĥ«
+Ġпов Ñĭ
+ĠповÑĭ ÑĪен
+ĠS ợ
+ĠìĦ¤ ëªħ
+ĠÃĩ ünkü
+ìĥĿ íĻľ
+Ö ¾
+ãĤĮ ãģ¦ãģĦãĤĭ
+Ġ×ij ר×IJש
+ר ×ķ×Ĵ
+Ġо ÑĦи
+ĠоÑĦи ÑĨиалÑĮн
+ĠÑĥ ÑģÑĤанов
+ĠÑĥÑģÑĤанов лен
+ĠاÙĦÙħ صر
+ĠاÙĦÙħصر ÙĬØ©
+ĠÐŁÐ¾ ÑįÑĤомÑĥ
+ÙĨ صÙģ
+ĠÙĪØ§ÙĦ ÙĨ
+Ġh Ãłi
+à¸Ħ ิ
+ĠApr ès
+ì³ IJ
+à¹Ģà¸ĭ ีย
+×ĵ ×ŀ×Ķ
+activ ité
+à¸Ħิà¸Ķ วà¹Īา
+ÑĤ ÑĢен
+à¹Ģ ฮ
+ãĥı ãĤ¤
+ãģĮ å¢ĹãģĪ
+ен наÑı
+Ġìĺ¤ ëĬĺ
+ãĥ¢ ãĥ³
+Ġкон еÑĩно
+ĠÙħÙĤ ابÙĦ
+cl é
+Ġh ü
+Ġth ẳng
+ìłģ ìĿ´
+ĠÐIJ лекÑģ
+ĠÐIJлекÑģ ан
+ĠÐIJлекÑģан дÑĢ
+ãĥŀãĥ³ ãĤ·ãĥ§ãĥ³
+ãģ²ãģ¨ ãģ¤
+ãģª ãģĬ
+à¹Ģà¸Īà¹īา à¸Ĥà¸Ńà¸ĩ
+ëĵľ 리
+ش اء
+ĠsaÄŁ lık
+ĠÅŁ imdi
+×Ļ×IJ ׾
+تأ Ø«ÙĬر
+أ سب
+أسب اب
+ĠвÑĭполн ен
+л ок
+ש ×Ļ×ij×Ķ
+Ġl ắm
+ĠTr Æ°á»Ľc
+Ġ×Ķ×¢ ׾
+리 를
+ĠÑĢ ÐµÐ¶
+ĠÑĢеж им
+int é
+inté gr
+×Ĵ ׳×Ļ
+ĠاÙĦØ´ عر
+Ġmil hões
+Ġpeque ño
+ãĤ³ ãĥ¼ãĤ¹
+×ķ׼ ×Ĺ
+à¹Ģà¸Ĭ à¹īา
+شر ÙĤ
+Ġh ương
+รัà¸IJ à¸ļาล
+à¸ģล าย
+à¸ģลาย à¹Ģà¸Ľà¹ĩà¸Ļ
+Ġпод Ñħод
+תש ×ķ×ij×Ķ
+ãģıãģª ãģ£ãģ¦
+ĠاÙĦØ£Ùħ Ùħ
+ĠH á»įc
+ĠwspóÅĤ pr
+ĠwspóÅĤpr ac
+Ñĩ Ñĥв
+ÑĩÑĥв ÑģÑĤв
+ÃŃst ico
+à¹Ģà¸ģ าะ
+ìĽ Ģ
+Ġназ ад
+ãĤĭ ãĤĪãģĨãģ«
+ĠС Ш
+ĠСШ ÐIJ
+м он
+ĠAs ÃŃ
+×ķר ×Ĵ
+полн ен
+×ŀס ׾
+×ŀ×¡×ľ ×ķ׾
+à¹Ģลืà¸Ń à¸Ķ
+à¹Ģริà¹Īม à¸ķà¹īà¸Ļ
+ĠاÙĦØ¥ Ùħ
+ĠاÙĦØ¥Ùħ ارات
+צ×Ķ ×¨
+ãĥ¡ãĥª ãĥĥãĥĪ
+ĠпоÑĤ ом
+в из
+ĠÙģ ØªØ±Ø©
+å¾Į ãģ®
+ÐĿ ÐIJ
+×ŀס ר
+ÙĬر ÙĬ
+pr é
+Ġte ÅŁek
+ĠteÅŁek kür
+Ġöd eme
+د اÙĨ
+ãģ¾ ãģĹãģ¦
+缮 ãģ«
+ĠÑĤ еÑĩение
+l ard
+lard ır
+à¹Ģรา à¸Īะ
+ס פ×Ļ
+ĠÙĪÙĥ ذÙĦÙĥ
+Ġh át
+Ġt á»Ļc
+à¸Ħุ ย
+Ġb ức
+ØŃ ÙĬÙĨ
+èģŀ ãģĦãģ¦
+Ùħؤ شر
+ĠNh ư
+Ġмен ее
+ละ à¸Ħร
+Ñģ ин
+ĠÑĢ ÐµÐº
+ĠÑĢек л
+ĠÑĢекл ам
+ĠÙģ ÙĩÙĪ
+Ġ׾ ×ĸ
+×Ļ׳ ×ķת
+ĠÅŁ art
+ÑģÑĤав ка
+Ġíı¬ íķ¨
+ãģ«è¡Į ãģı
+ï¼ Ŀ
+ĠпозволÑı еÑĤ
+Ġת×ķ׼ ׾×ķ
+ов ал
+صÙĦ Ø©
+Ġ׾ש ׳×ķת
+ĠÐĺ гÑĢ
+ÙħÙĨتج ات
+Ġsat Ä±ÅŁ
+Ñģ ко
+ĠاÙĦØ«ÙĦاث اء
+Ġ×Ķ×ĵ×ijר ×Ļ×Ŀ
+ãģĹãģ¾ ãģĹãĤĩãģĨ
+بÙĤ Ùī
+åĬĽ ãĤĴ
+ĠÃĩ ok
+ãĥģ ãĥ¥
+à¹Ģà¸Ĭ ืà¹īà¸Ń
+ยุ à¸Ħ
+ศา ล
+Ġ×§×ķ×ĵ ×Ŀ
+×ĸר ×Ļ×Ŀ
+ãģ® åł´åIJĪ
+ĠìķĬ ìķĺ
+ãģĤãĤĬãģ¾ãģĻ ãģĮ
+×IJ שר
+è¡Į ãģı
+ãģ» ãģĭ
+æ°Ĺ ãģ«ãģªãĤĭ
+й деÑĤ
+íķĺìĺĢ ëĭ¤
+ستÙħر ار
+ĠÐŁÑĢ Ðµ
+ĠÑģ боÑĢ
+ĠìķĦ 무
+ç§ģ ãĤĤ
+ع ص
+Ġн иÑĩ
+ĠниÑĩ его
+ĠпÑĢи ем
+×§ ×ķ×ŀ
+ĠìĪĺ ëıĦ
+Ġì ¡´
+Ġì¡´ ìŀ¬
+ĠØ£ Ø«ÙĨ
+ĠأثÙĨ اء
+ĠÙĪØ§ÙĦ ØŃ
+ãģĮ ãģ§ãģįãĤĭ
+Ġת ×Ķ
+Ġת×Ķ ×Ļ×Ķ
+ר ף
+ĠÑģвÑıз и
+×Ĵ שת
+Ñģп екÑĤ
+ס ×ij×Ļ×ij
+ס×ij×Ļ×ij ×Ķ
+ĠíķĦìļĶ íķľ
+ت خصص
+Ġж ив
+Ġжив оÑĤ
+ĠMay ıs
+تع ا
+تعا ÙĪÙĨ
+ĠعÙĨ Ùĩا
+ów ki
+ĠاÙĦÙģÙĦسطÙĬÙĨ ÙĬ
+ãģłãģijãģ§ ãģªãģı
+ìĿ¸ ì§Ģ
+ĠاÙĦس ÙĪØ¯
+ĠاÙĦسÙĪØ¯ اÙĨ
+إجراء ات
+Ġkö tü
+Ġ×Ļ ×ª×¨
+×Ĵ ×Ļש×Ķ
+Ġצ ×ķר×ļ
+รà¸ĸ ย
+รà¸ĸย à¸Ļà¸ķà¹Į
+Ñħ оÑĤ
+Ðł ÐIJ
+ÙĪ Ø·ÙĨ
+Ġsay ısı
+ס ×Ĺר
+Ùħ ÙĪÙĦ
+ãĤĴæĮģ ãģ£ãģ¦
+ع اÙĨ
+Ġt á»Ļi
+ĠвÑĭ ÑĪе
+Ġt ầm
+ãĥĪ ãĥ¬
+×Ļצ ×ķ
+ม ุม
+س ÙĪØ¯
+ìłĦ ìŀIJ
+ãĤµ ãĥŃãĥ³
+ìĤ° ìĹħ
+ĠоÑģнов ан
+Ø® Ù쨶
+רצ ×Ķ
+بÙĬ ض
+×ķÖ ¹
+ס×Ļ ×Ļ×¢
+Ġש ×IJ×Ļ
+ĠاÙĦÙĤر Ø¢ÙĨ
+ĠТак же
+×ŀש ×ŀ×¢×ķת
+س ÙĩÙĦ
+Ġ×Ķ ×ł×Ķ
+ãĤĴ ãģĹãģ¦ãģĦãĤĭ
+×Ļ ×Ļס
+×Ķ ×ķ×IJ
+ĠB ÃŃ
+Ġмал о
+ĠëͰëĿ¼ ìĦľ
+Ġר ×Ĺ×ij
+ãģĮ é«ĺãģĦ
+ÙĪ Ø§Ø³
+ìĤ ¼
+׳ ×¢
+ãģ£ ãģ¡ãĤĥ
+ĠT üm
+à¸Ńีà¸ģ à¸Ķà¹īวย
+ãģĹãģ¦ ãģıãģłãģķãģĦ
+ÙĨØ´ اط
+ãĥĹ ãĥ©ãĥ³
+али ÑģÑĮ
+×ĵ ×ľ×ª
+Ġwc zeÅĽ
+ĠwczeÅĽ niej
+ĠÑįÑĤ им
+Ġthá»ĭ t
+à¸ļ ัà¸į
+à¸ļัà¸į à¸Ĭี
+ãģļ ãģ£ãģ¨
+ÑĢ Ð¸Ð½
+Ġswo jÄħ
+íķĺëĬĶ ëį°
+Ġë§Įëĵ¤ ìĸ´
+تش Ùĥ
+تشÙĥ ÙĬÙĦ
+ائ Ùĩ
+Ġ׾פ ×Ĺ×ķת
+ãĥĭ ãĥ¥
+ãĥĭãĥ¥ ãĥ¼ãĤ¹
+׼×IJ ף
+ãģ§ãģį ãģŁ
+зв он
+Ġsta ÅĤ
+×Ĺ×ijר ת×Ļ
+ĠØ£ عÙĦÙĨ
+à¹ģà¸ļà¸ļ à¸Ļีà¹ī
+بد ء
+ãĤģ ãģŁ
+Ġ×ŀש ×ŀ×¢×ķת
+Ġ×ŀש×ŀ×¢×ķת ×Ļ
+ör ü
+Ġh ạnh
+z ähl
+ĠL ý
+Ġ×ij ×Ķת
+Ġ×ij×Ķת ×IJ×Ŀ
+б аÑĢ
+ì¦ Ī
+ä»ĬåĽŀ ãģ®
+Ġy ü
+Ġyü ks
+Ġyüks el
+ãĤ½ ãĥ¼
+ãģĤ ãĤĮ
+ת ׾×ŀ×Ļ×ĵ
+ãģ¤ ãģª
+×ij ׳×Ļ×Ŀ
+Ġx ếp
+ĠмÑĥж Ñĩин
+ĠاÙĦÙĥ تاب
+׼ ×ŀ×ķת
+Ġç e
+Ġçe ÅŁ
+ĠçeÅŁ it
+ĠçeÅŁit li
+×ĵ ×Ļר×ķת
+à¸ļุ à¸į
+ĠاÙĦØ¥ ÙĦÙĥ
+ĠاÙĦØ¥ÙĦÙĥ ترÙĪ
+ĠاÙĦØ¥ÙĦÙĥترÙĪ ÙĨÙĬ
+ĠباÙĦØ¥ ض
+ĠباÙĦإض اÙ쨩
+Ġyö nel
+Ġyönel ik
+mys ÅĤ
+à¸Ķà¹īวย à¸ģาร
+à¸ģาร à¸Ĺำ
+ов Ñĭм
+Ø£ زÙħØ©
+æİ¢ ãģĹ
+íļ ¨
+Ġ×ķ×IJ ×Ŀ
+Ġnghi êm
+ÑĪ Ð¸Ð½
+ка л
+Ġcrian ças
+èĩªåĪĨ ãģ§
+Ġн ай
+Ġнай ÑĤи
+ĠS á»ij
+ĠÃ¶ÄŁrenc iler
+ãĥ¶ æľĪ
+Ñģ ан
+ĠJ á
+ĠkonuÅŁ ma
+شر ط
+ëĪ Ī
+ar rière
+ضر ÙĪØ±Ø©
+ãĥĶ ãĥ³
+ע שר
+аÑĢ ÑĮ
+جÙħ اع
+Ġdé co
+Ġ×Ļ×Ķ ×ķ×ĵ×Ļ
+à¸ŀ ลาà¸Ķ
+ĠÙĬ ÙĥÙĨ
+Ġج اÙħعة
+Ø· بÙĤ
+Ġbo ÅŁ
+×ķ ×ķ×IJ
+×ŀ×ĵ ×¢
+×§×ij×ķצ ת
+פ ×Ļר
+jÄħc ym
+ÙħØ´ ا
+Ùħشا ÙĥÙĦ
+צ פ×ķף
+إ ست
+×ŀ׼ ר
+سÙħ ع
+Ġкак ой
+ÑĤ воÑĢ
+ØŃ ج
+Ù쨱 ض
+пÑĢав лен
+Ġник ак
+Ġmi á»ĩ
+Ġmiá»ĩ ng
+ü ÃŁ
+иÑĢов ал
+׾ ×ŀ×ķת
+次 ãģ®
+ÙĦ Ø·
+à¸ķ ัà¸Ļ
+×Ķ ×ª×Ĺ×Ļ׾
+Ġfoto ÄŁ
+ĠfotoÄŁ raf
+طر ØŃ
+à¸Ńà¸Ńà¸ģ à¹Ħà¸Ľ
+Ġy ên
+Ġп ок
+Ġпок Ñĥп
+ĠпокÑĥп а
+ÑĨ Ñĥ
+Ġкомп ÑĮÑİ
+ĠкомпÑĮÑİ ÑĤеÑĢ
+ĠاÙĦÙĥ رÙĬÙħ
+تص Ùħ
+تصÙħ ÙĬÙħ
+Ġоказ а
+Ġzar ówn
+Ġzarówn o
+ëĮĢ ì¶ľ
+ãĤ»ãĥ³ ãĤ¿ãĥ¼
+Ġjako ÅĽci
+æĤ ©
+æĤ© ãģ¿
+Ø£ÙĨ ÙĪ
+Ø£ÙĨÙĪ Ø§Ø¹
+ë¹ ł
+Ġìłķ ë§IJ
+Ġk ẻ
+ĠÑģай ÑĤа
+Ġ×Ķ ×¢×¨×ij
+Ùĩ ز
+pres ión
+ĠÑģÑĤ ен
+ãģ£ãģ¦ ãĤĭ
+Ġhız lı
+Ðļ ÐIJ
+×ŀשפ ×Ĺת
+ĠÙĨ Ùĩا
+ĠÙĨÙĩا ÙĬØ©
+ãģ¾ ãģĦ
+о ÑħÑĢан
+ร à¹īà¸Ńย
+ล ึà¸ģ
+ĠÙĪØ¨ اÙĦ
+ãĤĤãģ® ãģĮ
+ר׼ ×Ļ×ij
+ãĤ¤ ãĥ¤
+س ؤ
+سؤ اÙĦ
+ĠÙĦØ£ÙĨ Ùĩ
+ĠkonuÅŁ tu
+Ðļ ÑĥпиÑĤÑĮ
+Ġש×IJת ×Ķ
+ĠÙĪØ§ÙĦ س
+Ġmożliwo ÅĽci
+Ġpró b
+ëĶ °
+ãģ© ãĤĮ
+ĠÐľ ин
+ĠоÑĢганиз м
+ãģ«å¯¾ ãģĻãĤĭ
+ĠPr é
+Ġpriv é
+ch è
+ãģĦãģŁãģł ãģį
+สà¸Ļุ à¸ģ
+ajÄħ ce
+ĠD zi
+ĠDzi ÄĻki
+ÅĤat w
+r än
+rän k
+æĿ¥ ãģŁ
+Ġ×Ķ×Ļ×Ķ ×ķ×ĵ×Ļ
+ãĤ¬ ãĥ¼
+ĠÑĢаР´
+ĠÑĢад и
+к ÑĤив
+Ø£ Ùĩد
+Ø£Ùĩد اÙģ
+ש ×IJ×Ļר
+ãģ¦ ãģĦãģªãģĦ
+Ġfr üh
+Ġок ол
+Ġокол о
+Ġreg ião
+ĠÑĩиÑģ ле
+Ġpon iew
+Ġponiew aż
+ìĦ¼ íĦ°
+Ġb ầu
+Ġê ·
+Ġê· ľ
+Ġê·ľ ìłķ
+ĠH òa
+ĠÑĤ оÑĤ
+ãĤĤ å¤ļãģĦ
+ĠاÙĦإسÙĦاÙħ ÙĬØ©
+ãģĭ ãģĦ
+Ñį н
+ĠÑĥказ ан
+ĠÑĤак ое
+ï¼ ³
+ëĮĢ íķĻ
+Ġgen iÅŁ
+ĠاÙĦØ® ÙĬ
+ĠاÙĦØ®ÙĬ ارات
+ãĤĴè¡Į ãģĨ
+ש ×ŀ×Ķ
+ĠLÃł m
+ÙĪÙĨ ÙĬ
+Ġ×IJ ׾×Ļ×ķ
+Ä ĺ
+à¹Ħมà¹Ī สามารà¸ĸ
+人 ãģ¨
+بر ز
+×Ļס ×ķ×ĵ
+×Ĵ ׾×Ļ
+ĠÙĬ ÙĨا
+ĠÙĬÙĨا ÙĬر
+ĠкаÑĢÑĤ ин
+Ġt ôn
+à¹Ģ à¸ģร
+à¸Ħ à¸Ķี
+Ġ׾×IJ ×ķר×ļ
+ãĤĤãĤī ãģĨ
+ãģĭ ãģĭãĤĭ
+ани и
+Ġara ÅŁtırma
+ÙĦاØŃ ظ
+ãģĦ ãĤĦ
+ĠT Ãłi
+Ġ à¸Ļà¸Ńà¸ģà¸Īาà¸ģ
+Ġà¸Ļà¸Ńà¸ģà¸Īาà¸ģ à¸Ļีà¹ī
+ĠÄIJ ảng
+ãģ£ãģ¦ ãģįãģŁ
+Ġà¸ĭึà¹Īà¸ĩ à¹Ģà¸Ľà¹ĩà¸Ļ
+Ġt ả
+Ġmożliwo ÅĽÄĩ
+ĠS ản
+Ġİ ki
+Ġc ắt
+س Ø£ÙĦ
+Ġbak ım
+ش ب
+à¸ķ ีà¹ī
+à¸ŀ ยาย
+à¸ŀยาย าม
+สั à¸Ľ
+à¸ªà¸±à¸Ľ à¸Ķา
+à¸ªà¸±à¸Ľà¸Ķา หà¹Į
+ë° Ģ
+еÑĢ Ñĭ
+Ġc ánh
+Ġthu ế
+ت بع
+ãģ«åħ¥ ãĤĮ
+Ñİ ÑģÑĮ
+íļĮ ìĿĺ
+ç°¡ åį
+ç°¡åį ĺ
+ç°¡åįĺ ãģ«
+Ġtr úc
+ĠاÙĦÙĥ ÙĪÙĬ
+ĠاÙĦÙĥÙĪÙĬ ت
+ãĤıãģij ãģ§ãģĻ
+ĠÑģв об
+ĠÑģвоб од
+ĠÑĥÑĩаÑģÑĤ ник
+สิ à¹īà¸Ļ
+ĠпÑĢо ÑĦеÑģÑģиона
+ĠпÑĢоÑĦеÑģÑģиона лÑĮн
+Ñģп оÑĢ
+×Ĺ ×ķ×ij×Ķ
+Ùħع ÙĨÙī
+ĠاÙĦÙģ ØªØ±Ø©
+สูà¸ĩ สุà¸Ķ
+ãĤı ãģļ
+ĠÄij è
+ĠÄijè n
+æ¯Ķ ãģ¹
+า à¸ĺิ
+Ġmoż emy
+à¹ģ à¸ĭ
+à¸Īะ à¹Ħมà¹Ī
+Ġs ắp
+Ðļ Ðŀ
+Ġprá ctica
+ÙĪÙĥ اÙĦØ©
+è¾¼ ãĤĵãģ§
+ológ ica
+Ġе Ñī
+ĠеÑī Ñij
+تع دÙĬÙĦ
+ĠØ£ Ùĥد
+Ġצר ×Ļ׼
+Ġצר×Ļ׼ ×Ļ×Ŀ
+Ø« Ùħ
+Ġк ÑĢÑĥ
+ĠкÑĢÑĥ п
+×ij×Ļ×§ ×ķרת
+Ġì¡° ê¸Ī
+ãģ¨ãģį ãģ¯
+Ġb ạc
+ĠÑĢаÑģ пол
+ĠÑĢаÑģпол ож
+ĠÑĢаÑģполож ен
+ز ÙĬÙĨ
+ĠÐļ ÑĢоме
+ĠاÙĦÙĨ ظر
+×Ķ ×ķ×ĵ
+ĠاÙĦس بت
+ã썿ĢĿ ãģĦ
+Ġpa ÅĦst
+ĠpaÅĦst w
+ĠÙĦÙĬ ست
+ĠбÑĥд Ñĥ
+à¸Ĺัà¸Ļ à¸Ĺี
+ร าม
+ØŃ صÙĪÙĦ
+ãģĹãģ¦ãģıãĤĮ ãĤĭ
+ĠاÙĦØ¥ سرائÙĬÙĦ
+ĠاÙĦإسرائÙĬÙĦ ÙĬ
+ãģĵãĤĮ ãģ¾ãģ§
+ìĤ¬ 를
+Ġs ürü
+à¹Ģว à¸Ńรà¹Į
+à¹Ģà¸ĭ à¸Ńรà¹Į
+Ġutilis é
+ĠÑģиÑģÑĤем а
+Ġdw ó
+Ġdwó ch
+Ġpróp rio
+Ġëĵ± ìĿĦ
+arr êt
+ĠЧ а
+×IJ×ŀ ׳×ķת
+عار ض
+à¹Ģà¸ģม สà¹Į
+Ġ׾×Ķ ×ij×Ļף
+Ġ׾ ×ij×Ĺ
+Ġ׾×ij×Ĺ ×ķר
+สา à¸Ĥา
+ĠÐľÐ¾Ñģк ве
+ب عد
+ĠاÙĦÙĤر ار
+ĠÄIJ á»ĭa
+Ġ×Ĺ ×Ĵ
+Ùģ ØªØ±
+ÙĪÙĨ Ø©
+Ġ×Ķ×ĸ ×IJת
+å¸Ĥ ãģ®
+ãģ» ãģĹãģĦ
+Ġ×ij×¢ ×Ļר
+ĠÑĤеп еÑĢÑĮ
+ìĬµ ëĭĪê¹Į
+à¹Ħม à¹Īว
+à¹Ħมà¹Īว à¹Īา
+à¹Ħมà¹Īวà¹Īา à¸Īะ
+×ŀ ×IJ×Ķ
+æĥħ åł±
+æĥħåł± ãĤĴ
+غ ÙĨ
+Ġпо Ñı
+ĠпоÑı ви
+éģİ ãģĶ
+تش غ
+تشغ ÙĬÙĦ
+в ел
+Ġ×Ĺ ×ŀ
+ãģ¨ãģªãĤĬ ãģ¾ãģĻ
+Ġra ÄŁ
+ĠraÄŁ men
+ãģĭ ãģ©ãģĨ
+ãģĭãģ©ãģĨ ãģĭ
+ен ко
+ì§Ģ ê³ł
+Ġ×IJ׾ ×Ļ×Ķ
+ĠØ£ ÙĦ
+à¸Īำ หà¸Ļ
+à¸Īำหà¸Ļ à¹Īาย
+nız ı
+Ġ׾ק ×Ĺת
+Ø£ ÙĩÙħ
+Ø£ÙĩÙħ ÙĬØ©
+ت غÙĬر
+ש ×Ĺר
+ס×ķפ ר
+×ĵ ×Ļר
+èī¯ ãģĭãģ£ãģŁ
+×ŀ׾×Ĺ ×ŀ×Ķ
+ÑģÑĤв ие
+ÑĤ ÑĢаÑĤ
+ĠاÙĦØ£ Ø®
+ĠاÙĦأخ ÙĬرة
+ĠاÙĦØŃ صÙĪÙĦ
+Ġcréd ito
+צ ×Ļ×¢
+ãĥ¬ ãĥĻãĥ«
+بر ÙĬ
+ëIJ IJ
+ãģł ãģ£ãģ¦
+Ġreal tÃł
+س Ù쨱
+×ķ׳ ×ķ
+×Ĵ ×ķ×ĵ
+×Ĵ×ķ×ĵ ׾
+ฮ า
+ãģĹãģ¦ ãģĬãĤĬãģ¾ãģĻ
+Ġg Ãł
+Ġ׾×ij צע
+å¼ķ è¶ĬãģĹ
+Ġ×ŀ ×Ļ׾×Ļ
+Ġ×ŀ×Ļ׾×Ļ ×ķף
+Ùħ در
+Ùħدر سة
+פ ×ķ×ĺ
+à¸Ļà¹īำ มัà¸Ļ
+ëģ Ŀ
+ع Ùĥس
+ĠÙĤ ض
+ĠÑĢÑĭ б
+خط ط
+×ŀ×ķס ×ĵ
+Ġ׼׾ ׾×Ļ
+ĠкоÑĤоÑĢ Ð¾Ðµ
+צ×Ļ ×ķף
+ĠмеÑģÑĤ а
+ãģĭ ãģ¤
+г ÑĢÑĥпп
+׾ ×Ļ׾
+ת ×ķ×IJר
+ë³µ ì§Ģ
+à¹ģà¸ľ à¹Īà¸Ļ
+Ġ×ij×¢ ת
+æĻĤéĸĵ ãĤĴ
+ï¼ £
+ãģ¨ãģĦãģĨãģĵãģ¨ ãģ§
+Ġ׾×Ķ ×§
+Ġ׾ ×ĸ×Ķ
+ĠìłĢ ëĬĶ
+ĠاÙĦØ¥ رÙĩاب
+ĠìŀĪëĬĶ ëį°
+ĠÑĤ огда
+Ġ×Ķ ×¦×Ļ
+×ķ׾ ×ĺ
+Ġר פ×ķ×IJ×Ļ
+ãģĵãģ¨ ãģ§ãģĻ
+ĠÄij ÃŃch
+ØŃ ÙĬا
+Ġ×Ķ×ŀש ×Ĺ×§
+ãģľ ãģ²
+Ġ×ŀ×IJ פשר
+ãģ¿ ãģ¾ãģĹãģŁ
+ĠاÙĦØ£ÙħÙĬر ÙĥÙĬ
+Ùħج تÙħع
+Ġس اب
+Ġساب ÙĤ
+׼ ×Ļ׾
+Ạ¾
+ãĥª ãĤ¹ãĥĪ
+Ġì ĥ
+Ġìĥ Ī
+ĠìĥĪ ë¡ľ
+ĠìĥĪë¡ľ ìļ´
+ĠD á»ĭch
+à¹Ģหมาะ สม
+ĠاÙĦÙĨ بÙĬ
+׾ ׾
+ÙĨ ع
+Ðĵ лав
+Ðĵлав наÑı
+Ùħر ض
+Ġ×ķ ×ĵ
+ت ÙĤÙĬ
+تÙĤÙĬ ÙĬÙħ
+Ġb ảng
+ĠÙģ ÙĤاÙĦ
+×¢ ×ŀ×Ļ
+д ÑĢа
+Ġsu á»ijt
+سر عة
+Ġc á»Ń
+Ġ×Ķ ×Ļ×Ĺ×Ļ×ĵ
+سع ÙĬد
+à¸Ńา à¸Ĭีà¸ŀ
+Ġس ÙĪØ§Ø¡
+ãĤ½ ãĥķãĥĪ
+Ġл иÑĩно
+ĠÐļ оÑĢ
+اÙĩ تÙħ
+اÙĩتÙħ اÙħ
+à¸Ń à¸Ķี
+à¸Ńà¸Ķี à¸ķ
+ãģIJ ãĤīãģĦ
+Ġiht iya
+Ġihtiya ç
+ãģ¾ãģ§ ãģ®
+ìĭľ ìĬ¤
+ìĭľìĬ¤ íħľ
+ÑĢÑĥ ÑĪ
+ãĤĦ ãģ£ãģ±
+ãĤĦãģ£ãģ± ãĤĬ
+к еÑĢ
+Ġ ży
+Ġży w
+кл он
+Ġl ượt
+Ã ¾
+да Ñĩи
+tür k
+غ ÙĪ
+ĠигÑĢ Ð¾Ðº
+Ġph ê
+Ġש ×¢×ľ
+ĠاÙĦÙħ دÙĨÙĬ
+ĠìŬ룬 ë¶Ħ
+ער ×Ļ×Ŀ
+Ñħод ÑıÑĤ
+Ġx ứ
+ÐĹ Ð°
+ĠÙģ Ø±Øµ
+à¸Īะ à¸Ĺำà¹ĥหà¹ī
+íģ ´
+×¢ ×ij×ķר
+à¹Ģหลà¹Īา à¸Ļีà¹ī
+èĢĥãģĪ ãĤĭ
+ÑĢ ÐµÑģÑĤ
+н нÑĭй
+Ġc ầm
+دا Ø®ÙĦ
+ĠÙħÙĦÙĬ ار
+ĠÐIJ л
+ĠвÑĢем ен
+à¸Ĭà¹Īวย à¹ĥหà¹ī
+ר×Ļ ×ķת
+ëĵ ¯
+飲 ãģ¿
+׳ ׾
+שת ף
+ĠاÙĦسعÙĪØ¯ ÙĬ
+u ÃŁ
+ìĿ¸ ëį°
+ĠìĿ¼ ë°ĺ
+ÅĤ ÄĻ
+Ġm á»iji
+×ŀ ×Ļ׳
+ĠاÙĦØ£ Ø·Ù쨧ÙĦ
+Ġçı kan
+é cole
+×§ ×Ļש
+×§×Ļש ×ķר
+ĠоÑģ ÑĥÑīеÑģÑĤв
+ĠоÑģÑĥÑīеÑģÑĤв лÑı
+×ij ×IJר
+à¹Ħà¸Ľ à¸Ķà¹īวย
+Ġ×¢ ×ķ׾×Ķ
+à¸ģà¹ĩ à¹Ħมà¹Ī
+ãĥ¢ ãĥĩ
+ãĥ¢ãĥĩ ãĥ«
+تØŃ ÙĪÙĦ
+Ġод ного
+ת×Ĺ×Ļ׾ ת
+Ġت Ø®
+Ġch cia
+Ġchcia ÅĤ
+ãĥIJ ãĥ³
+èĢħ ãģ¯
+ĠÙħ ØŃÙĦ
+Ñģл ож
+Ñģлож н
+Ġt ÄĻ
+Ġçı kt
+Ġçıkt ı
+ĠC Æ¡
+à¹Ħà¸Ķà¹ī à¹Ģลย
+ır ken
+à¹Ģà¸Ĥà¹īา สูà¹Ī
+ÙħØŃ Ùĥ
+ÙħØŃÙĥ ÙħØ©
+à¸Ħุ à¹īม
+à¸Ļà¹Īา à¸Īะ
+лÑİ Ð´
+де ÑģÑı
+деÑģÑı ÑĤ
+ĠлÑİб ой
+تØŃر ÙĬر
+צע ×ĵ
+Ġе Ñij
+ĠاÙĦØŃ ÙĥÙħ
+Ġص باØŃ
+à¹Ģà¸ļ à¸Ńรà¹Į
+Ġróż nych
+ги б
+ĠÑģ оÑĤ
+ĠÑģоÑĤ ÑĢÑĥд
+ĠÑģоÑĤÑĢÑĥд ник
+ĠобÑĬ ем
+פ ×ĺר
+ãģĻãģĶ ãģı
+ãģ«éĸ¢ ãģĹãģ¦
+в ол
+Ø« ÙħاÙĨ
+Ġd ần
+æĬ ľ
+æĬľ ãģij
+Ġ×¢ ש
+Ġעש ×ķ×Ļ
+ס ×ķף
+ãģªãģ® ãģ§ãģĻ
+ãģ¯ ãģ©ãģĨ
+×ŀ×¢ ר×ij
+ï¼ °
+Ùħ صر
+ÙħÙĨ اسب
+ÙħÙĨاسب Ø©
+ä¸Ĭ ãģ®
+×IJ×Ļש ×ķר
+ĠìĦ¤ ì¹ĺ
+×ŀ×ĵ×Ļ׳ ×ķת
+×ŀר ת
+ãĤĭ ãģ®ãģĮ
+د Ùİ
+ĠاÙĦشر Ùĥات
+ìĭľ ê°Ħ
+ĠÑĢеÑĪ ÐµÐ½Ð¸Ðµ
+ãģĻãĤĭ ãģ®ãģ¯
+ĠìŀIJìĭł ìĿĺ
+׾ ×ŀ×ķ
+ãģ¨ãģĵãĤį ãģ§
+Ġ×§ צר
+Ġmã i
+Ġkü ltür
+ãĥ©ãĤ¤ ãĥĸ
+à¸ľà¸¹à¹ī หà¸įิà¸ĩ
+æĻĤéĸĵ ãģĮ
+клÑİÑĩ и
+diÄŁ iniz
+มาà¸ģ à¹Ĩ
+تØŃ ÙħÙĦ
+Ġh ạt
+ãĤ¦ ãĤ£
+п ле
+×ŀ ׾×IJ
+ÅĤ ó
+Ġg á»ijc
+Ġ×IJ ×ķ×ĵ×ķת
+หว าà¸Ļ
+ĠاÙĦ ÙĪØ²
+ĠاÙĦÙĪØ² راء
+ëĵ¤ ê³¼
+Ġص ØŃ
+ĠصØŃ ÙĬÙ쨩
+Ġм м
+تد Ø®ÙĦ
+Ġpersön lich
+Ġز ÙĬ
+ĠزÙĬ ادة
+ãĤ· ãĤ¢
+Ġng ắn
+à¸Ħล ิà¸ģ
+Ġs ông
+Ġtü ket
+Ñį ÑĦÑĦ
+ÑįÑĦÑĦ екÑĤ
+ש ×Ļ×ij
+Ġا عت
+ت ض
+تض ÙħÙĨ
+ĠاÙĦÙħØ´ رÙĪØ¹
+Ġprodu ção
+ĠпÑĢимен Ñı
+ни ÑĨÑĭ
+주 ëĬĶ
+ر Ùı
+Ġm Æ¡
+Ġhayat ı
+ëŁ ½
+Ġü cret
+Ġyan ında
+Ġpr ática
+×ij×Ļ×§ ×ķר
+Ãľ N
+Ñģ оÑĤ
+ãĤıãģij ãģ§
+Ġдол го
+ת ׼×ķ
+ĠìķĦ ëĭĮ
+ë į°ìĿ´
+Ġç iz
+Ġcho Äĩ
+Ġ×Ķ ×Ļת
+Ġ×Ķ×Ļת ר
+Ġso át
+׼ ×ij×ĵ
+à¹Ģล à¹Īา
+Ġд еÑĢ
+ĠдеÑĢ ÐµÐ²
+ãĤĴ åħ¥ãĤĮ
+×Ĺ ×ķס
+×Ĺ×ķס ר
+ج ÙĬÙĨ
+t ón
+onn é
+Ġпол ноÑģÑĤÑĮÑİ
+人 ãģŁãģ¡
+Ġpr êt
+ëł ¸
+Ġdéc embre
+cı lar
+Ġת ת
+Ġê²½ìļ° ìĹIJëĬĶ
+ÙĪ Ø¹Ø¯
+è¦ĭ ãĤĭ
+วิ à¸Īัย
+ë ¶Ī
+ز ÙĪØ§
+زÙĪØ§ ج
+d ì
+ãģ§ãģĻ ãĤĪ
+Ġвод о
+ĠÙĬ ÙĪØ¬Ø¯
+Ñģ оÑģÑĤоÑı
+Ðŀ С
+ĠÄIJ ó
+׊פש
+Ġצ ×Ļ×ij×ķר
+ĠاÙĦÙĤ Ø·
+ĠاÙĦÙĤØ· اع
+Ġиме ÑİÑĤ
+Ġph áºŃn
+×Ľ×¡ פ×Ļ
+полн иÑĤелÑĮ
+éĻIJ ãĤĬ
+ĠÑģ ÑĢав
+ĠÑģÑĢав н
+ÙħاÙĦ Ùĥ
+×ĵר ×ķ×Ŀ
+çļĨ ãģķãĤĵ
+ØŃÙĤ ÙĤ
+à¹ģหล à¹Īà¸ĩ
+ĠاÙĦر سÙħÙĬ
+оÑĩ ки
+×ĺ ×ij×Ĺ
+Ġcan lı
+Ġ׾ ׾
+Ġ׾׾ ×ŀ×ķ×ĵ
+×ŀ×ij ×ķ
+ת ׼
+×ª×Ľ ׳×Ļת
+ĠاÙĦÙħ شار
+ĠاÙĦÙħشار ÙĥØ©
+İ Åŀ
+ĠسÙĬ اسÙĬ
+в олÑĮ
+ĠÑģ пÑĢав
+æĿ¥ ãģ¦
+פ×ķר ×ķ×Ŀ
+สำ à¹Ģรà¹ĩ
+สำà¹Ģรà¹ĩ à¸Ī
+ĠÅŁ öyle
+Ġzosta ÅĤa
+ĠH ü
+ר ×ķש
+د ÙĦÙĬÙĦ
+ÑĢи д
+ש ף
+×ŀ×§ ×ķר
+ĠÑĥ Ñĩ
+ĠÑĥÑĩ еб
+ĠÑį ÑĤа
+ков а
+à¸ķà¸Ļ à¹Ģà¸Ńà¸ĩ
+ÙĨ ÙIJ
+à¸Ńีà¸ģ à¸Ħรัà¹īà¸ĩ
+ระ à¸ļุ
+Ġd ữ
+ĠاÙĦØŃ اÙĦÙĬ
+׼ ×ķ׼
+׼×ķ׼ ×ij
+Ġ×ŀ×IJ שר
+Ġtr ụ
+ÑĤел ем
+Ġв ли
+Ġвли Ñı
+Ġש×IJת ×Ŀ
+Ġuw ag
+Ġuwag ÄĻ
+×ĺ ×Ļת
+×IJ ×ĵ×Ŀ
+à¸Ķ ุ
+Ġ×Ķ×IJ ׾×Ķ
+Ġkar Ä±ÅŁ
+ĠÄIJ á»iji
+да ÑİÑĤ
+ãģªãģ® ãģ«
+Äħ cych
+à¹Ģà¸Ļ à¹īà¸Ļ
+ãģĹãģ¦ ãģĹãģ¾ãģĨ
+int érieur
+ĠfÃŃs ica
+ĠÐŁ ол
+ãģĹãģ ķ
+à¸Ĺำ à¹Ħม
+ĠL âm
+ĠاÙĦÙħ سÙĦÙħ
+ĠاÙĦÙħسÙĦÙħ ÙĬÙĨ
+ص ØŃØ©
+ìĹ Ħ
+à¹Ģà¸Ķà¹ĩ à¸Ķ
+ĠÑĥ ÑĩеÑĤ
+â Ìģ
+Ġب ÙĦا
+ĠاÙĦاجتÙħاع ÙĬ
+פרס ×Ŀ
+ãĥķ ãĥ©
+ĠÐļ огда
+mie ÅĽci
+ĠبÙĬÙĨ Ùħا
+Ġ×ŀ×IJ ×ŀר×Ļ×Ŀ
+Ġ×ij×IJ ×ĸ×ķר
+×ķש ×Ļ×Ŀ
+ĠÑģдел а
+entr ée
+à¹Ģ à¸Ħà¹īา
+Ñĥг л
+ĠاÙĦÙģ ÙĨÙĬ
+ĠÐĴ оÑĤ
+à¸Ĺีà¹Ī มา
+×ķצ ×Ĵ
+ÙĤد رة
+Ġëª ©
+Ġ목 ìłģ
+íıī ê°Ģ
+ĠاÙĦØ£ ربع
+ĠاÙĦأربع اء
+פס ×Ļ×§
+ĠÑıвлÑı ÑİÑĤÑģÑı
+ب ÙĪÙĨ
+ì° ¾
+×ŀ×¢ ר׼
+×ŀ×¢×¨×Ľ ×ķת
+ãĤ· ãĤ§
+ĠباÙĦ Ø£
+íĸĪ ëįĺ
+ĠاÙĦبر ÙĨاÙħج
+ĠاÙĦØ£ ØŃد
+Ġm Å©
+ĠmÅ© i
+п аÑĤ
+ب ث
+ĠÑĨ енÑĭ
+Ġ×ijת ׾
+è¨Ģ ãĤıãĤĮ
+ĠاÙĦÙħ جاÙĦ
+ĠìĦ¸ ìĥģ
+Ġ×Ĵ ×ķפ
+ĠнаÑĪ ÐµÐ¹
+Ġкомп аниÑı
+б ин
+öl ü
+×Ļ ×Ļ×ĺ
+Ġ×ŀס פ×Ļ×§
+ยัà¸ĩ à¸Ħà¸ĩ
+ĠЧ и
+Ġан ÑĤи
+ĠÑģÑĢед и
+สà¹Īวà¸Ļ à¹ĥหà¸įà¹Ī
+оÑĩ ка
+íĬ¹ ë³Ħ
+ว à¹Īาà¸ĩ
+гоÑĢ Ð¾Ð´
+با Ùĥ
+à¹Ģส ีà¹Īย
+à¹Ģสีà¹Īย à¸ĩ
+ãĤĤãĤī ãģĦ
+×§ ×ķ×Ŀ
+ãģĽ ãģļ
+ĠاÙĦÙĤ اÙĩرة
+Ġ×ij ׼×ļ
+Ùħشار ÙĬع
+باØŃ Ø«
+Ġпо Ñĩ
+ĠпоÑĩ ÑĤи
+ĠÑĦоÑĢм а
+S İ
+Ġ×ŀצ ×Ļ×¢
+ล ื
+ลื ม
+ĠÑĤ еÑĢ
+ĠÑĤеÑĢ ÑĢиÑĤоÑĢ
+ĠÑĤеÑĢÑĢиÑĤоÑĢ Ð¸Ð¸
+Ġв меÑģÑĤ
+ĠвмеÑģÑĤ е
+dıkl arı
+op ération
+à¹Ĥ ห
+ص دÙĬ
+صدÙĬ ÙĤ
+íĸī ìłķ
+تج ا
+تجا ÙĪØ²
+Ġsu ç
+Ġar ty
+Ġarty ku
+Ġartyku ÅĤ
+ãĤ·ãĥ§ ãĥĥãĥĹ
+ש פ
+שפ ×Ļ×¢
+Ġ×Ķש ×Ļר×ķת
+à¹ģà¸ĸ ม
+ë¸ Ķ
+Ġuk ÅĤad
+Ġ×ķ ׼×Ļ
+หล าà¸ģ
+หลาà¸ģ หลาย
+æĸ¹ ãĤĤ
+Ġpodr óż
+ĠE ÄŁer
+Ġком наÑĤ
+ĠÑģам ÑĭÑħ
+Ġв кÑĥÑģ
+б еж
+Ġ×ij ×§×ķ
+æİĽ ãģij
+ãģ¿ ãĤĭãģ¨
+ĠiliÅŁ kin
+ĠÙĬ عÙħÙĦ
+Ġпод аÑĢ
+Ġyaz ılı
+ãĤĴ å¾Ĺ
+Ġwyst ÄĻp
+à¸Ĺีà¹Ī à¹ĥà¸Ĭà¹ī
+ØŃاد Ø«
+ÙĪ ÙĬد
+кÑĥ лÑĮÑĤ
+кÑĥлÑĮÑĤ ÑĥÑĢ
+à¸ģาร à¹ģà¸Ĥà¹Īà¸ĩ
+à¸ģารà¹ģà¸Ĥà¹Īà¸ĩ à¸Ĥ
+à¸ģารà¹ģà¸Ĥà¹Īà¸ĩà¸Ĥ ัà¸Ļ
+ÙħÙĪ Ø¸
+ÙħÙĪØ¸ Ùģ
+ÙĬÙħ ÙĬ
+ãĤĵãģ§ãģĻ ãģĮ
+diÄŁ im
+diÄŁim iz
+ĠÐŁ еÑĢ
+ĠÐŁÐµÑĢ Ð²
+Ġm ão
+ĠÑģ ез
+ĠÑģез он
+Ġ×Ķ×ŀ ×¢
+Ùħ جÙħÙĪØ¹Ø©
+ĠинÑĦоÑĢм аÑĨии
+i ếc
+ã ng
+ĠÄij ấy
+ãģĶ ç´
+ãģĶç´ ¹
+ãģĶç´¹ ä»ĭ
+Ġad ım
+à¹Ħ หล
+Ġп ÑĢакÑĤи
+ĠпÑĢакÑĤи Ñĩ
+ĠпÑĢакÑĤиÑĩ еÑģ
+ĠпÑĢакÑĤиÑĩеÑģ ки
+ĠاÙĦÙĨ Ù쨳
+ĠÑĢабоÑĤ е
+ÙĦÙĬ Ùģ
+ĠاÙĦجÙĨ ÙĪØ¨
+Ġвод Ñĭ
+ì¹ Ļ
+Ġм иÑĢа
+ĠÄij ừng
+ĠпÑĢоÑĤив о
+ĠÑģÑĤÑĢан Ñĭ
+ล ู
+ìĤ ¶
+kre ÅĽl
+Ġbul und
+Ġbulund uÄŁu
+à¹ģ สà¸Ļ
+ãĤ± ãĤ¢
+ת×Ĺ ×ķ×ŀ×Ļ
+ר׼ ×Ķ
+Ġ׾ק ×ķ×Ĺ
+Ġ׾ק×ķ×Ĺ ×ķת
+Ġ×Ľ×ª ×ķ×ijת
+ĠÙĦ ÙĥÙħ
+ب شر
+Ġr Ãłng
+Ġ×ŀ×Ķ ×ŀ
+Ġ×IJ×Ĺר ×ķת
+Ġб он
+Ġбон ÑĥÑģ
+ï½ Ĺ
+à¹ģ ยà¸ģ
+ãģĤãģªãģŁ ãģ®
+ĠÑĥÑĩаÑģÑĤ ие
+ĠE yl
+ĠEyl ül
+ĠçalÄ±ÅŁmalar ı
+خ طر
+ìĿ ½
+à¸ģาร à¹ĥà¸Ĭà¹īà¸ĩาà¸Ļ
+Ġана лиз
+תק ×ij׾
+ни ем
+Ġİ ns
+Ġİns an
+ĠبÙĪ Ø§Ø³
+ĠبÙĪØ§Ø³ طة
+Ġ׳ ×Ľ×ł×¡
+Ġ×Ķ×ŀ ×Ļ×ĵ×¢
+Ġç o
+Ġço ÄŁu
+á» ĺ
+ĠêµŃ 민
+ãĤĤ ãģĦãģĦ
+Ġ׼ ׾×Ļ
+ĠÑģÑĢед не
+g ÅĤo
+gÅĤo ÅĽ
+Ġneg ó
+Ġnegó cio
+ĠÑĢ ÐµÐ³Ð¸ÑģÑĤ
+ĠÑĢегиÑģÑĤ ÑĢа
+ĠÑĢегиÑģÑĤÑĢа ÑĨии
+Ġtr á»ĵng
+ĠпÑĢ Ñı
+ĠпÑĢÑı мо
+ëłĪ ìĿ´
+Ġk ém
+к ле
+à¸Ļำ มา
+ĠÑĦ ин
+ĠÑĦин анÑģ
+ĠÑĦинанÑģ ов
+Ġki á»ĩm
+ยัà¸ĩ à¹Ħ
+ยัà¸ĩà¹Ħ à¸ĩ
+ย ิà¸ĩ
+à¹Ĥ à¸Ľ
+ĠполÑĥÑĩ ил
+×Ļ×ĸ ×Ŀ
+à¹ģละ à¸Ħวาม
+Ġво обÑīе
+ص ÙĬر
+ãĥı ãĥ³
+ĠاÙĦÙĤ اد
+ĠاÙĦÙĤاد Ùħ
+Ġب دÙĪÙĨ
+ع ظÙħ
+ת ׳×ķ×¢
+×ª×ł×ķ×¢ ×Ķ
+Ø£ ÙħÙĦ
+ãģķ ãģĪ
+ÑĤ ем
+ÑĤем пеÑĢ
+ÑĤемпеÑĢ Ð°ÑĤÑĥÑĢ
+Ġ׾ ×Ļצ×ķר
+Ġr ÄĻk
+ر سÙĦ
+ìŀIJ 를
+Ġ×Ļצ ×Ļרת
+ÙĨ بÙĬ
+Ñĩ наÑı
+تØŃ ÙĦÙĬÙĦ
+Ġм ик
+Ġмик ÑĢо
+ĠS öz
+Ġfor ça
+Ñģ он
+ĠاÙĦع را
+ĠاÙĦعرا ÙĤÙĬ
+ĠH á»ĵng
+ãģĻãĤĭ ãģŁãĤģãģ«
+à¸Ĺีà¹Ī à¸Ńยูà¹Ī
+Ġ×ķ×IJ ×£
+ص ÙĬد
+ĠìķĬ ê³ł
+ร ัà¸ĩ
+ĠاÙĦت ÙĪØ§ØµÙĦ
+à¹Ģม à¸ķร
+Ñĥ ÑģÑĤÑĢой
+ÑĥÑģÑĤÑĢой ÑģÑĤв
+m ıyor
+Ġبا سÙħ
+Ġ×ķ ׼×ķ
+ĠG ül
+á» IJ
+Ãī tat
+غ اÙĦ
+Ø¥ ÙĨØ´
+Ø¥ÙĨØ´ اء
+T İ
+à¸Ĥà¹īา ม
+Ġtro ch
+Ġtroch ÄĻ
+إ ص
+إص ابة
+ĠØ« اÙĨÙĬ
+ĠاÙĦص ØŃØ©
+Ġ×ĸ×Ķ ×ķ
+jÄħ cej
+ãĥĢ ãĥ³
+ìĿ¸ ìĿ´
+Ġв олоÑģ
+ëIJĺ ë©´
+Ġzak ÅĤad
+ãģĻ ãģĵãģ¨
+以ä¸Ĭ ãģ®
+Ġ×Ķ×ŀ×§ ×ķ×Ŀ
+ÙħØ´ اÙĩ
+ÙħشاÙĩ دة
+Ñĩ ив
+ب ش
+ย à¹īาย
+Ġsür dür
+ĠN ẵ
+ĠNẵ ng
+ĠигÑĢ Ð°ÑĤÑĮ
+Ġê·¸ëŁ¬ ë©´
+ãĥķ ãĥ«
+ล à¹Īะ
+Ġtend rá
+Ġb Ãły
+à¹Ģà¸Ľà¹ĩà¸Ļ à¸ľà¸¹à¹ī
+Ġok o
+Ġoko ÅĤo
+w ÅĤa
+wÅĤa ÅĽci
+wÅĤaÅĽci w
+æĢĿ ãĤı
+ĠYa ÅŁ
+ĠB á»ĩnh
+íı Ń
+بÙĬ د
+קר ף
+à¹Ģศ ร
+à¹Ģศร ษ
+à¹Ģศรษ à¸IJ
+à¹Ģศรษà¸IJ à¸ģิà¸Ī
+ĠاÙĦØ£ ÙĪØ±ÙĪ
+ĠاÙĦØ£ÙĪØ±ÙĪ Ø¨ÙĬ
+fl äche
+ä¹Ĺ ãĤĬ
+Ġb á»ģn
+Ùĩ ب
+æľĢ ãĤĤ
+Ġsa ç
+à¸Ńำ à¹Ģà¸ł
+à¸Ńำà¹Ģà¸ł à¸Ń
+ĠØ£ ج
+ĠاÙĦد اخÙĦ
+ĠاÙĦداخÙĦ ÙĬØ©
+×ĺ ×ķ×ij
+ãĤĤ ãģªãģı
+Ġли ÑĨа
+à¹ģลà¹īว à¸ģà¹ĩ
+×ĸ׼ ×Ļר
+Ġqu Ãł
+ĠÙĥ ذÙĦÙĥ
+صØŃ Ùģ
+ĠÃĤ u
+ÙĪØ¨ ا
+à¹Ģà¸Ľà¸¥à¸µà¹Īยà¸Ļ à¹ģà¸Ľà¸¥
+à¹Ģà¸Ľà¸¥à¸µà¹Īยà¸Ļà¹ģà¸Ľà¸¥ à¸ĩ
+à¸ķัว à¸Ńยà¹Īาà¸ĩ
+Ġráp ida
+Ġtas ar
+Ġtasar ım
+ĠعÙĦÙĬ ÙĩÙħ
+ס ×ķ׾
+c ılı
+cılı k
+Ġر غÙħ
+ìĭľ íĤ¤
+Ġ×IJ׾ ×§
+Ġ×IJ׾ק ×ĺר
+Ġ×IJ׾ק×ĺר ×ķ׳×Ļ
+à¹ģà¸ļ à¹Īà¸ĩ
+Ġh ạng
+ãģ£ãģ¦ ãģıãĤĮ
+ĠÙĨ تÙĬ
+ĠÙĨتÙĬ جة
+ıkl ı
+غ اÙĨ
+à¸Ĥà¹īà¸Ń à¸Ħวาม
+à¸Ľà¸¥ าย
+ĠØ£ Ùħس
+à¸Ĺีà¹Ī à¹Ģà¸ģีà¹Īยว
+à¸Ĺีà¹Īà¹Ģà¸ģีà¹Īยว à¸Ĥ
+à¸Ĺีà¹Īà¹Ģà¸ģีà¹Īยวà¸Ĥ à¹īà¸Ńà¸ĩ
+Ġdé fin
+Ġdéfin i
+ÙģÙĨ اد
+ÙģÙĨاد ÙĤ
+à¹Ħà¸Ķà¹ī วà¹Īา
+ãģªãģĦ ãĤĪãģĨãģ«
+Ġpróp ria
+ĠPh át
+ãĤĦãģĻ ãģı
+สวย à¸ĩาม
+ê³ł ìļĶ
+Ñı еÑĤ
+ãģĭãĤĤãģĹãĤĮãģ¾ãģĽãĤĵ ãģĮ
+تر جÙħ
+ĠкÑĢаÑģ ив
+Ġ×ŀ ר×IJש
+д еж
+ĠÙĬ ÙĪÙĨ
+ĠÙĬÙĪÙĨ ÙĬÙĪ
+Ñģк оÑĢ
+ĠKas ım
+ê³Ħ ìķ½
+к оÑģ
+Ġна ÑĢÑĥ
+ĠнаÑĢÑĥ ÑĪен
+Ġdu że
+acc ès
+Ġh á»ĵng
+Ġv Å©
+ãģĦãģŁ ãģĹãģ¾ãģĻ
+Ġ×ĺ ×Ļ
+Ġ×ĺ×Ļ ×ķ׾
+lıkl arı
+Ġqu ê
+ëħ¸ ëıĻ
+ìķ Ķ
+CI ÃĵN
+Ġt ắc
+press ão
+ĠìŀĪ ìľ¼
+สิà¸Ĺà¸ĺิ à¹Į
+íĥ Ħ
+Ġ×Ķ×ŀ ×ŀש׾×Ķ
+å¬ī ãģĹãģĦ
+ĠÄIJ ặc
+ÙĨ زÙĦ
+ĠдÑĢÑĥг ой
+д ÑĥÑĤ
+ìĪ Ļ
+Ġth ụ
+à¹Ģส ร
+à¹Ģสร à¹ĩ
+à¹Ģสรà¹ĩ à¸Ī
+Ġto plant
+Ġtoplant ı
+×IJ×ŀ ף
+×ķ׾ ת
+п омн
+Ġyo ÄŁun
+ÅĦsk iego
+ì° ©
+ĠØ« ÙĦاث
+ĠØ«ÙĦاث Ø©
+Ġl ắng
+ë¦ ´
+ราà¸Ĭ à¸ģาร
+ĠÑģлов а
+á» Ĩ
+à¸Ķี à¸ģวà¹Īา
+ãģĶãģĸ ãģĦãģ¾ãģĻ
+Ġд из
+Ġдиз айн
+fé rence
+lıkl ar
+ãģªãĤĵ ãģ§ãģĻ
+ajÄħ cy
+Ġëĭ¤ ìĸij
+Ġëĭ¤ìĸij íķľ
+×§ ×Ļר
+ØŃ ار
+ส ูà¹ī
+Ġz ro
+Ġzro bi
+Ġzrobi Äĩ
+×ŀ ×Ļ׼×Ķ
+à¸Ĭà¹Īวย à¹Ģหลืà¸Ń
+ĠÑįÑĤ Ñĥ
+ë´ ī
+楽 ãģĹãģĦ
+س ÙĪØ±
+íķĺ ê±°ëĤĺ
+Ùħؤ تÙħر
+Ġpoc zÄħ
+ĠpoczÄħ tk
+ĠpoczÄħtk u
+Ġع ربÙĬ
+اÙĦØ£ ر
+اÙĦأر دÙĨ
+à¸Ķ ร
+Åĵ uvre
+ĠÙĪÙĥ اÙĨت
+ĠÅĽ redni
+خ ضر
+Ġch uyến
+н ÑĤ
+ĠìķĮ ê³ł
+Ġv á»Ŀi
+Ġ×ij ×Ļ×ĵ×Ļ
+×ŀ×ĵ ×ķ×ijר
+ÙĪ Ù쨱
+ÙĬ Ø¡
+׳ ×Ľ×¡
+ĠÐĽ а
+л он
+Ġx ấu
+Ùģ ÙĬÙĨ
+Ġfé vrier
+ĠÐŀ на
+ĠV á»ģ
+ĠÅŁey ler
+ĠполÑĥÑĩ ен
+з ад
+Ġn ét
+à¹Ħà¸Ľ ยัà¸ĩ
+×Ĺש×ij ×ķ
+à¸ļัà¸Ļ à¸Ĺ
+à¸ļัà¸Ļà¸Ĺ ึà¸ģ
+Ġgerçek leÅŁ
+иÑĩеÑģк ое
+ìĪĺ ê°Ģ
+ث بت
+ãģ¤ ãģ¾ãĤĬ
+ĠÑĥÑģловиÑı Ñħ
+ëĭ¤ ê°Ģ
+ราย à¹Ħà¸Ķà¹ī
+׼×IJ ×ij
+à¹Ĥà¸Ľà¸£ à¹Ĥม
+à¹Ĥà¸Ľà¸£à¹Ĥม à¸Ĭัà¹Īà¸Ļ
+j ähr
+jähr ige
+×§ ׳×Ļ×Ŀ
+×ŀ ×ķ×§
+×ŀ×ķ×§ ×ĵ
+ãģ«è¡Į ãģ£ãģ¦
+Ø¢ ÙĦ
+вед ение
+Ġ׾ ×Ľ×ª×ķ×ij
+جÙħ Ùĩ
+جÙħÙĩ ÙĪØ±ÙĬØ©
+à¸ī à¸ļ
+à¸īà¸ļ ัà¸ļ
+ĠC òn
+à¸ľ สม
+ãģªãģ© ãģĮ
+×IJ×Ķ ×ij
+ĠдейÑģÑĤв иÑı
+y ız
+à¹Ħมà¹Ī à¹Ģà¸Ħย
+ج ÙĪØ²
+×Ķ×Ĺ׾×ĺ ×Ķ
+f ällt
+ãĥĵ ãĤ¸
+ãĥĵãĤ¸ ãĥį
+ãĥĵãĤ¸ãĥį ãĤ¹
+Ġ×IJ ×Ļ׳×Ŀ
+ĠнаÑħод иÑĤÑģÑı
+Ġdzi ÅĽ
+ست Ø·ÙĬع
+׾ ×Ļף
+Ø® ÙĦاÙģ
+Ùĩ ÙIJ
+Ġatr ás
+íĺ ģ
+ãĤĴ ãģĶ
+Ġ×Ķ×ŀ ×ķצר
+ĠBakan lıģı
+ÑİÑī ее
+ÙħÙĨ اط
+ÙħÙĨاط ÙĤ
+Ùģ Ø¯
+à¸Ļำ à¹Ħà¸Ľ
+Ġв аж
+Ġваж но
+Ġm ạch
+׼ ׳×ķ
+بع ث
+lan ması
+Ġa yr
+Ġayr ıl
+ìĤ¬ íļĮ
+d ÃŃa
+p ÅĤyw
+اÙħ ÙĬØ©
+íĺ ľ
+×IJ׳ ×Ĵ׾
+×IJ׳×Ĵ׾ ×Ļת
+ĠìŀĪëĭ¤ ëĬĶ
+Ġس اعة
+ĠëĤĺ íĥĢ
+b ö
+à¸Ħ ัà¸Ļ
+ĠdziaÅĤ ania
+Ø© Ùĭ
+Ġng Å©
+׳צ ×Ĺ
+ãģ¯ ãģĤãĤĭ
+ĠyaÅŁ ında
+st ück
+car acter
+caracter ÃŃsticas
+Ġr á»Ńa
+ĠÙħختÙĦÙģ Ø©
+ãģ«ãģĬ ãģijãĤĭ
+à¹ģà¸ŀ à¸ĩ
+วิ à¹Īà¸ĩ
+ת פ×ķ
+سا ÙĩÙħ
+使 ãģĨ
+Ùĥ رÙĬ
+×IJ פ×Ļ
+........ .......
+ĠÑĤак им
+×Ļ׼ ×ķ×Ļ
+Ø´ بÙĩ
+ج ÙĬر
+ãģĿãģ® ãģ¾ãģ¾
+ac jÄĻ
+ĠاÙĦت رÙĥ
+ĠاÙĦترÙĥ ÙĬ
+ĠпÑĢав илÑĮно
+Ġت عÙħÙĦ
+à¸ģล à¹īา
+Ġbi ên
+Ġ×ij׳×Ļ ×Ļת
+Ġкл Ñĥб
+Ġ×ŀ ש×Ķ
+в ÑĪий
+ãģĵãģ¨ãģĮãģ§ãģį ãĤĭ
+à¸ŀัà¸Ļà¸ĺ ุ
+à¸ŀัà¸Ļà¸ĺุ à¹Į
+ר ×ķ×Ŀ
+ĠاÙĦÙģ Ø±ÙĨ
+ĠاÙĦÙ쨱ÙĨ سÙĬ
+à¹Ģà¸Ľà¹ĩà¸Ļ à¸Ħà¸Ļ
+ãģĹãģ¦ ãģĬãĤĬ
+Ġth ầy
+ãĤĵ ãģłãģijãģ©
+ìĶ ¨
+Ùħ دÙĨ
+ت ÙĪÙĨ
+ĠмеÑĤ ал
+ĠмеÑĤал л
+Ġin ÃŃcio
+à¸Ńà¸Ńà¸ģ à¸Īาà¸ģ
+ëĴ ¤
+Ġcu á»ijn
+Ġbu á»Ļc
+ÙĨ سÙĬ
+ä cht
+×ŀ ×Ļ׳×Ļ×Ŀ
+ãģķ ãģ¦
+ãģĮ ãģ§ãģį
+ÑĬ ем
+Ġtá i
+ĠЧ ÑĤ
+ĠЧÑĤ обÑĭ
+à¸Ľà¸¥ ูà¸ģ
+à¸Ĭุม à¸Ĭà¸Ļ
+н Ñģкий
+Ġv ững
+Ġ×Ķ ×ľ×ij
+ë le
+Ġש ×¢×ijר
+в аÑĤÑĮÑģÑı
+б ой
+ع ÙĪÙĨ
+à¹ģà¸Ķ à¸Ļ
+Ġספר ×Ļ×Ŀ
+Ġt uyên
+Ġnhi êu
+ĠQu ý
+Ġh uyết
+ãĤı ãģĭãĤīãģªãģĦ
+Ġ×ŀ ׼ף
+Ġ×Ķ ×§×ľ
+Ġ׾×IJ ×ķר
+ĠÄIJi á»ĩn
+ش ؤ
+شؤ ÙĪÙĨ
+Ġ×ŀ׊פש
+ĠпоÑģÑĤоÑıн но
+×ŀ ×Ļר
+ìħ Ķ
+Ðŀ Ñģ
+ÐŀÑģ нов
+×ĸ ×Ļת
+ĠH á
+ĠÑĩаÑģ ов
+×IJ ×ķ׾×Ļ
+Ġm át
+Ø® رÙĪ
+خرÙĪ Ø¬
+ÙĤ ضا
+ÙĤضا ÙĬا
+à¹Ģà¸Ľ à¸Ńรà¹Į
+ĠÙĬ ÙĪÙĦ
+ĠÙĬÙĪÙĦ ÙĬÙĪ
+à¹Ĥà¸Ĺ ษ
+׳ פ׾
+ת ×ķש
+ת×ķש ×ij×Ļ
+Ġv ários
+×ŀ ר×IJ×Ķ
+ëĿ¼ ìĿ´
+ÙĨ غ
+×ij צע
+г он
+ĠÄIJ ược
+ع Ùı
+пÑĥÑģ к
+ĠÙĪØ§ÙĦ Ùģ
+üc ü
+×Ļ×§ ×Ļ×Ŀ
+Ġس بÙĬÙĦ
+׾×ij ף
+ĠاÙĦÙĤ رÙĨ
+ס ×ķת
+ĠQu áºŃn
+ãģĵãĤĮ ãģĮ
+ãĥĸ ãĥ©ãĥ³ãĥī
+×Ĵ ×ŀר
+Ġwarto ÅĽci
+ĠÙĪØ¨ ÙĬÙĨ
+Ġd ạ
+ÐIJ в
+ÐIJв ÑĤо
+Ġol acaktır
+à¸Ļ à¸Ĺà¹Į
+Ùħ طار
+Ġ×¢ ×§×ij
+Ġת פ
+ãģĹãģ¦ ãģĦãģ¦
+צ ×ŀ×Ĺ
+à¸Ī à¸Ńà¸ĩ
+Ġö de
+ìį ¨
+ÙĨ اس
+調 ãģ¹
+ĠогÑĢ Ð¾Ð¼Ð½
+ë³´ íĹĺ
+×ĺ ×§
+×ĺ×§ ס×ĺ
+ĠbaÅŁ v
+ĠbaÅŁv uru
+Ġpom ys
+Ġpomys ÅĤ
+ãģ« ä¹Ĺ
+Ġש ׼ף
+ĠاÙĦÙħس ؤÙĪÙĦ
+Ġз ан
+Ġзан ÑıÑĤ
+Ġd ương
+ãĥĹãĥ¬ ãĤ¤
+ล à¸ļ
+ÑĤи ка
+ĠAr alık
+Ġнед о
+Ġm á»Ļ
+Ġor an
+Ġoran ı
+Ġktó r
+Ġktór Äħ
+Ġ×Ķ×IJ×Ĺר ×ķ׳×ķת
+ائ ÙĨ
+ÅĦ s
+ÅĦs ka
+åĽ½ ãģ®
+×ŀ ×ĺ×Ļ
+ĠвопÑĢоÑģ Ñĭ
+à¸Ńà¸ĩà¸Ħà¹Į à¸ģร
+×ŀ ×ķצ×IJ
+Ġpó ź
+Ġpóź niej
+ש×ŀ ×IJ׾
+Ġk aps
+Ġkaps am
+Ġkapsam ında
+Ġmá quina
+ĠÅĽwie cie
+Ġho Ãłng
+Ġöz gü
+×Ĵ×ķר ×Ŀ
+ãģĤ ãģŁãĤĬ
+à¸ķัà¸Ķ สิà¸Ļ
+à¸ķัà¸Ķสิà¸Ļ à¹ĥà¸Ī
+б ÑĢи
+ãģ«ãģªãĤĭ ãģ¨
+ت ÙĥÙĪÙĨ
+Ġ×ķ×Ķ ×Ļ×IJ
+Ġchi ếu
+ÑģÑĤан ав
+ÑģÑĤанав ли
+ÑģÑĤанавли ва
+×ŀ ×ķ×Ĵ
+c ité
+ĠK örper
+Ġש ×Ĵ×Ŀ
+ع ظ
+عظ ÙĬÙħ
+Ġ×Ķ×IJ ×Ļש×Ļ
+Ġmat ière
+ĠÙģ ÙĪÙĤ
+Ġk to
+Ġkto ÅĽ
+à¸Ļ à¹Ĥย
+à¸Ļà¹Ĥย à¸ļาย
+å¾ħ ãģ¡
+à¹Ģม à¸Ļ
+à¹Ģมà¸Ļ ู
+A ÃĩÃĥO
+Ġt ù
+Ġtù y
+ãĥĪ ãĥ³
+ĠоÑĤ каз
+Ġ×ŀ ×ķצר
+ül ü
+ãģķãĤĵ ãģ«
+Ġ×Ĺ ×ķ×ij
+קר ×Ļ×IJ×Ķ
+ĠاÙĦØ® دÙħات
+ĠÙĦÙħ دة
+ر ؤ
+رؤ ÙĬØ©
+ãĤĴè¦ĭ ãģ¤ãģij
+à¸Ł า
+Ġréuss i
+à¸Ļัà¸ģ à¹Ģรียà¸Ļ
+ĠÑĩиÑģ л
+à¸ģาร à¹Ģลà¹Īà¸Ļ
+Ġhaz ırl
+Ġhazırl an
+ĠпеÑĢв Ñĭй
+ли м
+ĠоÑĤзÑĭв Ñĭ
+Ġwy jÄħ
+ĠwyjÄħ tk
+ĠØ£ ÙĤÙĦ
+ס ×ļ
+Ġê²° ìłķ
+Ġ׾×ŀ×¢ ש×Ķ
+Ġl ắp
+à¹ģà¸ļ ร
+à¹ģà¸ļร à¸Ļà¸Ķà¹Į
+วà¹Īา à¹Ģà¸Ľà¹ĩà¸Ļ
+Ġب دا
+Ġبدا ÙĬØ©
+ãģ¨ãģĦãģĨ ãģ®ãģĮ
+иÑĩеÑģк им
+à¸ģาร à¸ŀัà¸Ĵà¸Ļา
+Ġb Ãło
+Ġmia ÅĤa
+y waÄĩ
+ĠMär z
+ĠÙĨ سبة
+Ġéconom ique
+×ĸ ×ŀ
+×ĸ×ŀ ׳×Ļ×Ŀ
+æŃ¢ ãĤģ
+Ġt á»§
+íķĺ ìĭł
+Ġkażde go
+stra ÃŁe
+à¸Ĭ ีà¹ī
+à¹Ģ à¸ļา
+ÑĢеÑģ ÑĥÑĢÑģ
+ев ой
+ش باب
+à¸ķà¹Īาà¸ĩ à¸Ľà¸£à¸°à¹Ģà¸Ĺศ
+Ġ×IJ ×Ļש
+Ġ×IJ×Ļש ×Ļת
+×Ļ ×ķפ
+×Ļ×ķפ ×Ļ
+ĠìļĶ êµ¬
+ì¡° ìĤ¬
+ãģ£ãģŁ ãĤī
+׾ ×Ļ×§
+миниÑģÑĤ ÑĢ
+ãĤĤãģ® ãģ¯
+Ġl ương
+Ġна и
+Ġнаи бол
+Ġнаибол ее
+íİ ĺ
+à¹ģà¸ŀ à¹ī
+ãĤŃ ãĥ¥
+ĠкоÑĤоÑĢ Ñĭм
+à¹ģà¸Ĺ à¸ĩ
+à¹ģà¸Ĺà¸ĩ à¸ļà¸Ńล
+Ġ׳ ×Ļ×Ķ
+Ġ׳×Ļ×Ķ ×ķ׾
+âĤ ª
+ĠGi ải
+ĠиÑģполÑĮзов а
+ëł¥ ìĿĦ
+ãģĹãģĭ ãĤĤ
+à¸ģà¹ĩ à¸ķà¹īà¸Ńà¸ĩ
+ĠÑĢ ÐµÐ±
+ĠÑĢеб ен
+ĠÑĢебен ка
+ت ÙĪØ§ØµÙĦ
+ãĤ°ãĥ« ãĥ¼ãĥĹ
+ãĤĦ ãĤī
+à¹Ģà¸Ľà¸´à¸Ķ à¸ķัว
+б ÑĢо
+ë°ĸ ìĹIJ
+ÙĨ ÙİØ§
+×Ķ ×Ĵ
+×Ķ×Ĵ ׳×Ķ
+à¸Ĺ รั
+à¸Ĺรั à¸ŀ
+à¸Ĺรัà¸ŀ ยà¹Į
+Ġkh á»iji
+עצ ×ŀ×ķ
+бол езн
+Ġë°Ľ ìķĦ
+ม à¸Ļ
+มà¸Ļ ุ
+มà¸Ļุ ษ
+มà¸Ļุษ ยà¹Į
+âĹ Ĩ
+×ŀ צ׾×Ļ×Ĺ
+Ñıв ление
+Ùħ Ø·ÙĦ
+ÙħØ·ÙĦ ÙĪØ¨
+Ø® اÙĦÙģ
+ت ÙĪÙĤÙģ
+ãģ§ãģį ãģ¾ãģĽãĤĵ
+оÑģÑĤ ей
+м еÑĩа
+기 ëĬĶ
+תש ע
+ص ÙĬب
+Ġ×ij×¢ ×ķ×ĵ
+à¸Ĥà¸Ńà¸ĩ à¹Ģà¸Ĥา
+ÑĤÑı ж
+ĠÑĥ пÑĢав
+ĠÑĥпÑĢав лениÑı
+Ġgén ér
+Ġth ÃŃ
+פ ×ļ
+Ġر Ùħض
+ĠرÙħض اÙĨ
+Ġtr uyá»ĩn
+إ عداد
+ãĤµ ãĥĿãĥ¼ãĥĪ
+Ġпол но
+Ø® اÙħ
+ÐŁ еÑĤ
+ÐŁÐµÑĤ еÑĢ
+ÐŁÐµÑĤеÑĢ Ð±ÑĥÑĢ
+ÐŁÐµÑĤеÑĢбÑĥÑĢ Ð³
+ÙħÙĨت دÙī
+ãģķãĤĮ ãģ¾ãģĹãģŁ
+ĠëĮĢ íķĺìŬ
+à¸ľà¸¹à¹ī à¸Ĺีà¹Ī
+Ġ×ŀ×IJ ×ķ
+׾ ׳×ĵ
+оÑĩ нÑĭе
+ĠнаÑĩ ала
+Ġ׾ ×Ļ׾×ĵ×Ļ×Ŀ
+ов ое
+ãģĻãĤĭãģĵãģ¨ ãģ§
+ĠاÙĦÙĨ Ùģ
+ĠاÙĦÙĨÙģ Ø·
+ìŀĪ ëĬĶ
+غ ÙĨÙĬ
+פ ×ĵ
+ãĤ ¾
+ĠCr é
+ãģ© ãģ¡ãĤī
+Ø« اÙĨ
+ÑĢаб аÑĤ
+ÑĢабаÑĤ Ñĭва
+Ġê°Ļ ëĭ¤
+à¸Ī ั
+à¸Īั à¸ģร
+Ġch ụ
+Ġchụ p
+Ġм аÑģÑĤ
+ĠмаÑģÑĤ еÑĢ
+Ġn ắm
+ĠÑģÑĤ али
+Ġ×Ķ×IJ ×Ļר×ķ×¢
+ãĤ½ ãĥ³
+åĪĨ ãģĭãĤĬ
+ط بع
+بد ا
+gr áfico
+г еÑĢ
+à¸Ķำà¹Ģà¸Ļิà¸Ļ à¸ģาร
+Ġsal dır
+Ġsaldır ı
+в ÑĪиÑħ
+ãģĭãģ£ãģŁ ãģ§ãģĻ
+Ġyapı yor
+ĠاÙĦÙģ Øª
+צר פת
+з доÑĢов
+×ij×¢ ׾
+Ġ×IJ ×ŀ×Ļת×Ļ
+Ġоб Ñĭ
+ĠобÑĭ Ñĩ
+ĠобÑĭÑĩ но
+Ġ׾ ×ķ×ŀר
+ت ÙĥÙĨ
+تÙĥÙĨ ÙĪÙĦÙĪØ¬
+تÙĥÙĨÙĪÙĦÙĪØ¬ ÙĬا
+Ġhakk ı
+ĠÑĢаР²
+ĠÑĢав но
+رÙĬ Ùĥ
+Ġ×ij ×ŀ×Ļ×ĵ
+Ġ×ij×ŀ×Ļ×ĵ ×Ķ
+à¹ģà¸ģ à¹īว
+Ġìĸ ĺ
+Ġìĸĺ 기
+ãģĹãģ¦ ãģĦãģ¾ãģĹãģŁ
+Ġkı sm
+Ġkısm ı
+ê± ¸
+åĨħ ãģ®
+ì§ ķ
+à¹Ģหมืà¸Ńà¸Ļ à¸ģัà¸Ļ
+ĠÙģ ÙIJ
+ĠÙģÙIJ ÙĬ
+ÙĤ اعدة
+Ġmoż esz
+Ùħ صاÙĦ
+ÙħصاÙĦ ØŃ
+ãģ¾ãģŁ ãģ¯
+б ег
+Ġs ıc
+Ġsıc ak
+Ñĩ иÑģ
+ÑĩиÑģ лен
+Ġн ог
+ãĥģãĥ£ ãĥ³
+ãĥ« ãĥī
+Ġgi ó
+Ġs ını
+Ġsını f
+ив аÑĤÑĮ
+Ġqu ên
+Ġì łģ
+Ġìłģ ìļ©
+ĠJo ão
+Ùģ Ø§Ø¯
+ĠGl ück
+à¸Ĺ à¸Ńà¸Ķ
+Ġg ói
+ï¼ Ĭ
+Ġdé tail
+ĠدÙĬ سÙħ
+ĠدÙĬسÙħ بر
+ë¡ľ ìĦľ
+×ŀ ×ķ×Ĺ
+à¹Ħ ฮ
+ĠоÑĤ д
+ĠоÑĤд ÑĭÑħ
+Ġkh uyến
+à¸Ħ à¸Ńย
+Ġج ÙĨÙĬ
+ĠجÙĨÙĬ Ùĩ
+ĠاÙĦد ÙģØ§Ø¹
+à¸Ļà¹īำ หà¸Ļัà¸ģ
+ĠìĤ¬ëŀĮ ëĵ¤ìĿ´
+Ġth ừa
+ĠÃ¶ÄŁrenc i
+ĠпомоÑī и
+ĠczÄĻ ÅĽÄĩ
+ש ×ĺר
+ĠN hi
+ĠNhi á»ģu
+׳ צ×Ļ
+ĠнаÑĪ ÐµÐ¼
+ĠkarÅŁÄ± laÅŁ
+Ġ×Ķש ׳×Ļ×Ŀ
+ĠÄIJ ưá»Ŀng
+Ġtr ú
+ĠÑĢазлиÑĩ нÑĭÑħ
+ĠاÙĦØ´ Ùĩر
+Ġ×ľ×¢ ×ķ׾×Ŀ
+ØŃ جر
+ĠÄij á»ķ
+ĠìĿĺ íķ´
+à¸ļ à¹Īà¸Ńย
+Ġ×Ķ ×Ļ׾×ĵ
+ãģ¨ãģª ãģ£ãģŁ
+Ġ×Ĺ×ķ ×ķת
+Ġש×Ļר×ķת ×Ļ
+Äħ cy
+س رÙĬ
+K İ
+פ ׳×ķ
+ÑģÑĤÑĢÑĥк ÑĤÑĥÑĢ
+ÑĤ ÑĢÑĥд
+Ġ×Ķ ×§×¨
+Ġ×Ķקר ×ķ×ij
+Ġth áºŃm
+èģŀ ãģį
+ÙĤÙĪ ÙĬ
+клÑİÑĩ ен
+ÑĤе Ñħ
+ÑĤеÑħ нолог
+è¡Į ãģ£ãģŁ
+Ġ×ķ×IJ ×Ļף
+ĠÅŁek lin
+ĠÅŁeklin de
+r ô
+ÑĢ Ð¾Ð³
+Ġнов Ñĭе
+Ġס ×ij×Ļ×ij
+Ġtecn ologÃŃa
+ס ׼
+×¡×Ľ ×ķ×Ŀ
+ĠÅŀ ub
+ĠÅŀub at
+Ġ×Ķ×ŀ ׾×IJ
+Ġwy pos
+Ġwypos aż
+ãģ¯ ä½ķ
+ãĤ¬ ãĥ³
+ê° ĸ
+Ġкак ие
+Ġçocuk lar
+Ġ׾צ ×ĵ
+Ġkay ıt
+ĠмеÑģÑĤ е
+Ùħ دÙĬÙĨØ©
+Ġ׼ ×Ĵ
+Ġ׼×Ĵ ×ķף
+ãģĹãģ¦ ãĤĭ
+ĠÙħا ÙĬÙĪ
+ãģ£ãģ¦ãģĹãģ¾ ãģ£ãģŁ
+ĠпÑĢогÑĢамм Ñĭ
+à¹ģล à¸Ļà¸Ķà¹Į
+ãĥ¯ ãĤ¤
+ער ×ķ×¥
+Ñģ ид
+ĠB öyle
+Ġì²ĺ ìĿĮ
+Ġת פק×Ļ×ĵ
+ĠTr ên
+íĥ Ī
+ĠÐłÐ¾ÑģÑģ ий
+ĠÐłÐ¾ÑģÑģий Ñģкой
+Ġs Ãłn
+Ġrè gle
+ĠyaklaÅŁ ık
+à¹Ģล ิà¸ģ
+Ġد ائÙħ
+Ġ×ķ ×Ĵ
+اب ر
+Ġb è
+ĠاÙĦ ÙĤدÙħ
+ĠÑĢеÑĪ ÐµÐ½Ð¸Ñı
+hi ên
+ÑĤи к
+Ä Ħ
+à¸ļรร ยาà¸ģ
+à¸ļรรยาà¸ģ าศ
+רצ ×ķף
+åĭķ ãģį
+ĠGä ste
+Ġ기 본
+ĠÙĬ عرÙģ
+ĠS á»Ń
+gÅĤ ÄĻb
+à¹Ģà¸Ń ส
+×IJ×ŀ ×Ļף
+Ġп Ñĥнк
+ĠпÑĥнк ÑĤ
+Ġ×Ļ×ķ×ĵ ×¢×Ļ×Ŀ
+ãĤ« ãĥ©ãĥ¼
+Ġ×ijס ×ĵר
+Ġbu á»ĵn
+й ÑĤ
+йÑĤ еÑģÑĮ
+ãĤĴ æ±ĤãĤģ
+Ġ×IJת ׼×Ŀ
+Ġ모 르
+ظ رÙĪÙģ
+Ñĩ еÑģÑĤво
+ìĸ´ ìĦľ
+Ġод на
+Ġkap ı
+Ġëħ¸ ëł¥
+ĠKü che
+ĠاÙĦت Ø´
+Ø· ÙĬب
+ĠíĬ¹ íŀĪ
+ĠвÑĭп ÑĥÑģ
+ĠвÑĭпÑĥÑģ к
+×ĵ ת×Ļ
+Ġu ÄŁ
+ĠuÄŁ ra
+ائ Ùĩا
+Ġtho át
+ãģª ãĤĤãģ®
+Ñij ÑĢ
+기 ê°Ģ
+ĠgeliÅŁ me
+تØŃ ÙĤ
+تØŃÙĤ ÙĤ
+Ġоп аÑģ
+б ÑĢоÑģ
+ห ุ
+หุ à¹īà¸Ļ
+ì¼ Ģ
+ãĤ¹ ãĥŀ
+ãĤ¹ãĥŀ ãĥĽ
+Ø£ Ù쨱
+Ø£Ù쨱 اد
+ĠTh á»±c
+Ġth ắ
+ãĥªãĥ³ ãĤ¯
+Ġni á»ģm
+ĠHö he
+عÙħ ار
+ÙĥÙĪØ± ÙĪÙĨ
+ÙĥÙĪØ±ÙĪÙĨ ا
+ĠÄIJ ến
+ĠÑģам ом
+ĠÑĤ еле
+ĠÄijo án
+à¸Ħวามà¸Ħิà¸Ķ à¹Ģหà¹ĩà¸Ļ
+Ġд иÑģк
+Ø£ Ø·Ù쨧ÙĦ
+ม ารà¹Į
+à¸Ĺ หาร
+à¸Ĺ à¸Ļ
+Ġب عÙĬد
+ĠاÙĦÙĩ ÙĨد
+åĩº ãģĹãģ¦
+Ġkar de
+Ġkarde ÅŁ
+×Ķ×Ļס×ĺ ×ķר
+×Ķ×Ļס×ĺ×ķר ×Ļ×Ķ
+éģ¸ ãģ³
+ع اÙħÙĦ
+à¸Ĥ ยาย
+Ġtü rl
+Ġtürl ü
+ĠìĿ¼ ìĿ´
+Ġmaté ria
+Ġ׼׾ ×ķ×ŀר
+ãĥģãĥ£ ãĥ¼
+جÙħ اعة
+ĠÑģво им
+Ø¥ÙĤ اÙħØ©
+ä¾ĭ ãģĪãģ°
+س اب
+آ خر
+ÙĤ دÙĬر
+×IJ×ŀ ×Ļ
+ìĸ »
+Ġ׳×ķס פת
+ĠÐĴ лад
+ĠÐĴлад им
+ĠÐĴладим иÑĢ
+Ġest ará
+ãģĵãģĨ ãģĦãģĨ
+ãĤĴ 使ç͍
+มา à¸ķร
+มาà¸ķร à¸IJาà¸Ļ
+ãģ£ãģ ½
+Ġn ú
+Ġnú i
+ย าà¸ĩ
+ĠاÙĦج ÙĨس
+Ġüst ün
+ëľ »
+ãĤ» ãĥ«
+ãģ¦ãģĦ ãģįãģ¾ãģĻ
+Ġ×Ĺ ×ķ×ĸ
+Ġ×Ĺ×ķ×ĸ ר
+ĠÐĵ лав
+à¹Ĥà¸Ĭ à¸Ħ
+íı IJ
+ÙĨت ظر
+Ġ×Ĵ ×ij×Ļ
+ع ÙĤب
+int ér
+intér êt
+×ŀ פ×Ĵ
+×ŀפ×Ĵ ש
+Ġth ù
+اÙģ Øª
+Ġ×ŀש פ
+Ġ×ŀשפ ×ĺ×Ļ
+ĠÙħ ÙĪØ§ÙĤع
+è¦ ļ
+è¦ļ ãģĪ
+×ĵ ×Ļף
+à¹Ģรืà¹Īà¸Ńà¸ĩ ราว
+ãģ¾ ãģĤ
+Ġgh ế
+иÑĢÑĥ ÑİÑĤ
+à¸ģ ว
+à¸ģว à¹īาà¸ĩ
+Ġпов еÑĢ
+ĠповеÑĢ Ñħ
+ĠповеÑĢÑħ ноÑģÑĤ
+׳ ×ĵר
+Ġкон ÑĨе
+Ġдолж на
+Ġ×Ļש ×Ļר
+acaģı z
+ìĹ Ķ
+Ġn ÃŃvel
+Ġö r
+Ġör nek
+Ùĥ Ùģ
+ĠФедеÑĢ Ð°ÑĨии
+Ġ구 ìĦ±
+หัว à¹ĥà¸Ī
+ĠV áºŃy
+м ед
+мед и
+меди ÑĨин
+медиÑĨин Ñģк
+از ÙĬ
+×Ĵ×ij ×ķ׾
+ÑĦ ÑĢ
+Ġzus ätzlich
+à¸ģ à¸ģ
+ĠاÙĦاÙĤتصاد ÙĬØ©
+Ġh è
+lu ÄŁun
+ج Ùİ
+à¹Ħà¸Ł ลà¹Į
+ÄIJ T
+ãģĿãģ® ä»ĸ
+à¸Ĺิ à¹īà¸ĩ
+ĠاÙĦØ£ ÙĪ
+ر سÙħ
+æ°Ĺ ãģ¥
+ìĿ´ ë©°
+ÑĮ ев
+ص ط
+ĠاÙĦاست Ø«
+ĠاÙĦاستث Ùħار
+à¸Ńา à¸Ħาร
+ĠÑĤоÑĩ но
+ĠV ân
+à¸Ń ร
+à¸Ńร à¹Īà¸Ńย
+ĠاÙĦس ÙĨØ©
+Ġc Æ°á»Ľi
+×Ļ×Ķ ×Ł
+íį ¼
+話 ãģĹ
+âĹ ĭ
+ĠìķĬ ìĿĢ
+ãĥ¡ ãĥ¼ãĤ
+ãĥ¡ãĥ¼ãĤ «
+ãĥ¡ãĥ¼ãĤ« ãĥ¼
+ĠÑĤеп ло
+å½¼ ãĤī
+Ġİ z
+Ġİz mir
+íĻ į
+Ġr ượ
+Ġrượ u
+æĢĿãģĦ åĩº
+ĠPh ạm
+Ġchá u
+צ×Ļ ×ķת
+ĠìĿ¼ 본
+ìĤ¬ ëĬĶ
+ĠÑģозд ан
+Ġar acı
+Ġ×¢ ר
+Ġער ×Ļ׼×Ķ
+ĠíķĺëĤĺëĭĺ ìĿĺ
+dzi ÅĤ
+à¸Ľà¸£à¸° à¸ĺาà¸Ļ
+Ġser ÃŃa
+ĠìŀĪ ëıĦë¡Ŀ
+در ج
+íķľëĭ¤ ëĬĶ
+à¸Ńา à¸Ĺ
+à¸Ńาà¸Ĺ ิà¸ķ
+à¸Ńาà¸Ĺิà¸ķ ยà¹Į
+ÑĤелÑĮ нÑĭй
+ĠØ® دÙħات
+×ŀ׳ ×ĺ
+Ġl ược
+ĠS Ãłi
+ĠÙĪ Ø§Ø¶
+ĠÙĪØ§Ø¶ ØŃ
+غ از
+ĠdoÄŁ al
+Ġ×ijש ×Ŀ
+Ġд лин
+ĠØ¥ طار
+Ġ×ijס פר
+ãĤĴ ä¸İ
+ãĤĴä¸İ ãģĪ
+Ġë²ķ ë¥ł
+ĠÑĥ вели
+ĠÑĥвели Ñĩи
+ส à¹Ħà¸ķ
+สà¹Ħà¸ķ ลà¹Į
+à¹Ħ à¸ģล
+×ij׊ף
+ĠìĿ´ íĽĦ
+Ġm unic
+Ġmunic ÃŃpio
+تÙħ Ø«ÙĦ
+ĠÄij áo
+H ôtel
+Ġl á»Ńa
+ĠÄij ẳng
+Ñĩ ки
+Ø´ رÙĪ
+شرÙĪ Ø·
+ĠìĿ´ 를
+ÙĬ Ùĭا
+×ŀ׾ ×ļ
+×ŀ×Ķ ×Ļר×ķת
+ĠобÑıз аÑĤелÑĮ
+ĠобÑıзаÑĤелÑĮ но
+é nergie
+Ġmud ança
+Ġm ụ
+Ġmụ n
+Ġn º
+ĠاÙĦت عا
+ĠاÙĦتعا ÙĪÙĨ
+ĠاÙĦاجتÙħاع ÙĬØ©
+Ġп лаÑģÑĤ
+Ġëĵ± ìĿĺ
+ãĥIJãĤ¤ ãĤ¯
+Ùĩج ÙĪÙħ
+ĠSa úde
+Ġì¤ijìļĶ íķľ
+Ġ×Ķצ ×Ļ×ij×ķר
+תק ף
+ĠاÙĦعاÙĦÙħ ÙĬ
+ĠболÑĮÑĪ Ð¾Ð¹
+ĠÙĥ ÙĦÙħ
+ĠÙĥÙĦÙħ Ø©
+ãģ®ãģ§ãģ¯ãģªãģĦ ãģ§ãģĹãĤĩãģĨãģĭ
+ĠÙħ باراة
+Ġש×IJ ׳
+Ġש×IJ׳ ×Ĺ׳×ķ
+ãĤ¹ãĤ¿ ãĤ¤ãĥ«
+ĠSa ÄŁ
+ĠSaÄŁ lık
+Ġh ư
+׳ ×Ĺ×Ķ
+Ġ×ij קר×ij
+Ø· عÙħ
+ห ิà¸Ļ
+à¸Ĺุà¸ģ วัà¸Ļ
+à¸Ħรัà¹īà¸ĩ à¸Ĺีà¹Ī
+ĠlÃł nh
+Ġdonn é
+ãģĽ ãģĦ
+جز ÙĬرة
+доÑĢ Ð¾Ð¶
+ì¼ ľ
+تÙĨظ ÙĬÙģ
+ãĥģ ãĥ§
+Ġald ıģı
+ج اج
+ĠÑĤ омÑĥ
+à¸Ľ ิ
+Ġ×ijר שת
+ãģıãģªãĤĬ ãģ¾ãģĻ
+ĠпÑĢин ÑĨип
+Ġ׊׾×ķ
+ëı ¼
+×ķ×Ĵ ש
+س س
+à¸Ľ ู
+Ġh ầu
+æĦŁãģĺ ãĤĭ
+ï¼ ´
+د ÙĪØ§
+ĠÑģм ог
+scri ção
+Ġth áºŃn
+Ġר ×ķ×IJ×Ķ
+обÑĢаж ен
+ĠاÙĦتج ارÙĬØ©
+Ø· بÙĬع
+jÄħc Äħ
+íĸī ìľĦ
+Ġнов Ñĭй
+Ġ×ŀ ×Ĺ×ĵש
+æĮ¯ ãĤĬ
+gu é
+Ġ×IJ ×Ļר×ķ×¢
+Ġ×IJ×Ļר×ķ×¢ ×Ļ×Ŀ
+ĠاÙĦ ذÙĩب
+×ĵ ×IJ
+ت اÙĨ
+ãģł ãģĹ
+à¸Ńั à¸ķรา
+à¹Ĥ à¸Ī
+بÙĦ اد
+×Ķ×Ļ ×Ļ׳×ķ
+ĠÑģп е
+ĠÑģпе ÑĨиалÑĮно
+ĠÅĽwi ata
+ãĤĵãģ§ãģĻ ãĤĪ
+شر ÙĥØ©
+ĠpÅĤ yt
+Ġsitu é
+Ġ׼×IJ ׾×Ķ
+ס ×ijר
+Ġkaż d
+Ġkażd ym
+ãĤĴæĮģ ãģ¤
+׾×Ķ ×ľ
+׾×Ķ׾ ף
+ĠwÅĤ as
+ĠwÅĤas ne
+ĠsaÄŁ lan
+×ŀ×¢ ׾×Ķ
+ĠاÙĦا ÙĪÙĦ
+ìĹIJìĦľ ëıĦ
+×IJ×Ļר ×ķפ×Ķ
+تÙĤ ÙĨÙĬØ©
+Ùħ ائ
+Ùħائ Ø©
+Ġcompañ ÃŃa
+Ġsü rek
+Ġsürek li
+ĠиÑģ кÑĥÑģ
+ĠиÑģкÑĥÑģ ÑģÑĤв
+ĠB ürger
+ת ×Ĺר
+ת×Ĺר ×ķת
+à¸ŀรà¹īà¸Ńม à¸ģัà¸ļ
+Ø´ Ùħ
+à¸ĸืà¸Ń วà¹Īา
+è¾¼ ãĤĢ
+ä¼ij ãģ¿
+ĠاÙĦØ£ ب
+ĠÑģÑĤоим оÑģÑĤÑĮ
+ĠпÑĢав а
+may ın
+ห วย
+ĠاÙĦØ· بÙĬعÙĬ
+à¸Ĺีà¹Ī à¸ŀัà¸ģ
+ĠEst á
+Ñĭва ÑİÑĤ
+ب سÙĬ
+بسÙĬ Ø·
+Ġ×ij×¢ ×ijר
+åı¯èĥ½ ãģ§ãģĻ
+Ġ×ĵ ×ķ׾
+Ġ×ĵ×ķ׾ ר
+Ùĩ ÙİØ§
+воÑĢ Ð¾ÑĤ
+ãģ¦ ãģĦãģ¾ãģĹãģŁ
+à¹Ĥà¸Ĺร ศ
+à¹Ĥà¸Ĺรศ ั
+à¹Ĥà¸Ĺรศั à¸ŀ
+à¹Ĥà¸Ĺรศัà¸ŀ à¸Ĺà¹Į
+Ġ×§ ׳
+ĠاÙĦØ« ÙĨ
+ĠاÙĦØ«ÙĨ ائÙĬØ©
+Ġco ût
+à¸ķิà¸Ķ à¸ķัà¹īà¸ĩ
+Ġö rg
+Ġörg üt
+ĠاÙĦØ® ÙĦÙĬ
+ĠاÙĦØ®ÙĦÙĬ ج
+Ġb á»įn
+×ķ׾×ķ×Ĵ ×Ļ
+ëŀ ľ
+ĠÐij олÑĮ
+ĠÐijолÑĮ ÑĪ
+×Ĵ ×ijר×Ļ×Ŀ
+ÙĤ ÙĬد
+×ij×Ļ×ĺ ×ķ×Ļ
+æīĵ ãģ¡
+Ġol muÅŁ
+f äh
+fäh ig
+ล าà¸Ļ
+ĠÙĤ طر
+ש פ×Ķ
+èªŃ ãĤĵãģ§
+à¸Ĥ วา
+Ġchi ếm
+ãĤ¤ãĥ³ ãĤ¿
+ãĤ¤ãĥ³ãĤ¿ ãĥ¼ãĥ
+ãĤ¤ãĥ³ãĤ¿ãĥ¼ãĥ į
+ãĤ¤ãĥ³ãĤ¿ãĥ¼ãĥį ãĥĥãĥĪ
+Ġ׾ש×ŀ ×ķר
+Ġت رÙĥ
+ĠترÙĥ ÙĬا
+ר ×ķ×ĺ
+ã썿ĢĿ ãģĦãģ¾ãģĹãģŁ
+ĠاÙĦت ÙĤ
+Ġd ư
+ãģ¦ãģıãĤĮ ãĤĭ
+ãģĹãģŁ ãģĵãģ¨
+Ġróż ne
+ĠاÙĦØ· ÙģÙĦ
+ĠPost é
+Ġ×ŀש ×ķ×Ŀ
+Ñį ÑĢ
+ĠÑĢабоÑĤ аеÑĤ
+ãĤ· ãĥª
+ãĤ·ãĥª ãĥ¼ãĤº
+Ġ×ij×Ķ ×Ĺ׾×ĺ
+×§×Ķ ×Ļ׾×Ķ
+ãĤ« ãĥ¡
+ãĤ«ãĥ¡ ãĥ©
+ï¼ ¯
+ĠìĤ¬ ìĿ´
+Ġk ì
+Ġth Æ°á»Ľc
+ض بط
+ÙĤب ÙĪÙĦ
+åĪ¥ ãģ®
+Ġparticul ière
+ĠÑģво ем
+Ġ×¢ סק
+Ġעסק ×Ļ×Ŀ
+×ij×Ĺ ×Ļר×ķת
+×ij ×Ļ׳×ķ
+à¸ĭ à¸Ń
+Ġ×¢ ×ķ×ijר
+ãģłãģ£ãģŁ ãģ®ãģ§
+ıld ıģı
+Ùħ دار
+Ùħدار س
+주 ìĭľ
+à¸Ńา ศ
+à¸Ńาศ ัย
+Ġt ấm
+à¸ŀิ à¸Ī
+à¸ŀิà¸Ī าร
+à¸ŀิà¸Īาร à¸ĵา
+ÑĤелÑĮ нÑĭе
+Ñģк ÑĥÑİ
+Ðľ Ðĺ
+à¹Ģà¸ģ า
+à¹Ģà¸ģา หล
+à¹Ģà¸ģาหล ี
+×ĵ ×Ĺ
+à¹Ģà¸Ĭ ิà¸ĩ
+Ġد ÙĤÙĬÙĤØ©
+íķĻ ìĥĿ
+Ġש×IJ ׾×Ķ
+Ġcontr ôle
+Ġsit uação
+à¸Ĥà¸Ńà¸ĩ à¸ľà¸¹à¹ī
+ÙĨ Ø·ÙĤ
+ê³¼ íķĻ
+หลาย à¸Ħà¸Ļ
+Ġn ắng
+ÙĤ Ùı
+ì¡° ê±´
+Ñ ķ
+ãĥĥ ãģ¨
+×ŀ ×Ļ׾×Ķ
+Gr ün
+×Ļ ×Ļ×¢
+×Ļ×Ļ×¢ ×ķ×¥
+×ŀ׳ ׼
+ë ŃIJ
+×ŀ×¢ ×ŀ×ĵ
+สำ à¸Ļัà¸ģ
+ج دد
+à¸Ħ ัà¸Ķ
+Ġ×Ķ×ŀש פ
+Ġ×Ķ×ŀשפ ×Ĺ×Ķ
+×ŀש ק׾
+ÙĦ Ùı
+Ġty tu
+Ġtytu ÅĤ
+ÑĪ ÐµÐ¹
+ĠìĿ¼ ë¶Ģ
+ÑĪ ÐµÐ½Ð¸Ðµ
+Ġph óng
+ĠìĹŃ ìĤ¬
+ãĤ« ãĥ³
+Ġtú i
+ĠÙĨ ÙĪÙģ
+ĠÙĨÙĪÙģ Ùħبر
+gr ün
+ĠاÙĦØ´ ÙħاÙĦ
+ÅĽwi adc
+ÅĽwiadc zenie
+ער ×Ķ
+Ġ×¢ ×ķ×ij
+Ġ×¢×ķ×ij ×ĵ×Ļ×Ŀ
+×ĵ×ķ×Ĵ ×ŀ×IJ
+ä»Ĭ ãģ¯
+Ġv ão
+ĠТ ем
+Ñģ илÑĮ
+Ġch ợ
+Ùħ را
+Ùħرا ÙĤب
+à¹Ħมà¹Ī รูà¹ī
+Ġر ائع
+×IJ׳ ×Ĺ׳×ķ
+สà¹Īà¸ĩ à¹Ģสริม
+צ ×Ĺ
+ĠìŀĪìĸ´ ìĦľ
+Ġkur ulu
+Ġkurulu ÅŁ
+ĠÃĸ zellik
+ĠÃĸzellik le
+Ġת ×Ļ×§
+Ġgh é
+Ġspr zÄĻ
+ĠsprzÄĻ t
+ער ×ķת
+را ØŃØ©
+ãģ£ ãģį
+ãģ£ãģį ãĤĬ
+ĠìķĦ ëŀĺ
+stit uição
+Ġдолж но
+×Ķ ×¨×©
+×Ķרש ×ŀ×Ķ
+×Ķ׾ ×ļ
+ãģ¡ ãģª
+ãģ¡ãģª ãģ¿
+ãģ¡ãģªãģ¿ ãģ«
+פ ×Ĺ×ĵ
+ĠاÙĦج ÙħÙĬع
+×ij×¢ ׾×Ļ
+Ġtr ùng
+Ġפ ת×Ĺ
+×ŀ׾×Ĺ ×ŀת
+ãĥĨ ãĥ¼ãĥ
+ãĥĨãĥ¼ãĥ ŀ
+Ùħ تاب
+Ùħتاب عة
+Ġ모 ìĬµ
+ÙĬ ص
+åIJĪ ãģĨ
+ĠY ap
+ĠYap ı
+ĠÑģ казаÑĤÑĮ
+ëª °
+à¸Ĺีà¹Ī สำà¸Ħัà¸į
+ĠìĹĨ ìĬµëĭĪëĭ¤
+Ġnh ắc
+Ġülk eler
+Ġмног ие
+íķĺ ìħ¨
+มาà¸ģ à¸Ĺีà¹Īสุà¸Ķ
+à¸ģ à¹īา
+à¸ģà¹īา ว
+Ġİ yi
+л еж
+леж а
+ãĤ¸ ãĥ§
+à¸Ĺั à¸ŀ
+ا ÙĪØ±
+Ġ×Ĺ×ijר ×Ļ
+Ġ׾ ש×Ŀ
+ì² «
+ĠT á»Ń
+×ŀ ×ķ׳×Ļ
+ÙĤ ÙĪØ¯
+à¸ģระ à¹Ģà¸Ľ
+à¸ģระà¹Ģà¸Ľ à¹ĭ
+à¸ģระà¹Ģà¸Ľà¹ĭ า
+ĠпÑĢоблем Ñĭ
+Ġaç ıs
+Ġaçıs ından
+Ġ×Ķ×ŀ ׼
+ĠÙħع ظÙħ
+ÙĤÙĬ اس
+ĠпÑĢод олж
+ĠпÑĢодолж а
+Ġver diÄŁi
+ĠпÑĢед меÑĤ
+ãģĦãģ¾ãģĻ ãģĮ
+ĠëͰ 른
+ĠاÙĦ ÙĤÙĬاÙħ
+ĠØ¥ÙĦÙĬ Ùĩا
+Т ÐIJ
+п оз
+ãĤ· ãĥ¥
+ä¸ĬãģĮ ãĤĬ
+à¹Ģà¸Ķิม à¸ŀัà¸Ļ
+à¸ģุ ล
+ØŃر ÙĬØ©
+×§×ij×ķצ ×ķת
+ë¯ ¿
+ĠاÙĦÙħ ÙĨا
+ĠاÙĦÙħÙĨا Ø·ÙĤ
+ĠвÑĭп ол
+ĠвÑĭпол нÑı
+ãĥĭ ãĤ¢
+Ġê²° êµŃ
+×Ĺ ×ķ×ŀ
+×Ĺ×ķ×ŀ ר×Ļ×Ŀ
+ĠУкÑĢа инÑĭ
+ห à¸Ńม
+ר ×Ļס
+ĠÑħоÑĤ ел
+ĠобÑĢаз ованиÑı
+Ġkh ẳng
+Ġm ưa
+Ġgör me
+Ġgüç lü
+سع Ùī
+มัà¹Īà¸Ļ à¹ĥà¸Ī
+íķĺ ê²łìĬµëĭĪëĭ¤
+Ġпол Ñĥ
+Ġfün f
+ã썿ĢĿ ãģ£ãģ¦ãģĦãģ¾ãģĻ
+Ġê·¸ê²ĥ ìĿĢ
+ĠdÃ¼ÅŁÃ¼n ce
+ìŀ ł
+ĠH Æ°á»Ľng
+ĠTi á»ĥu
+Ġç ift
+ãģij ãģ°
+à¸Īà¸Ļ à¸ĸึà¸ĩ
+à¸Ĺำ à¹Ħà¸Ķà¹ī
+ĠìŀIJ ì²´
+Ġd õ
+Ġdõ i
+à¸Ī ัà¸Ļ
+à¸Īัà¸Ļ à¸Ĺ
+à¸Īัà¸Ļà¸Ĺ รà¹Į
+ece ÄŁini
+׳×ķ×¢ ר
+غ ار
+ĠاÙĦØ£ÙħرÙĬ ÙĥÙĬ
+داع ش
+ĠбезопаÑģ ноÑģÑĤи
+Ġб Ñİ
+ĠбÑİ Ð´Ð¶
+ĠбÑİдж еÑĤ
+ãĥĬ ãĤ¤
+à¸ŀà¸ļ วà¹Īา
+da ÄŁ
+×IJ ×ķפף
+íĹ Į
+ãĥĢãĤ¤ ãĤ¨
+ãĥĢãĤ¤ãĤ¨ ãĥĥãĥĪ
+ĠëĮĢ íĨµ
+ĠëĮĢíĨµ ëł¹
+D İ
+Ø£ ØŃداث
+ĠA ÄŁ
+ĠAÄŁ ust
+ĠAÄŁust os
+ØŃÙĦ ÙĪÙĦ
+Ġw ÅĽ
+ĠwÅĽ ród
+ĠÑģо оÑĤвеÑĤ
+ĠÑģооÑĤвеÑĤ ÑģÑĤв
+ĠÑģооÑĤвеÑĤÑģÑĤв ии
+ĠLu áºŃt
+Ġ׼׾ פ×Ļ
+Ġв еÑī
+ĠвеÑī еÑģÑĤв
+×§ ×Ļ×¥
+ĠبÙĩ ذا
+عا ش
+à¹Ģà¸Ľà¹ĩà¸Ļ à¹Ģรืà¹Īà¸Ńà¸ĩ
+Т Ðķ
+Ġ×ij×IJ ×Ļ׳×ĺר׳×ĺ
+س عد
+Ġ×Ķ×ĺ ×Ļפ×ķ׾
+פ ×Ļס
+à¸ĩà¹Īาย à¹Ĩ
+ĠGer ät
+׾ ×Ļ×ĵ×Ķ
+ĠÑĢ Ð¸Ñģк
+׾ק ×Ĺ
+н наÑı
+ר ×Ļ×ĵ
+п ÑĢакÑĤи
+пÑĢакÑĤи к
+à¸Ĥัà¹īà¸Ļ à¸ķà¸Ńà¸Ļ
+à¸Ļà¹Īา รัà¸ģ
+larınız ı
+à¸Ńà¸Ļุ à¸įา
+à¸Ńà¸Ļุà¸įา à¸ķ
+ĠzdjÄĻ cia
+Ġb ây
+Ñģ ÑĢ
+ÑģÑĢ Ð¾Ñĩ
+ãĥĭ ãĥ³ãĤ°
+Ġö ner
+Ġöner i
+Ġнов ÑĭÑħ
+دع ÙĪØ©
+Ġg ắn
+ĠاÙĦÙĦ بÙĨ
+ĠاÙĦÙĦبÙĨ اÙĨÙĬ
+ãĥĨãĤ£ ãĥ¼
+Ġص ØŃÙĬØŃ
+ем ÑĭÑħ
+çĸ² ãĤĮ
+ĠпÑĢо иÑģ
+ĠпÑĢоиÑģ ÑħодиÑĤ
+ส à¸ķิ
+ĠT ết
+Ġ×Ķ׾ ׾×ķ
+à¹Ģรืà¹Īà¸Ńà¸ĩ à¸Ļีà¹ī
+×ŀ×ij ׳×Ķ
+Ġconte údo
+Ġا خت
+Ġاخت ÙĬار
+Ùħ سÙĦ
+ÙħسÙĦ سÙĦ
+ëı Ī
+Ġ׾ ×Ļ×ĵ
+à¸ŀิ à¸ĺี
+ĠÑģов Ñģ
+ĠÑģовÑģ ем
+ãģĮãģĤãĤĬ ãģ¾ãģĹãģŁ
+Ġsó ng
+Ø¥ صÙĦاØŃ
+ë§ ģ
+Ùģ ÙĬر
+ĠJe żeli
+ìłľ ëıĦ
+d ÅĤug
+ìĥģ ìĿĦ
+Ġc áºŃn
+Ġhá»į p
+أ ست
+أست اذ
+Ġ×ŀ ×Ļש×Ķ
+Ġ×ŀ×Ļש×Ķ ×ķ
+Ġd Ãły
+Ġch Ãłng
+ãģ¡ãĤĥãĤĵ ãģ¨
+ĠÄij ám
+Ġsw ój
+Ġpoder á
+ĠоÑĤлиÑĩ а
+Ġpéri ode
+ünd ig
+×ĺ×¢ ף
+ÑģÑĤÑĢо иÑĤелÑĮ
+ר ת×Ļ
+Ġ×Ļ×Ķ ×Ļ×ķ
+׾ ס
+ĠاÙĦÙħÙĨ زÙĦ
+à¸Ļิ à¹īว
+иÑĦ ика
+иÑĦика ÑĨи
+ðŁĺ ī
+Ġad ına
+ãĢĤãĢĤ ãĢĤ
+×IJ ×Ļף
+ס ×Ļר
+ĠÙĬ عد
+çŃĶ ãģĪ
+اÙĦ جز
+اÙĦجز ائر
+енÑĮ к
+ร ห
+รห ัส
+ĠTürk çe
+ê¾ ¸
+Ġ×Ļ ×ķ׼׾
+Ġש ×ķ׳×Ķ
+Ġ×ij×ŀ צ×ij
+ĠдейÑģÑĤв иÑĤелÑĮно
+ĠبأÙĨ Ùĩ
+×ŀ×§ ×ĵ
+Ġ×Ķש ×§
+Ø®ÙĬ ارات
+Ġf ı
+Ġfı rs
+Ġfırs at
+ëij ĺ
+ĠìĦľ ìļ¸
+Ġ×Ķ×Ĵ ×ķ×£
+ر عا
+رعا ÙĬØ©
+ĠK ết
+к Ñģи
+ĠÑĥÑģлÑĥг и
+ноÑģÑĤ ей
+ìļ´ ëıĻ
+ĠобÑĬ Ñı
+ĠобÑĬÑı вл
+н еж
+×Ķפ ×ļ
+Ġ×ij×¢ ×Ļ׳×Ļ
+ëĨ Ĵ
+ĠпÑĢоÑĨ ед
+ĠпÑĢоÑĨед ÑĥÑĢ
+Ġiht iy
+Ġihtiy acı
+Ġë°Ķ ëŀį
+Ġë°Ķëŀį ëĭĪëĭ¤
+à¸ģล ัว
+ĠÑģл ожно
+×§×Ļ ×Ļ×ŀת
+ĠÄIJ ình
+ĠÙħ ÙĦÙģ
+Ġà¹Ĥà¸Ķย มี
+Ġkat kı
+تØŃ ÙĪÙĬÙĦ
+à¹Ħ à¸ŀ
+ĠH á»į
+ñ e
+Ġдо Ñħод
+Ġtho ải
+íķĺìŬ ìķ¼
+ãĤ¹ãĥĿ ãĥ¼ãĥ
+ãĤ¹ãĥĿãĥ¼ãĥ Ħ
+ĠG òn
+Ġk è
+Ġkè m
+é̲ ãĤģ
+ãĤ¹ ãĥ¼ãĥ
+ãĤ¹ãĥ¼ãĥ ij
+ãĤ¹ãĥ¼ãĥij ãĥ¼
+ĠgiÃł u
+ĠØ¥ عادة
+Ġ׾ ×ķ×§
+Ġ׾×ķ×§ ×Ĺ
+ĠÑħоÑĩ еÑĤ
+×ĺ ׾×ķ×ķ
+×ĺ׾×ķ×ķ ×Ļ×ĸ
+×ĺ׾×ķ×ķ×Ļ×ĸ ×Ļ×Ķ
+Ġth uyết
+ãģĿãĤĮ ãģ§
+Ġvard ı
+à¹Ħร à¹ī
+ع بد
+ĠRep ública
+ãĥ¼ãĤ¿ ãĥ¼
+Ġ×ŀ×IJ ×ķת
+à¹Ħà¸Ľ à¹ģลà¹īว
+Ġyapıl acak
+ãĤ¹ãĤ¿ ãĥ¼ãĥĪ
+ãģ» ãģ¼
+Ġko ÅŁ
+ĠмаÑĤ еÑĢи
+Ġsiè cle
+ĠاÙĦÙħ ختÙĦÙģ
+ĠاÙĦÙħختÙĦÙģ Ø©
+Ġ׾ק ר×IJ
+Ġ׾קר×IJ ת
+Ġ×Ķפ ×ķ×¢×ľ
+Ġt òa
+Ġr Æ¡i
+åij¨ ãĤĬ
+à¸Ŀ à¸Ļ
+j ÅĽÄĩ
+ĠìķĬ ìĿĦ
+اÙĨت ÙĤاÙĦ
+ëĸ ł
+ив аеÑĤ
+ãĥĪ ãĥ«
+ĠاÙĦÙģÙĦسطÙĬÙĨ ÙĬØ©
+à¸ģลà¹Īาว วà¹Īา
+ا Ùĥت
+ĠÃĸ l
+ĠÑĢе ÑĪи
+ĠÑĢеÑĪи л
+Ġ׳×ķס פ×ķת
+Ġìłķ ì¹ĺ
+вл еÑĩен
+Ùħر ØŃÙĦØ©
+Ġcome ça
+Ġy ık
+ìĤ ´
+à¸ĺ à¸Ļา
+à¸ĺà¸Ļา à¸Ħาร
+à¸Ńà¸Ļ า
+à¸Ńà¸Ļา à¸Ħ
+à¸Ńà¸Ļาà¸Ħ à¸ķ
+Ġpeque ña
+ä»ķ äºĭãĤĴ
+Ġب ذÙĦÙĥ
+Ġнов ого
+ãģĹãģ¦ ãģĦãģªãģĦ
+ĠاÙĦÙħ ÙĬاÙĩ
+à¸ģà¹ĩ à¹Ģà¸Ľà¹ĩà¸Ļ
+Ġж ÑĥÑĢ
+ĠжÑĥÑĢ Ð½Ð°Ð»
+в еÑģ
+خت ار
+Ġ매 ìļ°
+ĠM ã
+ĠавÑĤомаÑĤ Ñĭ
+ضع Ùģ
+ĠاÙĦÙģ Ùĥر
+ãģ§ãģĻ ãģ®ãģ§
+ãĥ¡ãĥ³ ãĥIJãĥ¼
+Ġк ÑĢÑĥг
+ĠاÙĦسÙĦ طة
+à¸Ħรัà¹īà¸ĩ à¹ģรà¸ģ
+à¸ģระà¸Ĺ รว
+à¸ģระà¸Ĺรว à¸ĩ
+ÑĨ ов
+éķ· ãģĦ
+大ãģį ãģĦ
+Ġgeç miÅŁ
+ìĦ± ìĿ´
+Ġצר ×Ļ׼×Ķ
+Ġм оÑī
+ĠмоÑī н
+Ġ×§ ×Ļש
+Ġ×§×Ļש ×ķר×Ļ×Ŀ
+ĠNas ıl
+г ÑĢан
+Ġ×ŀ ×ķצר×Ļ×Ŀ
+Ġ×ŀס ×ķ×Ĵ
+Ġy ür
+Ġyür üt
+Ġ׾׊צ×ķ
+×ķÖ ¼
+ĠìŀĪ ìĹĪëĭ¤
+Ġter ör
+ĠTh ương
+ĠÙĪ ÙĬÙħ
+ĠÙĪÙĬÙħ ÙĥÙĨ
+ج ÙĪÙĨ
+ĠÙĪØºÙĬر Ùĩا
+×ŀ פ×ķ
+×Ĵ×ķר ×ŀ×Ļ×Ŀ
+׼×ij ×Ļש
+ĠاÙĦÙĦ غ
+ĠاÙĦÙĦغ Ø©
+شر Ùĥ
+ĠاÙĦر اب
+ĠاÙĦراب ع
+ĠпÑĢ ÐµÐº
+ĠпÑĢек ÑĢаÑģ
+ĠпÑĢекÑĢаÑģ н
+Ġenerg ÃŃa
+×§×ĵ ×ŀ×Ļ
+ãģıãģª ãģ£ãģŁ
+ĠÄij ứ
+ĠÄijứ a
+Serv i
+Servi ço
+Ġkald ır
+åĥį ãģį
+Ġод еж
+Ġодеж д
+물 ìĿĦ
+ãģĿãģĨ ãģ§
+ãģĮãģĤ ãĤĮãģ°
+ìĻ ķ
+צ×ĵ ×§
+Ġart ır
+Ġile ti
+Ġileti ÅŁim
+ãĤĪãģĨ ãģ§
+ãĥĪ ãĥ¼
+ãĤ¢ ãĥĭ
+ãĤ¢ãĥĭ ãĥ¡
+×ĺ×Ļ ×Ļ׾
+ãĥķ ãĥªãĥ¼
+ãĥĿ ãĥ³
+ÐŁÑĢ Ð¾
+Ġع اÙĦÙĬØ©
+ĠÃ¶ÄŁ ret
+ĠÃ¶ÄŁret men
+ĠкаÑĩеÑģÑĤв а
+Ġ×Ķ×ĺ ×ij×¢
+Ġзна Ñİ
+ãģ¦ ãģıãĤĭ
+Ġm ừng
+ÙħÙĪ Øª
+ש ×ķ×ŀר
+×Ĺ׾ ×ij
+Ġwzgl ÄĻ
+ĠwzglÄĻ du
+ë²Ī 째
+Ġtá» ĵ
+Ġtá»ĵ n
+ãĥ¯ãĥ¼ ãĤ¯
+Ġpo życz
+Ġpożycz k
+×Ļ ×ķצר×Ļ×Ŀ
+Ùĥر Ùħ
+Ġг аÑĢ
+ĠгаÑĢ Ð°Ð½
+ĠгаÑĢан ÑĤи
+ล à¹īาà¸ĩ
+Ġìĺģ íĻĶ
+×ĺ ×Ļס
+Ġth ẻ
+ĠìŀĪëĭ¤ ê³ł
+اÙĦت ز
+اÙĦتز اÙħ
+Ġна ÑĪи
+is ée
+ãģĵãĤĮ ãĤĴ
+Ġm ẽ
+ض ÙĦ
+بÙĪ Øª
+Ġ׼ ׼×Ķ
+h ợ
+ĠاÙĦس ÙĪØ±ÙĬØ©
+Ġ×ľ×¢ ×ķ×ŀ
+Ġ×ľ×¢×ķ×ŀ ת
+ĠbaÅŁ ar
+ĠbaÅŁar ılı
+е ÑģÑĤÑĮ
+à¸Ħร ี
+à¸Ħรี ม
+ĠìłĦ ì²´
+ĠسÙĬ ÙĥÙĪÙĨ
+Ġ×ŀ×ĵ ×ķ×¢
+ĠëķĮ문 ìĿ´ëĭ¤
+Ġc ứng
+ger ät
+Ġм иÑĢ
+ĠмиÑĢ Ðµ
+ĠÙĥÙĬÙģ ÙĬØ©
+Ġפר ×ĺ×Ļ×Ŀ
+Ġgo ÅĽci
+иÑĤ еÑģÑĮ
+ÑĥÑĪ ÐºÐ¸
+ؤ ÙħÙĨ
+Ġ×IJ ׼ף
+ĠاÙĦر جÙĦ
+Ġl á»įc
+à¹Ģรีย à¸ģวà¹Īา
+ãģĵãģ® ãĤĪãģĨãģª
+ë§Į íģ¼
+Ġп еÑĩ
+ÙĪÙĦ ات
+ĠÃľ ye
+liÄŁ inde
+à¸Ħะ à¹ģà¸Ļ
+à¸Ħะà¹ģà¸Ļ à¸Ļ
+ãĤĭãģĵãģ¨ ãģ¯
+วิ à¹Ģà¸Ħร
+วิà¹Ģà¸Ħร าะ
+วิà¹Ģà¸Ħราะ หà¹Į
+Ġвозмож ноÑģÑĤи
+ĠاÙĦÙĨ ساء
+ãĥīãĥ© ãĥŀ
+Ġgü c
+Ġgüc ü
+Ġt ưá»Ŀng
+Ġacomp aña
+ãĤ¤ ãĥ©
+×§ צ×ij
+ĠY ö
+ĠYö net
+ĠYönet im
+สัม à¸ľ
+à¸ªà¸±à¸¡à¸ľ ัส
+à¸Ļ าม
+ĠÄij ợi
+à¹ģหà¹Īà¸ĩ à¸Ĭาà¸ķิ
+ãģĿãĤĮ ãģ§ãĤĤ
+ät ig
+ת ×ķ×Ŀ
+ĠbaÅŁ lat
+ĠвÑģ ей
+ת ×Ļ×§
+ת×Ļ×§ ×ķף
+ĠNg ô
+ĠGesch ä
+ĠGeschä fts
+Ø£ Ùħ
+Ø£Ùħ راض
+à¹Ģà¸Ĺ à¸Ħà¸Ļ
+à¹Ģà¸Ĺà¸Ħà¸Ļ ิ
+à¹Ģà¸Ĺà¸Ħà¸Ļิ à¸Ħ
+Ġм енÑĮ
+ĠменÑĮ ÑĪе
+Ġöl ç
+Ġölç ü
+ĠÙĬ جعÙĦ
+ĠÄij ỡ
+ש ×Ļ׾
+ש×Ļ׾ ×ķ×ij
+ĠGr Ã¶ÃŁe
+ĠÙĩ اتÙģ
+รà¹īาà¸Ļ à¸Ńาหาร
+×Ķ׾ ×Ļ׼
+×Ķ׾×Ļ׼ ×Ļ
+иÑĢÑĥ ÑİÑī
+èĭ¥ ãģĦ
+ĠÃĸ zel
+ãģĦãģŁ ãĤī
+à¸Ħำ à¸ĸาม
+Ġzosta ÅĤy
+Ġ×Ķס ×Ļפ×ķר
+×Ķ ×ķ׾
+×Ķ×ķ׾ ×ļ
+à¹Ģà¸Ĭà¹Īà¸Ļ à¸ģัà¸Ļ
+à¹Ĥ à¸Ĩ
+à¹Ĥà¸Ĩ ษ
+à¹Ĥà¸Ĩษ à¸ĵา
+×IJר צ×ķת
+×Ĵר פ×Ļ
+Ġao ût
+ĠÙĬ رÙĬد
+ت ÙĪØ¬
+تÙĪØ¬ ÙĬÙĩ
+ĠÑįÑĤ ап
+ãĤ¹ãĤ¿ ãĥ³
+Ġkr ó
+Ġkró tk
+ãĤĴ使 ãģĨ
+ì ·¨
+éĸ¢ ãĤı
+à¸Ķà¹īวย à¸Ħวาม
+à¸Ļำ à¹Ģสà¸Ļà¸Ń
+Ġa yrıca
+à¸Ī à¹īาà¸ĩ
+ĠÑĦоÑĤ огÑĢаÑĦ
+Ġв еÑĩ
+ĠвеÑĩ еÑĢ
+åĩº ãģĹãģŁ
+ĠÐ¥ о
+Ġ×ŀ ר×Ĵ×Ļש
+à¹ĥหà¹ī à¹Ģà¸Ľà¹ĩà¸Ļ
+ãĤĴ 缮
+ãĤĴ缮 æĮĩ
+׾ ×ŀ×Ļ×Ŀ
+nÄħ ÅĤ
+ĠÑģÑĤ анд
+ĠÑģÑĤанд аÑĢÑĤ
+ĠSü d
+ĠT âm
+اخت بار
+à¹Ģà¸ģ à¸Ńรà¹Į
+Ùħس رØŃ
+Ġbi á»ĩn
+ب Ùı
+Ġص اÙĦ
+ĠصاÙĦ ØŃ
+ĠPh ụ
+íľ ´
+ãĥ¬ãĥĵ ãĥ¥ãĥ¼
+Ġbụ ng
+Ġrég ime
+ĠØ£ Ø´Ùĩر
+ĠÑĢабоÑĤ ник
+à¸Ŀ ัà¸Ļ
+اع تÙħ
+اعتÙħ اد
+Ġзам еÑĤ
+ãģ¾ ãģ£ãģ¦
+Ġch ặt
+æĿ¥ ãĤĭ
+ĠاÙĦÙĤ ÙĪØ§Øª
+ãģ«åħ¥ ãģ£ãģ¦
+تØŃ اÙĦÙģ
+Ùħ زÙĬد
+ĠÙĬ صÙĦ
+ìĹ ¼
+à¹Ģà¸Ĭ à¹ĩ
+à¹Ģà¸Ĭà¹ĩ à¸Ħ
+Ġk á»ĭ
+Ġká»ĭ p
+ĠìķĦ ì§ģ
+×IJ׳ ×Ĵ
+Ġобла ÑģÑĤÑĮ
+Ġpomoc Äħ
+Ġ×ķ ש׾
+ëĵł ì§Ģ
+ĠGi ám
+ĠSt ück
+Ġchá y
+ĠëĤĺ ìĺ¤
+ש ×Ļ×ĺת
+×ŀ×ĵ ר
+×ŀ×ĵר ×Ļ×ļ
+Ġsüre ç
+к ва
+×ij׾ ×Ļ×Ŀ
+×Ķ ×ª×Ļ
+×Ķת×Ļ ×Ļ×Ĺס
+ÙĤب اÙĦ
+Ġס ×ķ×Ĵ
+Ġס×ķ×Ĵ ×Ļ
+ÑģÑĤ олÑĮ
+ä½ķ ãĤĤ
+×ĸ׼ ×ķר
+è²· ãģĨ
+å®ī ãģı
+à¸Ħรัà¹īà¸ĩ à¸Ļีà¹ī
+kö p
+ĠÑģеÑĢ Ð²Ð¸Ñģ
+оÑĩ нÑĭÑħ
+ê±° ëŀĺ
+تأ Ùĥ
+تأÙĥ ÙĬد
+×ĵ ׾ק
+Ġпо Ñĩем
+ĠпоÑĩем Ñĥ
+пиÑģ аÑĤÑĮ
+×ij שר
+ĠH Ãłng
+ĠT ìm
+Ġtr ừ
+ãĤ» ãĥĥãĤ¯ãĤ¹
+×ķ׳ ×Ĵ
+mız da
+п Ñģи
+ĠìŀĪ ê¸°
+Ġr út
+ز اÙĨ
+تÙĨ ÙĪØ¹
+ÙħÙĤ ا
+ÙħÙĤا ÙĪÙħØ©
+Ġ׾צ ×ķר×ļ
+Ġ×ij ×Ļר×ķש׾×Ļ×Ŀ
+ãĥ´ ãĤ£
+eb ile
+ebile ceÄŁi
+ãĥ¦ ãĥ¼ãĤ
+ãĥ¦ãĥ¼ãĤ ¶
+ãĥ¦ãĥ¼ãĤ¶ ãĥ¼
+ãĤĴä½ľ ãĤĭ
+Ñģ меÑĢ
+ÑģмеÑĢ ÑĤ
+Ġì§ ģ
+Ġì§ģ ìłij
+ĠÐŁ аÑĢ
+ØŃ اض
+ØŃاض ر
+Ùħ ÙĥاÙģ
+ÙħÙĥاÙģ ØŃØ©
+ล ิà¸Ļ
+ãģ¦ ãģįãģ¦
+ÑĢоÑģ л
+ĠÄ°ÅŁ te
+ÙĤص ÙĬر
+Ġ×ij×Ĵ ×Ļ׾
+Ġ×ŀת ×IJ×Ļ×Ŀ
+Ġ×Ķ ×Ĺ×ĵ
+Ġ×Ķ×Ĺ×ĵ ש×Ķ
+ר ×ķ×¢
+Ġprodukt ów
+ĠÙħ صدر
+не ÑĨ
+ĠاÙĦعÙħÙĦ ات
+Ġçık ma
+Ġد بÙĬ
+×§ ×Ļף
+ת ×IJר
+ת×IJר ×Ļ×ļ
+׳×Ļ ×Ļ×ĵ
+صر اع
+l ève
+צ ×Ļר
+à¸Ķ ัà¸Ļ
+à¹ĥหà¹ī à¹Ħà¸Ķà¹ī
+ãĤ¿ãĤ¤ ãĥł
+Ġgi ảng
+С ÐŁ
+ĠاÙĦÙħ ØŃÙĦ
+ĠاÙĦÙħØŃÙĦ ÙĬØ©
+ĠT ất
+׾ ×ķ×ĺ
+h á»ķ
+Ġam éric
+Ġaméric ain
+Ġ×ijש׾ ×ij
+Ġ׾×IJ ×ķ×ŀ×Ļ
+Ġpe ça
+ĠÑĢаз нÑĭÑħ
+ãģĦãĤĭ ãģ¨
+ãĥĩ ãĥ³
+ס קר
+Ġ×Ķ×ŀ×Ĺ ×Ļר
+ãģ¨ãģĦãģĨ ãĤĤãģ®
+رت بط
+ĠиÑģÑĤ оÑĩ
+ĠиÑģÑĤоÑĩ ник
+สมัà¸Ħร สมาà¸Ĭิà¸ģ
+Ġ à¸Ĺัà¹īà¸ĩ
+Ġà¸Ĺัà¹īà¸ĩ à¸Ļีà¹ī
+ĠT áºŃp
+ãģ£ãģ¦ ãģĦãģĨ
+ĠاÙĦÙĪ ØµÙĪÙĦ
+Ġdéc ada
+Ġо ÑĦоÑĢм
+ĠоÑĦоÑĢм лен
+สำหรัà¸ļ à¸ģาร
+Ġog óln
+ãģĨãģ¡ ãģ«
+Ġvá rias
+ãģĻãģİ ãĤĭ
+ÙĪ Ùĩا
+à¹Ĥà¸Ľà¸£ à¸Ķ
+ĠÐłÐ¾ÑģÑģ иÑı
+人 ãĢħ
+ãģĹãģ¦ ãģįãģŁ
+Ġsı rasında
+Ġng ôn
+س ÙĨØ©
+تÙħ تع
+×ŀ׼ ×ij×Ļ
+Ġnh ấn
+×¢ ×ŀ×Ļ×ĵ
+á» ¨
+ж иÑĤÑĮ
+ãĤī ãģĽ
+gr áf
+gráf ica
+ĠÙĤ ÙĪÙĦ
+ĠÙĤÙĪÙĦ Ùĩ
+ëĭ¨ ì²´
+ห à¹īา
+หà¹īา ม
+使 ãģ£ãģ¦
+ת ×Ļ×ij
+ת×Ļ×ij ת
+i á»ĥu
+à¹ģ à¸Ĭม
+à¹ģà¸Ĭม à¸Ľ
+à¹ģà¸Ĭà¸¡à¸Ľ à¹Į
+Ạ¬
+ĠëĤĺ ëĿ¼
+ĠÙħباشر Ø©
+Ġtr Äĥm
+سÙĥ ÙĪ
+ĠاÙĦذ Ùī
+Ġbi ç
+Ġbiç im
+ت راجع
+Ġоб еÑģп
+ĠобеÑģп еÑĩ
+ĠобеÑģпеÑĩ ива
+Ġвозд ÑĥÑħ
+Ñĭв аÑĤÑĮ
+ÙĦ ØŃÙĤ
+ĠMü dü
+ĠMüdü rl
+ĠMüdürl Ã¼ÄŁÃ¼
+Ġyapt ır
+Ġפר ס
+Ġפרס ×ķ×Ŀ
+Ø· ÙĪØ±
+ÑģÑĤв оваÑĤÑĮ
+ìŀ¥ ìĿĦ
+à¸Ĺีà¹Īà¸Ķี à¸Ĺีà¹Īสุà¸Ķ
+à¸Ńั ล
+ÑĢ Ñİ
+Ùħست ÙĤبÙĦ
+Ñģл ÑĥÑĪ
+ÑģлÑĥÑĪ Ð°
+èªį ãĤģ
+Ġ׾ ×Ļ×ŀ
+Ġ׾×Ļ×ŀ ×ķ×ĵ×Ļ
+ת ש×ķ×ij
+תש×ķ×ij ×ķת
+ĠgerçekleÅŁtir il
+ĠاÙĦ اتÙ쨧ÙĤ
+ĠÑĥÑĢов не
+ĠÑĤ ÑĢав
+Ġ×Ķ×ŀ ×ķף
+ØŃÙģ Ø§Ø¸
+ĠÙħ ÙIJ
+ĠÙħÙIJ ÙĨ
+ĠÙħÙIJÙĨ ÙĴ
+Ġdem ás
+×ŀ×ķ×ĸ ×Ļ×§×Ķ
+ש ×Ļ×Ĺ×Ķ
+Ġb ú
+алÑĮ нÑĭм
+ãĤı ãģŁ
+ãĤıãģŁ ãģĹ
+ĠاÙĦÙħÙĪ Ø§Ø¯
+ת ׼׳
+×ª×Ľ×ł ×ķף
+ãĥŃ ãĥĥãĤ¯
+hi ếu
+ĠÑĥ ме
+ÙħØŃا ÙĪÙĦØ©
+×IJ ×ķשר
+Ġкон кÑĥÑĢ
+ĠконкÑĥÑĢ Ñģ
+Ġ×ŀ ×ij×Ĺ
+Ġ×ŀ×ij×Ĺ ×Ļ×ł×ª
+Ġan lam
+Ġanlam ı
+Ġli á»ĩt
+Ġв Ñħод
+ĠH ình
+ĠÙĨ ÙĬ
+ĠÙĨÙĬ ÙĪØ²
+ãĤ¸ãĥ£ ãĥ¼
+×ij ×Ļ×¥
+ÑĤелÑĮ нÑĭÑħ
+à¸Ĺุà¸ģ à¸Ńยà¹Īาà¸ĩ
+ĠkiÅŁ inin
+Ø£ Ùĥثر
+ĠиÑģÑĤоÑĢ Ð¸Ð¸
+Ġë³Ģ íĻĶ
+פ׾ ס×ĺ
+×¤×ľ×¡×ĺ ×Ļ׳×Ļ
+ĠÑģ еÑĤ
+ĠÑģеÑĤ и
+dıģ ımız
+íķĺ ëıĦë¡Ŀ
+×Ķ ×¨
+×Ķר ×ij×Ķ
+ãģĻãĤĭãģĵãģ¨ ãģ¯
+Ġphi ếu
+تØŃ سÙĬÙĨ
+ĠÅĽ rod
+ĠÅĽrod ow
+ĠÅĽrodow isk
+ĠÑĢаÑģ Ñħод
+بر ÙĬد
+Ġر ÙĬ
+ĠرÙĬ اÙĦ
+Ġ×ķ ׼×ļ
+ì§Ģ ìļĶ
+׼ ×ŀ×ķ
+Ġ×¢×ľ ×Ļ×Ķ×Ŀ
+f ÃŃcio
+Ġkar arı
+tıģ ını
+ĠС ов
+ĠСов еÑĤ
+ãģĬéĩij ãĤĴ
+м еждÑĥ
+междÑĥ на
+междÑĥна ÑĢод
+междÑĥнаÑĢод н
+Ġm á»Ŀi
+ĠاÙĦØ¥ ÙĬر
+ĠاÙĦØ¥ÙĬر اÙĨÙĬ
+ĠاÙĦرÙĪ Ø³ÙĬ
+ص ÙĨد
+صÙĨد ÙĪÙĤ
+ĠاÙĦØ¥ÙĨ ترÙĨت
+Ġt ắm
+ĠÑĤак ого
+Ġ×ij ׾×ķ×Ĵ
+Ġü crets
+Ġücrets iz
+×Ĺ×ĸ ×Ļר
+ìĸ´ ìķ¼
+ĠPh ần
+ï¼ ľ
+Ġ×ĺ ×ij×¢
+Ġ×ĺ×ij×¢ ×Ļ
+×IJ×ŀ ×IJ
+اÙĤ ÙĦ
+Ġcondi ções
+ÙĤات ÙĦ
+ĠÑĢезÑĥлÑĮÑĤаÑĤ е
+ĠÑģво ими
+צ×ij ×Ļ×¢
+gé ni
+Ġz es
+Ġzes po
+Ġzespo ÅĤ
+ÑĪ Ð¸Ð²
+Ġפר×ĺ×Ļ ×ķת
+Ùħست Ø´Ùģ
+ÙħستشÙģ Ùī
+شر ع
+Ġko ÅĽci
+Ġ×Ķ×IJ ×Ļ׳×ĺר׳×ĺ
+ĠЧ еÑĢ
+поÑĩ ÑĤ
+Ġactiv ités
+çŁ¥ ãģ£ãģ¦
+Ġ×ij ×ĸ×Ķ
+Ġyüz den
+ãģªãĤĬ ãģ¾ãģĽãĤĵ
+Ġíĺ ¹
+Ġíĺ¹ ìĿĢ
+Ġ×ŀש ׳×Ķ
+ĠÐĴ еÑĢ
+Ġ×ij×IJ×ķת ×ķ
+éĿ¢ çϽ
+éĿ¢çϽ ãģĦ
+شر ØŃ
+gr ünde
+Ùģ Ø´
+Ù쨴 ÙĦ
+Ġsé jour
+ë´ IJ
+Ġr ôle
+ش عار
+ем Ñĭе
+ĠاÙĦج سÙħ
+алÑĮ ное
+Ġìĥģ íĥľ
+ï¼ ¤
+ë¯Ģ ë¡ľ
+ĠÙĨ ÙĤØ·
+ĠÙĨÙĤØ· Ø©
+ãģĿãģĨ ãģł
+ãģĻãĤĭ ãģ®ãģĮ
+ห ู
+Ġnh á»ĭ
+Ġeconóm ica
+ס×ĺ ×ķ×ĵ
+ס×ĺ×ķ×ĵ ׳×ĺ
+มี à¹Ĥà¸Ńà¸ģาส
+Ġgest ão
+รูà¹ī วà¹Īา
+Ġlo ạt
+ĠاÙĦÙħ Ùı
+ĠاÙĦØŃ ÙħÙĦ
+ĠاÙĦعÙħÙĦ ÙĬØ©
+Ġê²ĥ ëıĦ
+ĠÐľÐ¾Ñģк ва
+×§×ĺ ×ķר
+Ġпод ÑĢоб
+ĠподÑĢоб н
+Ġl ưng
+ت Ù쨳
+تÙ쨳 ÙĬر
+ĠاÙĦ بع
+ĠاÙĦبع ض
+ئ ت
+Ðķ ÐĿ
+ìŰ 구
+à¹ĥหà¹ī à¸Ħุà¸ĵ
+ãģĤãĤĬ ãģ¾ãģĹãģŁ
+Ġbir ka
+Ġbirka ç
+Ġİ sl
+Ġİsl am
+çĹĽ ãģ¿
+Ġh ảo
+Ġм аÑı
+ĠiÅŁ çi
+ש ×
+×©× ģ
+à¸ģาร à¹Ģมืà¸Ńà¸ĩ
+×ķ×Ķ ×¨
+Ġch ó
+ëĨ Ģ
+Ġyan lı
+Ġyanlı ÅŁ
+幸 ãģĽ
+×IJר×Ĵ ×ķ׳×Ļ
+à¸Ńาà¸Ī าร
+à¸Ńาà¸Īาร ยà¹Į
+ĠинÑĦоÑĢм аÑĨиÑİ
+Ðĵ Ðŀ
+׳ ×Ĺש
+ĠìķĮ ìķĦ
+ĠÑħаÑĢакÑĤеÑĢ Ð¸ÑģÑĤ
+ĠÑħаÑĢакÑĤеÑĢиÑģÑĤ ик
+à¸Ħุà¸ĵ สามารà¸ĸ
+è¦ĭ ãģĪãĤĭ
+à¸Ĭัà¸Ķ à¹Ģà¸Ī
+à¸Ĭัà¸Ķà¹Ģà¸Ī à¸Ļ
+ĠdziaÅĤ al
+ĠdziaÅĤal noÅĽci
+à¹Ĥà¸ŀ สà¸ķà¹Į
+ĠÐļ ол
+ĠÙģ ÙĩÙĬ
+Ġ×ŀ פ׳×Ļ
+Ġ×Ķ×§ שר
+Ùħر Ùĥ
+ÙħرÙĥ ز
+Ġho á
+Ġа пп
+Ġапп аÑĢаÑĤ
+Ġp ami
+Ġpami ÄĻ
+ĠpamiÄĻ ta
+Ġç ünkü
+×ĵ ×ķף
+ãģ¯ ãģĵãģ¡ãĤī
+ĠM Ãł
+ĠÙĬ ÙĤدÙħ
+ĠпÑĢ ÐµÐ·
+ĠпÑĢез иденÑĤ
+à¸Ńุ à¸ķ
+à¸Ńุà¸ķ สา
+à¸Ńุà¸ķสา ห
+à¸Ńุà¸ķสาห à¸ģรรม
+ì§Ģ ìĽIJ
+Ġ×IJפשר ×ķת
+sch üt
+schüt z
+ĠTi ên
+Ġsay ılı
+ĠгÑĢÑĥпп Ñĭ
+оÑĩ нÑĭй
+Ġ×ľ×¢ ×ŀ×ķ×ĵ
+Ġwr zeÅĽ
+ĠwrzeÅĽ nia
+ĠÄIJ ầu
+à¹Ģà¸Ĥà¹īา รà¹Īวม
+nız da
+Ø®ÙĬ ص
+Ġgü nc
+Ġgünc el
+ĠÙĦÙĩ ذÙĩ
+ĠÙĬ عتبر
+lé gi
+ãĤı ãģĭãĤĭ
+Ġr ừng
+ظ Ùĩ
+ظÙĩ ÙĪØ±
+Ġ×ŀ×ij ×Ļף
+Ġ기 íĥĢ
+åĪĩ ãĤĮ
+lan mÄ±ÅŁ
+à¸Ĺีà¹Ī มีà¸Ħวาม
+Ġh á»ģ
+ت ÙĪØ¬Ùĩ
+ĠاÙĦØ¥ دارة
+Ġú til
+ס פ×ķ
+à¸Ħวาม รัà¸ģ
+à¹Ĥ ฮ
+Ġпол иÑĤ
+ĠполиÑĤ ик
+Ġsat ın
+ĠÅŀ imdi
+×ŀ ×ķר×Ļ×Ŀ
+ìķĺ ëĭ¤
+×Ĺ ×ķ×ķ
+×Ĺ×ķ×ķ ×Ļ×Ķ
+à¸Ħà¸Ńม à¸ŀิ
+à¸Ħà¸Ńมà¸ŀิ ว
+à¸Ħà¸Ńมà¸ŀิว à¹Ģà¸ķà¸Ńรà¹Į
+Ġا ذا
+تخ اذ
+ãĤ¨ ãĥ«
+Ġpossibilit é
+ยืà¸Ļ ยัà¸Ļ
+Ġü nivers
+Ġünivers ite
+ĠاÙĦد ÙĪØ±ÙĬ
+ĠìķĬëĬĶ ëĭ¤
+ĠìĦľ ë¡ľ
+ØŃ اÙĦ
+Ġë ¨
+Ġë¨ ¼
+Ġ먼 ìłĢ
+à¸Ĺีà¹Ī à¸ĸูà¸ģ
+ì§ ľ
+Ġsk óry
+лÑĮ ÑĨ
+à¹ĥà¸Ĭà¹ī à¹Ģวลา
+×ij×§ שת
+Ġذ ÙĪ
+æĹ¥ ãĢħ
+ĠкоÑĤоÑĢ ÑĥÑİ
+ĠÑĥÑĢов енÑĮ
+ê¹ ¨
+à¹Ħ à¸Ĺ
+ãĤµ ãĥĹãĥª
+ãĤ¸ ãĥ§ãĥ³
+ãģĻ ãģ¹ãģį
+ĠG ór
+ãĥĪ ãĤ¤
+ãĥĪãĤ¤ ãĥ¬
+ĠyaÅŁ ama
+Ġdá»ĭ p
+Ġb ữa
+à¸ĭ ุ
+Ġöl üm
+ãģ£ãģ¦ ãģıãĤĭ
+à¸ģาร à¸Ħà¹īา
+ש ער
+ĠÑĤип а
+Ġг еÑĢ
+ĠгеÑĢ Ð¾
+רק ע
+Ġu waż
+Ġuważ a
+ש×ŀ ף
+Ġhast alık
+ãĤıãĤĮ ãĤĭ
+ba ÅŁÄ±
+Ñĩ ÑĤо
+Ġ×ij ×ŀר׼×ĸ
+Ġìļ°ë¦¬ ìĿĺ
+ĠÙĥاÙĨ ÙĪØ§
+ĠØ£ بر
+Ġأبر ÙĬÙĦ
+ì¸ µ
+à¹Ħà¸Ĥ à¹Ī
+ĠÙĪ ÙĦÙĪ
+à¸Ĺ ัว
+à¸Ĺัว รà¹Į
+ĠÙĪØ£ Ùĥد
+à¸Ĭ วà¸Ļ
+׾ ×ķ×§
+æį ¨
+æį¨ ãģ¦
+Ġİç in
+p éri
+Ġy al
+Ġyal nız
+ÑĮÑı н
+Ġg ắng
+à¸ģà¹ĩ ยัà¸ĩ
+ĠУкÑĢа ин
+ĠÑģ ами
+ĠпÑĢовед ен
+à¸ķà¸ģ à¹ģà¸ķà¹Īà¸ĩ
+ĠQu ân
+é paration
+ĠbaÅŁ ında
+Ġzn ale
+Ġznale ź
+Ġznaleź Äĩ
+ãĤ± ãĥ¼
+ãĥİ ãĥ¼
+à¸ĸูà¸ģ à¸ķà¹īà¸Ńà¸ĩ
+ëª ¸
+Ġëı Į
+ĠëıĮ ìķĦ
+ĠSch üler
+Ġпод гоÑĤов
+ĠподгоÑĤов к
+ع رÙĪ
+عرÙĪ Ø¶
+la ÅŁtır
+ĠÑģоÑģÑĤав лÑıеÑĤ
+ĠпÑĢоиз вод
+ĠпÑĢоизвод ÑģÑĤва
+ĠоÑģнов е
+ĠØ´ ÙħاÙĦ
+à¸ģร ี
+ĠgörÃ¼ÅŁ me
+оÑĩ ек
+Ġ×Ĺ×ijר ×Ļ×Ŀ
+ÙħØ® اط
+Ùħخاط ر
+ï¼ Ń
+ר פ×IJ
+ĠM ẹ
+ยà¸Ńม รัà¸ļ
+Ġv ết
+خ ذ
+ĠاÙĦت Ø·
+ĠاÙĦتط بÙĬÙĤ
+à¸Ļ ึà¸ģ
+Ġ×Ķ ×Ľ×ł×¡×ª
+ĠогÑĢ Ð°Ð½Ð¸
+ĠогÑĢани Ñĩен
+ĠÃĩ alÄ±ÅŁ
+ĠاÙĦÙħÙĨت دÙī
+à¸Īำà¸Ļวà¸Ļ มาà¸ģ
+ĠÑĤоÑĢ ÑĢ
+ĠÑĤоÑĢÑĢ ÐµÐ½ÑĤ
+ĠìĤ´ ìķĦ
+à¸ŀลัà¸ĩ à¸ĩาà¸Ļ
+à¸Ĭ ัà¸Ļ
+ĠÐIJн дÑĢ
+Ġréalis é
+×ŀש ×IJ
+à¹ģ à¸Ĭ
+à¹ģà¸Ĭ รà¹Į
+Ġб ог
+มา à¹ģลà¹īว
+ĠاÙĦÙĨ ار
+Ġolmad ıģı
+×ĵ ×¢×Ķ
+ĠÑĥ веÑĢ
+ĠÑĥвеÑĢ ÐµÐ½
+ãĤĭ ãĤĤãģ®
+أ د
+أد ÙĪØ§Øª
+Ġ×Ķ×ĸ ×ķ×Ĵ
+Ø¥ عÙĦاÙħ
+h á»ı
+ĠNä he
+ĠÑĤ еÑģÑĤ
+Ġ×ŀ ×ķ׼ר
+Ġë¬¸ìłľ ê°Ģ
+ת ×ķצ×IJ×Ķ
+m ó
+mó vel
+ĠاÙĦتج ارة
+Ġмног иÑħ
+обÑī а
+Ġ×¢ סק×Ļ
+ĠEdu cação
+×§ ש×Ļ×Ŀ
+é tabl
+établ issement
+Ġд еле
+иÑĢÑĥ еÑĤÑģÑı
+آ ثار
+Ġ×Ķ×ŀ ר׼×ĸ×Ļ
+ãĥIJ ãĥ«
+ĠвÑģÑĤÑĢ ÐµÑĩ
+ãģĴ ãĤĭ
+Ġci Äħ
+ĠciÄħ gu
+ÙĬ ست
+à¸łà¸² ว
+à¸łà¸²à¸§ ะ
+Ø£ Ùħر
+Ġо жи
+Ġожи да
+Ġ á»§y
+ãĥŀ ãĥ«
+ر اس
+оÑĩ ной
+ת ×Ĵ×ķ×ij×ķת
+تع رÙĬÙģ
+ĠÑģо ÑĨиалÑĮно
+ãĤĴ éĸĭ
+ĠиÑģÑģлед ова
+Ġd ú
+Ġdú vida
+Ġsk ÅĤ
+ĠskÅĤ ada
+Ġhä ufig
+ĠвÑĭб ÑĢ
+ĠвÑĭбÑĢ Ð°ÑĤÑĮ
+ãģ®ãģ§ãģ¯ãģªãģĦ ãģĭ
+ĠÑģ илÑĮно
+ÑĤвеÑĢж ден
+ר פ
+רפ ×ķ×IJ×Ķ
+æĢĿ ãģĦãģ¾ãģĻ
+ØŃر ص
+ש×ķת ×£
+Ùħس جد
+à¹Ĥà¸Ĭ วà¹Į
+ем ÑģÑı
+в ÑĪие
+Ġм л
+Ġмл н
+Ġ׾×Ķ ×ij×Ļ×IJ
+ĠÙĬ تعÙĦÙĤ
+à¸ķ ูà¹ī
+Ġп ÑĢаз
+ĠпÑĢаз д
+ĠпÑĢазд ник
+Ġн ем
+Ġнем ного
+Ġs Ãłng
+تÙĨ سÙĬ
+تÙĨسÙĬ ÙĤ
+Ġtá» Ŀ
+Ġмед и
+ãģ« æĪ
+ã쫿Π»
+à¸Ħว à¹īา
+ãģĭ ãģijãĤĭ
+×ij׾ ×ķת
+ĠÑįк Ñģп
+ĠÑįкÑģп еÑĢÑĤ
+Ġдев ÑĥÑĪ
+ĠдевÑĥÑĪ Ðº
+ĠØŃ ص
+ÙĨØ´ Ø£
+ãģĮãģĤãĤĭ ãģ®ãģ§
+Ġت راÙħ
+ĠتراÙħ ب
+أس ÙĪØ§ÙĤ
+Ġ׾פ ׳×ķת
+Ġا ï»·
+ãģ« ãģı
+ãģ«ãģı ãģĦ
+ĠØ£ عÙĦÙī
+Ġ׾×Ķ ×ŀש×Ļ×ļ
+rä u
+ש×ŀ ×Ļ×Ŀ
+åĪĨ ãģij
+ãģĻ ãģ§
+ãģĻãģ§ ãģ«
+×Ķ׾ ׼×Ķ
+×Ĺ׾ ×Ļ×£
+Ġì ±ħ
+Ġì±ħ ìŀĦ
+à¹Ģà¸Ī ริ
+à¹Ģà¸Īริ à¸į
+éģĬ ãģ³
+ج سد
+สา à¸ĺ
+สาà¸ĺ าร
+สาà¸ĺาร à¸ĵ
+Ġbas ın
+ÑĢаР³
+г ад
+Ġho ÅŁ
+íķ µ
+×ij×Ĺ ×Ļר×Ķ
+×ŀס ×ļ
+Ġìłľ íĴĪ
+تÙħ ÙĪÙĬÙĦ
+ĠL ưu
+ë¡ľ ë¶ĢíĦ°
+Ġп об
+Ġпоб ед
+ÙħÙĨ ذ
+常 ãģ«
+ÙĤ س
+ĠاÙĦÙħ صدر
+ĠÙĪØ§ÙĦ است
+Ġkh ắp
+ĠاÙĦج اÙĨب
+Ġng uyá»ĩn
+éĸĵ éģķãģĦ
+ĠÑģÑĤ ÑĢа
+ĠÑģÑĤÑĢа Ñħ
+ĠÑģÑĤÑĢаÑħ ов
+รี à¸ļ
+Ġx ương
+Ġì° ¾
+Ġì°¾ ìķĦ
+Ġng ại
+г ал
+à¸ĭ ีà¹Ī
+Ġ×ij פ×Ļ×Ļס×ij×ķ×§
+Ц енÑĤÑĢ
+Ġaval iação
+Ġeconóm ico
+×ĸ ף
+ĠÐľ ак
+Ġinter és
+à¸ģล ิà¹Īà¸Ļ
+ÑģÑĤÑĮ Ñİ
+ĠÄij ương
+å¼· ãģı
+ĠKh ách
+à¹Ģà¸Ļืà¹īà¸Ń หา
+ĠYaz ı
+è²· ãģ£ãģ¦
+Ðł Ðķ
+à¹Ģà¸ŀิà¹Īม à¸Ĥึà¹īà¸Ļ
+สม à¸ļู
+สมà¸ļู รà¸ĵà¹Į
+Ġм иÑĢов
+×Ĵ ׳×Ļ×Ŀ
+ĠÄij ức
+à¸Ń ารà¹Į
+ص اص
+ãģĬ ãĤĪ
+ãģĬãĤĪ ãģ³
+ÃªÌ ī
+ĠاÙĦÙħؤ تÙħر
+ĠاÙĦÙħر ØŃÙĦØ©
+สà¸Ńà¸ļ à¸ĸาม
+Ġà¸Īาà¸ģ à¸Ļัà¹īà¸Ļ
+Ġت عد
+ãģĿãģ® ãģŁãĤģ
+Ġkh áng
+à¸Ļ ิà¸Ķ
+ãĥĬ ãĥ³
+ëĦ¤ ìļĶ
+ĠاÙĦ اØŃت
+ĠاÙĦاØŃت ÙĦاÙĦ
+ìļ ķ
+Ġмод ели
+ĠпÑĢоÑĨ енÑĤ
+à¸ŀวà¸ģ à¹Ģรา
+Ġ×Ķצ ×ĵ
+Ġ×Ķצ×ĵ ×ĵ×Ļ×Ŀ
+ständ e
+׳ ×Ĵר
+Ġdot yc
+Ġdotyc zÄħ
+ĠdotyczÄħ ce
+ĠÅĽ wiÄĻt
+×ŀר ×Ķ
+ãģĻãģĶ ãģĦ
+ãĥĩãĤ£ ãĥ³ãĤ°
+à¸ģาร สรà¹īาà¸ĩ
+ë Ĥ¬
+Ġì°¸ ìŬ
+Ñģ Ñħ
+ÑģÑħ ем
+ÙħÙĪ Ø³
+Ġn ấu
+Ġ׾×ŀ×¢ ׾×Ķ
+à¹Ģà¸Ľ à¹īา
+à¹Ģà¸Ľà¹īา หมาย
+Ġmù i
+ائ ز
+íĽ Ī
+×Ĺ×ij ×ķר×Ķ
+à¸ľà¸¹à¹ī à¹ĥà¸Ĭà¹ī
+Ġpa ź
+Ġpaź dzi
+Ġpaździ ern
+Ġpaździern ika
+ลà¸ĩ à¹Ħà¸Ľ
+ÙĤ اع
+Ġch áºŃm
+Ġözellik leri
+ĠÄIJ o
+ĠÄIJo Ãłn
+ж ение
+Ġh ẳ
+Ġhẳ n
+ĠaÅŁ k
+ï½ į
+ãĥij ãĤ¹
+×Ķ×ķר ×IJ×ķת
+ĠÅ »
+ĠÅ» y
+×ŀ×ĸ ׾
+ĠÑĥ кÑĢа
+ĠÑĥкÑĢа ин
+à¹Ģà¸Ĭ ิ
+à¹Ģà¸Ĭิ à¸į
+Ðł Ðĺ
+ĠzwiÄħz ku
+×Ķ×Ĺ׾×ĺ ת
+ãĤĵãģ§ãģĻ ãĤĪãģŃ
+ãģ¦ ãģĬãĤĬ
+лож иÑĤÑĮ
+×ŀ ×ķ׳×Ļ×Ŀ
+ฮ ิ
+ì° ¬
+ĠاÙĦÙħØ´ ترÙĥ
+ĠdÃ¼ÅŁ ük
+аг енÑĤ
+ĠاÙĦØ£ سبÙĪØ¹
+ĠÙĤ رÙĬب
+ин д
+инд ив
+индив ид
+индивид Ñĥ
+индивидÑĥ алÑĮн
+för der
+Ġseç en
+Ġseçen ek
+Ġét ant
+ĠлÑİб им
+каз ÑĭваеÑĤ
+ว ิà¸Ļ
+Ġ×Ķ×ij ×IJ×Ļ×Ŀ
+Ġд ов
+Ġдов олÑĮ
+ĠдоволÑĮ но
+×¢×ĵ ×Ļ×£
+Ġok re
+Ġokre ÅĽ
+ĠokreÅĽ lon
+Ġت رÙĬد
+à¹Ģมืà¹Īà¸Ń วัà¸Ļà¸Ĺีà¹Ī
+ãĤĪ ãģĭãģ£ãģŁ
+Cum h
+Cumh ur
+Cumhur ba
+Cumhurba ÅŁ
+CumhurbaÅŁ kan
+CumhurbaÅŁkan ı
+Ġn ợ
+à¸ľà¸¹à¹ī à¹Ģลà¹Īà¸Ļ
+Ġcompl ète
+à¹Ģà¸ŀ ศ
+د ÙIJ
+Ġdü z
+Ġdüz ey
+ãģ§ãģĤãĤĭ ãģĵãģ¨
+ext érieur
+× ³
+Ġinform ação
+ãĤ¯ãĥª ãĥĭãĥĥãĤ¯
+ĠPub li
+ĠPubli é
+ר ×ķ×ĵ
+à¸Ħวาม à¸Ľà¸¥à¸Ńà¸Ķà¸łà¸±à¸¢
+ĠØ£ÙĬ ض
+ĠØ£ÙĬض Ùĭا
+ت سبب
+ãģ¤ ãĤĤãĤĬ
+из ма
+à¸Ĥึà¹īà¸Ļ à¹Ħà¸Ľ
+Ùĥ ÙIJ
+ÙĦ ÙĪÙħ
+Ġש צר
+Ġשצר ×Ļ×ļ
+ãģ¯ ãĤĤãģ¡ãĤįãĤĵ
+Ġк ан
+Ġкан ал
+ãģ«ãģª ãģ£ãģ¦ãģĦãģ¾ãģĻ
+ĠاÙĦØ£ Ùĥثر
+ت اØŃ
+ÙĨت Ùĩ
+ÙĨتÙĩ اء
+ا ÙĪÙĬØ©
+ĠBug ün
+н Ñģкого
+à¸Ķ à¹Īวà¸Ļ
+é volution
+ãģ£ãģ¦ ãģĦãģ¾ãģĹãģŁ
+ãĤ ħ
+ĠV ương
+à¸łà¸²à¸ŀ ย
+à¸łà¸²à¸ŀย à¸Ļ
+à¸łà¸²à¸ŀยà¸Ļ à¸ķรà¹Į
+Ġ×Ķ ×¦×ľ×Ļ×Ĺ
+ĠاÙĦإسÙĦاÙħ ÙĬ
+ÙĦÙĬ ب
+Ġed ição
+ÑģÑĤÑĢ ÐµÐ»
+Ġkh úc
+ÙĨÙħÙĪ Ø°
+ÙĨÙħÙĪØ° ج
+׾ צ×Ķ
+ÑģÑĤав ил
+à¸ĸ า
+สรà¹īาà¸ĩ à¸Ħวาม
+ãģĦ ãģ£ãģ±
+ãģĦãģ£ãģ± ãģĦ
+ÑģÑĤав лен
+ĠاÙĦ ÙĤدس
+Ġng ược
+ب خ
+ส หร
+สหร ั
+สหรั à¸IJ
+ĠØ£ غ
+Ġأغ سط
+Ġأغسط س
+ãģĨ ãģ¾
+ãģĨãģ¾ ãģı
+ĠêµŃ ìłľ
+ØŃض ار
+Ġd ừng
+æĬ¼ ãģĹ
+ت ÙĪØ§
+تÙĪØ§ جد
+ש×ŀ ×Ĺ×Ķ
+ãģı ãĤĵ
+Ġ×ij×¢ צ
+Ġ×ijעצ ×Ŀ
+×ŀ ׳×Ļ×ķת
+×ķ ×Ļ×ĵ
+×ķ×Ļ×ĵ ×IJ×ķ
+à¸Ĭ ิà¸ĩ
+Ġprac ÄĻ
+Ġз аÑĤ
+ĠзаÑĤ ем
+ĠìŀIJ ìľł
+Ġì¤ Ģ
+Ġì¤Ģ ë¹Ħ
+Ġb áºŃ
+ĠbáºŃ c
+Ġ×Ķ×ŀ צ×ij
+ĠÙĤ ÙĬÙħØ©
+à¹Ģà¸Ń à¹Ģà¸Ĭ
+à¹Ģà¸Ńà¹Ģà¸Ĭ ีย
+Ġperch è
+ĠاÙĦع سÙĥر
+ĠاÙĦعسÙĥر ÙĬØ©
+ج ÙĬب
+ëŀ µ
+Ùħ Ùĩر
+ÙħÙĩر جاÙĨ
+Ùħ راÙĥ
+ÙħراÙĥ ز
+Ġод нако
+à¸Ķี à¹Ĩ
+Ġצ פ×ķ
+Ġkullan ılan
+Ġк ино
+ãĥĨãĤ£ ãĥ³ãĤ°
+ĠGi Ỽi
+ت ÙĪØ²
+تÙĪØ² ÙĬع
+ย ิà¸Ļ
+ยิà¸Ļ à¸Ķี
+Ġc Åĵur
+ĠiÅŁ aret
+Ġ×ij×¢ ×ĸר
+Ġ×ij×¢×ĸר ת
+Ġп аÑĨи
+ĠпаÑĨи енÑĤ
+ãģ¿ãģŁãģĦ ãģ§ãģĻ
+в ез
+ли на
+од е
+Ġ×IJ×ķת ף
+dıģ ınız
+ĠÐIJ в
+ĠÐIJв ÑĤоÑĢ
+ï¼ ®
+ĠC ần
+ĠاÙĦا Ø®
+ĠاÙĦاخ بار
+Ġê±° ìĿĺ
+Ġat enção
+Ġgeld iÄŁi
+ãĤª ãĤ¹
+ãĤªãĤ¹ ãĤ¹
+ãĤªãĤ¹ãĤ¹ ãĥ¡
+ев Ñĭе
+кÑĢÑĭ л
+à¹Ģà¸Ĭ ียà¸ĩ
+à¹Ģà¸Ĭียà¸ĩ à¹ĥหมà¹Ī
+Ġmar ço
+ĠاÙĦÙħ ادة
+Ġг ол
+Ġsprzeda ży
+Ġíķ´ ê²°
+ĠÐķ го
+ê¹ Ģ
+Ġ׾ק×ij׾ ת
+ĠاÙĦÙģ ÙĨاÙĨ
+Ġcomunic ación
+à¹Ģสà¹īà¸Ļ à¸Ĺาà¸ĩ
+íĺ ¹
+à¸Ĭ ำ
+à¸Ĭำ ระ
+Ġ׼ ×IJ×ŀ
+Ġ׼×IJ×ŀ ×ķר
+à¸Ĭ à¹Īาà¸ĩ
+ز Ùĩر
+Ġklient ów
+ива ÑİÑĤ
+ан г
+׳ ×ļ
+Ġg á»įn
+Ãľ R
+ìĺģ ìĥģ
+Ġغ زة
+ìĿĮ ìĿĦ
+Ġbez po
+Ġbezpo ÅĽ
+ĠbezpoÅĽ redni
+ĠاÙĦÙħ ÙĪØ§
+ĠاÙĦÙħÙĪØ§ Ø·ÙĨ
+ĠاÙĦÙħÙĪØ§Ø·ÙĨ ÙĬÙĨ
+ãĤĮ ãģ¾ãģĻ
+ĠмаÑĤ Ñĩ
+×IJ ×ķף
+Ġر سÙħÙĬ
+ĠÑįк он
+ĠÑįкон ом
+ĠÑįконом иÑĩеÑģк
+ãĥľ ãĥ¼
+Ġд иÑĢ
+ĠдиÑĢ ÐµÐºÑĤоÑĢ
+ĠÑģк оÑĢо
+à¸ļ ำ
+à¸ļำ ร
+à¸ļำร ุà¸ĩ
+ĠÑĦ ÑĥÑĤ
+ĠÑĦÑĥÑĤ бол
+Ġ×IJ ×Ļ׾
+Ġì¤ij êµŃ
+ìľ ¤
+eÄŁ e
+à¹Ħ à¸ģà¹Ī
+tra î
+traî n
+ĠÑĤ ÑĢÑĥб
+à¹Ģà¸ļ ื
+à¹Ģà¸ļื à¹īà¸Ńà¸ĩ
+à¹ģม à¸Ļ
+ĠتØŃ دÙĬØ«
+Ġ׼ עת
+ØŃ اسب
+lı ģa
+×§×Ļ ×Ļ×ŀ×Ļ×Ŀ
+оÑģÑĤ ÑĮÑİ
+à¸Ŀ ั
+à¸Ŀั à¹Īà¸ĩ
+Ø´ غÙĦ
+ìĽ ¹
+Ġкажд ого
+Ġbölüm ü
+หà¸Ļ ี
+Ġistedi ÄŁi
+Ġtr ưng
+ãĥ Į
+ฮ à¸Ń
+Ø£ÙĨ Ø´
+Ø£ÙĨØ´ طة
+ĠاÙĦÙħ سÙĬ
+ĠاÙĦÙħسÙĬ ØŃ
+ลัà¸ģษ à¸ĵà¹Į
+Ġn á»Ńa
+à¸Ĺีà¹Ī à¸ķà¹īà¸Ńà¸ĩà¸ģาร
+ÑĪ ÐµÐº
+л Ñij
+Ġש ×Ļ×Ķ
+Ġש×Ļ×Ķ ×Ļ×Ķ
+Ġkhu ôn
+ĠÑĤÑĢеб ованиÑı
+Ġ×ľ×¢ ×ĸ×ķר
+ĠاÙĦع Ùħر
+ราà¸Ħา à¸ĸูà¸ģ
+ÙĩÙı ÙħÙĴ
+ü st
+üst ü
+Ġден ег
+Ġn ạ
+à¸Ĥà¸Ļ ม
+Ġбл аг
+Ġблаг од
+Ġблагод аÑĢ
+ĠблагодаÑĢ Ñı
+Ø¥ سÙĦاÙħ
+à¸Ļิ ว
+çŁ¥ ãĤīãģªãģĦ
+Ø« ÙĤØ©
+Ġг олоÑģ
+×IJ×ķר ×Ĺ
+Ġtr ứng
+Ġод ном
+ĠkoÅĦ cu
+Ġ×ķ רק
+Wi ÄĻ
+WiÄĻ cej
+Ġ×IJ ×Ļ׼×ķת
+Ġ×IJ×Ļ׼×ķת ×Ļ
+Ñģ оÑģ
+Ġje żeli
+以ä¸ĭ ãģ®
+å°ı ãģķ
+å°ıãģķ ãģª
+олог ии
+Ġоб ÑģлÑĥж
+ĠобÑģлÑĥж ива
+Ùĥت ابة
+Ġê´Ģ ìĭ¬
+×¢ ש×Ļר
+Ġaras ındaki
+ĠÑĢай она
+ÙĪØ§ جب
+Ġ×ij×Ĺ×Ļ ×Ļ
+íķ´ ì£¼
+Ġg óc
+ай л
+ĠT ình
+æļ® ãĤī
+æļ®ãĤī ãģĹ
+æĻĤ ãģ«ãģ¯
+ĠгоÑĢод е
+Ġ׼×IJ ×Ļ׾
+Ġ׼×IJ×Ļ׾ ×ķ
+ĠC á»Ļng
+ãģ©ãģĨ ãģĹãģ¦ãĤĤ
+×Ĺ ×ķ×£
+تØŃ رÙĥ
+ĠÑģлов ам
+à¸Īะ à¸Ĭà¹Īวย
+ĠاÙĦÙħست ÙĤبÙĦ
+ÙĤ ض
+ÙĤض ÙĬ
+×ijס ×ķפ
+×ijס×ķפ ×ķ
+iÄĻ Äĩ
+ĠY ıl
+Ø´ ÙĬØ®
+à¸Ħุà¸ĵ à¸Īะ
+ש×ŀ ×ķת
+Ġت عرض
+Ġanál ise
+ĠÑģоб иÑĢа
+à¹Ģà¸ŀ à¸Ĭ
+à¹Ģà¸ŀà¸Ĭ ร
+Ġв ели
+Ġвели к
+สั à¹īà¸Ļ
+Ġpop ulação
+รà¹Īวม à¸ģัà¸Ļ
+×Ĺ ×ŀ
+×Ĺ×ŀ ×Ļש×Ļ
+ס ×Ļס
+åĨħ ãģ§
+Ġsob Äħ
+ĠY ay
+ĠYay ın
+ãĥ¡ ãĥĭãĥ¥ãĥ¼
+ĠпÑĢедоÑģÑĤав лÑı
+ãģł ã썿ĢĿãģĨ
+Ġê³ł ê°Ŀ
+Ġод ним
+à¹ĥà¸Ļ à¹Ģรืà¹Īà¸Ńà¸ĩ
+Ġs á»ķ
+ĠÐĹ Ð´ÐµÑģÑĮ
+Ġизмен ениÑı
+ĠìĿ¼ ìĿĦ
+ãģªãģ® ãģł
+клад Ñĭва
+ÑĢ Ð¼Ð°
+Ġ×ķ×ij ׼׾
+تأ ÙħÙĬÙĨ
+ĠпÑĢи ÑıÑĤ
+ĠпÑĢиÑıÑĤ н
+Ùħ Ùħار
+ÙħÙħار سة
+ãģ¨ãģª ãģ£ãģ¦
+Ġج ÙħÙĬÙĦ
+Ġì§ Ī
+Ġì§Ī 문
+Ġquest ão
+i é
+ié ndo
+หà¹īà¸Ńà¸ĩ à¸ŀัà¸ģ
+ãĥij ãĥ¼ãĥĪ
+ÑĤвеÑĢж да
+н Ñģкой
+з ал
+มุ à¹Īà¸ĩ
+á» Ĭ
+Ġ×Ķ×IJ×Ĺר ×ķ׳×Ķ
+ĠTh ư
+주 민
+ĠاÙĦع ب
+év én
+évén ement
+ÙĤÙĪ Ø§Ø¹Ø¯
+د Ùı
+ĠìķĬ ìĬµëĭĪëĭ¤
+Ġë³´ 기
+Ġyapıl ması
+à¹Ģร าà¸ģ
+à¹Ģราà¸ģ à¹ĩ
+ØŃ ذر
+ÙĤ صر
+ãģ¦ãģĹãģ¾ ãģĦãģ¾ãģĹãģŁ
+Ġà¹Ģà¸Ľà¹ĩà¸Ļ à¸ķà¹īà¸Ļ
+ãģ¨ ãģ«
+ãģ¨ãģ« ãģĭ
+ãģ¨ãģ«ãģĭ ãģı
+н ÑĨе
+зв Ñĥк
+ãģĹãĤĪãģĨ ãģ¨
+ĠاÙĦصØŃ ÙĬØ©
+Ġש×Ķ ×Ļ×ķ
+ĠDi ÄŁer
+ÙĤÙĦ ÙĤ
+ãĤ¸ãĥ£ ãĥ³
+Ġr á»Ŀi
+Ġл еÑĩ
+ĠлеÑĩ ениÑı
+تب اد
+تباد ÙĦ
+צ פ×Ķ
+à¸Ħวาม à¹Ģหà¹ĩà¸Ļ
+ĠØ´ ب
+Ġشب ÙĥØ©
+ר ×Ļ×§
+Ùħ عد
+Ùħعد ات
+dıģ ında
+Ġ×ijש ׳×Ļ×Ŀ
+Ġ×Ķ ×Ļשר×IJ׾
+Ġ×Ķ×Ļשר×IJ׾ ×Ļת
+Ġsı nav
+׳צ ×Ļ×Ĵ
+วัà¸ķ à¸ĸุ
+ĠاÙĦبر ÙĦÙħ
+ĠاÙĦبرÙĦÙħ اÙĨ
+t ivitÃł
+ãĤĵãģł ãĤįãģĨ
+×§×Ļ ×Ļ×ŀ
+ÙĦÙĬ Ùĥ
+ĠÄij ò
+ĠÄijò i
+ĠÐĺн ÑĤеÑĢ
+ĠÐĺнÑĤеÑĢ Ð½ÐµÑĤ
+ãģ«ãģ¨ãģ£ãģ¦ ãģ¯
+ãģ£ ãģĵ
+×§ ×ķס
+ست ØŃÙĤ
+æķĻ ãģĪãģ¦
+ãĥĢ ãĥ¡
+ĠÙħÙĨ زÙĦ
+à¹Ģà¸ĭ à¹ĩà¸Ļ
+使 ãģĪãĤĭ
+è¦ĭ ç©į
+è¦ĭç©į ãĤĤãĤĬ
+Ø£ Ùģ
+Ø£Ùģ Ùĥار
+Ġиг ÑĢов
+ĠигÑĢов Ñĭе
+Ġm ÄĻż
+ĠmÄĻż czy
+ĠmÄĻżczy zn
+ĠاÙĦØŃ ÙĤÙĬÙĤÙĬ
+ع بر
+׼×ķ׾ ׳×ķ
+íĿ ¥
+×ŀ×IJ ×ķ×Ĺר
+خت ص
+ãĥŀ ãĥŀ
+Ġ×IJ×Ĺ ×ķ×ĸ
+í ĮĢ
+Ġr á»iji
+Ġв ÑĤоÑĢ
+ĠвÑĤоÑĢ Ð¾Ð¹
+Ġl ẫn
+пÑĢ Ð¾Ð¼
+пÑĢом ÑĭÑĪ
+пÑĢомÑĭÑĪ Ð»ÐµÐ½
+пÑĢомÑĭÑĪлен н
+ĠоÑĤноÑĪ ÐµÐ½Ð¸Ñı
+Ġs ứ
+Ġм обилÑĮ
+ĠмобилÑĮ н
+ĠÑįÑĤ омÑĥ
+Ġt ạp
+ĠìĤ¬ ê±´
+ĠìķĮ 볤
+Ùĥ Ùı
+ÙĥÙı ÙħÙĴ
+Ġ×§ ×ķר×Ķ
+ĠÑĦ иÑĢ
+ĠÑĦиÑĢ Ð¼
+Ġsık ıntı
+׳ ׼
+׳׼ ×ķף
+ÙĪÙĦÙĪØ¬ ÙĬ
+ØŃ اÙĨ
+Ġlo ạn
+Ġ×IJ׾ ×£
+Ġm ắn
+abh äng
+abhäng ig
+ĠÑĥÑĢов нÑı
+Ġ׾×ij×ĵ ×ķ×§
+ÙĬ ÙħÙĨ
+lay ın
+Ġh ải
+Ġзав од
+ĠìķĦ 주
+สà¸ĸ า
+สà¸ĸา à¸ļัà¸Ļ
+Ġgüven lik
+à¹Ģà¸Ķ à¹Īà¸Ļ
+×ij×ĵ ×§
+Ġë Ī
+ĠëĪ Ħ
+ĠëĪĦ 구
+éĩįè¦ģ ãģª
+รà¸Ńà¸ĩ รัà¸ļ
+sch lie
+schlie ÃŁen
+Ġìĸ ¼
+Ġìĸ¼ ë§Ī
+Ġìĸ¼ë§Ī ëĤĺ
+ÑĤи ки
+íķľëĭ¤ ê³ł
+ãģłãģ£ãģŁ ãĤī
+Ġ×Ķ ×Ļ×ĺ×ij
+ãģªãģijãĤĮãģ° ãģªãĤīãģªãģĦ
+â Ì
+Ã¢Ì £
+Ġph ạt
+ak Ä±ÅŁ
+ãģ¦ãģĹãģ¾ ãģĦãģ¾ãģĻ
+à¹Ģà¸ĭ à¹ĩ
+ĠС егоднÑı
+Ġinsan ların
+Ġdévelop pe
+ת פר
+תפר ×Ļ×ĺ
+اÙĨت شار
+ê° ij
+Fran çois
+Ø£ÙĦ ع
+Ø£ÙĦع اب
+ãĤĴ è¶ħ
+ãĤĴè¶ħ ãģĪ
+Ġê°Ļ ìĬµëĭĪëĭ¤
+ãĤ³ ãĥ¬
+ĠмеÑģÑı ÑĨев
+íĮ ħ
+ĠاÙĦج اÙħعة
+ìĿ¸ íĦ°
+ìĿ¸íĦ° ëĦ·
+×ĵר ×ķש
+ĠÙĪØ£ شار
+ĠпÑĢав ила
+ãģĿãģĵ ãģ«
+×Ĺ ×ŀ×ĵ
+à¹Ģหà¸ķุ à¸ģารà¸ĵà¹Į
+Ġê²½ íĹĺ
+ãģ¶ ãĤĬ
+׾ ש
+׾ש ×ķף
+à¹Ģ à¸ĸ
+ĠDo ÄŁu
+ĠиÑģполÑĮзов ание
+Ġçoc uÄŁu
+магазин е
+ĠÄiji á»ĥn
+Ġas lı
+Ġaslı nda
+Ġdoen ça
+Ġس اع
+Ġساع ات
+ĠиÑģполÑĮзов аниÑı
+ר ×ķצ×Ļ×Ŀ
+ĠзнаÑĩ иÑĤ
+ĠÑĢаР¼
+ĠÑĢам каÑħ
+거 리
+Ġп ÑĭÑĤа
+ãĥģ ãĥ³
+Ġпо Ñģк
+ĠпоÑģк олÑĮ
+ĠпоÑģколÑĮ кÑĥ
+إ بر
+إبر اÙĩ
+إبراÙĩ ÙĬÙħ
+ĠÑĤÑĢ ÐµÑħ
+ĠGen ç
+س ÙĪÙģ
+Ġve ÃŃculo
+ĠNg ân
+ĠоÑĩеÑĢ ÐµÐ´ÑĮ
+à¸Ħร ึà¹Īà¸ĩ
+×IJ ×ij×Ļ
+à¸ķ à¹īม
+ãĤĴè¡Į ãģĦ
+ĠاÙĦساب ÙĤØ©
+на ÑĨи
+наÑĨи она
+наÑĨиона лÑĮн
+Ġgest ión
+ت ÙĤد
+ĠاÙĦبÙĬ اÙĨ
+ĠاÙĦبÙĬاÙĨ ات
+ĠاÙĦ اÙĨتخاب
+ĠاÙĦاÙĨتخاب ات
+à¹Ģà¸Ĭ à¹Īา
+×ĵ ×IJ×Ĵ
+Ġ׾×Ĵ ×ŀר×Ļ
+Ġت ØŃتاج
+Ġth ôn
+à¸ķ à¹īà¸Ńà¸Ļ
+à¸ķà¹īà¸Ńà¸Ļ รัà¸ļ
+女 ãģ®
+女ãģ® åŃIJ
+Ġth ợ
+Ø· ØŃÙĨ
+ารà¹Į à¸Ķ
+ת ×ŀ×Ļ×ĵ
+ĠÑģам Ñĭм
+Ġìĭľ íĸī
+إ صد
+إصد ار
+ĠNgh á»ĩ
+ìķ ķ
+س ئ
+سئ ÙĦ
+à¸Ń าร
+à¸Ńาร ม
+à¸Ńารม à¸ĵà¹Į
+à¹ģ ฮ
+׳×ĺ ׾
+Ġì¢ĭ ìķĦ
+×ķ׾ ׾
+Ġ×ij ×Ľ×ª×ij
+ãĤ« ãĥ©
+צע ×Ļר×Ļ×Ŀ
+تعب ÙĬر
+Ġ×ŀ קר×Ķ
+ĠÑĦак ÑĤоÑĢ
+Ġت ÙħاÙħ
+ĠتÙħاÙħ ا
+ëį ķ
+Ġv ưá»Ŀ
+Ġvưá»Ŀ n
+Ġd Ä±ÅŁÄ±
+ãģĦ ãģ¡
+Ġ׾ק ׳×ķת
+ĠاÙĦع ÙĦاÙĤات
+п Ñĥб
+пÑĥб ли
+Ø¥ ÙĬÙħ
+Ø¥ÙĬÙħ اÙĨ
+à¸Ńำ à¸Ļา
+à¸Ńำà¸Ļา à¸Ī
+åIJ« ãģ¾ãĤĮ
+ãĤĭ ãģŁãĤģãģ«
+ס ×Ĵ
+ס×Ĵ ׳×ķף
+تØŃ دÙĬ
+Ġaup rès
+ĠاÙĦج Ùĩا
+ĠاÙĦجÙĩا ز
+Ġ×ŀ ת×Ĺת
+ен нÑĥÑİ
+Ġз им
+à¸ģา à¹ģà¸Ł
+Ġ×ijת ×ķר
+Ġngh è
+Ġnghè o
+ĠÐĽ Ñİ
+ĠÐĽÑİ Ð±
+תק צ×Ļ×ij
+×ŀ×¢ ש×Ķ
+ĠاÙĦبÙĬ ت
+צ ×Ļפ
+ĠобÑıз ан
+ĠM á»Ĺi
+ĠТ ÑĥÑĢ
+ĠÙĪØ¨ اÙĦت
+ĠÙĪØ¨Ø§ÙĦت اÙĦÙĬ
+Ġdéc ision
+Ġب د
+Ġبد أت
+Ġc ục
+Ġb ask
+Ġbask ı
+Ġhat ırl
+Ġhatırl a
+å°ı ãģķãģĦ
+Ġgerçek ten
+à¸ľ ัà¸ģ
+åı¯èĥ½ ãģª
+×ŀ×IJ ס
+Ġcr ÃŃtica
+ĠìĿĺ ìĽIJ
+عÙĤ ÙĪØ¯
+×ĺ ׼׳
+×ĺ׼׳ ×ķ׾×ķ×Ĵ×Ļ×Ķ
+è¨Ģ ãģĪãģ°
+ĠÙĤ ÙĨا
+ĠÙĤÙĨا Ø©
+ĠìĿ´ê²ĥ ìĿĢ
+ت صر
+à¸Ł ัà¸Ļ
+ĠÑĢе ÑĨеп
+ĠÑĢеÑĨеп ÑĤ
+ĠبÙĨ Ù쨳
+ÑĢо ÑĪ
+ĠмаÑĢ ÑĤа
+Ġson ras
+Ġsonras ı
+×ķ×ij ש
+ãĥª ãĤ¹ãĤ¯
+ĠFranç ais
+á» ļ
+ê° Ķ
+Ġ×Ķ×ijר ×Ļת
+פ ×Ļצ
+פ×Ļצ ×ķ×Ļ
+ĠÙĦÙħا ذا
+ĠÐļи ев
+ĠÑģ мÑĭÑģл
+ê¸Ī ìľµ
+ãĤ·ãĥ£ ãĥ«
+ãĥ© ãĤ¤ãĥĪ
+ìĽ ĥ
+×ŀ ×Ĺר
+ãĨ į
+Ġkullan ım
+Ġ×IJצ׾ ׳×ķ
+Ġt Ãłn
+ãĥı ãĥ¼
+ãģ¨ ãģ¨ãĤĤ
+ãģ¨ãģ¨ãĤĤ ãģ«
+ÑĢ ÐµÐ³
+ÑĢег и
+ÑĢеги он
+ãģªãģı ãģªãĤĭ
+Ġch ảy
+Ġج ÙĩØ©
+ÅĦsk iej
+à¸Ńี à¹Ģม
+à¸Ńีà¹Ģม ล
+ãģį ãģ£ãģ¨
+ĠìĺĪ ìĤ°
+Ġkit abı
+Ġedu cação
+Ġbul uÅŁ
+олог иÑı
+Ġкон кÑĢ
+ĠконкÑĢ ÐµÑĤ
+×Ĵ ×Ļר
+ĠпÑĢед лаг
+ĠпÑĢедлаг аеÑĤ
+ĠY ên
+Ġíķľ ë²Ī
+Ġ×ŀ ר׼×ĸ×Ļ
+à¹Ģà¸Ľà¸´à¸Ķ à¹Ģà¸ľà¸¢
+ÑĤвеÑĢ Ð´
+ĠH á»ĩ
+ĠÐĵ ÑĢ
+à¸Ŀ à¹īา
+×Ķ ×©×§
+×Ķשק ×¢×Ķ
+Ġна Ñĥк
+ìłIJ ìĿĦ
+Ġн елÑĮ
+ĠнелÑĮ з
+ĠнелÑĮз Ñı
+г ин
+ĠB öl
+ĠBöl ge
+Ġв ла
+Ġвла ÑģÑĤи
+à¹Ģà¸Ļ à¹ĩ
+à¹Ģà¸Ļà¹ĩ à¸ķ
+ê³ ¨
+Ġö ld
+Ġöld ür
+׼׳ ×¢
+ĠاÙĦÙĩ ÙĬئة
+ت ارÙĬØ®
+ĠÐij ÑĢ
+ĠÑģ мож
+ĠÑģмож еÑĤе
+ĠL úc
+à¹Ħà¸Ľ à¸ĸึà¸ĩ
+ĠBakan ı
+Ġerklä rt
+ĠÐIJ на
+Ġsc ène
+åķı ãģĦ
+åķıãģĦ åIJĪãĤıãģĽ
+ÙħÙĩ ÙĨد
+ÙħÙĩÙĨد س
+Ġн азвание
+ив аниÑı
+ãĤĴ å¤īãģĪ
+ä»ĺãģį åIJĪ
+ãĥij ãĤ½
+ãĥijãĤ½ ãĤ³ãĥ³
+æĺİ ãĤī
+æĺİãĤī ãģĭ
+à¹Ģà¸Ńà¸ģ สาร
+à¹Ģà¸ģิà¸Ļ à¹Ħà¸Ľ
+л еп
+ãģĹãģŁ ãĤĤãģ®
+ĠC âm
+ĠCâm ara
+×§×ķ׾ ׳×ķ×¢
+Ġ×ij×Ĵ ×Ļף
+Ġoc zy
+Ġoczy wiÅĽcie
+att ivitÃł
+ãĥĵ ãĥ¥ãĥ¼
+Ġeduc ación
+İ YE
+ê¹Į ìļĶ
+ãĤ¨ ãĥªãĤ¢
+н еÑģÑĤи
+Ġm óg
+Ġmóg ÅĤ
+Ġ×§×ĺ ׳×Ļ×Ŀ
+ĠPr ä
+Ġ×ľ×¢ ×ij×ķר
+بÙĨ Ùī
+з ол
+зол оÑĤ
+Ġwn ÄĻtr
+ĠwnÄĻtr z
+Ġconstr ução
+รัà¸ļ รà¸Ńà¸ĩ
+س جÙĨ
+Ġ×§ ×ķ׳
+ס ×Ļפ×ķר
+ĠÙħ دÙī
+رض Ùī
+п лав
+ï¼ ¥
+Ġil a
+Ġila ç
+ãĤĭ ãģ¹ãģį
+ĠÙħ ÙĪÙĤÙģ
+à¸ģร ุ
+à¸ģรุ à¸ĵา
+chodzÄħ c
+ĠÑĤÑĭ Ñģ
+Ðķ вÑĢо
+ĠÙĬ ØŃدث
+ãĥ¡ ãĤ¤ãĥ³
+ĠاÙĦص ØŃÙĬ
+ĠÐĶ Ð°Ð½
+دع اء
+ãĤ´ ãĥ¼ãĥ«
+ש ×ł×ª×Ļ
+×©×ł×ª×Ļ ×Ļ×Ŀ
+à¸Ķà¹īวย à¸ģัà¸Ļ
+Ġol acaģı
+Ġ×ij ×ŀ×Ĺ×Ļר
+×Ķ ×§
+×Ķ×§ ×ŀת
+ãĥ¢ ãĥİ
+ĠçalÄ±ÅŁ tı
+Ġjó venes
+ãģĦãģı ãĤī
+ĠÙħ عدÙĦ
+ĠC Å©ng
+ĠSeg ún
+Ġdönem de
+Ġ׾ ×Ļ×ĵ×Ļ
+ãģį ãģ¡
+ãģįãģ¡ ãĤĵ
+ãģįãģ¡ãĤĵ ãģ¨
+Ù쨱 ÙĨس
+Ù쨱ÙĨس ا
+åIJij ãģį
+Ġcamp aña
+ĠÑģам оÑģÑĤоÑı
+ĠÑģамоÑģÑĤоÑı ÑĤелÑĮно
+á» Ģ
+ÙĤ ÙĪØ§
+س ÙĦاØŃ
+à¸ģระ à¹ģ
+à¸ģระà¹ģ ส
+ĠполÑĮз Ñĥ
+n qu
+nqu ête
+รà¹Īวม à¸ģัà¸ļ
+ëĬIJ ëĥIJ
+à¸Ĺีม à¸Ĭาà¸ķิ
+Ġyıll ık
+ìĬ ¬
+ĠØ£ صØŃاب
+ill é
+Ġdó la
+Ġdóla res
+Ġк ож
+Ġкож и
+ล à¹īà¸Ń
+à¹Ģรีย à¸ļร
+à¹Ģรียà¸ļร à¹īà¸Ńย
+à¹Ģà¸ŀ ิ
+à¹Ģà¸ŀิ à¹Īà¸ĩ
+ÑĢиÑĤоÑĢ Ð¸
+Ġí ijľ
+Ġíijľ íĺĦ
+ĠпеÑĢ ÐµÐ²
+ĠпеÑĢев од
+פ×Ĵ ×Ļ×¢×Ķ
+ĠdeÄŁerlendir me
+Ùģ Ø§Ø¦
+ĠвÑĭ год
+ınız ı
+×ķ׼ ×Ļ×Ĺ
+ĠдоÑģÑĤ иг
+Ġng Ãłn
+æĢĿ ãģ£ãģŁ
+ĠÐķ ÑģÑĤÑĮ
+ĠاÙĦر غÙħ
+ĠzwiÄħz ane
+رب ط
+à¸Ļ ึà¸ĩ
+Ġ׾×Ĺ ×ķ×§
+Ġszczeg óln
+Ġszczególn ie
+Ġبا ستخداÙħ
+ĠfÃŃs ico
+ע ס
+עס ×ķ×§
+سÙĦ ÙĪÙĥ
+Ġا ØŃد
+Ñĩ ÑijÑĤ
+×ĸ׼ ×Ķ
+Ġl á»ĩnh
+ĠÙĪ ØŃت
+ĠÙĪØŃØª Ùī
+à¸Ħวาม สามารà¸ĸ
+à¸Ńยูà¹Ī à¹ģลà¹īว
+à¸ģาร à¹Ģà¸Ķิà¸Ļà¸Ĺาà¸ĩ
+تخ ذ
+צ×Ļ ×ķ×ĵ
+ĠاÙĦØ£ س
+ĠاÙĦأس ÙĩÙħ
+Ġt á»ĩ
+ãģ£ãģ¦ ãģĦãģ¦
+สร ุ
+สรุ à¸Ľ
+Ġком ÑĦ
+ĠкомÑĦ оÑĢÑĤ
+ìĺ¤ ëĬĶ
+ĠÑĢаз в
+ĠÑĢазв ива
+л анд
+h änge
+ĠبÙĨ سبة
+à¹Ģà¸Ĥ ียว
+עצ ×Ŀ
+Ġ׾ ×ľ×Ľ×ª
+Ñģо ÑĨиалÑĮн
+Ġëĭ¤ìĿĮ ê³¼
+Ġרש ×ķ×ŀ
+×ŀר ×Ĺ×ij
+س ÙĤØ·
+Ġalan ı
+ĠÄij á»ĩ
+é£Łãģ¹ ãĤĭ
+à¸Ķ ึà¸ĩ
+Ġgegen über
+ĠبÙĩ ذÙĩ
+à¸ĸืà¸Ń à¹Ģà¸Ľà¹ĩà¸Ļ
+ëķ ħ
+à¸Ħà¸Ļ à¹Ħà¸Ĺย
+ãĤ¢ ãĤ¦
+ãĤ¢ãĤ¦ ãĥĪ
+ศ ัà¸ģ
+ศัà¸ģ à¸Ķิ
+ศัà¸ģà¸Ķิ à¹Į
+ÙĤÙĪ Ø§ÙĨ
+ÙĤÙĪØ§ÙĨ ÙĬÙĨ
+Ġhá»Ļ p
+ãģªãģıãģª ãģ£ãģ¦
+Ġ×IJ ×ŀ׳
+Ġ×IJ×ŀ׳ ×Ŀ
+à¹Ģà¸ķ ืà¸Ńà¸Ļ
+ĠзавиÑģ им
+ĠзавиÑģим оÑģÑĤи
+ת ×Ļ×IJ
+ת×Ļ×IJ ×ķר
+å§ĭãĤģ ãģŁ
+Ġng á»į
+Ġngá»į t
+íĴ į
+ê³¼ ìŀ¥
+Ġb ại
+ãģ§ãģį ãģ¦
+Ġcomeç ar
+à¸Ľà¸£ าà¸ģ
+à¸Ľà¸£à¸²à¸ģ à¸ı
+Ġгод Ñĭ
+м еÑģ
+ĠاÙĦÙħست ÙĪÙī
+ĠÑģам Ñĭе
+л леÑĢ
+ãģ£ãģ¦ãģĹãģ¾ ãģĦãģ¾ãģĻ
+ãģ¨ãģ® ãģĵãģ¨
+bi ó
+à¸ģล à¹Īà¸Ńà¸ĩ
+ĠاÙĦز ÙĪØ¬
+ãģ«è¡Į ãģ£ãģŁ
+à¸Ħà¹Ī à¸Ńà¸Ļ
+à¸Ħà¹Īà¸Ńà¸Ļ à¸Ĥà¹īาà¸ĩ
+ĠbaÄŁ l
+ĠbaÄŁl ant
+ĠbaÄŁlant ı
+確 ãģĭ
+確ãģĭ ãģ«
+ãĥľ ãĥ¼ãĥ«
+çµĤ ãĤıãĤĬ
+ש ×ŀר
+à¸Ĺีà¹Ī สามารà¸ĸ
+ÙĦ زÙħ
+д аеÑĤÑģÑı
+รัà¸ļ à¸Ľà¸£à¸°
+รัà¸ļà¸Ľà¸£à¸° à¸Ĺาà¸Ļ
+å¤ī ãĤıãĤĬ
+ï¼ ¢
+ĠìĺĪìĪĺ ëĭĺ
+ãĤĪãģĨ ãģ¨
+มัà¸ģ à¸Īะ
+ĠH ương
+ÙĨ Ù쨰
+×ŀ×ĵ ×ĵ
+ĠìĿ¸ ìłķ
+Ñħод иÑĤÑĮ
+ĠзавиÑģ иÑĤ
+×ķ×ĵ ×Ļ×¢
+ãģĵãģ¨ãģĮ ãģĤãĤĬãģ¾ãģĻ
+ع راÙĤ
+سط ØŃ
+à¸ģำ à¹Ħร
+ëĵ¤ ëıĦ
+×Ļצ ×Ļר×Ķ
+ãģĨ ãģĵãģ¨
+ÙĦا ØŃÙĤ
+ãģĦ ãĤĮãģ°
+ĠиÑģполÑĮз ÑĥÑİÑĤ
+ĠB ợi
+Ġשק׾ ×Ļ×Ŀ
+ÑĨи кл
+ÐIJ Ðŀ
+Ġ×ijש ׳×Ķ
+ÙĨØ´ Ø·
+Ġש ×Ļ׳×ķ×Ļ
+Ġש×Ļ׳×ķ×Ļ ×Ļ×Ŀ
+Ġpobl ación
+ĠH ưng
+ระ ว
+ระว ัà¸ĩ
+رÙĬاض Ø©
+ر صد
+تÙĤ ÙĦÙĬ
+تÙĤÙĦÙĬ د
+Ġülk em
+Ġülkem iz
+à¸Ĭ ะ
+ãĤ¯ãĥª ãĥ¼ãĥł
+èģŀ ãģĦãģŁ
+Ġwa ż
+Ġważ ne
+ê±° ëĵł
+ê±°ëĵł ìļĶ
+×ŀ×IJ ×ij×§
+×Ĺ×ĵ ש×ķת
+ĠW roc
+ĠWroc ÅĤaw
+ĠKü ltür
+s ist
+sist ência
+×¢×ĸר ×Ķ
+Ġg ương
+รà¹īาà¸Ļ à¸Ħà¹īา
+ĠÙĪØ£ ÙĪØ¶ØŃ
+ánd ose
+ãĤ· ãĥ¼ãĥ³
+×IJ׳ ר×Ĵ
+×IJ׳ר×Ĵ ×Ļ×Ķ
+ãģªãģĦ ãģ§ãģĻ
+Ġkh á»§ng
+Ġ문 ìĦľ
+Ġ×ij ×ĵ×ijר
+×ĵ ×Ļ×ķ
+×ĵ×Ļ×ķ ×ķ×Ĺ
+Ġré gl
+ÙħÙĪ Ø§Ø¯
+об оÑĢ
+обоÑĢ Ð¾ÑĤ
+Ġ×Ķ ×ij׾
+Ġ×Ķ×ij׾ ×ķ×Ĵ
+ØŃ اÙħ
+ĠاÙĦع اص
+ĠاÙĦعاص ÙħØ©
+пеÑĢ Ð°ÑĤоÑĢ
+ت Ø®ÙĦ
+تخÙĦ ص
+ãģŁãģł ãģĹ
+ت سÙħ
+à¹Ĥรà¸ĩ à¸ŀ
+à¹Ĥรà¸ĩà¸ŀ ยา
+à¹Ĥรà¸ĩà¸ŀยา à¸ļาล
+ĠY ük
+ĠYük sek
+Ġש ׳×Ļת
+Ġש׳×Ļת ף
+liÄŁ e
+Ġפ ת
+Ġפת ×ķ×Ĺ
+Ġbe ÄŁ
+ĠbeÄŁ en
+Ġ×ŀ ×ķר
+Ġ×ŀ×ķר ׼×ij
+Ġرس اÙĦØ©
+íĨµ ìĭł
+Ġaval ia
+Ġavalia ções
+Ġman h
+Ġmanh ã
+Ġìķ ŀ
+Ġìķŀ ìľ¼ë¡ľ
+ÙĤ تر
+ÙĤتر ØŃ
+à¹Ģà¸ģ ืà¸Ń
+à¹Ģà¸ģืà¸Ń à¸ļ
+Ġpropos é
+Ø£ Ùħا
+Ø£Ùħا ÙĥÙĨ
+ĠÐŀ Ðŀ
+ĠÐŀÐŀ Ðŀ
+ÙħÙĤ ار
+ÙħÙĤار ÙĨØ©
+ëĦ IJ
+ãģĦãģŁãģł ãģı
+ÙĤ ÙĬÙĦ
+Ġна ÑĪиÑħ
+ãĤ« ãĥĥãĥĹ
+×Ĺ׾ ת
+Ġëĭ¤ ë§Į
+à¸Ĺัà¹Īว à¹Ĥลà¸ģ
+ãĥį ãĤ¿
+ØŃس اس
+ãģ«ãģª ãĤĮ
+ج ائ
+جائ زة
+é change
+é conom
+économ ie
+Т Ðĺ
+סת ׼׾
+à¸Ĺัà¹īà¸ĩ สà¸Ńà¸ĩ
+ĠاÙĦØ® اÙħ
+ĠاÙĦخاÙħ س
+×§ ×ĺ×¢
+au waż
+à¸ľà¸¹à¹ī à¸Ĭาย
+à¹ģà¸Ľà¸¥ à¸ģ
+åIJĮæĻĤ ãģ«
+зн аниÑı
+ãģĦãģŁãģł ãģįãģ¾ãģĹãģŁ
+Ġ×ŀ×ij ׾×Ļ
+à¸Ĥà¸Ń à¹ĥหà¹ī
+ĠاÙĦت ربÙĬØ©
+Ġdécou vert
+Ġżyc iu
+apr ès
+Ġy ab
+Ġyab anc
+Ġyabanc ı
+ĠbaÅŁ layan
+ìĹĪ ëįĺ
+Ġhes abı
+Ġë§Į ìķ½
+ë§ Īëĭ¤
+ĠTh ánh
+ãĥ´ ãĤ¡
+à¸Ľà¸£à¸±à¸ļ à¸Ľà¸£
+à¸Ľà¸£à¸±à¸ļà¸Ľà¸£ ุà¸ĩ
+ĠM ặc
+à¹Ģหà¸ķุ à¸ľà¸¥
+ĠÐij ез
+Ġcapac itÃł
+ÅĤe ÅĽ
+ĠпÑĢе им
+ĠпÑĢеим ÑĥÑīеÑģÑĤв
+ĠÅļ wiÄĻt
+Ġpubli é
+×ŀ×¢ צ×ij
+Ùħشار Ùĥات
+à¸łà¸² ษ
+à¸łà¸²à¸© ี
+Ġdeux ième
+ĠÙħØŃ اÙ쨏
+ĠÙħØŃاÙ쨏 Ø©
+ĠSch ön
+ï½ ¤
+Ġ×Ķ ×ij×¢
+Ġ×Ķ×ij×¢ ×Ļ×Ķ
+ĠÙĪØ§ÙĦ ÙĦÙĩ
+è¨Ģ ãģ£ãģŁ
+à¸ķ à¹īาà¸Ļ
+วร รà¸ĵ
+à¸Ĺิ ศ
+ĠbaÅŁ ına
+Ġmog ÄĻ
+ש ×Ļפ×ķר
+ĠÙĪ Ø¹Ø¯
+ĠÙĪØ¹Ø¯ Ùħ
+Ġhistó rico
+Ġk ısı
+ĠìĿ´ ê²Į
+ĠPol ÃŃtica
+ĠÑģиÑĤÑĥ аÑĨии
+ĠkoÅĦ ca
+×ij×ĵ ×Ļ×§×Ķ
+ĠاÙĦسÙĬ ارات
+ãģªãĤī ãģ°
+ãĤµ ãĥ©
+ãĤĭãģĵãģ¨ãģĮãģ§ãģį ãĤĭ
+Ġdecis ão
+×ķ ×ķ×ĵ
+lä ss
+läss ig
+Ġ׾ ×Ļשר×IJ׾
+ĠÙĬ أتÙĬ
+ר ×ķ×ĸ
+ö ÄŁ
+Ã¶ÄŁ ret
+Ã¶ÄŁret im
+Ġд ек
+Ġдек аб
+Ġдекаб ÑĢÑı
+Ġש ×Ĺ×ķר
+ãģ¦ãģıãĤĮ ãģŁ
+عب ارة
+Ġélect rique
+ĠاÙĦتÙĨ ÙħÙĬØ©
+جر Ùī
+ĠìĪĺ íĸī
+à¸Ĺ ู
+ĠÑĢе алÑĮно
+Ñģп оÑģоб
+à¸Ħล à¹īาย
+Ġس عÙĪØ¯
+ön ü
+ĠÙģ ÙħÙĨ
+تÙĥ ÙĪ
+تÙĥÙĪ ÙĬÙĨ
+ĠкаÑĩ еÑģÑĤво
+ĠконÑĤ ак
+ĠконÑĤак ÑĤ
+Ġsöz leÅŁme
+à¸Ń à¹īาà¸ĩ
+Ġت ÙĪÙģ
+ĠتÙĪÙģ ÙĬر
+×Ķ×ĸ ×ĵ
+×Ķ×ĸ×ĵ ×ŀ׳×ķת
+ĠØ·ÙĪÙĬÙĦ Ø©
+Ġtér mino
+Ġ×IJ ×Ļפ×Ķ
+ãĥĵ ãĥ«
+ส à¹Ĥม
+สà¹Ĥม สร
+ĠاÙĦ اث
+ĠاÙĦاث ÙĨÙĬÙĨ
+ев иÑĩ
+Ġopin ión
+à¸Ľ วà¸Ķ
+åı¤ ãģĦ
+ร à¹Īา
+ĠB iaÅĤ
+ĠÑģÑĤ ал
+ĠÑģÑĤал о
+ó logo
+ĠìķĦ ëĭĪëĭ¤
+Ġ×IJ ×Ļת
+Ġ×IJ×Ļת ×ķ
+à¹Ģหà¹ĩà¸Ļ วà¹Īา
+à¸ļ ารà¹Į
+çĦ ¼
+çĦ¼ ãģį
+ĠìĿ´ìļ© ìŀIJ
+ĠнекоÑĤоÑĢ Ñĭе
+ks z
+ksz taÅĤ
+ksztaÅĤ c
+ãĤŃãĥ£ ãĥĥãĤ·
+ãĤŃãĥ£ãĥĥãĤ· ãĥ³ãĤ°
+Ġro ÅĽ
+ĠroÅĽ lin
+ÑĢаж а
+×ij׳×Ļ ×Ļ×Ķ
+à¸Ľà¸£ สิ
+à¸Ľà¸£à¸ªà¸´ à¸ķ
+Ġgörd ü
+×ŀ׳×Ķ ×Ļ×Ĵ
+å¤īãĤı ãģ£ãģ¦
+Ġ×IJ ×Ķ
+Ġ×IJ×Ķ ×ijת×Ļ
+à¹Ģร à¹Īà¸ĩ
+Ġön ünde
+Ġê·¸ ëĥ¥
+пол иÑĤ
+полиÑĤ иÑĩеÑģк
+ãĥ¡ ãĥĩãĤ£
+ãĥ¡ãĥĩãĤ£ ãĤ¢
+ĠDet ay
+ĠDetay lı
+ĠاÙĦصÙģ ØŃØ©
+à¸ģาร à¹Ģà¸ĩิà¸Ļ
+Ġìµľ ê·¼
+׼ ש׾
+ï¼ ©
+вÑĪ ÐµÐ³Ð¾
+íķĺ ìĭ¤
+ĠÐŃ ÑĤ
+ĠÐŃÑĤ оÑĤ
+ส ื
+สื à¸ļ
+Ġng ừng
+ĠдокÑĥменÑĤ ов
+дав аÑĤÑĮ
+ĠاÙĦشخص ÙĬØ©
+Ġצ ×¢×Ļר
+در Ùĥ
+س ØŃب
+à¹Ħมà¹Ī à¸Ħà¹Īà¸Ńย
+Ġ×Ķ×ŀ×§ ×ķ×ŀ×Ļ
+สัà¹Īà¸ĩ à¸ĭืà¹īà¸Ń
+Ġê·¸ê²ĥ ìĿĦ
+ãģĤãĤĭ ãģĦ
+ãģĤãĤĭãģĦ ãģ¯
+×IJ×ķ×ĺ ×ķ×ij
+×IJ×ķ×ĺ×ķ×ij ×ķס
+к ÑĨион
+ĠÐľ ожно
+ãģı ãģł
+ãģıãģł ãģķ
+ĠинÑĦоÑĢм аÑĨиÑı
+ï» Ł
+Ġìŀij ìĹħ
+Ġ×Ļ ×ķסף
+إ دارة
+ĠاÙĦØŃ اج
+×ł×¡ ×Ļ×¢×Ķ
+из аÑĨиÑı
+×IJ׾ ×ij
+×IJ׾×ij ×ķ×Ŀ
+п ед
+Ġ×§×ĺ ׳×Ķ
+ĠÙĨÙ쨳 Ùĩا
+ĠMinist ério
+Ġп ен
+Ġпен Ñģи
+ãĥIJ ãĥ©ãĥ³ãĤ¹
+Ġ×Ķת ×ķר×Ķ
+Ġt ạm
+ĠìĹŃ ìĭľ
+ï½ ¡
+Ġth á»±
+Ġ ısı
+ì» ¨
+ãģĹãģ£ãģĭãĤĬ ãģ¨
+Ġx ưa
+Ġc ặp
+×Ĺ ×Ļ×ij×ķר
+วัà¸Ĵà¸Ļ à¸ĺรรม
+st är
+stär ke
+ĠÑģам Ñĭй
+p isa
+pisa Äĩ
+ĠoluÅŁ an
+ĠاÙĦØ¥ ÙħاÙħ
+ĠcÄĥ ng
+Ġgü nl
+Ġgünl ük
+Ġ׳ש ×IJר
+Ġkhi á»ĥn
+ç¶ļ ãģijãĤĭ
+stit ución
+Ġcapac ité
+Ġj aki
+Ġjaki ÅĽ
+вÑĪ Ð¸Ñģ
+вÑĪиÑģ ÑĮ
+פע×ķ׾ ×ķת
+ĠØŃ ÙĬات
+ĠØŃÙĬات Ùĩ
+Ġник огда
+ÐĽ Ь
+Ġ×Ķ×¢ ×ķ×ij
+Ġ×Ķ×¢×ķ×ij ×ĵ×Ķ
+Ġch Ãło
+หลาย à¹Ĩ
+ĠÑı н
+ĠÑıн ваÑĢ
+ĠÑıнваÑĢ Ñı
+à¸Īำà¹Ģà¸Ľà¹ĩà¸Ļ à¸ķà¹īà¸Ńà¸ĩ
+Ġhö her
+ãģķãĤĮãģ¦ ãģĦãģŁ
+สà¸ĩ สั
+สà¸ĩสั ย
+ĠاÙĦ اس
+ĠاÙĦاس ÙĦاÙħ
+ĠاÙĦØ´ Ùħس
+สà¸ĸาà¸Ļ ี
+ãĤ¯ãĥ© ãĤ¹
+à¸ŀร ร
+à¸ŀรร à¸Ħ
+p õ
+põ e
+Ġpor ém
+à¸Ľà¸£à¸° สà¸ĩ
+à¸Ľà¸£à¸°à¸ªà¸ĩ à¸Ħà¹Į
+powied zie
+powiedzie Äĩ
+Ġмог Ñĥ
+Ġж ел
+Ġжел ез
+ĠاÙĦØ« ÙĤ
+ĠاÙĦØ«ÙĤ اÙģÙĬ
+ĠпÑĢав ило
+Ġgdy ż
+פש ×ķ×ĺ
+ÑĢабоÑĤ ка
+ĠÙĥ رة
+ش دد
+Ùħار Ùĥ
+Ùħ ÙĥØ©
+Ġпод пиÑģ
+×ĺ×ķ ×ķ×Ĺ
+ĠÅĽ c
+ĠÅĽc ian
+Ġر جاÙĦ
+Ġ×ª×ľ ×ķ×Ļ
+и ÑĪ
+иÑĪ ÑĮ
+Ġmé dec
+Ġmédec in
+ëįĶ ëĿ¼ëıĦ
+ĠÑĤеб Ñı
+Ġ׾×Ķ ×ķס×Ļ×£
+ãģĬ 話
+Ġà¹ģà¸ķà¹Ī à¸ģà¹ĩ
+د اÙģ
+داÙģ Ø¹
+ĠC ùng
+ãĥ»ãĥ» ãĥ»ãĥ»
+ê¶ ģ
+Ġdeber ÃŃa
+หà¸Ļà¹Īวย à¸ĩาà¸Ļ
+Ġva ÌĢ
+Ġעצ ×ŀ
+Ġעצ×ŀ ×Ŀ
+à¹Ģà¸Ĭืà¹Īà¸Ń วà¹Īา
+שק ע
+Ġ×Ķ ×Ľ×ķ׾
+Ġ×Ķ׼×ķ׾ ׾
+ни бÑĥд
+нибÑĥд ÑĮ
+ĠëĦĪ íĿ¬
+Ġоб ÑĢаÑī
+ĠобÑĢаÑī а
+Ġ×¢×ij×ķ×ĵ ת
+ĠاÙĦÙħÙĨت خب
+ıy ord
+ıyord u
+ÙĪ Ø°
+×Ĺש ×Ļ×ij×ķת
+Ġ×Ķ×¢ ×Ļ×§
+Ġ×Ķ×¢×Ļ×§ ר×Ļ
+ì¢ Į
+ยุ à¹Ĥร
+ยุà¹Ĥร à¸Ľ
+Ġа пÑĢ
+ĠапÑĢ ÐµÐ»Ñı
+sz ed
+szed ÅĤ
+д он
+à¹Ģà¸ķิ à¸ļ
+à¹Ģà¸ķิà¸ļ à¹Ĥà¸ķ
+кол о
+Ġkażde j
+å¸ °
+帰 ãĤĬ
+Ġмил ли
+Ġмилли он
+ç¾İåij³ ãģĹãģĦ
+ت ÙĤار
+تÙĤار ÙĬر
+ĠìĿ´ 루
+ĠìĿ´ë£¨ ìĸ´
+Ġsprzeda ż
+×Ķ ×ķצ×IJ×ķת
+ãĤ¢ãĤ¯ ãĤ»
+ãĤ¢ãĤ¯ãĤ» ãĤ¹
+ר ×ķ×¥
+ĠгоÑģÑĥдаÑĢÑģÑĤв енн
+Ø£ ØŃÙĥ
+Ø£ØŃÙĥ اÙħ
+ĠoluÅŁ u
+ĠA ç
+ĠAç ık
+ãĤ¸ ãĥ¼
+ç´ł æĻ´
+ç´łæĻ´ ãĤīãģĹãģĦ
+Ġ×ijש×ij ×ķ×¢
+ب ذ
+بذ ÙĦ
+สา à¹Ģหà¸ķุ
+Ġpoz osta
+Ġpozosta ÅĤ
+ØŃر Ùħ
+Ġimport ância
+leÅŁtir me
+Ġд ÑĢев
+Ġmó vil
+ĠA ynı
+Ġна лог
+Ġналог ов
+Ġ×Ĺ ×Ļפ×Ķ
+ĠÑĦоÑĢм Ñĥ
+à¸Ĺà¸Ķ สà¸Ńà¸ļ
+ĠksiÄħż ki
+Ġma ÅĤe
+Ùħس Ø£ÙĦ
+ÙħسأÙĦ Ø©
+ï¼¾ ï¼¾
+ç ãeste
+év iter
+Ġкон ÑģÑĤÑĢÑĥк
+ĠконÑģÑĤÑĢÑĥк ÑĨи
+ï¾ ŀ
+Ġת×ķ׼ ׳
+ãĤ¹ãĥĪ ãĥ¬ãĤ¹
+ĠاÙĦاÙĤتصاد ÙĬ
+×ŀ×ĵ ×Ļ
+Ġw ÅĤad
+ĠwÅĤad z
+Ø® ÙĪÙģ
+ĠмаÑĤеÑĢиал ов
+ãģ¨ãģ£ãģ¦ ãĤĤ
+Ġznaj du
+Ġznajdu jÄħ
+Ùģ Ø¦Ø©
+ãģ©ãģ® ãĤĪãģĨãģª
+æĬij ãģĪ
+׳ ×Ĺ׾
+Ġdü ny
+Ġdüny an
+Ġdünyan ın
+гÑĢ Ð°Ð½Ð¸
+гÑĢани Ñĩ
+Ġ×Ķש׾ ×Ļש×Ļ
+Ġ×Ķ×IJ ש
+åıĬ ãģ³
+ìĭŃ ìĭľ
+ìĭŃìĭľ ìĺ¤
+Ġдол л
+Ġдолл аÑĢ
+Ġпов ÑĤоÑĢ
+Ġ×Ĺ ×Ļ׳×Ŀ
+ת פת×Ĺ
+Ñĥв ели
+Ñĥвели Ñĩен
+ãĤ« ãĥª
+raw id
+rawid ÅĤow
+×ķ ×ķ׾
+ãĥŁ ãĥ¥
+ì½ ĺ
+ĠBy ÅĤ
+Ðľ ÐIJ
+ع ÙIJ
+ĠÑģовеÑĢ ÑĪ
+ĠÑģовеÑĢÑĪ ÐµÐ½Ð½Ð¾
+Ġм ой
+Ġ×ķ׾×IJ ×Ĺר
+æħ £
+æħ£ ãĤĮ
+ØŃ اÙ쨏
+Ġ무 ë£Į
+à¸Ħà¸ĵะ à¸ģรรม
+à¸Ħà¸ĵะà¸ģรรม à¸ģาร
+Ġìĸ´ ëĶĶ
+Ġdif eren
+Ġdiferen ça
+ĠاÙĦØ£ ساس
+ĠاÙĦأساس ÙĬØ©
+Ġ׾×IJ×Ĺר ×ķ׳×Ķ
+ê· ł
+Ġ×Ķש׳×Ļ ×Ļ×Ķ
+ìľĦìĽIJ ìŀ¥
+ลุ à¸ģ
+ç iler
+Ġ×Ķ×IJ ׾×ķ
+èģŀ ãģı
+Ġ×ķ×IJ פ×Ļ׾×ķ
+ĠÑĢе ализ
+ĠÑĢеализ аÑĨи
+ระยะ à¹Ģวลา
+Ġجدا Ùĭ
+تب اع
+Ġveh ÃŃculo
+Ġдол г
+à¸Ľà¸£à¸´ มาà¸ĵ
+ì¦ IJ
+Ġ׾ ×ŀ×§×ķ×Ŀ
+ĠìĤ¬ ì§Ħ
+à¸Ĭ à¹īา
+Ġ×ŀ×¢ ×ķ׾×Ķ
+Ġgö rm
+Ġgörm ek
+ĠÙĪÙĩ ذÙĩ
+пеÑĢ Ð²
+пеÑĢв ÑĭÑħ
+ê·¸ ëŀĺ
+ĠاÙĦبر ÙĬØ·
+ĠاÙĦبرÙĬØ· اÙĨÙĬ
+ĠиÑİ Ð½Ñı
+ĠÐĵ оÑĢ
+Ġ׾ ש׾×Ŀ
+ÐIJ ÐĿ
+Ġназ наÑĩен
+о оÑĢ
+ооÑĢ Ñĥж
+Ġöz elli
+Ġözelli ÄŁi
+Ġни же
+ç¶ļ ãģijãģ¦
+Ġа ÑĢенд
+Ġkat ılı
+Ġkatılı m
+ĠØ¥ Ø·ÙĦاÙĤ
+ĠÙĪØ¥ ذا
+Ġок ÑĤÑı
+ĠокÑĤÑı бÑĢÑı
+à¹Ĥà¸ķ à¹
+à¹Ĥà¸ķ๠Ĭ
+à¹Ĥà¸ķà¹Ĭ ะ
+Ġolduk ları
+Ùħ ÙĪÙĤع
+ëĤ ©
+ã썿ĢĿ ãģ£ãģ¦ãģĦãĤĭ
+Ġש ×Ļ׼×ķ׾
+วา à¸Ķ
+س ÙĬÙĦ
+à¸Ĥ วั
+à¸Ĥวั à¸į
+تØŃ ÙĥÙħ
+ì ĤŃ
+Ġconna ît
+׳ פת×Ĺ
+Ġch ặ
+Ġchặ n
+ĠÙħ ØŃÙħ
+ĠÙħØŃÙħ ÙĪØ¯
+ãģ ´
+ĠпÑĢодÑĥк ÑĨии
+зд ÑĢав
+ãģĶ è¦
+ãģĶè¦ §
+×IJ×ij ×IJ
+Ġvé ritable
+ĠØ· ÙģÙĦ
+ãĥĪãĥ© ãĥĸãĥ«
+ê³ ¡
+Ġת ×ŀ×ķ׳×Ķ
+Ġki ên
+ĠÙĤ ادر
+Ø¥ÙĤ ÙĦÙĬÙħ
+ĠпÑĢед пÑĢи
+ĠпÑĢедпÑĢи ÑıÑĤиÑı
+Ġb Äĥng
+Ġay ında
+Ġg ấp
+еÑħ ал
+Ġgi Ãłnh
+Ġд ав
+Ġдав но
+ìĺĢ ëĭ¤
+à¸Ļัà¸ģ à¹Ģà¸ķ
+à¸Ļัà¸ģà¹Ģà¸ķ ะ
+Ùħست شار
+ست راتÙĬج
+ستراتÙĬج ÙĬ
+رÙħ ز
+Ġt Ä©nh
+ë¡ Ń
+ĠÑĩ еÑĤ
+ĠÑĩеÑĤ Ñĭ
+ĠÑĩеÑĤÑĭ ÑĢе
+ĠEnt ão
+Ġص غ
+Ġصغ ÙĬرة
+×ij×Ļ×ĺ ×ķ׾
+خط ÙĪØ·
+ĠÑĢазвиÑĤ ие
+Ġamacı yla
+à¸Ĺี วี
+Ġо ÑģÑĤ
+ĠоÑģÑĤ алÑĮн
+ש×ķ׾׊ף
+Ġ׼ ׳×Ļס
+Ġ׼׳×Ļס ×Ķ
+Ġd áºŃy
+ĠyaÅŁ ayan
+Ġ×ŀ×Ķ ×ķ×ķ×Ķ
+ĠÑĥ Ñģи
+ĠÑĥÑģи ли
+×ŀ פ×Ļ
+ĠпÑĢовед ениÑı
+Ġر ب
+Ġرب Ùħا
+ĠاÙĦØ£ ÙĪØ³Ø·
+Ġìľł ì§Ģ
+Ġprac ownik
+Ġpracownik ów
+×ŀס ×ķרת
+ÙĤار ب
+à¸Ħวาม รูà¹īสึà¸ģ
+à¹ģหล ะ
+ĠاÙĦÙĨ ÙĤد
+Ġ×IJ׾ פ×Ļ
+Ùħس ئ
+Ùħسئ ÙĪÙĦ
+ев ÑĭÑħ
+клÑİÑĩ ениÑı
+×ij ×Ļ׳
+×ij×Ļ׳ ×Ļ×Ķ×Ŀ
+ש ×ķ×IJ×Ķ
+ĠÅŁ ark
+ĠÅŁark ı
+Ġsü rec
+Ġsürec in
+à¹Ģà¸Ħร à¸Ķ
+à¹Ģà¸Ħรà¸Ķ ิà¸ķ
+ãĥIJ ãĥ¬
+ĠØ´ Ø£ÙĨ
+à¹Ģà¸Ńา à¹Ħวà¹ī
+niÄĻ cie
+רצ ×Ĺ
+ĠaÅŁ ama
+׳ פ×Ĵ×¢
+Ġth á»Ŀ
+Ġkhu ẩn
+diÄŁ inde
+ÑıÑī иÑħ
+ãĥĺ ãĥ«
+Ġüber h
+Ġüberh aupt
+ĠÑĤÑĢеб ова
+ĠdÅĤ ugi
+×ĺ ×Ļף
+à¸Ĥà¸Ļาà¸Ķ à¹ĥหà¸įà¹Ī
+ĠاÙĦØ£ Ùĩ
+ĠاÙĦØ£Ùĩ ÙĦÙĬ
+ĠMü d
+ĠMüd ürü
+Ġ×Ļ×Ķ ×ķ×ĵ×Ķ
+Ñĭв аеÑĤÑģÑı
+س اط
+×Ķת ׳×Ķ×Ĵ
+×Ķ×ª×ł×Ķ×Ĵ ×ķת
+à¸ģาร à¸ľà¸¥à¸´à¸ķ
+íĴ Ģ
+สà¸ĸาà¸Ļ à¸ģารà¸ĵà¹Į
+Ġо ÑĦ
+ĠоÑĦ иÑģ
+ĠÙĦ عبة
+Ġstron ÄĻ
+Ġר×IJ ×ķ×Ļ
+×Ĺ ×ij׾
+ĠÑĢÑĭ н
+ĠÑĢÑĭн ке
+Ġ׾×ŀ×¢ ף
+اس ÙĦ
+ห ัà¸Ļ
+Ġ×IJ ×Ĺ×Ļ
+ĠпÑĢод ол
+ê°Ģ ìŀħ
+Ġ×ijר ×Ĺ
+Ġ×ijר×Ĺ ×ij×Ļ
+дж еÑĢ
+Ġ׾ ×Ĺ׾
+Ġ׾×Ĺ׾ ×ķ×ĺ
+Ġ׾×Ĺ׾×ķ×ĺ ×Ļף
+ศาส à¸Ļา
+ãĤ¢ãĤ¤ ãĥĨ
+ãĤ¢ãĤ¤ãĥĨ ãĥł
+Ġפר ×ķפ
+جز اء
+ล à¸Ńย
+Ġc iaÅĤa
+Ġgi ết
+ĠзнаÑĩ иÑĤелÑĮно
+Ġolmad ıģ
+Ġolmadıģ ını
+н д
+нд екÑģ
+تأ Ùĥد
+Ġìĸ ¸
+Ġìĸ¸ ìłľ
+ay dın
+ãĥī ãĥ¬ãĤ¹
+Ġs ắt
+Ġíĺ¸ íħĶ
+Ġë¶ ģ
+Ġë¶ģ íķľ
+ãĥij ãĤ¤
+Ġ×ŀש×Ĺ×§ ×Ļ
+à¸Ħà¸Ļ à¸Ńืà¹Īà¸Ļ
+Ġиз гоÑĤов
+ĠизгоÑĤов лен
+à¹Ģà¸ģีย ร
+à¹Ģà¸ģียร à¸ķิ
+תק שר
+ĠÑĢаÑģ ÑĩеÑĤ
+ส à¹Ģà¸ķ
+Ġl änger
+ĠiÅŁ let
+ĠiÅŁlet me
+Ġع ÙĦÙĬÙĨ
+ĠعÙĦÙĬÙĨ ا
+é lection
+ĠاÙĦغ ربÙĬØ©
+íĭ Ģ
+ãĤĤãĤī ãģĪ
+Ġкни ги
+Ø£ سÙħ
+أسÙħ اء
+Ġth á»ı
+Ġthá»ı a
+หà¸Ļ ู
+Ġ×ł×¢ ש×Ķ
+à¸łà¸²à¸¢ à¹ĥà¸ķà¹ī
+à¸ŀื à¸Ĭ
+رÙĬ Ø·
+Ùģ ÙĪØ¶
+ãģĤãĤĬãģĮãģ¨ãģĨãģĶãģĸ ãģĦãģ¾ãģĹãģŁ
+ש ×ĵ×Ķ
+Ġng á»±c
+ĠÑģеÑĢ ÑĮ
+ĠÑģеÑĢÑĮ езн
+T ôi
+Ġfiyat ları
+ĠвÑģ Ñİ
+ĠC ódigo
+Ġ×Ķש ×IJ
+Ġ×Ķש×IJ ׾×Ķ
+ĠP ública
+Ø¥ Ø®
+إخ ÙĪØ§ÙĨ
+ĠзаÑıв ил
+ãĥ¦ ãĥ¼
+ר×IJ ×Ļת
+vol ución
+Ġsz ko
+Ġszko ÅĤy
+جرÙĬ دة
+Ġpens é
+ìī ¬
+ĠBüyük ÅŁehir
+ĠØ£Ùħ رÙĬ
+ĠØ£ÙħرÙĬ ÙĥÙĬ
+à¸Ļัà¸ģ ศึà¸ģษา
+Ġtod av
+Ġtodav ÃŃa
+ĠС ан
+ĠСан кÑĤ
+íķĺ ìŀIJ
+ØŃÙĪ Ø§ÙĦ
+׼ ×ķשר
+à¹Ģลย à¸Ħรัà¸ļ
+Ġal gu
+Ġalgu ém
+Ùģ Ø²
+Ġçek il
+Ġ×ĵ ר׼×Ļ×Ŀ
+ãĥIJ ãĥ©
+à¸ģà¹ĩ สามารà¸ĸ
+สà¹Īวà¸Ļ ลà¸Ķ
+íı °
+ĠP úb
+ĠPúb lico
+à¹ģà¸Ļว à¸Ĺาà¸ĩ
+×IJת ×Ĵר
+ش اش
+شاش ة
+ci ÅĽni
+ĠÃľ rün
+ÙĦÙĪ ØŃ
+ĠاÙĦ بÙĨ
+ĠاÙĦبÙĨ Ùĥ
+ì¡° ì¹ĺ
+Ġorganiz ación
+ãģĤãĤĬãģĮãģ¨ãģĨãģĶãģĸ ãģĦãģ¾ãģĻ
+s ätze
+ĠÑģем ей
+ÙĤ صد
+ÑģÑĤв еннÑĭе
+Ġpréc éd
+Ġprécéd ent
+à¸ģรุà¸ĩà¹Ģà¸Ĺà¸ŀ ฯ
+ãģ¨è¨Ģ ãģĦ
+×ij׳×Ļ ×Ļף
+ĠØŃ ÙĪ
+ĠØŃÙĪ Ø§ÙĦÙĬ
+סק ס
+ĠsaÄŁlam ak
+Ġ׾ צ×Ļ×Ļף
+×§×ĵ ש
+Ġ×Ķ×ŀ ×¢×¨×Ľ×ª
+Ġ׾×Ķ ×¢×ij×Ļר
+Ġg ünd
+Ġgünd em
+ĠнаÑĪ ÐµÐ³Ð¾
+à¹ĥà¸Ļ à¸ŀืà¹īà¸Ļà¸Ĺีà¹Ī
+à¹Ģà¸Ħร ืà¸Ń
+à¹Ģà¸Ħรืà¸Ń à¸Ĥ
+à¹Ģà¸Ħรืà¸Ńà¸Ĥ à¹Īาย
+ظ اÙĩرة
+ÙħÙĨ ظÙħ
+ÙħÙĨظÙħ ات
+Ùħت از
+追 ãģĦ
+dı kt
+dıkt an
+ĠëįĶ ìļ±
+ĠÐĿ апÑĢимеÑĢ
+tw ór
+×ŀ×ķ×¢ צ×Ķ
+Ùĥ ÙĪÙĥ
+Ð ©
+×ŀ×ĺ פ׾
+ó lica
+訪 ãĤĮ
+ĠëĮĢ ë¶Ģ
+ĠëĮĢë¶Ģ ë¶Ħ
+ãĤ¯ãĥª ãĥĥãĤ¯
+ãĤĴ éģ¸
+ãĤĴéģ¸ ãģ¶
+Ġpow sta
+Ġpowsta ÅĤ
+Ġraz ón
+×ij ×ķ×Ĺר
+ĠÑģообÑī ил
+Ġ×§ ×ij×ķ×¢
+r êt
+à¸Ķี à¸Ĥึà¹īà¸Ļ
+×ŀס ×¢×ĵ
+×ŀסע×ĵ ×ķת
+ĠÃĸ sterreich
+Ġ׳ ×Ĺש×ij
+Ùħباد رة
+ì´ ī
+×Ĵ ׳×ĺ×Ļ
+ä¿¡ ãģĺ
+du ÄŁ
+duÄŁ unu
+Ġph ú
+ĠاÙĦØ£ Ø®ÙĬر
+Ġت عتبر
+landır ıl
+ãģ¨ãģ¯ ãģĦ
+ãģ¨ãģ¯ãģĦ ãģĪ
+ĠاÙĦ Ø·ÙĦ
+ĠاÙĦØ·ÙĦ اب
+ĠN º
+éģ¿ ãģij
+اÙĦ Ùħع
+اÙĦÙħع رÙĪÙģ
+ส à¸łà¸²
+éĽ¢ ãĤĮ
+ĠпомоÑī ÑĮ
+Ġзна еÑĤ
+ãĥĹãĥ¬ ãĤ¼
+ãĥĹãĥ¬ãĤ¼ ãĥ³ãĥĪ
+Ġsup érieur
+Ġש׾ ×Ļש×Ļ
+ĠاÙĦÙĨ ÙĪØ¹
+ãĤĵãģ§ãģĻ ãģŃ
+à¸Ńà¸ļ รม
+Ġgi á»įng
+Ġwzgl ÄĻd
+ĠاÙĦÙģ ÙĤر
+è rent
+Ġ×ŀ×IJ ×Ĺ
+Ġ×ŀ×IJ×Ĺ ×ķר×Ļ
+×Ĵ ×Ĵ
+×Ļ ×Ļ×ij
+ÙħÙĦ اب
+ÙħÙĦاب س
+Ġhük ü
+Ġhükü met
+Ġ×ŀ×Ĵ ×Ļ×ij
+ĠÐŀ Ñĩ
+ĠÐŀÑĩ енÑĮ
+æĹ© ãģĦ
+Ġconstr ucción
+Ġth ượng
+ï¼ ĭ
+Ġcor ação
+à¹Ģหล à¹ĩà¸ģ
+ĠBaÅŁ b
+ĠBaÅŁb akan
+éĢ£ ãĤĮ
+ãģĻãĤĭ ãģĵãģ¨ãģĮãģ§ãģįãģ¾ãģĻ
+ĠÙĤ اÙħت
+Ġا Ùĥثر
+ÙģØ§Ø¹ ÙĦ
+ĠÑĦ оÑĢ
+ĠÑĦоÑĢ Ñĥм
+غ ذÙĬ
+ĠiÅŁ le
+ĠiÅŁle ml
+ĠiÅŁleml eri
+ĠìĤ¬ëŀĮ ìĿĢ
+Ġìŀij ìĦ±
+Ġë§Ī 볨
+Ùħ جÙĦس
+หม ู
+д в
+дв иг
+двиг а
+à¹Ģสีย à¸Ĭีวิà¸ķ
+×Ķת פת×Ĺ
+×Ķתפת×Ĺ ×ķת
+ĠмеÑĤ ÑĢо
+ĠÑģ енÑĤ
+ĠÑģенÑĤ Ñı
+ĠÑģенÑĤÑı бÑĢÑı
+ê³ §
+Ġ׾ פע
+Ġ×ľ×¤×¢ ×ŀ×Ļ×Ŀ
+à¹Ģà¸ļ ีย
+詳 ãģĹãģı
+çķ° ãģªãĤĭ
+Ġİl çe
+ĠAt at
+ĠAtat ür
+ĠAtatür k
+รุ à¹Īà¸ĩ
+Ġkald ı
+Ġ주 ìŀ¥
+Ġprés ence
+Ġн аб
+Ġнаб лÑİ
+ĠнаблÑİ Ð´Ð°
+ĠÑģам ого
+×Ĵ ×ķש
+×ŀ×ĺ ×ķפ
+×ŀ×ĺ×ķפ ׾
+ĠвÑĭб иÑĢа
+ĠìŀIJ 리
+åĪĨ ãģĭãĤīãģªãģĦ
+Ġз Ñĥб
+Ġש׼ ×ijר
+Ġد ائ
+Ġدائ Ùħا
+ĠпаÑĢ ÑĤи
+ï¼ ²
+ĠاÙĬ ضا
+ĠÑħ оз
+ĠÑħоз Ñı
+ĠÑħозÑı й
+ĠÑħозÑıй ÑģÑĤв
+ĠاÙĦØ£ ج
+ĠاÙĦأج ÙĨب
+ĠاÙĦأجÙĨب ÙĬØ©
+ĠÐĹ Ð½Ð°
+ĠAp ós
+ĠÑį неÑĢ
+ĠÑįнеÑĢ Ð³Ð¸
+Ġy ans
+Ġyans ı
+ĠJust i
+ĠJusti ça
+Ġpré vu
+ม วล
+ìŀ¥ ëĭĺ
+à¸ģระ à¸ļ
+à¸ģระà¸ļ วà¸Ļ
+à¸ģระà¸ļวà¸Ļ à¸ģาร
+×ŀ ×ŀ
+×ŀ×ŀ ×ķצע
+Ġh ẹ
+Ġhẹ n
+зд ание
+Ġak ÅŁ
+ĠakÅŁ am
+×ĺ ×ķפ
+Ġgere kt
+Ġgerekt i
+Ġgerekti ÄŁini
+Ġnar z
+Ġnarz ÄĻdzi
+é po
+épo que
+ĠTh ần
+Ġwys oko
+Ġwysoko ÅĽci
+à¸ľà¸¹à¹ī à¸Ľ
+à¸ľà¸¹à¹īà¸Ľ à¹Īวย
+ĠÙĬ بدÙĪ
+ÑĤелÑĮ ного
+Ġвз глÑıд
+Ġjed nÄħ
+ĠìĿĺ 견
+Ġ à¸Ĥà¸ĵะà¸Ĺีà¹Ī
+פ ×Ļ×ĵ
+ìĥģ ëĭ´
+Ġm ỡ
+×Ķ ×ŀ׾
+×Ķ×ŀ׾ צ×ķת
+ĠÑģоÑģÑĤ о
+ĠÑģоÑģÑĤо иÑĤ
+Ġав и
+Ġави а
+ĠL änder
+تص ÙĪÙĬر
+×ŀ×ĵ ×Ļ×Ķ
+ìłĪ ì°¨
+ãģ¨ ãĤĬ
+ãģ¨ãĤĬ ãģĤ
+ãģ¨ãĤĬãģĤ ãģĪ
+ãģ¨ãĤĬãģĤãģĪ ãģļ
+ĠÑĢ Ñıд
+ĠÑĢÑıд ом
+ĠNh ất
+ĠاÙĦÙĥ اÙħÙĦ
+×Ĺ׾ ׾
+ĠGi ấy
+צ ×ĺר
+צ×ĺר ×£
+Ġ׾×ij ×ĺ׾
+Ġим еÑĤÑĮ
+ס×ŀ ×ķ×ļ
+Ġparticip ação
+íķľëĭ¤ ë©´
+ÙħÙĨت دÙĬ
+ÙħÙĨتدÙĬ ات
+ĠeÄŁ len
+g änge
+رب ØŃ
+ãĤ® ãĥ£
+ĠاÙĦر ÙĤÙħ
+à¸ĭ à¹īำ
+ĠH óa
+×ŀר ×Ĺ×§
+ØŃÙħ اÙħ
+بÙĪ Ùĥ
+ĠArt ÃŃculo
+ãĥĦ ãĤ¢ãĥ¼
+×Ķפ ׼×Ķ
+×Ĺ׾ ×ķף
+ĠпеÑĢе Ñħод
+len miÅŁ
+زر اعة
+Ġseñ or
+ãģ£ãģ¦ ãģįãģ¦
+Ø¥ Ø´
+إش ارة
+Ġpod ÃŃa
+ĠÃľ lke
+н ÑģкаÑı
+Ġadapt é
+Ġdüzen len
+Ġdüzenlen en
+ĠÑģÑĤ ала
+ĠÙĬ ØŃتاج
+Ġn ier
+Ġnier uch
+Ġnieruch omo
+Ġnieruchomo ÅĽci
+ãģĵãģ¨ãģĮ ãģĤãĤĭ
+ยà¸Ńà¸Ķ à¹Ģยีà¹Īยม
+ĠÙħ ج
+ĠÙħج اÙĨÙĬ
+Ġз аб
+Ġзаб ол
+Ġзабол ев
+Ġзаболев аниÑı
+ĠÅĽ ro
+ĠÅĽro dk
+ĠÅĽrodk ów
+Ġ×Ķ ×ľ×IJ×ķ×ŀ×Ļ
+Ġdok ÅĤad
+ĠdokÅĤad nie
+ãģŁãģı ãģªãģĦ
+ãģ¯ãģļ ãģ§ãģĻ
+ã썿ĢĿ ãģ£ãģ¦ãģĦãģŁ
+é cran
+ìĹħ ì²´
+trzym aÅĤ
+ÑģÑĤв еннÑĭй
+ĠNot ÃŃc
+ĠNotÃŃc ias
+Ùħ رÙĬ
+ÙħرÙĬ ض
+æ°Ĺ è»
+æ°Ĺè» ½
+æ°Ĺ軽 ãģ«
+ëĵ £
+Ġ×ĵ ×ķ×IJר
+Ġ׾ ×ŀ׳
+Ġ׾×ŀ׳ ×ķ×¢
+ĠçalÄ±ÅŁ ıyor
+ĠÅŁ idd
+ĠÅŁidd et
+ĠM ặt
+Ġate ÅŁ
+ĠполÑĥÑĩ ениÑı
+à¹Ģà¸Ħรืà¹Īà¸Ńà¸ĩ มืà¸Ń
+Ġgrö ÃŁer
+د ائ
+دائ رة
+Ġbul un
+Ġbulun maktadır
+à¹Ģห ร
+à¹Ģหร ีย
+à¹Ģหรีย à¸į
+à¸Ļัà¸ģ à¸Ĺà¹Īà¸Ńà¸ĩà¹Ģà¸Ĺีà¹Īยว
+Ġalan ında
+ĠÑĥ зна
+Ġл еÑĩение
+売 ãĤĮ
+Ġçev ir
+Ġdeste ÄŁi
+ĠheiÃŁ t
+âĸ ²
+ØŃ Ø·
+à¸Ħำ à¸ķà¸Ńà¸ļ
+ãĤªãĥ³ ãĥ©ãĤ¤ãĥ³
+Ġ×ij×Ĺ×Ļ ×Ļ×Ŀ
+ãĥ¦ ãĥĭ
+Ġdüzenle me
+Ġmodal itÃł
+سر ط
+سرط اÙĨ
+×ŀ׼ ×ķף
+ĠданнÑĭ й
+تر ت
+ترت ÙĬب
+à¸ļาà¸ĩ à¸Ħà¸Ļ
+ĠÄIJ á»ĭnh
+ม ูล
+มูล à¸Ħà¹Īา
+ÙĨ ÙĤص
+à¸ģาร รัà¸ģษา
+ĠÑĦ он
+ĠÑĦон д
+ãĤĪãģĨ ãģ«ãģªãģ£ãģŁ
+Ùħع اÙĦ
+ÙħعاÙĦ جة
+ĠOs man
+ĠOsman lı
+иÑĩеÑģк ом
+à¸Ńยาà¸ģ à¸Īะ
+ãģķãģ¾ ãģĸ
+ãģķãģ¾ãģĸ ãģ¾
+ãģķãģ¾ãģĸãģ¾ ãģª
+Ġת ×ķ׼׾
+×¢ צ×ij
+ĠاÙĦع سÙĥ
+ĠاÙĦعسÙĥ رÙĬ
+Ġvé hic
+Ġvéhic ule
+Ġ×Ļצ ×Ĺ×§
+ĠاÙĦÙĪ ØŃ
+ĠاÙĦÙĪØŃ ÙĬد
+ĠاÙĦع دÙĪ
+ĠQu ản
+Ġê³µ ëıĻ
+بد ÙĦ
+ĠÄij ảng
+Ġm á»ĩnh
+Ġnie zb
+Ġniezb ÄĻ
+ĠniezbÄĻ dn
+Ġyayın lan
+обÑī и
+Ġgö tür
+צ פ
+צפ ×ķ×Ļ
+ĠÙĦÙĬ بÙĬ
+ĠÙĦÙĬبÙĬ ا
+ØŃ ÙĪØ§
+Ġд об
+Ġдоб ÑĢо
+иÑĢÑĥ ем
+ĠاÙĦØŃÙĥÙĪÙħ ÙĬØ©
+m Ã¤ÃŁig
+Ġed ición
+влек аÑĤелÑĮ
+влекаÑĤелÑĮ н
+Ġת ש׾×ķ×Ŀ
+Ġ×Ķש ×ķ׳×Ļ×Ŀ
+มิ à¸ĸุ
+มิà¸ĸุ à¸Ļ
+มิà¸ĸุà¸Ļ ายà¸Ļ
+é£Łãģ¹ ãģ¦
+ĠìĪĺ ì§ij
+ס ×ij×Ļ
+ĠиÑİ Ð»Ñı
+Ġà¹Ħà¸Ķà¹ī à¹ģà¸ģà¹Ī
+׾×Ĺ ×Ŀ
+tr ä
+trä gt
+ãģĿãĤĤ ãģĿãĤĤ
+ÐĿ Ðķ
+Ġв нÑĥÑĤ
+ĠвнÑĥÑĤ ÑĢи
+ãģ¨ ä¸Ģç·Ĵãģ«
+ãĤ« ãĥķãĤ§
+Ġ×ij×Ĺ ×ĵר
+×Ĺ ×ŀש
+ãĤ¨ ãĥį
+ãĤ¨ãĥį ãĥ«
+ãĤ¨ãĥįãĥ« ãĤ®
+ãĤ¨ãĥįãĥ«ãĤ® ãĥ¼
+à¸Ĥà¸Ńà¸ĩ à¸ķัวà¹Ģà¸Ńà¸ĩ
+بÙĤ اء
+פס ×Ļ׼
+פס×Ļ׼ ×ķ׾×ķ×Ĵ
+ãĥ¡ ãĥĥ
+ãĥ¡ãĥĥ ãĤ»
+ãĥ¡ãĥĥãĤ» ãĥ¼ãĤ¸
+ÙĦ ÙĤب
+A Äŀ
+שק ×Ļ×¢
+ÙĤ ساÙħ
+×ĵ×ķ×Ĵ ×ŀ×Ķ
+æ·± ãģĦ
+íĸĪ ëĬĶëį°
+ĠrozwiÄħz anie
+à¸Ļัà¹Īà¸Ļ à¹Ģà¸Ńà¸ĩ
+×Ļצ ×ij
+Ġtr ông
+à¹ĥà¸Ĭà¹ī à¸ļริà¸ģาร
+ĠاÙĦÙħÙĪ Ø³Ùħ
+ĠдеÑĤ и
+ãģĹãģĭ ãģªãģĦ
+ס ×Ļף
+Ġréfé rence
+à¹ģห à¹īà¸ĩ
+ãĤĤãĤī ãģ£ãģŁ
+Ġ׾ ר׼
+Ġ׾ר׼ ×ķש
+شع ÙĪØ±
+ĠÐij ог
+Ġlaz ım
+Ġ×Ļש ׳×Ŀ
+Ġп аÑĢÑĤ
+ĠпаÑĢÑĤ неÑĢ
+ĠÑĥ ника
+ĠÑĥника лÑĮн
+Ġmaté riel
+×ŀר ×§
+Ġph ưá»Ŀng
+Ġз ай
+Ġзай м
+Ùģ ÙĤد
+Univers itÃł
+×¢ ר׼×Ļ×Ŀ
+Ġba ño
+Ġн оÑı
+ĠноÑı бÑĢÑı
+à¸Ľ à¹īาย
+Ġt ats
+Ġtats äch
+Ġtatsäch lich
+ĠÑĤÑĢ ÐµÑĤÑĮ
+Ñį м
+ãĥĻ ãĥ¼ãĤ¹
+Ġnh á»±a
+ìĬ¤ íģ¬
+ĠعبداÙĦ ÙĦÙĩ
+Ġת ×ķר×Ķ
+أش ÙĬ
+أشÙĬ اء
+ĠÙĦÙĦ غا
+ĠÙĦÙĦغا ÙĬØ©
+Ùħ ÙĪØ§ÙĤ
+ÙħÙĪØ§ÙĤ Ùģ
+ĠgÅĤówn a
+Ġart Ä±ÅŁ
+Ġ×ŀ×§ ×ķ×ŀ×Ļ
+ãĤ¯ãĥ© ãĥĸ
+Ġس ÙĪÙī
+ĠìŬ ìĦ±
+اس ر
+اسر ائÙĬÙĦ
+Ġ׳ ×Ľ×ª×ij
+ย à¹īà¸Ńà¸Ļ
+Ġdeber á
+Ġph ẫu
+ÑİÑī ем
+ĠÙĦدÙĬ ÙĨا
+×ŀ×ĺ ×Ķ
+Ġ׳ ×ķ׾×ĵ
+ĠвÑģÑĤÑĢ ÐµÑĩа
+ãĤīãĤĮ ãģ¦ãģĦãģ¾ãģĻ
+ĠcaÅĤ ej
+ย ึ
+ยึ à¸Ķ
+поÑĤ ен
+поÑĤен ÑĨи
+Ġл иÑĤ
+ĠлиÑĤ еÑĢ
+ĠлиÑĤеÑĢ Ð°ÑĤÑĥÑĢ
+Ġкажд ом
+ĠíĮ IJ
+ĠíĮIJ ëĭ¨
+à¸Ī ู
+Ġpres ença
+ãģªãĤĵ ãģ§
+Ùħ ÙĬاÙĩ
+ин ÑĦоÑĢм
+инÑĦоÑĢм аÑĨион
+инÑĦоÑĢмаÑĨион н
+ĠìŀIJ ìŰ
+ר׼ ש
+Ġöd ül
+ç¶ļ ãģı
+Ġп Ñģ
+ĠпÑģ иÑħ
+ĠпÑģиÑħ олог
+ت ذÙĥر
+Ġìŀħ ìŀ¥
+ล à¸Ķà¹Į
+ìĦł ê±°
+ãģ£ãģ¦ ãģĬãĤĬãģ¾ãģĻ
+Ġ×Ļ ×¢
+Ġ×Ļ×¢ ×§×ij
+ĠاÙĦØ· عاÙħ
+ãĥĨ ãĤ¹ãĥĪ
+ĠTu ấn
+Ġparticip ación
+×ŀ×ķ×ŀ ×Ĺ×Ķ
+×Ĵר ס×Ķ
+ĠاÙĦتÙĨ ÙģÙĬ
+ĠاÙĦتÙĨÙģÙĬ ذÙĬ
+ĠбезопаÑģ н
+ge f
+gef ähr
+Ø´ ÙĪØ±
+Ġmy ÅĽli
+ÙĪØ§ Ø´ÙĨ
+ÙĪØ§Ø´ÙĨ Ø·ÙĨ
+׳×ķס ×¢
+Ùĥ Ùĩ
+ÙĥÙĩ رب
+ÙĥÙĩرب اء
+Ġmus iaÅĤ
+ìĭ ¸
+ãĥĸãĥ© ãĥĥãĤ¯
+Ġcré é
+ÙĨÙĩ ار
+owo ÅĽÄĩ
+ÙħØŃا ÙĥÙħ
+ĠwÅĤa ÅĽ
+ĠwÅĤaÅĽ c
+ĠwÅĤaÅĽc iciel
+ĠÙĬ ؤ
+ĠÙĬؤ دÙĬ
+×ŀ×¢ ×ķ׳
+×IJ ×ij׾
+خط أ
+ĠÑħ олод
+×ĸ ×ķ׾
+ãģĵãĤĮ ãĤī
+ãģĵãĤĮãĤī ãģ®
+Ġbás ica
+ฤ à¸Ķ
+ฤà¸Ķ ูà¸ģ
+ฤà¸Ķูà¸ģ า
+ฤà¸Ķูà¸ģา ล
+èIJ½ãģ¡ çĿĢ
+ãģªãģĦ ãģĵãģ¨
+ص ÙĪÙħ
+ÙĨج ØŃ
+׳ק ×ķ×ĵ
+׳ק×ķ×ĵ ת
+кл аÑģÑģ
+íķĺìĭľ ëĬĶ
+ëĦ ĺ
+Ġש×IJ ×Ļ׳×ķ
+ĠС ейÑĩаÑģ
+may acaģı
+Ġyap ılır
+Ġcategor ÃŃa
+عب اد
+ĠТ еп
+ĠТеп еÑĢÑĮ
+×Ķ×Ļס×ĺ ×ķר×Ļ
+h ế
+ãĤ³ ãĥ¼ãĥī
+Ġcabe ça
+ج Ùħا
+جÙħا Ùĩ
+جÙħاÙĩ ÙĬر
+ä½İ ãģĦ
+ĠÑĤоваÑĢ Ð¾Ð²
+à¸Ĭาว à¸ļà¹īาà¸Ļ
+ĠÑģÑĤан ов
+ĠÑģÑĤанов иÑĤÑģÑı
+ĠавÑĤом обилÑĮ
+ĠÑģлÑĥÑĩ ай
+à¸Ńั à¸ŀ
+ĠG iriÅŁ
+ĠìĿ¼ ëĭ¨
+ĠпÑĢ Ð¾Ñģ
+ĠпÑĢоÑģ моÑĤÑĢ
+ãģªãģıãģª ãģ£ãģŁ
+มี à¸Ľà¸±à¸įหา
+ïº İ
+éc oute
+ĠÙħ ÙĪØ¬ÙĪØ¯
+Ġس رÙĬع
+ĠÙĪÙĩ ÙĨا
+ĠÙĪÙĩÙĨا Ùĥ
+à¸Ħุà¸ĵ สม
+à¸Ħุà¸ĵสม à¸ļัà¸ķิ
+Ġìļ° ìĦł
+à¸ŀระ à¸ŀุà¸Ĺà¸ĺ
+好 ãģ¿
+ظ ÙĦÙħ
+Ġм акÑģ
+ĠмакÑģ ималÑĮ
+ĠмакÑģималÑĮ но
+ãĥª ãĤ¢ãĥ«
+à¹ģมà¹ī วà¹Īา
+ĠاÙĦØŃ ÙĪØ§Ø±
+ãĥĹãĥ© ãĤ¹
+Ġع ÙĦاÙĤØ©
+Ġíĸī ëıĻ
+Ġgönder il
+Ġl ãi
+ĠsaÄŁ lıkl
+ĠsaÄŁlıkl ı
+ĠÑĪ Ð°Ð³
+Ġ×ij×IJר ×Ķ
+prowadzi Äĩ
+ãģĦãģı ãģ¤ãģĭ
+Ġبت ارÙĬØ®
+Ġ×ij×IJ×ķת ×Ķ
+Ġmó c
+ĠÐľ не
+ãĥĹãĥ¬ ãĥ¼
+×IJ ×ĸר×Ĺ
+åł´åIJĪ ãģ«ãģ¯
+使 ãģĪ
+à¹Ģร ืà¸Ńà¸Ļ
+ĠÐŁ еÑĤ
+ĠÐŁÐµÑĤ ÑĢ
+ãģ«åħ¥ ãĤĭ
+Ùħ ادة
+à¹Ģà¸ĩ ืà¹Īà¸Ńà¸Ļ
+à¹Ģà¸ĩืà¹Īà¸Ńà¸Ļ à¹Ħà¸Ĥ
+ĠÑģоÑģÑĤоÑı ние
+ôn ica
+ĠÑĦ ев
+ĠÑĦев ÑĢа
+ĠÑĦевÑĢа лÑı
+Ġ×ķ ×ĸ
+Ġ×ķ×ĸ ×IJת
+à¸Ħร ิ
+à¸Ħริ ส
+ĠÐķ Ñīе
+ãģ£ãģ¦ãģĹãģ¾ ãģĦãģ¾ãģĹãģŁ
+ĠпÑĢав иÑĤелÑĮ
+ĠпÑĢавиÑĤелÑĮ ÑģÑĤв
+Ġtä glich
+Ġëĭ¹ ìĭľ
+×ŀ×ķ×¢ ×ŀ×ĵ
+Ġдв оÑĢ
+æī ķ
+æīķ ãģĦ
+ĠÑģÑĤан еÑĤ
+Ġвозд ейÑģÑĤв
+ĠвоздейÑģÑĤв и
+Ġf ête
+à¹Ģส า
+תק ×ķ×ķ×Ķ
+Ġu yar
+Ġuyar ı
+à¸ģลัà¸ļ à¹Ħà¸Ľ
+Ġgi ưá»Ŀng
+Ġв а
+Ġва ÑĪи
+ĠÄij áºŃu
+ĠSpa ÃŁ
+ĠìķĦ ë§Ī
+à¹Ħà¸Ķà¹ī à¸ĩà¹Īาย
+Ġ×Ķ×ŀ ×ijקש
+æĸ° ãģŁ
+æĸ°ãģŁ ãģª
+ılı yor
+пл ан
+Ġ×Ķ×ijר ×Ļ×IJ×ķת
+ĠaÄŁ rı
+Ġsay gı
+建 ãģ¦
+Ġnaj wyż
+Ġnajwyż sz
+سÙĬاس ات
+ãģĬ å¾Ĺ
+ĠاÙĦع ÙĦÙĬ
+ĠاÙĦعÙĦÙĬ ا
+Ġcoraz ón
+ì¹ĺ ë£Į
+หัว à¸Ĥà¹īà¸Ń
+Ġب ØŃÙĬ
+ĠبØŃÙĬ Ø«
+зв езд
+بÙĪ Ø§Ø¨Ø©
+ÐĽ Ðĺ
+ÙĦا زÙħ
+Ġroz p
+Ġrozp oc
+Ġrozpoc zÄĻ
+触 ãĤĮ
+ĠاÙĦج ÙħÙĩ
+ĠاÙĦجÙħÙĩ ÙĪØ±
+Ġsp ÄĻd
+ĠspÄĻd z
+วิà¸Ĺยา ศาสà¸ķรà¹Į
+ив аеÑĤÑģÑı
+Ġдан ной
+Ġreprés ente
+ĠÄij á»ĭch
+Ġ×¢×ŀ ×ķ×§
+à¸Ńัà¸Ļ à¸ķร
+à¸Ńัà¸Ļà¸ķร าย
+Ġestr atég
+Ġestratég ia
+pad ÅĤ
+Ġв полн
+Ġвполн е
+ĠпÑĢедоÑģÑĤав лен
+×Ĺ׾ ×ķ×§
+×Ĺ׾×ķ×§ ת
+ãĤ¢ ãĥĬ
+ĠاÙĦغ ذ
+ĠاÙĦغذ ائÙĬ
+ĠÑĥ зн
+ĠÑĥзн аÑĤÑĮ
+à¸ĭ à¹īาย
+å½ĵ ãģ¦
+ØŃÙĬ اء
+Ġbás ico
+×§×ķ×ij ×¢
+ĠاÙĦÙħ باراة
+ĠاÙĦÙĩ اتÙģ
+Ġ׼ ׳×Ĵ×ĵ
+à¸Ľà¸£à¸° หย
+à¸Ľà¸£à¸°à¸«à¸¢ ัà¸Ķ
+Ðļ ак
+à¸Ĺีà¹Ī à¸Ļà¹Īา
+à¸Ĺีà¹Īà¸Ļà¹Īา สà¸Ļà¹ĥà¸Ī
+ãģ¾ ãģģ
+ï½ ¢
+Ñģк оп
+Ġson rasında
+Ġur zÄħd
+ĠurzÄħd zenia
+׼×ķ ×ķ׳
+׼×ķ×ķ׳ ת
+Ġ׾×Ķת ×ŀ×ķ×ĵ
+Ġ׾×Ķת×ŀ×ķ×ĵ ×ĵ
+ĠÑģ ли
+ĠÑģли ÑĪ
+ĠÑģлиÑĪ ÐºÐ¾Ð¼
+ĠÑģÑĤ Ñĥд
+ĠÑģÑĤÑĥд енÑĤ
+Ġ×Ķ ×ķ×ĵ
+Ġ×Ķ×ķ×ĵ ×¢×Ķ
+ë¹Ħ ìļ©
+à¸Ńยาà¸ģ à¹ĥหà¹ī
+Ġb á»ģ
+ยุ à¸Ĺà¸ĺ
+Ðĺ ÐĿ
+س ائر
+Ø£ صÙĪÙĦ
+ĠاÙĦغ رÙģ
+ãģĵãģ¨ãĤĤ ãģĤãĤĬãģ¾ãģĻ
+è¾¼ ãģ¾ãĤĮ
+ĠاÙĦساب ع
+Ġc á»§
+ãģĦãģŁãģł ãģĦãģŁ
+ì§ ĵ
+ìĤ¬ 무
+powied ź
+تÙģ Ùĥ
+تÙģÙĥ ÙĬر
+иÑĢов ки
+ĠíĨµ íķ´ìĦľ
+ãĤ¨ ãĤ¹ãĥĨ
+ĠдеÑıÑĤелÑĮ ноÑģÑĤÑĮ
+ĠданнÑĭ м
+Ġ×¢ ×ķר
+Ġ×¢×ķר ׼×Ļ
+×ķ×ĵ עת
+Ġhayat ını
+Ġb Äħd
+ĠbÄħd ź
+obs ÅĤug
+à¹Ģà¸ŀียà¸ĩ à¹ģà¸Ħà¹Ī
+à¸ĭ à¹Īา
+è²ł ãģij
+ĠÑģÑĤÑĢ ÐµÐ¼
+ĠÄij á»īnh
+ĠÐł ÑĥÑģ
+ĠN ữ
+Ġ׾×Ķש ×Ļ×Ĵ
+Ġjed noc
+Ġjednoc ze
+Ġjednocze ÅĽnie
+Ġ×Ķ×Ĵ ×ij×ķ×Ķ
+أخ ÙĦاÙĤ
+ĠнаÑģ ел
+ĠнаÑģел ениÑı
+ĠÙĬ ÙĨب
+ĠÙĬÙĨب غÙĬ
+ãģĮ ãģĭ
+ãģĮãģĭ ãģĭ
+×Ĵ עת
+Ðŀ Ðł
+ĠналиÑĩ ии
+Ġë§Ī ì§Ģ
+Ġë§Īì§Ģ ë§ī
+Ġíĸī ìĤ¬
+Ġtre ÅĽci
+Ġê°Ģ ì¹ĺ
+ì¦ ĺ
+Ġана лог
+×Ķצע ת
+в лад
+влад е
+ĠÑģдел ал
+Ġ׳ ×Ĵ×Ļש
+Ġ׳×Ĵ×Ļש ×ķת
+полн ение
+à¸Ĩ à¹Īา
+ĠD ön
+׼׾׼ ׾×Ķ
+×ŀ×ĸ ×Ĵ
+Ùħ Ùģ
+ÙħÙģ Ùĩ
+ÙħÙģÙĩ ÙĪÙħ
+×Ķ ×ĵ
+×Ķ×ĵ פס
+×Ķ×ĵפס ×Ķ
+ãģĻãģİ ãģ¦
+Ġг ÑĢ
+ĠгÑĢ Ð½
+×ŀ×ĺ ×ķס
+Ġ기 ìĸµ
+ï¾ Ł
+ĠpÅĤ yn
+ĠGr ünde
+ĠBü cher
+Ġwed ÅĤug
+ãģ¾ãģł ãģ¾ãģł
+Ġ׳×Ķ ×ĵר
+ĠÙĬست Ø·ÙĬع
+ĠHi á»ĩp
+ãĤŃãĥ£ãĥ³ ãĥļ
+ãĤŃãĥ£ãĥ³ãĥļ ãĥ¼ãĥ³
+Ġth á»ķ
+Ġeuropé enne
+à¸ļ ัà¸ĩ
+à¸ļัà¸ĩ à¸Ħัà¸ļ
+ĠszczegóÅĤ owo
+׳ שק
+ãĥķ ãĥ©ãĥ³ãĤ¹
+×ŀ×ķ×ŀ ×Ĺ×Ļ
+Ġcom ún
+Ġç arp
+ØŃت ÙĬا
+ØŃتÙĬا ج
+ØŃتÙĬاج ات
+ëĭ´ ëĭ¹
+ä½ķ 度
+ä½ķ度 ãĤĤ
+×ĵ ×ij×§
+ãģį ãĤĮ
+ãģįãĤĮ ãģĦ
+Ġк ам
+Ġкам еÑĢ
+ĠespecÃŃf ico
+Ġtel éfono
+à¸ķัà¹īà¸ĩ à¸Ńยูà¹Ī
+I Åŀ
+ãģ© ãĤĵãģ©
+ãģ©ãĤĵãģ© ãĤĵ
+עצ ×ŀ×IJ×Ļ
+à¸Ķัà¸ĩ à¸Ļีà¹ī
+ĠÑĦоÑĢм иÑĢов
+ĠÑĦоÑĢмиÑĢов а
+×ķ×ŀ ×ij
+Ġkullan ımı
+Ðľ Ðŀ
+×¢ ש×Ļ
+עש×Ļ ×Ļ×Ķ
+Ġön lem
+à¹Ģà¸Ń à¹ĩ
+à¹Ģà¸Ńà¹ĩ ม
+×ŀשק ×Ļ×¢
+ר ×Ļ×Ĺ
+à¸Ĥ ัà¸Ķ
+ĠíĻ ľ
+ĠíĻľ ìļ©
+à¸ĭ ะ
+ãĤĪãģĨ ãģ«ãģªãĤĬãģ¾ãģĹãģŁ
+ĠÑĢаÑģ пÑĢ
+ĠÑĢаÑģпÑĢ Ð¾ÑģÑĤ
+ĠÑĢаÑģпÑĢоÑģÑĤ ÑĢан
+ĠÑĢаÑģпÑĢоÑģÑĤÑĢан ен
+׼×Ļ ×ķף
+ÙĤب ض
+تص رÙĬØŃ
+تصرÙĬØŃ ات
+Ġо ÑĢи
+ĠоÑĢи г
+ĠоÑĢиг ина
+ĠоÑĢигина л
+ĠاÙĦع اÙĦÙĬ
+à¹ģหà¹Īà¸ĩ à¸Ļีà¹ī
+ãĥķãĤ¡ ãĥ¼
+ãģ¦ãģĦ ãģį
+ãģ¦ãģĦãģį ãģŁãģĦ
+פ תר
+פתר ×ķ׳×ķת
+Ġ×ij ×Ļ×Ĺ
+Ġ×ij×Ļ×Ĺ ×ĵ
+Ġod by
+Ġodby ÅĤ
+ĠоÑĩеÑĢ ÐµÐ´
+Ġtr ương
+ãĤŃ ãĥ³
+×ŀ ×ķפ
+×ŀ×ķפ ×¢
+ëĵľ 립
+ëĵľë¦½ ëĭĪëĭ¤
+à¸ŀืà¹īà¸Ļ à¸IJาà¸Ļ
+ìŀIJ 격
+ĠVi á»ĩn
+ĠDes pués
+Ġ×IJ׾ ×Ļ׳×ķ
+Ġdur ée
+íĩ ´
+Ġmü zik
+i ếu
+ĠÑĢаз меÑīен
+Ġк Ñĥд
+ĠкÑĥд а
+غ ض
+غض ب
+ĠTamb ém
+à¸Īัà¸Ķ สà¹Īà¸ĩ
+à¸ģาร à¹ģสà¸Ķà¸ĩ
+onom ÃŃa
+Ġан г
+Ġанг ли
+Ġангли й
+Ġанглий Ñģк
+Ġzn al
+Ġznal az
+Ġznalaz ÅĤ
+תר ×Ĵ
+תר×Ĵ ×ķ×Ŀ
+ĠÑģ нов
+ĠÑģнов а
+ĠÑĩаÑģ а
+Ġcommun auté
+ĠespecÃŃf ica
+ĠL á»ĭch
+Ġli é
+Ùģ Ø¬Ø±
+à¹Ģà¸ģ à¹Īà¸ĩ
+ع اÙĦ
+عاÙĦ ج
+Ø£ÙĨ ظ
+Ø£ÙĨظ ÙħØ©
+ES İ
+ĠاÙĦØŃ دÙĬد
+à¸ŀระ à¸Ńà¸ĩà¸Ħà¹Į
+Ġפר שת
+Ġдв иж
+Ġдвиж ениÑı
+ĠاÙĦج ارÙĬ
+à¸ĺาà¸Ļ ี
+неÑģ ен
+ĠاÙĦÙĨ ÙĩائÙĬ
+Ġб еÑĢ
+ĠбеÑĢ ÐµÐ¼
+ĠбеÑĢем енн
+Ġdépart ement
+à¹Ģà¸Ĺ ีย
+à¹Ģà¸Ĺีย à¸ļ
+ĠÐľ аÑĢи
+ĠнекоÑĤоÑĢ ÑĭÑħ
+об еÑģп
+обеÑģп еÑĩен
+×Ĺ ×ķ×ĸ
+×Ĺ×ķ×ĸ ×Ķ
+ÙĨت ج
+à¸Īะ à¹Ħà¸Ķà¹īรัà¸ļ
+á» °
+Ġél éments
+ع ط
+عط اء
+Ġt ắt
+i á»ĩm
+ÑİÑīиÑħ ÑģÑı
+ãģĹãģ °
+ãģĹãģ° ãĤīãģı
+Ġпом ожеÑĤ
+à¸Ĥà¸ĵะ à¸Ļีà¹ī
+Ġ×¢ שר×ķת
+éģķ ãģ£ãģ¦
+ĠпÑĢ Ð¾Ð³
+ĠпÑĢог н
+ĠпÑĢогн оз
+Ġt ÅĤ
+ĠtÅĤ um
+ĠtÅĤum acz
+T ür
+Tür kiye
+ãģį ãģ£
+ãģįãģ£ ãģĭãģij
+Ġ×Ķ׳ ×ķ׼
+Ġ×Ķ׳×ķ׼ ×Ĺ×Ļ
+ĠìĥĿ ìĤ°
+ĠÑĦоÑĢм Ñĭ
+ç¾İ ãģĹãģĦ
+à¸Ľà¸£ ึà¸ģ
+à¸Ľà¸£à¸¶à¸ģ ษา
+Ġlum ière
+ãĤª ãĥ¼ãĥĹ
+ãĤªãĥ¼ãĥĹ ãĥ³
+à¸Ľ ืà¸Ļ
+วั สà¸Ķ
+วัสà¸Ķ ุ
+еÑĢÑĤ в
+ÙĥÙĦ Ùģ
+ï½ £
+à¸ĺรรม à¸Ķา
+׳ ×ĺר
+ĠпÑĢедÑģÑĤав лÑıеÑĤ
+Ġanál isis
+Ġb ãi
+با ÙĤÙĬ
+à¸Ľà¸£à¸° à¹Ģà¸Ķ
+à¸Ľà¸£à¸°à¹Ģà¸Ķ à¹ĩà¸Ļ
+ĠÑģлÑĥÑĩ аÑı
+ĠÑģлÑĥÑĩаÑı Ñħ
+ÐĽ ÐIJ
+สัà¸ĩ à¹Ģà¸ģ
+สัà¸ĩà¹Ģà¸ģ à¸ķ
+Ġprz ec
+Ġprzec ież
+Ùħ صÙĦ
+ÙħصÙĦ ØŃØ©
+ש×ķ×§ ×ķ׾×ĵ
+ĠобоÑĢÑĥд ованиÑı
+Ġtr waÅĤ
+رÙĪ Ùħ
+ìķĪ ëĤ´
+ĠNgh á»ĭ
+Ø® Ø´
+à¸ļา à¸Ħาร
+à¸ļาà¸Ħาร à¹Īา
+Ġоп ÑĨион
+ĠÑģозд аниÑı
+ãĤ³ ãĤ¹ãĥĪ
+Ġ×Ķ×¢ ׾×Ļ
+Ġ×Ķ×¢×ľ×Ļ ×ķף
+lä uft
+ãĥĻ ãĤ¹ãĥĪ
+Ġr ê
+Ġrê ve
+×IJ ×ij×Ļ×ij
+×Ļ ×Ļ×ļ
+ë¶ Ļ
+ãĤ¤ãĥ³ ãĥī
+ÅĤo ży
+ÅĤoży Äĩ
+ع ائÙĦ
+عائÙĦ Ø©
+Ø£ ÙĪØ±
+Ø£ÙĪØ± اÙĤ
+à¸Ĺà¹īà¸Ńà¸ĩ à¸ĸ
+à¸Ĺà¹īà¸Ńà¸ĩà¸ĸ ิà¹Īà¸Ļ
+Ġä hn
+Ġähn lich
+ãĥŁ ãĥĭ
+à¸ľ ู
+à¸ľà¸¹ à¹īà¸Ļ
+à¸ľà¸¹à¹īà¸Ļ ำ
+ĠмаÑĤеÑĢиал Ñĭ
+Ġкап иÑĤ
+ĠкапиÑĤ ал
+ï¼ ¦
+Ġseç il
+Ġh ứng
+Ġintéress ant
+ãģ£ãģ¦ ãģĦãģı
+Ġe ÄŁer
+ëIJĺ ìĹĪìĬµëĭĪëĭ¤
+Ġan laÅŁma
+ãģĶ åĪ©ç͍
+Ġ×ij ×ĸ׼
+Ġ×ij×ĸ׼ ×ķת
+ëĿ¼ ë©´
+ĠÙĬ ÙĪØ³
+ĠÙĬÙĪØ³ Ùģ
+أسÙĦ ØŃØ©
+ĠGef ühl
+ĠноÑĢм алÑĮн
+ãĥĻ ãĥ³
+ãģķãĤĮ ãĤĭãģĵãģ¨
+ĠÐij еÑģ
+ãģ¨ãģĦ ãģĪãģ°
+ĠÙħ ÙĩÙħ
+ĠÙħÙĩÙħ Ø©
+ãģ§ãģĹãĤĩãģĨ ãģŃ
+ĠêµŃ ëĤ´
+à¹Ģม à¹ĩà¸Ķ
+×ŀ×ij קר
+ĠاÙĦد ÙĨÙĬ
+ĠاÙĦدÙĨÙĬ ا
+à¸Ĭ ู
+к ÑĢÑĥÑĤ
+Ġtho áng
+Ġ׳ ×ĵר
+Ġ׳×ĵר ש
+ĠÑĢаÑģÑģ казал
+ĠAu ÃŁerdem
+פ ×IJר
+פ×IJר ×§
+Ġ×ŀש×Ĺ×§ ×Ļ×Ŀ
+צ ר׼×Ļ×Ŀ
+×ŀ×ĵ ×ķ
+×ŀ×ĵ×ķ ×Ļ×§
+èĭ¦ ãģĹ
+ĠÑģ иг
+ĠÑģиг нал
+ĠM á»įi
+Ġtr ữ
+Ġnast ÄĻp
+ĠnastÄĻp nie
+Ġì¶Ķ ì§Ħ
+ĠاÙĦÙģ ÙĨد
+ĠاÙĦÙģÙĨد ÙĤ
+koÅĦ czyÅĤ
+ส ีà¹Ī
+×§ ×Ļ×ij
+×§×Ļ×ij ×ķ×¥
+ĠнÑĥж нÑĭ
+大 åĪĩ
+大åĪĩ ãģª
+æıĽ ãģĪ
+ת ×ķס
+ת×ķס פת
+ãģ£ãģ¦ ãģĦãģªãģĦ
+Ġм Ñı
+ĠмÑı г
+ĠмÑıг к
+Ġjak ie
+Ġjakie ÅĽ
+à¸ķำ à¸ļ
+à¸ķำà¸ļ ล
+ĠìŀĪ ì§Ģ
+×ij×ĺ ×IJ
+ĠоÑĤлиÑĩ но
+ÙĤ ÙIJ
+ĠавÑĤом об
+ĠавÑĤомоб и
+ĠавÑĤомоби лÑı
+دÙĬÙħÙĤرا Ø·ÙĬ
+ĠاÙĦ ÙĪØ§
+ĠاÙĦÙĪØ§ ØŃد
+Ġس ÙĪØ±ÙĬØ©
+Ø£ غÙĦ
+أغÙĦ ب
+ĠÑįк ÑĢан
+ãĥĹ ãĥ©ãĤ¤
+Ġjeste ÅĽ
+ãĥIJ ãĥª
+Ġ×Ķ×IJ ×ķ×ķ×Ļר
+ائ Ùĥ
+à¸Ńยà¹Īาà¸ĩ ยิà¹Īà¸ĩ
+ÑĢ ÐµÐºÑĤ
+Ġum o
+Ġumo ż
+Ġumoż li
+Ġumożli w
+Ġumożliw ia
+Ġnäch ste
+ĠìŀĪ ì§Ģë§Į
+ĠпÑĢед н
+ĠпÑĢедн аз
+ĠпÑĢедназ наÑĩен
+Ġma çı
+Ġp omi
+Ġpomi ÄĻd
+ĠpomiÄĻd zy
+ĠاÙĦÙĦ ÙĤاء
+à¹Ģà¸Ķ à¸Ńะ
+Ġнов оÑģÑĤи
+×ŀ׊׾×Ķ
+رÙĬاض ÙĬ
+à¸Ķ à¸Ļ
+à¸Ķà¸Ļ à¸ķรี
+ب صر
+ìĬ¤ íĥĢ
+scri pción
+Ġnap isa
+Ġnapisa ÅĤ
+Ġ׳ש ×ŀ×¢
+ĠاÙĦÙħØŃ ÙĦÙĬ
+Ġhi á»ĥn
+×IJ ×Ĺ
+×IJ׊ר×IJ×Ļ
+Ġг ÑĢаниÑĨ
+æīĭ ç¶ļãģį
+Ùĥ سب
+Ġà¹ģà¸ķà¹Ī à¸ĸà¹īา
+à¸Ķาว à¸Ļà¹Į
+à¸Ķาวà¸Ļà¹Į à¹Ĥหลà¸Ķ
+ãĤĭãģĵãģ¨ãģĮãģ§ãģį ãģ¾ãģĻ
+åŁºæľ¬ çļĦãģ«
+ÙĪÙĦ اد
+rä ume
+د ÙģØ§Ø¹
+×Ļצ ×¢
+ĠO czy
+ĠOczy wiÅĽcie
+ĠÅ ģ
+ĠÅģ a
+اÙĦÙĬ اب
+اÙĦÙĬاب اÙĨ
+áºł I
+ĠBir liÄŁi
+×Ķ ×ķצ
+×Ķ×ķצ ×IJת
+ĠÄij ua
+Ġê·¸ëŁ¬ ëĭĪê¹Į
+Ġréal ité
+ع ÙĦاÙĤات
+J este
+Jeste ÅĽ
+Ġмн ож
+Ġмнож еÑģÑĤво
+ï¼ «
+ãĥĹãĥŃ ãĤ¸ãĤ§
+ãĥĹãĥŃãĤ¸ãĤ§ ãĤ¯ãĥĪ
+ĠÑĦ л
+ظ ÙĨ
+×Ĵ׾ ×Ĵ׾
+ĠmÅĤod zie
+ĠmÅĤodzie ż
+à¸Ļà¹īำ à¸ķา
+à¸Ļà¹īำà¸ķา ล
+ÐĽ Ðķ
+×ij ×ķ×ĺ
+Ġ׾×Ķ ×Ĵ×Ļ×ĵ
+ãģĵãģ¨ãĤĤ ãģĤãĤĭ
+ز اد
+×ŀ×Ļ×ĵ ×¢
+ĠgÅĤówn ie
+ãĥı ãĤ¦
+ãĥıãĤ¦ ãĤ¹
+б ел
+Ġét ape
+ðŁĺ Ģ
+Ġмод елÑĮ
+a ģını
+ש ×Ĺ×§
+ש×Ĺ×§ ף
+Ġni ño
+à¸Ĭ à¹īาà¸ĩ
+à¹Ģล ีย
+ĠÑĦоÑĢм е
+ĠاÙĦØ´ رÙĬÙģ
+ĠÑĥд аÑĢ
+arr iv
+arriv ée
+Ġmies iÄĻ
+ĠmiesiÄĻ cy
+ØŃ رÙĥ
+ØŃرÙĥ ات
+ĠDi á»ħn
+ÐĿ Ы
+ãģ¾ãģ£ãģŁ ãģı
+Ġ×Ļ ×¨×ķ×§
+еÑģÑĤ еÑģÑĤв
+еÑģÑĤеÑģÑĤв енн
+Ġê·¸ ëŁ¼
+ĠاÙĦÙħ تÙĪ
+ĠاÙĦÙħتÙĪ Ø³Ø·
+Ġbéné fic
+Ġbénéfic ie
+Ġwy bra
+Ġwybra Äĩ
+ĠاÙĦز ÙħÙĨ
+ĠпÑĢин Ñı
+ĠпÑĢинÑı л
+Ù쨱 ØŃ
+Ġk sz
+Ġksz taÅĤ
+ĠksztaÅĤ t
+ק׾ ×ĺ
+×ij×ĵ×Ļ×§ ת
+Ġgi ấ
+Ġgiấ c
+Ġpropriet Ãł
+деÑĢж ан
+ĠKö ln
+ĠGü zel
+×Ļפ ×ķ×Ļ
+ĠCu á»Ļc
+ÑįÑĤ аж
+تر ÙĥÙĬ
+ترÙĥÙĬ ز
+лож ений
+Ġп Ñĥ
+ĠпÑĥ ÑĤи
+اخت ÙĦاÙģ
+åĩºãģ¦ ãģıãĤĭ
+à¸ļุ à¸ģ
+âĿ ¤
+ÑĦ ан
+פש ×ĺ
+à¸ļัà¸Ļ à¹Ģà¸Ĺ
+à¸ļัà¸Ļà¹Ģà¸Ĺ ิà¸ĩ
+ĠاÙĦس اد
+ĠاÙĦساد س
+ĠاÙĦÙĤ ÙĪÙħ
+ĠاÙĦÙĤÙĪÙħ ÙĬ
+Ġyönet ici
+Ùĩ ÙĪØ§Øª
+ÙĩÙĪØ§Øª Ùģ
+Ġrespons ável
+Ġпод деÑĢжива
+ĠاÙĦسÙĦ Ø·
+ĠاÙĦسÙĦØ· ات
+ãģĹãģ¦ ãģĬãģı
+ãĥļ ãĥĥãĥĪ
+à¸Ľ ุà¹Īม
+Ġogl Äħda
+ÙĨا ÙĤ
+ÙĨاÙĤ Ø´
+à¸Ħà¸Ńà¸Ļ à¹Ĥà¸Ķ
+ĠMü sl
+ĠMüsl ü
+ĠMüslü man
+ĠMo ż
+ĠMoż na
+Ġnum érique
+Ġv á»ı
+ĠسÙĬ تÙħ
+Ġyer leÅŁ
+монÑĤ аж
+Ġgo ût
+ãģ¦ ãģĬãĤĬãģ¾ãģĻ
+ĠKh ánh
+Ġе дин
+Ġедин ÑģÑĤв
+اÙĨ Ø®Ùģ
+اÙĨØ®Ùģ Ø§Ø¶
+ìĭľ íĹĺ
+Ġl ặng
+ĠÑĢ Ð¾Ð»ÑĮ
+à¸ķัว à¹ģà¸Ĺà¸Ļ
+à¸Ħà¹Īา à¹ĥà¸Ĭà¹ī
+à¸Ħà¹Īาà¹ĥà¸Ĭà¹ī à¸Īà¹Īาย
+Ġver füg
+Ġverfüg bar
+ìĻĶ ëĭ¤
+ãģĦ ãģļ
+ãģĦãģļ ãĤĮ
+ĠиÑģÑģлед ованиÑı
+меÑī а
+×Ķ ×Ĺ
+×Ķ×Ĺ ×ĸר
+à¹ģà¸Ł à¸Ĭัà¹Īà¸Ļ
+ت صرÙģ
+Ø¥ رÙĩاب
+Ġexerc ÃŃcio
+Ġé lev
+Ġélev é
+สัà¸įà¸įา à¸ĵ
+Ãĸ Z
+ãĥĹ ãĥŃãĤ°
+ãĥĹãĥŃãĤ° ãĥ©
+ãĥĹãĥŃãĤ°ãĥ© ãĥł
+Ġw ewnÄĻtrzn
+Ġhen üz
+é£Ľ ãģ³
+à¹Ģà¸Ķ à¸Ńรà¹Į
+Ñģ Ñĥж
+ÑģÑĥж ден
+شع ÙĪØ¨
+ãģ²ãģ¨ ãĤĬ
+Ġwy ÅĤÄħ
+ĠwyÅĤÄħ cznie
+Ġпло Ñħо
+ÐĶ Ðķ
+Ạ¦
+Ù쨹 اÙĦÙĬ
+ÙģØ¹Ø§ÙĦÙĬ ات
+ĠاÙĦع شر
+ÑģÑĤÑĥп ил
+Ġy arg
+Ġyarg ı
+нÑİ Ñİ
+×ķ×IJ ×ij
+Ġu ç
+Ġuç ak
+ë² ½
+تÙĪ ÙĤÙĬ
+تÙĪÙĤÙĬ ع
+Ġì¤ij ìĭ¬
+׳×Ļ×ķ ×ķ×ĺ
+Ø£ ÙĥÙĦ
+ç½® ãģĦãģ¦
+éłĤ ãģį
+Ġ×Ķת ×ij
+Ġ×Ķת×ij ×Ļ×¢×Ķ
+Ġdür fen
+Ùħ ÙĤاÙĦ
+ÙħÙĤاÙĦ ات
+Ġز ÙħÙĨ
+à¸ŀฤ ศ
+à¸ŀฤศ à¸Ī
+à¸ŀฤศà¸Ī ิà¸ģ
+à¸ŀฤศà¸Īิà¸ģ ายà¸Ļ
+ĠнеÑģк олÑĮ
+ĠнеÑģколÑĮ ки
+ĠнеÑģколÑĮки Ñħ
+Ġcrian ça
+มิ à¸ķร
+×ŀ׼ ×Ļר×ķת
+à¸ģาร à¸ļริหาร
+Ġtélé charg
+Ġ×IJ×ķ×Ķ ×ijת
+ĠBü ro
+ä½ľ ãģ£ãģŁ
+ĠKi ÅŁi
+ç¾İåij³ ãģĹ
+à¹Ģลย à¸Ħà¹Īะ
+à¸ŀà¸ļ à¸ģัà¸ļ
+à¸Ī à¹īา
+Ġç er
+Ġçer ç
+Ġçerç eve
+ãĤĴä½ľ ãģ£ãģ¦
+ĠпеÑĢв ÑĥÑİ
+×ŀצ ר×Ļ×Ŀ
+×IJ׾ ×ķ×Ķ
+×IJ׾×ķ×Ķ ×Ļ×Ŀ
+Ġagr é
+Ġagré able
+Ġay ır
+İL İ
+ãĤ ¥
+Ġíĺ Ħ
+ĠíĺĦ ìĭ¤
+ثاÙĦ Ø«
+ת ×ĸ
+ת×ĸ ×ķ׳×Ķ
+ãģ¨ãģĦ ãģ£ãģ¦
+ãģ¨ãģĦãģ£ãģ¦ ãĤĤ
+Ġا بÙĪ
+ĠÑģоб ак
+é£Łãģ¹ ãģŁ
+Ġдан ном
+à¹Ģล ิ
+à¹Ģลิ ศ
+Ġí ļ
+Ġíļ ¨
+Ġíļ¨ ê³¼
+ãĤĤãĤī ãģĪãĤĭ
+׳ צ׾
+ÑĦ ик
+ÑĦик Ñģ
+Ġjeste ÅĽmy
+ת×Ĺ×ķש ×Ķ
+à¹Ħมà¹Ī à¸Ħวร
+ĠØŃ سÙĬÙĨ
+à¸ģาร ลà¸ĩà¸Ĺุà¸Ļ
+ë´ ¤
+ĠÐĺ менно
+à¸ļ à¸Ńรà¹Į
+à¸ļà¸Ńรà¹Į à¸Ķ
+ĠC ảnh
+ìĦľ ë¹ĦìĬ¤
+Ġпол ов
+Ġполов ин
+Ġзам еÑĩа
+ãģĦãĤį ãĤĵãģª
+Ġ×ij ×Ļ×§
+Ġ×ij×Ļ×§ ש
+л ÑĥÑĪ
+ãĤĴ è¿İ
+ãĤĴè¿İ ãģĪ
+جرÙĬ ÙħØ©
+Ġt ây
+ĠاÙĦÙĨ ÙĪ
+ĠاÙĦÙĨÙĪ ÙĪÙĬ
+ÃĤ N
+ì¿ ł
+หà¸Ļ าว
+Ġ×ij׊ש×ij×ķף
+ز ار
+à¸Ķ าร
+à¸Ķาร า
+ĠÅĽ l
+ĠÅĽl ub
+มีà¸Ħวาม สุà¸Ĥ
+Ġn hu
+Ġnhu áºŃn
+ÙħØŃ طة
+à¹Ģสืà¹īà¸Ń à¸ľà¹īา
+ĠТ олÑĮко
+ĠÙĥ س
+ĠÙĥس ارة
+ÙħØ´ رÙĪØ¹
+niÄĻ cia
+×¢ ׼ש×Ļ×ķ
+ت ÙĦÙģ
+تÙĦÙģ Ø²ÙĬ
+تÙĦÙ쨲ÙĬ ÙĪÙĨ
+Ġl Æ°á»Ľi
+ĠÐľÐ¾Ñģк вÑĭ
+Ġré serve
+Ġan laÅŁ
+ĠanlaÅŁ ıl
+Ġed eceÄŁi
+รà¸Ńà¸ĩ à¹Ģà¸Ĺà¹īา
+Ġب Ø·
+Ġبط رÙĬ
+ĠبطرÙĬ ÙĤØ©
+ãģ¦ãģĹãģ¾ ãģ£ãģ¦
+ãĤĤãĤī ãģ£ãģ¦
+بر ج
+æ± ļ
+æ±ļ ãĤĮ
+Ġch oc
+Ġchoc ia
+Ġchocia ż
+Ġzob ac
+Ġzobac zyÄĩ
+пÑĢ Ñı
+пÑĢÑı жен
+ĠÑĨ иÑĦ
+ĠÑĨиÑĦ ÑĢ
+Ġм ам
+Ġвз ÑıÑĤÑĮ
+Ġch ạm
+ج سÙħ
+ØŃÙħ اس
+à¹Ģล à¹Īม
+à¸ŀิ ษ
+×Ķפ ׼×ķ
+à¸Ĭà¹Īà¸Ńà¸ĩ à¸Ĺาà¸ĩ
+Ġв ек
+Ġвек а
+Æ¡ Ìģ
+Æ¡Ìģ i
+ĠTi á»ģn
+Ġtr ầm
+мÑĭ ÑĪ
+мÑĭÑĪ Ð»
+ĠÑĤ Ñĥ
+ĠÑĤÑĥ ÑĢиÑģÑĤ
+Ġch c
+Ġchc Äħ
+Ġав г
+Ġавг ÑĥÑģÑĤ
+ĠавгÑĥÑģÑĤ а
+ס ×IJ×ķת
+Ġר ×Ĵ׾
+à¸ľà¸¥ à¸ģระà¸Ĺ
+à¸ľà¸¥à¸ģระà¸Ĺ à¸ļ
+å¤īãĤı ãĤĭ
+Ġ×Ķ×IJ×Ĺר ×ķ׳×Ļ×Ŀ
+سÙģ ÙĬر
+ĠÑĩа Ñīе
+ãģĦ ãĤī
+ãģĦãĤī ãģ£
+ãģĦãĤīãģ£ ãģĹãĤĥ
+×ķ×ŀ ׳×Ļ×Ŀ
+Ġart tır
+ĠCh á»ĭ
+Ġì¡° ì§ģ
+ĠÑĥÑģп еÑħ
+Ġ×¢ ×ķס
+Ġ×¢×ķס ×§
+ĠìĥĿ ëªħ
+ÑĨ иÑĤ
+Ġreg ión
+Ðŀ ÐĿ
+ĠdoÄŁ um
+ĠyaÅŁ ad
+ĠyaÅŁad ıģı
+à¸Ĺà¸Ķ ลà¸Ńà¸ĩ
+Ġgöz ü
+ש ×Ļר×Ķ
+дÑĥм ал
+Ġda ģı
+Ġdaģı t
+à¸Ĺีม à¸ĩาà¸Ļ
+Ġti á»ģm
+ĠاÙĦÙĥ بر
+ĠاÙĦÙĥبر Ùī
+ì¹ Ń
+ĠGü nc
+ĠGünc elle
+ĠGüncelle me
+ê¹ Ĭ
+ĠобоÑĢÑĥд ование
+ĠÑĢеÑĪ Ð°
+á» ¤
+Ġп иÑĤ
+ĠпиÑĤ аниÑı
+à¹Ģรีย à¸ļ
+×Ľ×ª ×Ļ×ij×Ķ
+Ġп он
+Ġпон ÑĢав
+ĠпонÑĢав и
+Ġ×Ķ ×ķ׾×ĵ
+Ġ×Ķ×ķ׾×ĵ ת
+Ġê² ģ
+Ġê²ģ ëĭĪëĭ¤
+ĠпеÑĢв ой
+ãĥ©ãĤ¤ ãĥķ
+ĠÅŁi ir
+kr ÄĻ
+krÄĻ c
+Ġthi á»ĥu
+à¹Ģลย à¸Ĺี
+à¹Ģลยà¸Ĺี à¹Ģà¸Ķียว
+×ĺ×¢ ׳×ķת
+ائ ÙĩÙħ
+Ġ×IJ ס×ķר
+ĠплаÑĤ еж
+تر دد
+Ġmożli we
+Ġkh Ỽ
+ĠkhỼ p
+تÙģØ§Ø¹ ÙĦ
+ĠÑĪ ÐºÐ¾Ð»ÑĮ
+ĠÑĪколÑĮ н
+ĠÙĤ صة
+Ġmét ier
+nÄĻ ÅĤa
+หล à¹Īà¸Ń
+Ġ á»§ng
+Ġprz egl
+Ġprzegl Äħd
+ĠاÙĦÙħ تعÙĦ
+ĠاÙĦÙħتعÙĦ ÙĤØ©
+ĠÑģÑĭ н
+Ġв олн
+ãĥĩ ãĥ¼ãĥĪ
+ĠÐŃ ÑĤи
+Ġк ÑĢоме
+à¸Ħ ารà¹Į
+׳ק ×ķ×ĵ×Ķ
+Ġ׾ש×ŀ ×ķ×¢
+Ġ×ĸ ×ķ׼ר
+ï¼ §
+ÙĬ ÙİØ§
+Ġgi á»ıi
+åĥį ãģı
+ĠÑģ ни
+ĠÑģни жен
+à¹ģà¸Ķ à¸Ķ
+รุ à¸Ļ
+รุà¸Ļ à¹ģรà¸ĩ
+Ġhi á»ĩp
+ograf ÃŃa
+à¹Ģà¸Ī à¸Ńรà¹Į
+Ġдв иг
+Ġдвиг аÑĤ
+ĠдвигаÑĤ ел
+Ġü y
+Ġüy eler
+Ġüyeler i
+Ġб Ñĥк
+ĠбÑĥк в
+ãĤĤ å¤ļãģı
+Ġthi á»ĩt
+ĠPa ÃŃs
+ĠØ· بÙĬعÙĬ
+à¹ģà¸Ī à¸ģ
+ĠاÙĦص ØŃÙĬØŃ
+Ġapp ré
+Ġappré ci
+Ġdecis ión
+Ġë°ĺ ëĵľ
+Ġë°ĺëĵľ ìĭľ
+ĠÑĤеб е
+ãĤ· ãĥ¼ãĤº
+ãĤ·ãĥ¼ãĤº ãĥ³
+Ġд алÑĮн
+ĠìĬ ¤
+ĠìĬ¤ ìĬ¤
+ĠìĬ¤ìĬ¤ ë¡ľ
+ĠTh á»ĥ
+Ġkar ÅŁ
+ĠkarÅŁ ıs
+ĠkarÅŁÄ±s ında
+ĠK ön
+ĠKön ig
+ив ание
+×ij ×ķצע
+г лаÑģ
+Ġtw ó
+Ġtwó rc
+à¸Ľà¸ģ à¸Ħร
+à¸Ľà¸ģà¸Ħร à¸Ńà¸ĩ
+ĠG ÅĤ
+ĠGÅĤ ówn
+ĠUnter stüt
+ĠUnterstüt zung
+Ġд ÑĥÑħ
+ĠдÑĥÑħ ов
+Ø£ ÙħاÙĨ
+×Ĺש ש
+ت ظ
+تظ اÙĩر
+ĠлÑİб ом
+à¸ķ าร
+à¸ķาร าà¸ĩ
+Ġkr ól
+Ø£ ØŃدث
+ì¡Į ëĭ¤
+Ðļ ÑĥÑĢÑģ
+ãĥĥ ãĥĦ
+×ŀ×§ ×ķ×ij׾
+ĠÑģимв ол
+Ġdés orm
+Ġdésorm ais
+w üns
+wüns che
+Ñĥ ни
+Ñĥни ÑĨип
+ÑĥниÑĨип алÑĮн
+หลัà¸ģ สูà¸ķร
+ÙĨت شر
+Ġа л
+Ġал к
+Ġалк ог
+Ġалког ол
+ĠÑĥ ÑĩиÑĤÑĭва
+à¸ģำ à¸ģัà¸ļ
+Ġ׾ פע×ķ׾
+ĠìŰ ê²°
+s Äħd
+ĠاÙĦØ£ ÙĬ
+ĠاÙĦØ£ÙĬ اÙħ
+غÙĬ اب
+Ġна ÑĢ
+ĠнаÑĢ ÐºÐ¾
+×ŀ×ķ×ĵ ×¢
+ĠÑģеÑĢ Ð¸Ð¸
+пиÑģ Ñĭва
+สิ ว
+ç¶ļ ãģĦãģ¦
+çͳãģĹ è¾¼ãģ¿
+Ġ׾ ×Ĵר
+Ġ׾×Ĵר ×ķ×Ŀ
+Ġд ем
+Ġдем о
+Ġë³´ ëĤ´
+تÙĩ دÙĬد
+ĠÙħØ´ ÙĬرا
+Ġdu y
+Ġduy á»ĩt
+ĠwiÄĻks ze
+Ùħع اÙĬ
+ÙħعاÙĬ ÙĬر
+ĠG da
+ĠGda ÅĦsk
+Ġr ah
+Ġrah ats
+Ġrahats ız
+ר ×ķצ×Ķ
+l ös
+lös ung
+ĠТак им
+ÑĪ ÐµÐ´
+ÑĪед ÑĪ
+ع زÙĦ
+Ġרש ×Ļ×ŀת
+Ġ׾×Ķ ×Ļ׼
+Ġ׾×Ķ×Ļ׼ ×ł×¡
+Ġп ÑĥÑĤ
+ĠпÑĥÑĤ еÑĪ
+ĠпÑĥÑĤеÑĪ ÐµÑģÑĤв
+Ġnot ÃŃcia
+Ġal Ä±ÅŁ
+ĠalÄ±ÅŁ ver
+ĠalÄ±ÅŁver iÅŁ
+ĠwÅĤ os
+ĠwÅĤos ów
+Ġب غ
+Ġبغ داد
+Ġver öffent
+Ġveröffent licht
+ĠKh á
+Ġt án
+ëIJĺ 기
+Ġë°© 문
+Ùģ ÙĬÙĦ
+à¹Ģà¸ģิà¸Ķ à¸Īาà¸ģ
+åı¯ æĦĽ
+åı¯æĦĽ ãģĦ
+à¸ĸ ุà¸ĩ
+Ġz ewnÄĻtrzn
+à¸łà¸²à¸©à¸² à¸Ńัà¸ĩà¸ģฤษ
+Ġmá xima
+Ġul us
+Ġulus lararası
+Ġ׳×Ķ ×ł
+à¸Ĥà¹Īาว สาร
+ĠìĿĺ ìĤ¬
+à¹Ģหล ืà¸Ńà¸ĩ
+Ġد ÙĤ
+ĠدÙĤ ائÙĤ
+สืà¹Īà¸Ń สาร
+ë¨ ¼
+ĠÑģоÑģÑĤоÑı нии
+สมา à¸Ħม
+á» Ĥ
+ĠÐľÐ¾Ñģ ков
+ĠÐľÐ¾Ñģков Ñģк
+×ŀס ×ķ×Ĵ׾
+ãģĭ ãģĭãĤĬ
+ĠTr uyá»ģn
+à¹ģà¸Ĥà¹ĩà¸ĩ à¹ģรà¸ĩ
+×ŀ×Ĺ ×ĸ×Ļ×§
+à¹Ĥà¸ģ à¹ī
+ÙĬس ر
+ìĶ ©
+×IJ ×ķ×§
+×IJ×ķ×§ ×ĺ
+×IJ×ķ×§×ĺ ×ķ×ijר
+Ġprox imité
+ÙħÙĨ Ùĩج
+ĠاÙĦج ز
+ĠاÙĦجز ائ
+ĠاÙĦجزائ رÙĬ
+ĠÄIJi á»ĥm
+Ġден еж
+Ġденеж н
+ÙģØŃ ص
+Ùģ Ø¦
+ĠÐij Ñĥд
+×Ĵ×Ļ×ĵ ×ķ׾
+ĠÐĴ едÑĮ
+عÙĦ اÙħØ©
+Ġ×IJ×Ĺר ×ķ׳×ķת
+ãģĦãģŁãģł ãģĦãģ¦
+سÙĦ ØŃ
+ØŃ ÙĦÙħ
+ز ÙĪØ§Ø±
+Ùĥ سر
+×ĺ קס
+Ġб ан
+Ġбан ков
+ĠпÑĢ Ð¾Ð¶
+ĠпÑĢож ива
+li wo
+liwo ÅĽci
+ĠTi ếp
+ĠاÙĦÙħÙĨ اسب
+ĠاÙĦØ® ÙĬار
+ãģĬ ãģĭ
+ãģĬãģĭ ãģĴ
+à¸Ķà¸Ńà¸ģ à¹Ħมà¹ī
+ä mp
+ämp fe
+à¸ķัà¹īà¸ĩ à¹ĥà¸Ī
+Ġза ÑīиÑĤ
+ĠзаÑīиÑĤ Ñĭ
+ĠTh ưá»Ŀng
+Ġص Ùģ
+ĠصÙģ ØŃØ©
+×Ĺ×ķר ×£
+ãĥIJ ãĥĥãĤ°
+Ġ×ĵ ×Ļ×Ĵ
+Ġ×ĵ×Ļ×Ĵ ×Ļ×ĺ
+Ġ×ĵ×Ļ×Ĵ×Ļ×ĺ ׾×Ļ
+Ġ×Ķ×Ĺ ×ķ׾×Ļ×Ŀ
+в еÑī
+веÑī а
+Ġк ÑĥлÑĮÑĤ
+ĠкÑĥлÑĮÑĤ Ñĥ
+ĠкÑĥлÑĮÑĤÑĥ ÑĢÑĭ
+ĠاÙĦاÙĨ ترÙĨت
+Ġhö ch
+Ġhöch st
+Ġíĺ ķ
+Ġíĺķ íĥľ
+Ġв ой
+Ġвой нÑĭ
+ÐĽ Ðŀ
+ìĭł ìļ©
+Ġ×ŀ×ij ×ķס
+Ġ×ŀ×ij×ķס ס
+×ŀ׳ ×Ļ×¢
+Ġfiyat ı
+ĠÑģл Ñĥж
+ĠÑģлÑĥж бÑĭ
+à¸Ĺั ศ
+à¸Ĺัศ à¸Ļ
+ãģĵãģ¨ãģĮ å¤ļãģĦ
+Ġ×Ķ×ŀש ת
+Ġ×Ķ×ŀשת ×ŀש
+å¯Ħ ãģĽ
+×ŀש׾ ×ķ×Ĺ
+æĻĤ çĤ¹
+æĻĤçĤ¹ ãģ§
+à¸ŀร ี
+à¸ŀรี à¹Ģมีย
+à¸ŀรีà¹Ģมีย รà¹Į
+à¸ŀรีà¹Ģมียรà¹Į ลีà¸ģ
+Ġdiffic olt
+Ġdifficolt Ãł
+ãĥ¬ ãĤ¹ãĥĪ
+ãĥ¬ãĤ¹ãĥĪ ãĥ©ãĥ³
+สม à¹Ģà¸Ķà¹ĩ
+สมà¹Ģà¸Ķà¹ĩ à¸Ī
+Ġж ид
+Ġжид к
+Ġzu peÅĤ
+ĠzupeÅĤ nie
+ĠÙħ جر
+ĠÙħجر د
+ãģĮ å§ĭ
+ãģĮå§ĭ ãģ¾
+ãĤŃãĥ£ ãĥ©
+Ġ×IJ ×ķ×ķ×Ļר
+ãģĬ äºĴ
+ãģĬäºĴ ãģĦ
+Ġpot rÃł
+ĠPa ÅĦst
+ĠPaÅĦst wo
+Ġب ÙĬاÙĨ
+ĠبÙĬاÙĨ ات
+Ġин огда
+ĠÑĢ Ð°
+ĠÑĢа ÑģÑĤв
+ĠÑĢаÑģÑĤв оÑĢ
+Ġ×ĸ ×ŀ׳
+ยิ à¹īม
+Ä Ĩ
+ãģ¾ ãģķ
+ãģ¾ãģķ ãģ«
+ãĥķãĤ¡ ãĤ¤ãĥ«
+Ġgörd Ã¼ÄŁÃ¼
+สà¸ĩ à¸Ħร
+สà¸ĩà¸Ħร าม
+ĠArk adaÅŁ
+ĠrozwiÄħz ania
+×ŀ ×ķ×ĺ
+pi ÄĻ
+piÄĻ t
+ص غر
+ส ย
+สย าม
+ãĤĨ ãģ£ãģıãĤĬ
+Ġtr ần
+Ġeconom ÃŃa
+Ġgeh ören
+ãĤ·ãĥ§ ãĥ¼
+ĠsÅĤ ucha
+à¸ŀà¸Ń à¹ĥà¸Ī
+ĠоÑĤмеÑĤ ил
+ÙĨت ÙĤÙĦ
+Ġprop ósito
+ĠваÑĪ ÐµÐ³Ð¾
+Ġnh ắn
+à¹ģà¸ĸ ว
+Ġком иÑģ
+ĠкомиÑģ Ñģи
+waż nie
+Ġy avaÅŁ
+×ŀ ×Ļ×§
+×ŀ×Ļ×§ ×ķ×Ŀ
+ש×IJ׾ ת
+Ġyıll arda
+ĠÐ ®
+ĠЮ ÑĢ
+×ł×¡ ×Ļ×ij×ķת
+ת צ
+תצ ×ķ×Ĵ
+Ġод нÑĥ
+Ġ à¸Ńยà¹Īาà¸ĩà¹Ħร
+Ġà¸Ńยà¹Īาà¸ĩà¹Ħร à¸ģà¹ĩà¸ķาม
+ëģ ¼
+à¹Ħล à¹Ī
+تس ÙĦÙĬÙħ
+بÙĦ اغ
+Ġì ī
+Ġìī ½
+Ġìī½ ê²Į
+ãĥļ ãĥ³
+зв ÑĥÑĩ
+ĠW äh
+ĠWäh rend
+Ġ×Ļ ×Ļת
+Ġ×Ļ×Ļת ׼ף
+Ġkh uyên
+Ġv ẽ
+Ġа меÑĢ
+ĠамеÑĢ Ð¸Ðº
+ĠамеÑĢик ан
+ĠамеÑĢикан Ñģк
+ع جب
+ãĥĽãĥ¼ãĥł ãĥļãĥ¼ãĤ¸
+Ġник ÑĤо
+ĠÙĤ Ùİ
+ĠÙĤÙİ Ø§ÙĦ
+ĠÙĤÙİØ§ÙĦ Ùİ
+ÐIJ ÐĹ
+Ùħ جÙħÙĪØ¹
+ÙħجÙħÙĪØ¹ ات
+Ġnecess itÃł
+Ġpob li
+Ġpobli żu
+Ġph ấn
+ĠСо обÑī
+ÙħÙĤ اط
+ÙħÙĤاط ع
+Ġ×Ķצ ×ķר×ļ
+la ÅŁtırma
+ว ิà¸Ķ
+วิà¸Ķ ี
+วิà¸Ķี à¹Ĥà¸Ń
+Ġ그리 ìĬ¤
+Ġ그리ìĬ¤ ëıĦ
+ãĤ¿ãĤ¤ ãĥŁ
+ãĤ¿ãĤ¤ãĥŁ ãĥ³ãĤ°
+×§×ĺ ×Ĵ×ķר
+×§×ĺ×Ĵ×ķר ×Ļ×Ķ
+Ġ×Ĺ ×ķפ
+Ġ×Ĺ×ķפ ש×Ļ
+أ جر
+Ġим ени
+ĠÑĢан ее
+à¹Ģà¸ŀืà¹Īà¸Ńà¸Ļ à¹Ĩ
+ĠJes ús
+Ñģо един
+Ñģоедин ен
+Ġר ×Ĺ×ķ×§
+à¹Ĥà¸ļ รา
+à¹Ĥà¸ļรา à¸ĵ
+ĠH Æ¡n
+Ġth áºŃp
+تع ÙĬÙĬÙĨ
+Ġtart Ä±ÅŁ
+ĠtartÄ±ÅŁ ma
+ĠGes pr
+ĠGespr äch
+תר ×ķפ
+תר×ķפ ×ķת
+Ġcat égorie
+Ġоказ Ñĭва
+ĠналиÑĩ ие
+Ġprésent é
+Ġk ull
+Ġkull and
+Ġkulland ı
+Ġü nl
+Ġünl ü
+ĠÙģ Ùĥرة
+из аÑĤоÑĢ
+×IJ ×ķ׳
+×IJ×ķ׳ ×Ļ×ij
+×IJ×ķ׳×Ļ×ij רס
+×IJ×ķ׳×Ļ×ijרס ×Ļ×ĺת
+ĠÑĢаÑģÑģ маÑĤ
+ĠÑĢаÑģÑģмаÑĤ ÑĢ
+ĠÑĢаÑģÑģмаÑĤÑĢ Ð¸Ð²Ð°
+تÙĥÙĦ Ùħ
+Ùĥت رÙĪ
+ÙĥترÙĪ ÙĨÙĬ
+ĠÑģо ÑĩеÑĤ
+ĠÑģоÑĩеÑĤ а
+ãĤĴè¦ĭ ãģĽ
+Ġng ừa
+ĠÐł еÑģп
+ĠÐłÐµÑģп Ñĥб
+ĠÐłÐµÑģпÑĥб лик
+ãĤ¦ ãĤ©
+ãĤ¦ãĤ© ãĥ¼
+ĠÐľ еждÑĥ
+ĠìŀĪ ê²Į
+Ġm â
+ĠìļĶ ì²Ń
+ض ار
+ลุ à¹īà¸Ļ
+ëĮĢ íķĻêµIJ
+×ĸ ×Ļ׼
+×ĸ×Ļ׼ ר×ķף
+ãĤ¹ ãĥļ
+ãĤ¹ãĥļ ãĥ¼ãĤ¹
+ĠкÑĢаÑģ оÑĤ
+ï¼ ¨
+ê¼ Ń
+ãĤĴ éĽĨ
+ãĤĴéĽĨ ãĤģ
+ë° Ŀ
+Ġ×Ķ׳ ×IJ
+Ġ×Ķ׳×IJ ש×Ŀ
+Ġê°Ģ ìļ´
+Ġê°Ģìļ´ ëį°
+تÙĥÙĦ Ù쨩
+ĠØŃ ÙĤÙĬÙĤÙĬ
+Ġh alk
+Ġhalk ın
+ÑİÑī ÑĥÑİ
+ĠÑģп ин
+סר×ĺ ף
+ĠпеÑĢв ого
+Ġпол ож
+Ġполож иÑĤелÑĮн
+Ġд л
+Ġдл иÑĤелÑĮн
+ĠV Ä©nh
+ê´ ´
+ĠÑģÑĭ ÑĢ
+ĠíĨµ íķĺìŬ
+ë³ij ìĽIJ
+à¹Ĥรà¸ĩ à¸ĩาà¸Ļ
+รัà¸ļ à¸ľà¸´à¸Ķ
+รัà¸ļà¸ľà¸´à¸Ķ à¸Ĭà¸Ńà¸ļ
+تج ÙĨب
+s ÅĤ
+sÅĤ uch
+ãĤ¢ãĥ« ãĥIJ
+ãĤ¢ãĥ«ãĥIJ ãĥł
+ëī´ ìĬ¤
+Ġpat ië
+Ġpatië nt
+Ġìĺ ¤í
+Ġìĺ¤í ŀ
+Ġìĺ¤íŀ Ī
+Ġìĺ¤íŀĪ ëł¤
+ĠDer ne
+ĠDerne ÄŁi
+wró ci
+wróci Äĩ
+Ġоб Ñī
+ĠобÑī еÑģÑĤв
+ĠобÑīеÑģÑĤв енно
+ĠêµIJ ìĪĺ
+tıģ ımız
+Ġ×Ķ×ŀש ×Ļ×ij
+k örper
+Ġпозв ол
+Ġпозвол иÑĤ
+ĠChi ến
+أخ ÙĪ
+ĠAy dın
+à¸Ķà¹īาà¸Ļ ล
+à¸Ķà¹īาà¸Ļล à¹Īาà¸ĩ
+Ġdr u
+Ġdru ż
+Ġdruż yn
+Ġë°ľ íijľ
+ĠTh ảo
+جÙĩ اد
+à¸ģระà¸Ĺ ูà¹ī
+Ġк ÑĢов
+ĠкÑĢов и
+Ġiçer ik
+Ġnad zie
+Ġnadzie jÄĻ
+ĠС моÑĤÑĢ
+Ġph ức
+ج تÙħاع
+جتÙħاع ÙĬØ©
+ком пон
+компон енÑĤ
+Ġб ил
+Ġбил еÑĤ
+ãĥIJ ãĥ³ãĥī
+ĠPol ÃŃcia
+اÙĦ تÙĩ
+اÙĦتÙĩ اب
+ØŃر Ùģ
+ت خط
+تخط ÙĬØ·
+ãĤ³ ãĥ¼ãĥ
+ãĤ³ãĥ¼ãĥ Ĵ
+ãĤ³ãĥ¼ãĥĴ ãĥ¼
+・・ ・
+à¸ĭ à¸Ńย
+Ġcréd it
+è²· ãģ£ãģŁ
+ĠпоÑĢ Ñıд
+ĠпоÑĢÑıд ке
+Ġph ó
+Ġw ida
+Ġwida Äĩ
+جر ائÙħ
+à¸ľ ี
+ĠbÄĻd ÄĻ
+Ġ×ŀ פת×Ĺ
+ãĥij ãĥ¼ãĥ
+ãĥijãĥ¼ãĥ Ĩ
+ãĥijãĥ¼ãĥĨ ãĤ£
+ãĥijãĥ¼ãĥĨãĤ£ ãĥ¼
+ĠKa ż
+ĠKaż dy
+ĠнеобÑħодим оÑģÑĤи
+à¸Ł à¸Ńรà¹Į
+à¸Łà¸Ńรà¹Į ม
+Ġмал ÑĭÑĪ
+Ġпл оÑĤ
+ĠÑĥ ÑģÑĤÑĢой
+ĠÑĥÑģÑĤÑĢой ÑģÑĤва
+à¸ĸ à¸Ńà¸Ļ
+ĠoluÅŁtur ul
+ĠÅĽwi ad
+ĠÅĽwiad om
+Ùħع Ùĩد
+ĠпÑĢоиз веден
+Æ ł
+ר ×Ļש
+Ùħست Ø«
+Ùħستث Ùħر
+׳×Ļ ×Ļר
+pa ñ
+Ġ; -)
+Ġë°ľ 견
+Ġgör üyor
+Ùħؤ ÙĦÙģ
+ĠÄIJ á»ģ
+ĠاÙĦÙĨ ÙĪØ§Ø¨
+×Ĺ×§ ×Ļר×Ķ
+Ġm á»ıi
+è¿° ãģ¹
+ÐĿ ик
+ìŀĸ ìķĦ
+ìŀĸìķĦ ìļĶ
+prowadzi ÅĤ
+l óg
+lóg ica
+פס ×ĺ
+פס×ĺ ×Ļ×ij׾
+Ġ×ŀ ×ĵ×Ķ
+Ġ×ŀ×ĵ×Ķ ×Ļ×Ŀ
+ãģĵãģĵ ãģ¾ãģ§
+×Ķ ×ª×Ĺ
+×Ķת׊׾×Ķ
+Ġפ ×ķס
+Ġפ×ķס ×ĺ×Ļ×Ŀ
+Ġн ев
+Ġнев оз
+Ġневоз можно
+ĠdostÄĻp ny
+Ġغ اÙĦ
+ĠغاÙĦ ب
+Ġbez pieczeÅĦst
+ĠbezpieczeÅĦst wa
+åĪĨ ãģĭãĤĭ
+ĠF ührung
+à¸ģ ีà¹ī
+gem Ã¤ÃŁ
+à¸Ĭà¹Īวà¸ĩ à¹Ģวลา
+Ġìļ°ë¦¬ ëĤĺ
+Ġìļ°ë¦¬ëĤĺ ëĿ¼
+ãģ¥ ãģıãĤĬ
+ĠاÙĦÙħ سÙĦ
+ĠاÙĦÙħسÙĦ ØŃØ©
+Ġlibert é
+клÑİÑĩ ение
+Ġzam ów
+Ġzamów ienia
+รà¸ĸ à¹Ħà¸Ł
+Ø£ ÙģÙĦ
+Ø£ÙģÙĦ اÙħ
+Ùħ راج
+Ùħراج عة
+Ġë¹Ħ êµIJ
+ĠاÙĦت اب
+ĠاÙĦتاب عة
+Ġë§Į ëĤĺ
+Ġб Ñĥм
+ĠбÑĥм аг
+Ġgé nero
+Ġìŀĺ 못
+×ŀ פ×ķר×ĺ
+è²·ãģĦ çī©
+ĠÙĦدÙĬ Ùĥ
+Ġ×ľ×¢ ×Ļת
+Ġ×ľ×¢×Ļת ×Ļ×Ŀ
+ĠsÅĤ ab
+ĠпÑĢедÑģÑĤав лÑı
+ãĤ¿ ãĤ¤ãĥĪ
+ãĤ¿ãĤ¤ãĥĪ ãĥ«
+Ùħ ص
+Ùħص Ø·Ùģ
+ÙħصطÙģ Ùī
+Ġdifficult é
+ãĥĨãĤ£ ãĥĸ
+Ġpew noÅĽci
+ĠpewnoÅĽci Äħ
+Ġ무 ìĬ¨
+إ رس
+إرس اÙĦ
+Ġд алÑĮ
+ĠдалÑĮ ÑĪе
+Ġ׾ ×ł×¡
+Ġ×ľ×ł×¡ ×ķת
+หมูà¹Ī à¸ļà¹īาà¸Ļ
+×ŀס×ŀ ׼×Ļ
+أسÙĦ ÙĪØ¨
+Ġzw ÅĤ
+ĠzwÅĤ as
+ĠzwÅĤas zc
+ĠzwÅĤaszc za
+ĠпÑĢ ÐµÐ¶
+ĠпÑĢеж де
+ĠоÑĢганиз аÑĨиÑı
+Ġdön emin
+Ġdönemin de
+Ġ Ủ
+ĠỦ y
+ä¸ĭ ãģĴ
+ĠпоÑģлед ние
+Ġgü ne
+Ġgüne ÅŁ
+Ġ×IJ ×ĸר
+Ġ×IJ×ĸר ×Ĺ×Ļ
+ãģ§ãģĤ ãĤįãģĨ
+ĠÙĨ ÙĤ
+ĠÙĨÙĤ اط
+æŃ£ ãģĹãģĦ
+ĠÑĢ ÐµÐ³
+ĠÑĢег иона
+ĠFör der
+ê²½ ìĺģ
+dıkl ar
+dıklar ını
+trzym aÄĩ
+أش Ùĥ
+أشÙĥ اÙĦ
+×Ķת ×IJ
+×Ķת×IJ ×ŀ×Ķ
+à¸Ĺำà¹ĥหà¹ī à¹Ģà¸ģิà¸Ķ
+ĠGeb ä
+ĠGebä ude
+ĠСеÑĢ Ð³
+ĠСеÑĢг ей
+Ġз доÑĢов
+ĠздоÑĢов ÑĮÑı
+Ġr ãi
+ĠпÑĢед ÑĥÑģ
+ĠпÑĢедÑĥÑģ моÑĤÑĢ
+ĠпÑĢедÑĥÑģмоÑĤÑĢ ÐµÐ½
+Ġ×Ķצ ×Ļ×ij
+Ġ×Ķצ×Ļ×ij ×ķר×Ļ
+Ġdés ir
+Ġн оÑĩ
+ĠноÑĩ ÑĮ
+möglich keiten
+Ġ×IJ×Ĺר ×ķ׳×Ļ×Ŀ
+Ġsoir ée
+ĠNh áºŃn
+Ù ª
+à¸Ľà¸£à¸°à¸§à¸±à¸ķิ ศาสà¸ķรà¹Į
+êµIJ íĨµ
+ĠØ£ Ø®ÙĬ
+Ġdé cid
+Ġdécid é
+Ġwy ja
+Ġwyja ÅĽni
+Ġ สิ
+Ġสิ à¸ĩ
+Ġสิà¸ĩ หา
+Ġสิà¸ĩหา à¸Ħม
+à¹ģ à¸Ńรà¹Į
+หà¸Ļà¹īา à¸Īà¸Ń
+ס תר
+Ġê ¶
+Ġê¶ Į
+Ġê¶Į 리
+pl ätze
+ب Ø·ÙĦ
+ê±´ ìĦ¤
+Ġ×IJ ×Ļ×ŀ×Ļ
+Ġ×IJ×Ļ×ŀ×Ļ ×Ļ׾
+ãģ ½
+تر اث
+×IJ׾ ×Ļ×ŀ×ķת
+Ġdispon ÃŃveis
+Ġz ale
+Ġzale ży
+à¸Ľà¸£à¸°à¸Ĭา สัมà¸ŀัà¸Ļà¸ĺà¹Į
+ĠÅļw iat
+Ġpor ówn
+Ġporówn a
+Ġ׾×ĺ ×ķ×ijת
+×Ķ×ĸ ×ŀ׳×Ķ
+Ġ×Ľ×ª ×ķצ×IJ×Ķ
+Ġ×ij ק׾
+Ġ×ijק׾ ×ķת
+ĠоÑĤ кÑĢ
+ĠоÑĤкÑĢ Ñĭва
+ãĥij ãĥ¯ãĥ¼
+ë¿IJ ë§Į
+Ġв ÑģÑı
+ĠвÑģÑı к
+ãģ¨ãģª ãģ£ãģ¦ãģĦãĤĭ
+Ġgi áºŃn
+Ġок ÑĢÑĥ
+ĠокÑĢÑĥ жа
+ĠокÑĢÑĥжа ÑİÑī
+ĠUnivers ität
+ĠÑĢ Ð¾Ð¶
+ĠÑĢож д
+ĠÑĢожд ениÑı
+Ø® ÙĬÙĦ
+Ġкомпани й
+ĠÑĢазлиÑĩ нÑĭе
+ĠЦ ена
+׳×Ļ ×ķ×ĸ
+׳×Ļ×ķ×ĸ ׾
+׳×Ļ×ķ×ĸ׾ ×ĺר
+Ġê³µ ê°Ħ
+Ġê°ľ ëħIJ
+landır ma
+ĠÑĥдал ен
+à¸ŀัà¸ģ à¸ľ
+à¸ŀัà¸ģà¸ľ à¹Īà¸Ńà¸Ļ
+Ġprote cción
+Ġb ÅĤ
+ĠbÅĤ ÄĻd
+Ã Ī
+Ġíĸī ë³µ
+ĠÅŁ ü
+ĠÅŁÃ¼ phe
+Ġí Ķ
+ĠíĶ ¼
+Ġíͼ íķ´
+Ġëĭ¤ 르
+à¹Ħมà¹Ī à¹Ģà¸ģิà¸Ļ
+ãģ¿ ãģª
+ãģ¿ãģª ãģķãĤĵ
+ĠпоÑĤ ÑĢеб
+ĠпоÑĤÑĢеб иÑĤел
+ĠاÙĦÙĥÙĦ اÙħ
+ìķĦ ë²Ħ
+ìķĦë²Ħ ì§Ģ
+ãĤĴ使 ãģ£ãģŁ
+Ġbụ i
+ĠпоÑĤ еÑĢ
+ĠпоÑĤеÑĢ Ñı
+ĠØ¢ ÙĦاÙģ
+ĠнаÑģÑĤоÑıÑī ее
+ãģıãģªãĤĬ ãģ¾ãģĹãģŁ
+clus ão
+ãĤ³ ãĥĶãĥ¼
+צ פ×Ļ
+צפ×Ļ ×Ļ×Ķ
+Ø® ÙĦا
+Ø®ÙĦا ص
+ล à¹īำ
+ãĥ¯ ãĤ¤ãĥ³
+Ġมี à¸Ļา
+Ġมีà¸Ļา à¸Ħม
+ش خص
+شخص ÙĬات
+Ġ×ĸ ×§
+Ġ×ĸ×§ ×ķ×§
+×Ļ ×Ļצ
+×Ļ×Ļצ ×Ĵ
+èĢĥãģĪ æĸ¹
+Ġürün ü
+ĠиÑģп ол
+ĠиÑģпол ни
+Ġcompañ ero
+×§ צ×Ķ
+×ŀ×¢ ׳×Ļ×§
+Ùħ ØŃÙħد
+Ġc ámara
+Ġп ед
+Ġпед аг
+Ġпедаг ог
+м аÑĢ
+маÑĢ Ðº
+×Ķת ׳×Ĵ×ĵ
+ĠìĨĮ ê°ľ
+Ġcom unitÃł
+ê³ ¤
+ĠNg Ãłi
+สà¸ĩ à¸ļ
+ĠmieszkaÅĦ ców
+ĠÙĨ ÙĩائÙĬ
+iv ité
+Ġи де
+Ġиде алÑĮн
+ĠØ£ سبÙĪØ¹
+Ġ×Ļ ×¢×ľ
+Ġ׾ ר×IJש
+Ġ׾ר×IJש ×ķ׳×Ķ
+ĠзапиÑģ и
+ĠкоÑĢ Ð¿ÑĥÑģ
+วà¸ĩ ศ
+วà¸ĩศ à¹Į
+ĠÐĶ Ð¼
+ĠÐĶм иÑĤ
+ĠÐĶмиÑĤ ÑĢ
+Ġkön nt
+Ġböl ges
+Ġbölges inde
+׼ ×Ļ׼
+׼×Ļ׼ ר
+ĠاÙĦØ¥ Ø«ÙĨ
+ĠاÙĦإثÙĨ ÙĬÙĨ
+Ġng á»Ļ
+ì¹ ł
+د راج
+Ġu da
+Ġuda ÅĤo
+ìº IJ
+بر ÙĨاÙħج
+ĠÑģÑĥд еб
+ĠÑģÑĥдеб н
+Ġzun ächst
+ĠEduc ación
+ãģ¨ãģª ãģ£ãģ¦ãģĦãģ¾ãģĻ
+Ġ×Ķ×IJ ×ŀ×Ļת×Ļ
+Ġİ nt
+Ġİnt ernet
+ĠcaÅĤ ego
+ãĥĹãĥª ãĥ³
+إ بد
+إبد اع
+ĠпоÑĢ ÑĤал
+à¹Ĥà¸ķ à¹ī
+Ġ×Ķ×§ ש×ķר
+пл од
+ĠÙħ د
+ĠÙħد رÙĬد
+×ŀסע ×ĵ×Ķ
+ĠØ´ÙĬ ئ
+ĠØ´ÙĬئ ا
+à¸ģà¹Īà¸Ń สรà¹īาà¸ĩ
+Ġì°¸ ê³ł
+à¹Ģà¸Ĺ ร
+à¹Ģà¸Ĺร à¸Ķ
+Ġ×ij×ŀ קר×Ļ×Ŀ
+Ġb ât
+Ġbât iment
+åij¼ ãģ³
+ç´ł æķµ
+ç´łæķµ ãģª
+przedsiÄĻbior st
+przedsiÄĻbiorst w
+Ġ×ł×ª ×ķ׳×Ļ×Ŀ
+×Ĺ׾ ×ķ×Ŀ
+ร วย
+Ùħ ÙĪØ¶ÙĪØ¹
+ĠÑģоб ÑĢан
+вед ÑĥÑī
+ĠÑĤе аÑĤ
+ĠÑĤеаÑĤ ÑĢ
+m eye
+meye ceÄŁi
+Ġpien iÄħ
+ĠpieniÄħ d
+ĠpieniÄħd ze
+ÑĢез иденÑĤ
+ØŃ صر
+ìĺ ¥
+à¹Ģย ืà¸Ńà¸Ļ
+ĠÑĥ ни
+ĠÑĥни веÑĢ
+ĠÑĥнивеÑĢ Ñģ
+ĠÑĥнивеÑĢÑģ иÑĤеÑĤ
+ĠاÙĦر ØŃ
+ĠاÙĦرØŃ ÙħÙĨ
+ĠÑĤеÑħ нолог
+ĠÑĤеÑħнолог ии
+ìĹIJ ëĦĪ
+ìĹIJëĦĪ ì§Ģ
+Ġíķ Ń
+ĠíķŃ ìĥģ
+à¸ĺ า
+à¸ĺา à¸ķุ
+ĠEspañ ol
+×ĵ×Ĵ ש
+Ġêµ ī
+Ġêµī ìŀ¥
+Ġêµīìŀ¥ íŀĪ
+ĠÅĤ at
+ĠÅĤat wo
+Ġk á»ĭch
+إ ز
+إز اÙĦØ©
+ĠдейÑģÑĤв ие
+ĠsaÄŁ layan
+สุà¸Ķ ยà¸Ńà¸Ķ
+Ġzosta Äĩ
+Ġdispon ÃŃvel
+ïº į
+ver ständ
+verständ lich
+tw or
+twor zyÄĩ
+ع جز
+à¹Ģà¸Ĥ à¹īม
+ยà¹Ī à¸Ńม
+Ġstrat ég
+Ġstratég ie
+à¸ľà¸¥ à¹Ħมà¹ī
+Ġê°ģ ì¢ħ
+ĠÙħ ÙĪØ§
+ĠÙħÙĪØ§ ض
+ĠÙħÙĪØ§Ø¶ ÙĬع
+اØŃ تج
+اØŃتج اج
+Ġ Ấ
+ĠẤ n
+×ŀ ×ŀש׾×Ķ
+ĠÅŁek il
+×ŀ ×Ĺ׾
+×ŀ×Ĺ׾ ×ķת
+Ġ à¸ĺ
+Ġà¸ĺ ัà¸Ļ
+Ġà¸ĺัà¸Ļ วา
+Ġà¸ĺัà¸Ļวา à¸Ħม
+Ġìĭ¤ ìłľ
+Ġìĭ¤ìłľ ë¡ľ
+ì¤ij ìķĻ
+ëįĶ ëĿ¼
+ĠÑĪ Ð¸ÑĢ
+ĠÑĪиÑĢ Ð¾ÐºÐ¾
+Ġsol ución
+วาà¸ĩ à¹ģà¸ľà¸Ļ
+×IJ×ķ×ĺ ×ķ×ŀ
+×IJ×ķ×ĺ×ķ×ŀ ×ĺ×Ļ
+ĠÑĢ ÐµÑģÑĤ
+ĠÑĢеÑģÑĤ оÑĢ
+ĠÑĢеÑģÑĤоÑĢ Ð°Ð½
+ëį ¸
+ÑĤ ÑĢад
+ÑĤÑĢад и
+ÑĤÑĢади ÑĨион
+ÑĤÑĢадиÑĨион н
+มะ à¹Ģรà¹ĩ
+มะà¹Ģรà¹ĩ à¸ĩ
+à¹Ĥ ส
+Ġol masını
+×ŀ×ķס ר
+ĠоÑĤноÑĪ ÐµÐ½Ð¸Ð¸
+Ġê°ĢëĬ¥ ìĦ±
+Ġy uk
+Ġyuk arı
+ìĨ Ķ
+ĠÑģ ÑĦ
+ĠÑģÑĦ еÑĢе
+Ġ×§ ×ķפ
+ãĤ± ãĥ¼ãĤ
+ãĤ±ãĥ¼ãĤ Ń
+âĢķ âĢķ
+ĠاÙĦØ£ ÙĦÙħ
+ĠاÙĦØ£ÙĦÙħ اÙĨÙĬ
+Ả N
+ת×ķ׼ ׳×Ļ×ķת
+ĠÑģÑĥÑīеÑģÑĤв ÑĥеÑĤ
+æĪij ãĢħ
+ĠاÙĦص ادر
+ĠTr á»įng
+Ġа д
+Ġад миниÑģÑĤ
+ĠадминиÑģÑĤ ÑĢа
+ĠадминиÑģÑĤÑĢа ÑĨи
+ĠдÑĢÑĥг ими
+Ñģп еÑĪ
+عÙĦاÙħ ات
+Ġа б
+Ġаб Ñģол
+ĠабÑģол ÑİÑĤ
+ĠабÑģолÑİÑĤ но
+ฤ à¸Ķู
+é tr
+étr anger
+нÑı ÑĤи
+нÑıÑĤи е
+×¢ ×ķ׳
+×¢×ķ׳ ש
+ĠÙĤ ائ
+ĠÙĤائ ÙĦا
+Ġм аÑģ
+ĠмаÑģ ло
+ãĥī ãĤ¤
+ãĥīãĤ¤ ãĥĦ
+å¿ħè¦ģ ãģĮãģĤãĤĬãģ¾ãģĻ
+×ŀ×ķ×ĸ ×Ļ×IJ
+×ŀ×ķ×ĸ×Ļ×IJ ×ķף
+ĠNgo ại
+Ġkê nh
+à¸ģาร à¸Ńà¸Ńà¸ģà¹ģà¸ļà¸ļ
+×ŀ פק
+×ŀפק ×ĵ
+ÙħÙĨ از
+ÙħÙĨاز ÙĦ
+ë· °
+íĹ ¤
+ÙħÙĩ ارات
+Ġpropri été
+פ×Ĵ ×Ļש×Ķ
+Ñĩ ÑĢ
+ÑĩÑĢ ÐµÐ¶
+ÑĩÑĢеж ден
+×Ķ ×ķצ×IJ×Ķ
+ØŃÙĥ ÙĬÙħ
+ĠíĻ Ī
+ĠíĻĪ íİĺìĿ´ì§Ģ
+åİ ³
+åݳ ãģĹãģĦ
+×¢ ×ŀ×ĵ×Ķ
+ĠAu ÃŁen
+سÙĪ Ø¡
+ë¹ Ī
+ĠÙĪ Ø®
+ĠÙĪØ® اصة
+ин ÑĤеÑĢ
+инÑĤеÑĢ ÐµÑģ
+èĩ´ ãģĹãģ¾ãģĻ
+Ġhük üm
+à¹Ħà¸Ĥ มัà¸Ļ
+Ġdav ran
+Ġdavran Ä±ÅŁ
+à¹Ģà¸ķ ียà¸ĩ
+в ÑĢем
+вÑĢем енно
+à¹Ģà¸Ĺศ à¸ģา
+à¹Ģà¸Ĺศà¸ģา ล
+å¼ķ ãģ£
+å¼ķãģ£ è¶ĬãģĹ
+×IJר ×ķ×Ĺ
+×IJר×ķ×Ĺ ×ª
+à¹Ģ วิ
+à¹Ģวิ รà¹Į
+à¸Ńยà¹Īาà¸ĩ รวà¸Ķà¹Ģรà¹ĩว
+ĠìŬ íĸī
+ĠÑĢан ÑĮ
+ĠÑĢанÑĮ ÑĪе
+Ġzob ow
+Ġzobow iÄħ
+ĠzobowiÄħ z
+Ġ×ķ׼ ×ŀ×ķ×ijף
+ĠاÙĦÙħ Ùĩ
+ĠاÙĦÙħÙĩ ÙĨÙĬ
+ãĤ¢ ãĤ¸
+ãĤ¢ãĤ¸ ãĤ¢
+ë°© ìĨ¡
+à¸Ńà¸Ńà¸ģ à¸ģำลัà¸ĩ
+à¸Ńà¸Ńà¸ģà¸ģำลัà¸ĩ à¸ģาย
+am éli
+améli orer
+å½ĵãģŁãĤĬ åīį
+Ġreg elm
+Ġregelm Ã¤ÃŁig
+ãģĬ åĭ
+ãģĬåĭ §
+ãģĬåĭ§ ãĤģ
+Ġm ưá»Ŀi
+بر Ùħج
+ĠNat ürlich
+ĠD Å©ng
+ĠاÙĦر جاÙĦ
+Ġthé p
+Ġol muÅŁtur
+×ŀ×ķס ×Ļ×§×Ķ
+f älle
+주 íĥĿ
+ĠاÙĦÙģ Ø±Øµ
+Ġnaj wiÄĻks
+ĠnajwiÄĻks zy
+Ġça ÄŁ
+ĠçaÄŁ rı
+ì¸ ł
+ĠvÃŃ ct
+ĠvÃŃct ima
+ĠÑģовеÑĢ ÑĪен
+×Ķ×Ļ ×Ļת×Ļ
+à¹Ģà¸Ķ ี
+à¹Ģà¸Ķี à¹ĭ
+à¹Ģà¸Ķีà¹ĭ ยว
+ü yü
+Ġд оп
+Ġдоп олн
+Ġдополн иÑĤелÑĮно
+à¹ģà¸ķà¸ģà¸ķà¹Īาà¸ĩ à¸ģัà¸Ļ
+Ġá l
+Ġál bum
+à¸Ľà¸£à¸°à¸Īำ à¸Ľà¸µ
+ĠÑĦ едеÑĢ
+ĠÑĦедеÑĢ Ð°Ð»ÑĮн
+Ġobs ÅĤ
+ĠobsÅĤ ugi
+à¹Ģร ืà¹Ī
+à¹Ģรืà¹Ī à¸Ńย
+à¹Ģรืà¹Īà¸Ńย à¹Ĩ
+ëģ Į
+Ġngh ìn
+ĠBaÅŁkan lıģı
+تأ سÙĬ
+تأسÙĬ س
+Ġ×ij×ij ×ķקר
+Ġ×¢×ij×ķ×ĵ ×ķת
+Ġبص ÙĪØ±Ø©
+ãĤıãģij ãģ§ãģ¯ãģªãģĦ
+führ er
+ãĤ¹ ãĤŃ
+ãĤ¹ãĤŃ ãĥ«
+ĠاÙĦÙĤ ض
+ĠاÙĦÙĤض ÙĬØ©
+Ġдолж ноÑģÑĤ
+ÙģØ§Ø± ÙĤ
+Ġcomeç ou
+Ġorganis é
+Ġxu ân
+ĠÑģообÑī аеÑĤ
+ĠпÑĢи д
+ĠпÑĢид еÑĤÑģÑı
+TÃľ RK
+ãĥ¬ ãĥ¼ãĤ·ãĥ§ãĥ³
+Kh ông
+است Ùģ
+استÙģ Ø§Ø¯Ø©
+ä¸ĬãģĮ ãģ£ãģ¦
+Ġum ie
+Ġumie jÄĻ
+ĠumiejÄĻ tn
+ĠumiejÄĻtn oÅĽci
+ëĤ ¸
+à¹Ģà¸Ļ à¸Ńรà¹Į
+×ĵ×ķ ×ķ×Ĺ
+ÃŃs imo
+I ÃĬ
+IÃĬ N
+Ġalcan ç
+Ġ à¸ķุ
+Ġà¸ķุ ลา
+Ġà¸ķุลา à¸Ħม
+ש׾ ×ĺ×ķף
+Ġél è
+Ġélè ves
+ĠÄij u
+ĠÄiju á»ķi
+ĠØ£ Ùģ
+ĠØ£Ùģ Ø±ÙĬ
+ĠØ£Ù쨱ÙĬ ÙĤÙĬ
+ĠØ£Ù쨱ÙĬÙĤÙĬ ا
+ãĤĴæİ¢ ãģĻ
+ĠпÑĢед ложениÑı
+ج اد
+ĠÑħоÑĤ ÑĮ
+Ñģ ал
+Ñģал он
+à¸Ľà¸£à¸° à¹Ģม
+à¸Ľà¸£à¸°à¹Ģม ิà¸Ļ
+ãĤŃ ãĥĥãĥģ
+ãĤŃãĥĥãĥģ ãĥ³
+×ij×ĵ×Ļ×§ ×ķת
+Ġch ù
+Ġchù a
+ÐĴ иде
+ÐĴиде о
+иÑĢов ка
+ĠÑħоÑĤ иÑĤе
+Ġspéc ifique
+รส à¸Ĭาà¸ķิ
+è¾¼ ãĤĵãģł
+伸 ãģ³
+×Ķצ׾ ×Ĺת
+ãģ©ãģ® ãĤĪãģĨãģ«
+سع ادة
+Ġл ид
+Ġлид еÑĢ
+ม à¸ĩ
+มà¸ĩ à¸Ħล
+ØŃ اÙħÙĦ
+หล ุà¸Ķ
+à¸Ńยà¹Īาà¸ĩ à¸ķà¹Īà¸Ń
+à¸Ńยà¹Īาà¸ĩà¸ķà¹Īà¸Ń à¹Ģà¸Ļืà¹Īà¸Ńà¸ĩ
+ãģķãģĽãģ¦ éłĤ
+تس ÙĪÙĬ
+تسÙĪÙĬ ÙĤ
+ĠaÅŁaģı d
+ĠaÅŁaģıd aki
+ĠÑĨ елÑĮ
+ĠÑĨелÑĮ Ñİ
+ĠAra ÅŁtırma
+à¸Ĥัà¸ļ รà¸ĸ
+Ùĩ ذÙĩ
+ลà¸ĩ à¸Ĺะ
+ลà¸ĩà¸Ĺะ à¹Ģà¸ļ
+ลà¸ĩà¸Ĺะà¹Ģà¸ļ ียà¸Ļ
+تÙĥ اÙħÙĦ
+Ġc io
+Ġcio è
+ãģ¦ ãģĬãģı
+ĠاÙĦصØŃ ÙģÙĬ
+ĠíĬ¹ ìłķ
+полн иÑĤÑĮ
+ãĤĵ ãģĺãĤĥãģªãģĦ
+ãĤĵãģĺãĤĥãģªãģĦ ãģĭ
+ĠاÙĦج Ùĩ
+ĠاÙĦجÙĩ ات
+ĠÑĥÑģпеÑĪ Ð½Ð¾
+Ġв ок
+Ġвок ÑĢÑĥг
+ĠÑģиÑĤÑĥ аÑĨиÑı
+Ġ×Ķ×IJ ×ŀר
+Ġ×Ķ×IJ×ŀר ×Ļ×§
+Ġ×Ķ×IJ×ŀר×Ļ×§ ×IJ×Ļ
+×ŀ ×Ĵ×ĸ
+×ŀ×Ĵ×ĸ ×Ļף
+Ġак ÑĤÑĥ
+ĠакÑĤÑĥ алÑĮн
+é ta
+éta is
+Ġmog ÅĤa
+ĠÑĤоÑĩ ки
+Ġ×ŀ×Ķ ×ŀ×¢
+Ġ×ŀ×Ķ×ŀ×¢ ×¨×Ľ×ª
+มี à¸Ľà¸£à¸°à¸ªà¸´à¸Ĺà¸ĺà¸´à¸łà¸²à¸ŀ
+×Ļר ×Ļ×ĵ×Ķ
+×Ĵר ×ŀ׳
+×Ĵר×ŀ׳ ×Ļ×Ķ
+Ġг лав
+Ġглав ное
+Ġ미 ëŀĺ
+Ġ׳׼ ×ķ׳×Ķ
+ĠÙĪ Ø·ÙĨÙĬ
+op port
+opport unitÃł
+Ġh á»§y
+ĠÙĦ تØŃ
+ĠÙĦتØŃ ÙĤÙĬÙĤ
+Ġó rg
+Ġórg ão
+ãĤ¹ ãĥĶ
+ãĤ¹ãĥĶ ãĥ¼ãĥī
+Ġön ü
+Ġönü ne
+Ùħع اÙħÙĦ
+ש×ŀ ×Ļר×Ķ
+ĠвеÑģÑĮ ма
+ĠwiÄĻks zo
+ĠwiÄĻkszo ÅĽÄĩ
+Ġاست راتÙĬج
+ĠاستراتÙĬج ÙĬØ©
+ĠÙģ Ø¥
+ĠÙ쨥 ذا
+à¹Ģà¸Ĭืà¹Īà¸Ń ม
+à¹Ģà¸Ĭืà¹Īà¸Ńม à¸ķà¹Īà¸Ń
+Ġ׾ פר
+Ġ׾פר ×ĺ×Ļ×Ŀ
+Ùħض ÙĬ
+ĠGer çek
+Ġçocuk ların
+ÙĪØ« ائÙĤ
+ĠÙħساء Ùĭ
+Ġunterstüt zt
+Ġpré st
+Ġprést amo
+ĠÐłÐ°Ð· меÑĢ
+ĠÅŁ eker
+Ġsé culo
+×ij×Ķ ×Ļר
+Ø´Ùĩ ÙĪØ±
+Ġ à¸Ńีà¸ģ
+Ġà¸Ńีà¸ģ à¸Ĺัà¹īà¸ĩ
+Ġlleg ó
+à¸¨à¸´à¸¥à¸Ľ ะ
+æĪij ãģĮ
+æĪijãģĮ å®¶
+ع ÙĤÙĪ
+عÙĤÙĪ Ø¨Ø§Øª
+ĠF älle
+Ġs ÅĤuż
+ĠsÅĤuż b
+ĠاÙĦØŃÙĤ ÙĪÙĤ
+Ġпл иÑĤ
+Ġи ноÑģÑĤ
+ĠиноÑģÑĤ ÑĢан
+ĠиноÑģÑĤÑĢан н
+à¹ĥà¸Ļ à¸Ĥà¸ĵะà¸Ĺีà¹Ī
+ãĤ« ãĥĨ
+ãĤ«ãĥĨ ãĤ´
+ãĤ«ãĥĨãĤ´ ãĥª
+à¸Ńิ ส
+à¸Ńิส ระ
+à¹Ģà¸ľà¸¢ à¹ģ
+à¹Ģà¸ľà¸¢à¹ģ à¸ŀร
+à¹Ģà¸ľà¸¢à¹ģà¸ŀร à¹Ī
+ãģĬ ãģĦ
+ãģĬãģĦ ãģĹãģĦ
+است ÙĤÙĦ
+استÙĤÙĦ اÙĦ
+تØŃ ض
+تØŃض ÙĬر
+åĬ© ãģij
+Ùħر اÙģÙĤ
+Ġ×ĵ ×ķר
+Ġ×ĵ×ķר ש
+×ŀת×Ļ ×Ļ×Ĺס
+ס ×Ļ׼
+ס×Ļ׼ ×ķ×Ŀ
+íĮĮ íĬ¸
+Ġwy ÅĽ
+ĠwyÅĽ w
+ĠwyÅĽw iet
+ĠwyÅĽwiet l
+ĠاÙĦاÙĨ ساÙĨ
+ĠStra ÃŁen
+ï¼ ¬
+ãģ« åŁº
+ãģ«åŁº ãģ¥
+Ġcap ÃŃtulo
+ลุ ย
+Ġ×Ķ×ŀ×§ צ×ķ×¢×Ļ
+ãģĤãĤĭ ç¨ĭ度
+á» ¢
+ĠاÙĦ ÙĦا
+ĠاÙĦÙĦا زÙħØ©
+æķĻ ãģĪ
+Ġרש ×IJ×Ļ
+з ав
+зав иÑģ
+завиÑģ им
+à¸Ľà¸±à¸Ī à¸Īัย
+à¹Ģà¸ĭ ล
+à¹Ģà¸ĭล ลà¹Į
+Ġdiffé rence
+ĠAlt ın
+Ġк ÑĢай
+ĠкÑĢай не
+Ġз ло
+Ġgün ümüz
+Ġн аÑĤÑĥÑĢ
+ĠнаÑĤÑĥÑĢ Ð°Ð»ÑĮн
+×Ĵ×ķ׾ ש×Ļ×Ŀ
+Ġк аÑĤегоÑĢ
+ĠкаÑĤегоÑĢ Ð¸Ð¸
+Ġз нак
+à¸ģà¹Īà¸Ńà¸Ļ หà¸Ļà¹īา
+à¸ģà¹Īà¸Ńà¸Ļหà¸Ļà¹īา à¸Ļีà¹ī
+ĠÙħÙĨ ت
+ĠÙħÙĨت خب
+ãĥĽ ãĥ¼ãĥ«
+Ġе вÑĢо
+ส ว
+สว ม
+ĠìľĦ ìĽIJ
+ĠìľĦìĽIJ ëĭĺ
+ĠاÙĦØŃ ÙĪØ«
+ĠاÙĦØŃÙĪØ« ÙĬ
+ĠÑģодеÑĢж иÑĤ
+ãĥķãĤ¡ ãĥĥãĤ·ãĥ§ãĥ³
+Ġ à¸ģัà¸Ļ
+Ġà¸ģัà¸Ļ ย
+Ġà¸ģัà¸Ļย ายà¸Ļ
+ãĤª ãĥª
+ãĤªãĥª ãĤ¸
+ãĤªãĥªãĤ¸ ãĥĬãĥ«
+Ġб ÑĢенд
+ãĤĴæĮģ ãģ£ãģ¦ãģĦãĤĭ
+Ġinvers ión
+Ġê° ĸ
+Ġê°ĸ ê³ł
+Ġnov itÃł
+ê´Ģ ê´ij
+Ġà¸ŀ ฤษ
+Ġà¸ŀฤษ à¸łà¸²
+Ġà¸ŀà¸¤à¸©à¸łà¸² à¸Ħม
+×ķר ×Ĺ×Ļ×Ŀ
+׼׾ ×ķ׾
+Ġng ạc
+×Ļ ×Ļש
+×Ļ×Ļש ×ķ×ij
+f äll
+fäll ig
+ĠÑĤÑĢеб ÑĥеÑĤÑģÑı
+Ġcar á
+Ġcará cter
+Ġprinc ÃŃpio
+ĠÅĤ az
+ĠÅĤaz ien
+ĠÅĤazien k
+Ġgi ãn
+ÑģÑĤÑĢа ива
+Ùħس اب
+Ùħساب ÙĤØ©
+à¹Ģà¸Ħรืà¹Īà¸Ńà¸ĩ à¸Ķืà¹Īม
+ترÙĥ ÙĬب
+vol ução
+ĠÐŁ оÑĩ
+ĠÐŁÐ¾Ñĩ ем
+ĠÐŁÐ¾Ñĩем Ñĥ
+казал оÑģÑĮ
+ĠпÑĢимен ениÑı
+à¹Ģà¸Ĺ ียม
+íĮ Ķ
+à¸Ĥà¹īà¸Ń à¹Ģสà¸Ļà¸Ń
+à¸Ľà¸±à¸į à¸įา
+Ġоб ÑĥÑĩ
+ĠобÑĥÑĩ ениÑı
+ĠÑģеÑĢ Ð¸
+ĠÑģеÑĢи ал
+Ġingl és
+ĠÙĦ Ùĥرة
+Ġ×ĺ ׾
+Ġ×ĺ׾ פ×ķף
+Ġìł ij
+Ġìłij ê·¼
+×IJ ×ķ×Ĵ
+×IJ×ķ×Ĵ ×ķס
+×IJ×ķ×Ĵ×ķס ×ĺ
+ĠболÑĮÑĪ Ð¾Ðµ
+ĠÐļон еÑĩно
+×¢×Ļת ×ķ׳
+×¢×Ļת×ķ׳ ×IJ×Ļ
+Ġкноп к
+Ġз н
+Ġзн аÑĤÑĮ
+ĠÄij á»±
+ĠÄijá»± ng
+вл аж
+влаж н
+×ŀ ×Ļ×ĺ×ij
+ãĤ¬ ãĤ¤
+ãĤ¬ãĤ¤ ãĥī
+........ ..
+Ġà¸ģ ุม
+Ġà¸ģุม à¸łà¸²à¸ŀ
+Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀ ัà¸Ļ
+Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀัà¸Ļ à¸ĺ
+Ġà¸ģà¸¸à¸¡à¸łà¸²à¸ŀัà¸Ļà¸ĺ à¹Į
+be z
+bez pieczeÅĦst
+bezpieczeÅĦst w
+ãĥijãĥij æ´»
+ع اط
+عاط Ùģ
+ĠÄij áºŃm
+Ġз ÑĢ
+ĠзÑĢ ÐµÐ½Ð¸Ñı
+Ġbor ç
+Ġнед ел
+Ġнедел Ñİ
+Ġh á»ı
+Ġhá»ı ng
+ìŀ¥ ìķł
+ìŀ¥ìķł ìĿ¸
+ĠاÙĦع ÙĦاÙĤØ©
+Ġíģ ¬
+Ġíģ¬ ê²Į
+à¹Ħร à¹Ī
+à¸ļา à¸Ķ
+à¸ļาà¸Ķ à¹Ģà¸Īà¹ĩà¸ļ
+à¸Ŀ รั
+à¸Ŀรั à¹Īà¸ĩ
+à¸Ŀรัà¹Īà¸ĩ à¹Ģศ
+à¸Ŀรัà¹Īà¸ĩà¹Ģศ ส
+ר ×¢×Ļ
+רע×Ļ ×ķ׳×ķת
+Ġë Į
+ĠëĮ ĵ
+ĠëĮĵ ê¸Ģ
+Ġnaj b
+Ġnajb li
+Ġnajbli ż
+Ġnajbliż sz
+ĠиÑģполÑĮз ÑĥеÑĤÑģÑı
+Ġcient ÃŃf
+ĠcientÃŃf ico
+×¢ ×ŀ×§
+Ġg ợi
+Ø´ ØŃÙĨ
+ĠÅĽ m
+ĠÅĽm ier
+ĠÅĽmier ci
+à¸Ħาสิà¹Ĥà¸Ļ à¸Ńà¸Ńà¸Ļà¹Ħลà¸Ļà¹Į
+×Ĺש×ij ת×Ļ
+Ġn ingu
+Ġningu ém
+è¾¼ ãĤģ
+ãģ ·
+ĠÑĥ г
+ĠÑĥг ол
+ï½ °
+פת ×Ļ×Ĺ
+פת×Ļ×Ĺ ×ª
+Ġ×Ķר×IJש ×ķ׳×Ļ×Ŀ
+p ósito
+ãĤŃ ãĥ¬ãĤ¤
+ãģ© ãģĵãĤį
+à¹Ģà¸Ĺà¹Īา à¹Ħ
+à¹Ģà¸Ĺà¹Īาà¹Ħ หร
+à¹Ģà¸Ĺà¹Īาà¹Ħหร à¹Ī
+ĠинÑĤеÑĢ ÑĮеÑĢ
+ĠØŃ اج
+ĠØŃاج Ø©
+สี à¸Ĥาว
+ìĸ ¼
+Ġn á»Ļ
+Ġná»Ļ p
+ĠÃŃ nd
+ĠÃŃnd ice
+สำ รวà¸Ī
+Ġкажд ой
+Ġhot éis
+Ġnast ÄĻ
+ĠnastÄĻ pn
+Ġ×Ķ×§ ×ķ×ĵ
+Ġ×Ķ×§×ķ×ĵ ×Ŀ
+פ ×ķפ
+פ×ķפ ×ķ׾
+פ×ķפ×ķ׾ ר×Ļ
+вÑĪ ÐµÐ¹
+ãĤ·ãĥ³ ãĥĹ
+ãĤ·ãĥ³ãĥĹ ãĥ«
+ĠzdjÄĻ Äĩ
+ĠгÑĢÑĥпп а
+Ġпом еÑī
+ĠпомеÑī ениÑı
+ãģ©ãģĨ ãģĦãģĨ
+ĠиÑģп ÑĭÑĤа
+Ġog ÅĤ
+ĠogÅĤ os
+ĠogÅĤos zen
+ĠogÅĤoszen i
+สรà¹īาà¸ĩ สรร
+สรà¹īาà¸ĩสรร à¸Ħà¹Į
+à¸ŀร รà¸ĵ
+Ġçık Ä±ÅŁ
+ĠÑĩаÑģÑĤ ноÑģÑĤи
+Ġ×ķ ×Ļ×ķתר
+ç¶ļãģį ãĤĴ
+ç¶ļãģįãĤĴ èªŃ
+ç¶ļãģįãĤĴèªŃ ãĤĢ
+à¸ģร ั
+à¸ģรั ม
+г ÑĢаÑĦ
+Ġв лад
+Ġвлад елÑĮ
+ĠвладелÑĮ ÑĨ
+Ġistedi ÄŁ
+ĠistediÄŁ iniz
+×ij׾ ×¢
+×ij×ľ×¢ ×ĵ×Ļ
+ÙħÙĪ Ø§Ùģ
+ÙħÙĪØ§Ùģ ÙĤØ©
+Ġ×Ļ ×ķר
+Ġ×Ļ×ķר ×§
+ãĤ«ãĥ¼ãĥī ãĥŃãĥ¼ãĥ³
+ĠاÙĦÙħØ´ ÙĥÙĦ
+ĠاÙĦÙħØ´ÙĥÙĦ Ø©
+ĠêµŃ íļĮ
+ס פ×ĺ
+ספ×ĺ ×ŀ
+ספ×ĺ×ŀ ×ijר
+Ġìĸ´ ëłµ
+Ùĥ اÙħ
+ÙĥاÙħ ÙĬرا
+sch lü
+schlü sse
+ĠØ« ÙĨ
+ĠØ«ÙĨ ائÙĬ
+ìī ½
+ĠÐŀ Ñģоб
+ĠÐŀÑģоб енно
+Ġин веÑģÑĤи
+ĠинвеÑģÑĤи ÑĨи
+اØŃ تÙħ
+اØŃتÙħ اÙĦ
+E Äŀ
+EÄŀ İ
+íķĺ ê²łëĭ¤
+Ġ×IJ ×ijר×Ķ
+Ġ×IJ×ijר×Ķ ×Ŀ
+Ġ×ij×Ĺ ×Ļ׳×Ŀ
+Ø£ ÙĪØ¶
+Ø£ÙĪØ¶ اع
+Ġdé l
+Ġdél ai
+Ġ×IJ×ķ×Ķ ×ij×Ļ×Ŀ
+ĠÑģо Ñħ
+ĠÑģоÑħ ÑĢ
+ĠÑģоÑħÑĢ Ð°Ð½Ð¸
+ĠдоÑģÑĤ иж
+ĠдоÑģÑĤиж ени
+สิà¹Īà¸ĩ à¹ģ
+สิà¹Īà¸ĩà¹ģ วà¸Ķ
+สิà¹Īà¸ĩà¹ģวà¸Ķ ล
+สิà¹Īà¸ĩà¹ģวà¸Ķล à¹īà¸Ńม
+ĠاÙĦÙħ باشر
+ĠÑĦ иг
+ĠÑĦиг ÑĥÑĢ
+мож ем
+׾×ŀ×Ļ×ĵ ×Ķ
+Ġcin é
+Ġciné ma
+Ġb ada
+Ġbada ÅĦ
+جب ÙĩØ©
+Ġд еп
+Ġдеп ÑĥÑĤ
+ĠдепÑĥÑĤ аÑĤ
+Ġdist ância
+ĠاÙĦÙħ عار
+ĠاÙĦÙħعار ضة
+thè se
+ü nc
+ünc ü
+Ġдан ного
+ĠBel gi
+ĠBelgi ë
+Ġ×ij ×ij×§
+Ġ×ij×ij×§ ש×Ķ
+ย à¹Īาà¸Ļ
+Ġsol ução
+Ġ×Ķצ ×ĺר
+Ġ×Ķצ×ĺר פ×ķ
+ĠØ£ÙĨ ØŃ
+ĠØ£ÙĨØŃ اء
+Ġد ÙħØ´
+ĠدÙħØ´ ÙĤ
+มั à¹ī
+มัà¹ī ย
+Ùħ غرب
+است عÙħاÙĦ
+ĠS ÅĤow
+ĠëıĻ ìĭľ
+ĠëıĻìĭľ ìĹIJ
+ĠÑģ оÑģ
+ĠÑģоÑģ ед
+ì²Ń ìĨĮ
+ì²ŃìĨĮ ëħĦ
+Ġг ÑĢаÑĦ
+ĠгÑĢаÑĦ ик
+Ġìŀij ìĿĢ
+Ġyet i
+Ġyeti ÅŁtir
+ĠìĿ´ê²ĥ ìĿ´
+ห à¹Īาà¸ĩ
+Ø¥ ÙħÙĥاÙĨ
+Ø¥ÙħÙĥاÙĨ ÙĬØ©
+است عراض
+ÙħØ® در
+ĠÑĩ ÑĥÑĤÑĮ
+Ùħ دÙĬر
+ÙħدÙĬر ÙĬØ©
+Ġà¹Ģม ษ
+Ġà¹Ģมษ ายà¸Ļ
+Ġм еÑħ
+ĠмеÑħ аниз
+ĠмеÑħаниз м
+ĠÑģ Ñĥм
+ĠÑģÑĥм мÑĥ
+Ġv ö
+Ġvö ll
+Ġvöll ig
+Ġд ÑĢÑĥз
+ĠдÑĢÑĥз ÑĮÑı
+ãĤĴåĪ©ç͍ ãģĹãģ¦
+à¸ļรร à¸Īุ
+po życz
+×ŀש ׼
+×ŀש׼ ×ł×ª
+×ŀ×©×Ľ×ł×ª ×IJ
+Ġeuropé en
+Ġpropri é
+Ġproprié taire
+Ġkh ấu
+ãģĦãģŁãģł ãģijãĤĭ
+Ġtec rü
+Ġtecrü be
+×Ķ ×ij
+×Ķ×ij ׳×Ķ
+Ġcu Ì
+ĠcuÌ ī
+ĠcuÌī a
+×IJ ×ķ×ķ
+×IJ×ķ×ķ ×Ļר×Ķ
+Ġ׼×ķ׾ ×ķ
+U lus
+Ulus lararası
+Ġ׳ ×ķת
+Ġ׳×ķת ף
+ãģ« åIJij
+ãģ«åIJij ãģijãģ¦
+ë¹ Ľ
+à¸Ĺ ัà¸ģษ
+à¸Ĺัà¸ģษ ะ
+س ÙĤÙĪ
+سÙĤÙĪ Ø·
+Ġв н
+Ġвн еÑĪ
+ĠвнеÑĪ Ð½Ðµ
+Ġur z
+Ġurz ÄĻd
+Ġá mb
+Ġámb ito
+à¸Ń à¸ĺิ
+à¸Ńà¸ĺิ à¸ļาย
+Ġ ÅĤad
+ĠÅĤad n
+ê±´ ì¶ķ
+wód zt
+wództ w
+Ġquest ões
+Ġש ×§
+Ġשק ×Ļ×ij׾
+Ġmiejsc owoÅĽci
+Ġв ал
+Ġвал ÑİÑĤ
+hä user
+หà¸Ļ à¸Ńà¸ĩ
+ãģ¨ åħ±
+ãģ¨åħ± ãģ«
+ãĥı ãĥ¼ãĥī
+Ġê°ľ ìµľ
+ĠоÑģнов ном
+Ġм ÑıÑģ
+اع ت
+اعت ÙĤاÙĦ
+สà¸ĸ ิ
+สà¸ĸิ à¸ķิ
+N gu
+Ngu á»ĵn
+ĠÙħ جÙĦ
+ĠÙħجÙĦ Ø©
+à¹ģà¸Ĥ à¸Ļ
+ĠاÙĦÙĦÙĬ بÙĬ
+פע×Ļ׾ ×ķ×Ļ×ķת
+Ġ×Ķר פ×ķ×IJ×Ļ
+פר ×ķפ
+פר×ķפ ×Ļ׾
+×§ ׾×IJ
+ק׾×IJ ס×Ļ
+Ùĥت Ø´Ùģ
+ãģ«ãģª ãģ£ãģ¦ãģĹãģ¾ãģĨ
+à¹Ģà¸Ħล à¹ĩà¸Ķ
+à¹Ģà¸Ħลà¹ĩà¸Ķ ลัà¸ļ
+Ġì» ´
+Ġì»´ íĵ¨
+Ġì»´íĵ¨ íĦ°
+Ġ×Ĺ×Ļ ×ķ×ij×Ļ
+Ġnä m
+Ġnäm lich
+åij¼ ãģ°
+åij¼ãģ° ãĤĮ
+ĠÑĢ Ð¾Ð»
+ĠÑĢол и
+Ġspécial isé
+à¸Ļ วัà¸ķ
+à¸Ļวัà¸ķ à¸ģรรม
+ÙĨص ÙĪØµ
+пеÑĢ ÐµÐ´
+пеÑĢед аÑĩ
+thè que
+Ġר×IJ ×Ļת×Ļ
+ãĥĢ ãĤ¦ãĥ³
+ãĤı ãģĭ
+ãĤıãģĭ ãģ£ãģ¦
+беÑĢ ÐµÐ¶
+ĠÑģ ек
+ĠÑģек ÑĢ
+ĠÑģекÑĢ ÐµÑĤ
+ĠпоÑģÑĤоÑıн н
+à¸Ĥà¸Ļ สà¹Īà¸ĩ
+Ġm ük
+Ġmük em
+Ġmükem mel
+еÑĤ еÑģÑĮ
+ĠاÙĦسÙĨ ÙĪØ§Øª
+ĠìłĦ íĺĢ
+Ġ×Ķ×ŀ×§ ×ķר×Ļ
+Ġmü d
+Ġmüd ah
+Ġmüdah ale
+Ġwy b
+Ġwyb ór
+Ġtend ência
+إ دار
+إدار ÙĬØ©
+Ġunterstüt zen
+ת ×ijר
+ת×ijר ר
+Ġdi á
+Ġdiá logo
+ĠÃĸ nce
+ĠÃĸnce ki
+ãĤ¹ãĥĿ ãĥĥãĥĪ
+ëĦ £
+ĠG eli
+ĠGeli ÅŁ
+ãĤĴ éĢļ
+ãĤĴéĢļ ãģĹãģ¦
+ĠFuÃŁ ball
+Ġsal ari
+Ġsalari é
+ĠпÑĢодÑĥк ÑĤов
+صÙģ ÙĤØ©
+รว à¸ļ
+รวà¸ļ รวม
+à¹ĥà¸Ļ à¸IJาà¸Ļ
+à¹ĥà¸Ļà¸IJาà¸Ļ ะ
+Ġkay na
+Ġkayna ģı
+Ġìŀij íĴĪ
+ĠвÑĭ ÑĢаж
+ĠвÑĭÑĢаж ен
+ĠÑģÑĤ еп
+ĠÑģÑĤеп ени
+ĠاÙĦÙħ ÙĪØ¬ÙĪØ¯
+ĠاÙĦÙħÙĪØ¬ÙĪØ¯ Ø©
+ล à¹īม
+Ġnaj czÄĻ
+ĠnajczÄĻ ÅĽcie
+ĠnajczÄĻÅĽcie j
+Ġz wy
+Ġzwy k
+Ġzwyk ÅĤ
+Ġê·¸ëłĩ ì§Ģ
+à¸ģระ à¸Ī
+à¸ģระà¸Ī าย
+Ġëĭ µ
+Ġëĭµ ë³Ģ
+ĠÑĢе ак
+ĠÑĢеак ÑĨи
+ĠÅĽwie ż
+ĠÑģÑĤоим оÑģÑĤи
+ÙħÙĨ اÙĤ
+ÙħÙĨاÙĤ Ø´
+ÙħÙĨاÙĤØ´ Ø©
+ĠÑħоÑĩ Ñĥ
+ãĥľ ãĥ¼ãĥī
+Ġróż nic
+Ġк ÑĢÑĭ
+ĠкÑĢÑĭ ÑĪ
+âľ ĵ
+ãĤ³ãĥ³ ãĥĨãĥ³
+ãĤ³ãĥ³ãĥĨãĥ³ ãĥĦ
+ĠпÑĢед поÑĩ
+×ŀר ×ij×Ļת
+ĠØ´ Ùĥ
+ĠØ´Ùĥ را
+Ġд ал
+Ġдал ек
+Ġдалек о
+بر ÙĬØ·
+برÙĬØ· اÙĨÙĬا
+ع ÙĨا
+عÙĨا ÙĬØ©
+ĠÑĢаÑģÑģ каз
+ĠÑĢаÑģÑģказ Ñĭва
+Ø£ ÙĦÙĪ
+Ø£ÙĦÙĪ Ø§ÙĨ
+æĮģ ãģ£ãģ¦
+æĮģãģ£ãģ¦ ãģĦ
+Ùħباد ئ
+×Ķ ×¢×ijר
+×Ķ×¢×ijר ת
+Ġyay ı
+Ġyayı ml
+Ġyayıml a
+m át
+mát icos
+à¸ģ ัà¸ĩ
+à¸ģัà¸ĩ วล
+Ġ׾ פת
+Ġ×ľ×¤×ª ×ķ×Ĺ
+à¸ŀฤ à¸ķิ
+à¸ŀฤà¸ķิ à¸ģรรม
+í Ĥ¬
+Ġок ÑĢÑĥг
+Ġ×ŀצ ×ķ×ķ×Ķ
+ÐĽ ени
+ÐĽÐµÐ½Ð¸ н
+ĠTri á»ģu
+ãĤ³ãĥŁ ãĥ¥
+ãĤ³ãĥŁãĥ¥ ãĥĭ
+ãĤ³ãĥŁãĥ¥ãĥĭ ãĤ±
+ãĤ³ãĥŁãĥ¥ãĥĭãĤ± ãĥ¼ãĤ·ãĥ§ãĥ³
+Ùĥ ÙĨÙĬ
+ÙĥÙĨÙĬ سة
+ãĤĴ ä¸Ńå¿ĥ
+ãĤĴä¸Ńå¿ĥ ãģ«
+ĠmiÄĻd z
+ĠmiÄĻdz yn
+ĠmiÄĻdzyn ar
+ĠmiÄĻdzynar od
+ĠmiÄĻdzynarod ow
+ÙĦ ÙĨ
+ÙĦÙĨ دا
+بر ش
+برش ÙĦÙĪÙĨ
+برشÙĦÙĪÙĨ Ø©
+à¸ģระ à¸ķุ
+à¸ģระà¸ķุ à¹īà¸Ļ
+Ġg ı
+Ġgı da
+à¸Ľà¸£à¸° à¸Ĺัà¸ļ
+à¸Ľà¸£à¸°à¸Ĺัà¸ļ à¹ĥà¸Ī
+Ġë¶Ī 구
+Ġë¶Ī구 íķĺê³ł
+ĠÙĨ Ø·
+ĠÙĨØ· اÙĤ
+ĠÐľ ожеÑĤ
+Pr äs
+Präs ident
+ĠÑģк оÑĢ
+ĠÑģкоÑĢ Ð¾ÑģÑĤÑĮ
+Ġ×Ķ×ij ×ķקר
+еÑħ аÑĤÑĮ
+Ġg ạo
+Ġש×IJ ×Ļ׳×Ŀ
+Ġ×ij׳ ×ķ×Ĵ
+Ġ×ij׳×ķ×Ĵ ×¢
+Ġо пиÑģание
+Ġucz ni
+Ġuczni ów
+à¹Ģà¸Ń à¹ĩà¸Ļ
+Ġت Ø´
+Ġتش رÙĬÙĨ
+Ġnh ãn
+ë¹ ¨
+Ġcaract ère
+×¢ ׾×Ļ
+×¢×ľ×Ļ ×Ļ×Ķ
+楽ãģĹ ãĤģãĤĭ
+ĠÑģ аÑħ
+ĠÑģаÑħ аÑĢ
+дÑĥм аÑĤÑĮ
+ĠÐĴоз можно
+ص ÙĬاÙĨ
+صÙĬاÙĨ Ø©
+öm ür
+ส ล
+สล à¹ĩ
+สลà¹ĩ à¸Ń
+สลà¹ĩà¸Ń à¸ķ
+ë¡ ¯
+Ġth ói
+gr Ã¶ÃŁe
+Ġksi ÄĻ
+ĠksiÄĻ g
+ĠÑĢ Ð¾Ð¼
+ĠÑĢом ан
+ÙĤ اسÙħ
+×ŀ×ij ×ķ×Ĵ
+×ŀ×ij×ķ×Ĵ ר×Ļ×Ŀ
+bes ch
+besch äft
+beschäft ig
+×Ķצע ×Ķ
+ĠÃģ rea
+ĠзаÑıв к
+Ä ¹
+ĠлÑİб ого
+Ġ ม
+Ġม à¸ģร
+Ġมà¸ģร าà¸Ħม
+ÑĦ из
+ÑĦиз иÑĩеÑģк
+ин ÑĦ
+инÑĦ ек
+инÑĦек ÑĨи
+اÙĦ Ø·
+اÙĦØ· ائÙģ
+Ġкол л
+Ġколл екÑĤив
+ез жа
+Ġس بØŃ
+ĠسبØŃ اÙĨ
+ĠسبØŃاÙĨ Ùĩ
+sch lä
+schlä ge
+Ġд и
+Ġди аг
+Ġдиаг ноÑģÑĤ
+ĠоÑĤмеÑĤ иÑĤÑĮ
+Т Ь
+ĠاÙĦ در
+ĠاÙĦدر اسÙĬ
+עצ ×ŀ
+עצ×ŀ ×IJ×ķת
+Ġdém arch
+Ġdémarch e
+Ġ×ĺ ×ķ×¢
+Ġ×ĺ×ķ×¢ ף
+Ġfuncion ários
+á» µ
+׾ ׼×IJ
+׾׼×IJ ×ķר×Ķ
+à¸ĭ à¹Ī
+à¸ĭà¹Ī à¸Ńม
+ĠÑĩ Ñĥв
+ĠÑĩÑĥв ÑģÑĤво
+âĸ ¼
+п ÑĥÑī
+пÑĥÑī ен
+Ġм еÑĢ
+ĠмеÑĢ Ð¾Ð¿
+ĠмеÑĢоп ÑĢи
+ĠмеÑĢопÑĢи ÑıÑĤиÑı
+Ġu çu
+Ġuçu ÅŁ
+ãĤĴåĪ©ç͍ ãģĻãĤĭ
+a ÄŁ
+aģ lı
+ìĺĪ ìĪł
+à¹ģ ยà¹Ī
+ĠاÙĦÙĥ Ùħ
+ĠاÙĦÙĥÙħ بÙĬ
+ĠاÙĦÙĥÙħبÙĬ ÙĪØªØ±
+ت ÙĪÙĬ
+تÙĪÙĬ تر
+à¹Ģà¸Ĭ ีà¹Īยว
+à¹Ģà¸Ĭีà¹Īยว à¸Ĭา
+à¹Ģà¸Ĭีà¹Īยวà¸Ĭา à¸į
+á» Ķ
+Ġhi ếm
+ذا Ùĥرة
+Ġ×Ķ×ŀ×Ļ ×ķ×Ĺ×ĵ
+ĠìĪ ľ
+ĠìĪľ ê°Ħ
+ĠK ı
+ĠKı sa
+Ġgele ceÄŁi
+пÑĢо ÑĦеÑģÑģиона
+пÑĢоÑĦеÑģÑģиона л
+Ġog ó
+Ġogó le
+ĠgÅĤ ów
+ĠgÅĤów ne
+ĠÑģÑĤ илÑĮ
+×IJ פ׾
+×IJפ׾ ×Ļ×§
+×IJפ׾×Ļ×§ צ×Ļ×Ķ
+สม ารà¹Į
+สมารà¹Į à¸Ĺ
+สมารà¹Įà¸Ĺ à¹Ĥà¸Ł
+สมารà¹Įà¸Ĺà¹Ĥà¸Ł à¸Ļ
+Ġth ánh
+ÐŁ од
+ÐŁÐ¾Ð´ ÑĢоб
+ÐŁÐ¾Ð´ÑĢоб нее
+ĠاÙĦت ÙĪÙĨ
+ĠاÙĦتÙĪÙĨ سÙĬ
+Ġbah çe
+à¹ģà¸ģà¹ī à¸Ľà¸±à¸įหา
+é ducation
+eu rop
+europ ä
+europä ische
+ĠK si
+ĠKsi ÄĻ
+ĠëĦ ĺ
+ĠëĦĺ ìĸ´
+Ġv üc
+Ġvüc ud
+Ġyay g
+Ġyayg ın
+Ġnie kt
+Ġniekt óry
+Ġniektóry ch
+ãģŃ ãģĩ
+Ġк аж
+Ġкаж еÑĤÑģÑı
+к аж
+каж еÑĤ
+ĠاÙĦ دÙĬÙħÙĤرا
+ĠاÙĦدÙĬÙħÙĤرا Ø·
+ĠاÙĦدÙĬÙħÙĤراط ÙĬØ©
+æŃ ©
+æŃ© ãģĦãģ¦
+Ġv az
+Ġvaz ge
+Ġvazge ç
+Ġмин ималÑĮ
+ĠминималÑĮ н
+ãĥij ãĤ¿
+ãĥijãĤ¿ ãĥ¼ãĥ³
+Ġë Ĭ
+ĠëĬ IJ
+ĠëĬIJ ëĤĮ
+ãģ¡ ãĤĩãģĨ
+ãģ¡ãĤĩãģĨ ãģ©
+Ġ à¸ģร
+Ġà¸ģร à¸ģà¸İ
+Ġà¸ģรà¸ģà¸İ าà¸Ħม
+تج دÙĬد
+ĠØ´ اÙħÙĦ
+หลัà¸ģ à¸IJาà¸Ļ
+ĠмаÑĢ ÑĪ
+ĠмаÑĢÑĪ ÑĢÑĥÑĤ
+Ġv ÃŃt
+ĠvÃŃt ima
+Ġquiz á
+ay gı
+×ĵ×ijר ×Ļ×ķ
+Ġиз д
+Ġизд ели
+Ġиздели Ñı
+п ла
+пла Ñĩ
+плаÑĩ ива
+ä»» ãģĽ
+Ġéquip é
+ä¹ħ ãģĹãģ
+ä¹ħãģĹãģ ¶
+ä¹ħãģĹãģ¶ ãĤĬ
+Ġк аÑĤ
+ĠкаÑĤ ал
+ĠкаÑĤал ог
+ส à¹īม
+ĠÑĢ ÐµÐ¹
+ĠÑĢей ÑĤ
+ĠÑĢейÑĤ инг
+Ġth uyá»ģn
+ĠاÙĦÙħ ÙĤدس
+esp ère
+ãģ«åħ¥ ãģ£ãģŁ
+หมาย à¹Ģลà¸Ĥ
+ת×Ĺ×ķש ת
+à¸Ļ à¹Īะ
+Ġpe ÅĤ
+ĠpeÅĤ ne
+Ġpé rd
+Ġpérd ida
+หม วà¸Ķ
+หมวà¸Ķ หมูà¹Ī
+иÑĩеÑģк ÑĥÑİ
+çµĤ ãĤı
+çµĤãĤı ãģ£ãģŁ
+Ġ×Ĵ ×ķ×Ĵ׾
+à¸Ĺำ à¸Ħวาม
+à¸Ĺำà¸Ħวาม สะà¸Ńาà¸Ķ
+Hot éis
+Ġз аÑĢ
+ĠзаÑĢ ÐµÐ³Ð¸ÑģÑĤ
+ĠзаÑĢегиÑģÑĤ ÑĢи
+ĠзаÑĢегиÑģÑĤÑĢи ÑĢова
+ĠÑģ обÑĭÑĤи
+ĠÑģобÑĭÑĤи Ñı
+Ġ×ĸ ׼×IJ
+ÙħÙĨظ ÙĪÙħØ©
+Ġ×Ķ×ŀ צ
+Ġ×Ķ×ŀצ ×Ļ×IJ×ķת
+Ùħ ÙĥÙĪÙĨ
+ÙħÙĥÙĪÙĨ ات
+ä¸ĬãģĮ ãĤĭ
+Ġm ÄĻ
+ĠmÄĻ sk
+หรืà¸Ń à¹Ģà¸Ľà¸¥à¹Īา
+ëĤ ®
+Ġnok tas
+Ġnoktas ı
+ĠболÑĮÑĪ Ð¸Ð¼
+ĠлÑĥÑĩ ÑĪиÑħ
+Ø´Ùĩ ÙĬد
+à¸Ńำ à¸Ļ
+à¸Ńำà¸Ļ วย
+à¸Ńำà¸Ļวย à¸Ħวาม
+à¸Ńำà¸Ļวยà¸Ħวาม สะà¸Ķวà¸ģ
+Ġе в
+Ġев ÑĢ
+ĠевÑĢ Ð¾Ð¿
+ĠевÑĢоп ей
+à¸ī าย
+ìĦ Ń
+Ùħ Ù쨧
+ÙħÙ쨧 ÙĪØ¶
+ÙħÙ쨧ÙĪØ¶ ات
+ë¹ Į
+赤 ãģ¡ãĤĥãĤĵ
+ĠÑĥдал оÑģÑĮ
+ĠÐ¥ оÑĤ
+ĠХоÑĤ Ñı
+przedsiÄĻbior c
+ĠH ôm
+íķĺìĺĢ ìĬµëĭĪëĭ¤
+Ġн аг
+Ġнаг ÑĢÑĥз
+ĠнагÑĢÑĥз к
+Ġ×ij×Ļ׳ ׾×IJ×ķ×ŀ×Ļ
+Ġê°ĢëĬ¥ íķľ
+ĠH ữu
+à¸Ń ุà¸Ķ
+à¸Ńุà¸Ķ ม
+ת ×ķפ
+ת×ķפ ×¢×Ķ
+Ġmi ÅĤo
+ĠmiÅĤo ÅĽci
+ksi Äħż
+ksiÄħż ka
+ĠاÙĦÙĦ عبة
+à¸ī าà¸ģ
+สะ สม
+×ŀ תר
+×ŀתר ×Ĺש
+Ġlég ère
+Ġ׾צ פ
+Ġ׾צפ ×Ļ×Ķ
+ĠиÑģÑĤоÑĢ Ð¸Ñı
+Ġ ãĥĪãĥ©
+ĠãĥĪãĥ© ãĥĥãĤ¯
+ĠãĥĪãĥ©ãĥĥãĤ¯ ãĥIJãĥĥãĤ¯
+Ġк а
+Ġка ÑĦе
+×ŀס×ŀ ×ļ
+Ġc üm
+Ġcüm le
+à¹Ģà¸Ħลืà¹Īà¸Ńà¸Ļ à¹Ħหว
+ãģĬ ãģĿ
+ãģĬãģĿ ãĤīãģı
+ìŀIJ ëıĻ
+ìŀIJëıĻ ì°¨
+à¸Ńั à¸ķ
+à¸Ńัà¸ķ à¹Ĥà¸Ļ
+à¸Ńัà¸ķà¹Ĥà¸Ļ มั
+à¸Ńัà¸ķà¹Ĥà¸Ļมั à¸ķิ
+ĠÅŁ ik
+ĠÅŁik ay
+ĠÅŁikay et
+extr ême
+kr ä
+krä fte
+ëĤ Ļ
+íķ ij
+ì² Ļ
+íĺ Ī
+ì° į
+âĻ ¡
+ìŀ Ķ
+ë¢ °
+íĿ Ķ
+íĿ IJ
+âĩ Ĵ
+ë§ Ľ
+ìĬ Ī
+á» Ĵ
+ìĺ µ
+âĹ İ
+í Ĥ¨
+ê¿ Ī
+ìĪ ¨
+ìĽ ¨
+ë§ ¥
+ï½ Ģ
+ï¼ ª
+Ạ¨
+ãħ İ
+Ñ Ĺ
+ìĦ ¬
+ì¹ ¼
+ï¼ ¶
+ìĽ ł
+ëŁ ´
+Å ĥ
+ëĤ ¼
+ëĭ IJ
+âĢ ¹
+ë¦ Ń
+ì§ IJ
+âĢ ¤
+Ã ħ
+ëľ ¨
+íĦ ¸
+íľ ĺ
+ê² ģ
+ë´ ħ
+Ã ĺ
+ëŃ Ķ
+ëĺ ij
+âĹ ĩ
+ìĹ ĺ
+ï» ´
+ë§ ¹
+ï¾ Ŀ
+ìĬ ·
+íĥ ķ
+ï¼ ł
+ì» ´
+ëł Į
+ì½ ľ
+ï» ¹
+ãħ ł
+ì¡ ¸
+ëħ ¹
+âĤ º
+âĸ ¶
+íĥ IJ
+êµ ´
+íij ¸
+Ñ Ķ
+íĶ ½
+Ð ħ
+ë° ¤
+Ô ģ
+ì² ¨
+ì¶ ĺ
+ë² Ĺ
+ë© ¸
+ï¼ »
+ï¼ ½
+ï¼ ·
+ì° Į
+Ã Ĵ
+íı ´
+ìĵ ¸
+ì´ Į
+ëģ Ķ
+ëĶ ©
+ëĩ Į
+ë© Ģ
+ë² ¨
+ï¼ µ
+ë§ ¡
+ëĭ «
+ภ¿
+ãģ ±
+ìĩ ¼
+ìº ł
+ë® ¤
+ê± ±
+ì» ¬
+âĦ ĥ
+ëĶ ±
+ëĥ Ī
+ìĭ ±
+íĻ Ī
+ëŀ IJ
+ìħ Ģ
+ìł ł
+Ð Ĩ
+ëł ī
+ï½ ħ
+ï½ ı
+íĻ Ģ
+ëĽ °
+á» ®
+í Ĥ¹
+ê½ ĥ
+ï» ¤
+ïº Ķ
+êº ¼
+ìķ ī
+âĻ ¦
+ï½ ģ
+ìĵ ´
+ãĢ ī
+ì° ®
+ì¤ ĺ
+á» ª
+ëģ Ħ
+ëIJ ¨
+ìķ Į
+íĿ ĺ
+íħ IJ
+ãĢ Ī
+ê² ª
+ëĭ ¥
+ê² ¼
+á» Į
+ë§ ¨
+ëģ Ĭ
+ë² ¤
+ëij Ķ
+íĿ ¡
+á» ¬
+ë¬ ĺ
+ãģ ī
+ëŀ «
+íĶ Ī
+í ħį
+ìŀ ĥ
+ï½ ī
+ìģ ľ
+âĸ ½
+ë¬ »
+âĸ ³
+ï¼ ¸
+ìģ ĺ
+ì¶ °
+ìĬ ´
+ìķ ±
+ìĩ Ħ
+Ạ®
+ï´ ¿
+ï´ ¾
+âĤ ½
+ëĦ ĵ
+ë£ ©
+ì³ ¤
+ê´ ľ
+Ã Ļ
+á» ľ
+ï¿ £
+ëĵ Ń
+ë© ĺ
+ê» ´
+ëł ´
+Ð ĥ
+ë¬ µ
+ì§ Ŀ
+ãģ º
+ðŁĺ Ĥ
+ëŀ ¬
+ìł Ĭ
+ê´ Ħ
+ìŀ Ĭ
+íŀ Į
+ìĦ ¯
+âĪ Ģ
+âĸ ¡
+ëĢ Į
+ëŀ Ļ
+ï½ ĥ
+Ạ¶
+ï¾ Ħ
+ïº ĺ
+ë¹ ¼
+Ã Į
+âĸ ·
+ê¸ į
+ë© ĭ
+ãģ ĥ
+ìĺ Ĩ
+ìĺ ®
+ëª ¬
+ë¡ ¤
+ëł ¬
+ëĬ ¦
+âĸ ª
+ì¼ ĵ
+ìľ Ī
+ì§ §
+ï½ ½
+ëĥ ī
+ï¾ Į
+ëĺ IJ
+ï¼ ĥ
+á» Ħ
+ì´ ¬
+ì¶ ¤
+ï¼ ¹
+ï» Ń
+âĤ «
+ï½ ĩ
+ìĺ ·
+ëĸ ¨
+âī «
+ë¦ ¿
+âľ ¨
+Ù ±
+ì¯ ¤
+ê¹ Ķ
+ðŁĺ Ĭ
+ìĪ «
+ê³ ±
+êµ ³
+ï½ ĭ
+ภĮ
+Ä ł
+ëĶ ¸
+ë° ij
+ìħ ĭ
+íİ ´
+âľ ħ
+íĥ ij
+ëĪ ĩ
+íı ¼
+ðŁĺ į
+ìĺ Ľ
+ï» £
+Ñ ĺ
+ì© Į
+ë¦ ħ
+ìĿ į
+ï½ ¸
+ëį ľ
+ãģ ħ
+íİ ¼
+ëĭ Ŀ
+ë¿ Į
+ì¼ °
+ìĭ «
+ë° ¥
+íĽ Į
+ì¨ Į
+ë¹ Ļ
+ï½ İ
+ë´ Ħ
+ìĦ ¹
+ï½ ²
+ìĮ ĵ
+Ò ij
+ë° į
+ëł Ģ
+íĨ ¤
+ï½ ¯
+ë¤ Ħ
+ê½ ¤
+ï½ Ĵ
+ìķ ¨
+ï½ ¼
+ê¹ IJ
+íģ IJ
+âĦ ĸ
+ë§ º
+ïº ®
+ëħ ģ
+ê² ¸
+ï» ł
+íĬ ľ
+Å ¹
+ë¥ Ń
+ëĪ ī
+ï½ Ķ
+íĮ ¬
+ìŀ ĩ
+ï ¬ģ
+ï» ¨
+ëij ¥
+ëŀ Ħ
+Ù ¬
+íĭ ´
+ìŀ ī
+Ú ¾
+ìĽ ħ
+ï» ®
+ëĭ ī
+âī ª
+âĹ Ħ
+ëĪ Į
+íĽ ¼
+ì¤ į
+Å ¸
+ì¤ ¬
+ì¾ Į
+ï½ ĵ
+ï¾ Ĭ
+ðŁı »
+ï¾ ī
+Ð ģ
+íĺ IJ
+ï¾ Ļ
+ê¼ ¬
+íŀ IJ
+âĢ ¥
+ëŁ Ń
+ë§ ŀ
+ìĥ ¤
+ïº Ĵ
+íĭ ±
+ë½ ij
+Ã ķ
+âĪ ļ
+ëĤ Ħ
+ê¹ Ŀ
+ëĨ Ī
+Ạº
+ìħ Ī
+ìĮ į
+âĢ ¡
+ï¼ ±
+ìģ ¨
+âĺ º
+ëĴ ·
+ìĺ ³
+ðŁij į
+ëª ½
+ëĤ Ń
+ïº Ń
+ë© Ī
+á» Ī
+íķ Ģ
+ëĭ Ļ
+ë¦ ĩ
+ìķ ¤
+ìį ¼
+ãĥ µ
+Ñ £
+ìľ Ĺ
+â ŃIJ
+ï¾ ĺ
+íĹ ¬
+ê¾ ¼
+ìķ Ĺ
+ï» Į
+ê± ·
+ëħ ķ
+ë¡ ±
+ìķ Ĭ
+ï¾ Ģ
+ìĩ ł
+íĮ ©
+ïº ª
+ë§ Ļ
+ï¼ ¿
+ê¿ Ķ
+íİ ľ
+ë£ ¸
+íĶ Ķ
+ï» ³
+ëı ķ
+ìĭ ¼
+á» İ
+ë§ ĺ
+ì¢ ĭ
+íĨ ¡
+ï½ ±
+íĿ ij
+á» ¸
+ì¦ Į
+ì¹ ¸
+ëŃ ĺ
+ï¾ Ĺ
+ï» ĭ
+íĬ Ģ
+ë¥ Ļ
+ì½ ©
+ëģ Ĺ
+ëį ´
+ìħ ľ
+Â ¸
+ë» IJ
+ìĥ µ
+ê² IJ
+ëĵ ¬
+ë£ °
+ãħ ĭ
+ìĹ ī
+á» ĸ
+ëĦ Į
+ï½ ¶
+ë´ ĩ
+ëĤ ³
+ãĤ ľ
+ëĸ »
+íİ Ģ
+ëį ©
+íķ ¸
+Ã ·
+ê¼ ¼
+ëĶ ľ
+ë° ´
+ë© į
+âĹ ¯
+ìĹ ij
+ìĻ ¼
+ïº ij
+ë¶ ķ
+ë¡ ¬
+ï½ Į
+íĨ ¨
+ïº ´
+ëł ĺ
+ê° ¤
+ìĪ ²
+Ñ ĵ
+ìħ ī
+ï» ĵ
+ëĪ Ķ
+ëį §
+âĢ ¼
+ï» ²
+ê° ±
+ê¿ Ģ
+ëĭ ·
+Ạ¸
+Ạª
+Æ Ĵ
+ëį ¤
+ìĪ Ń
+ï½ Ĥ
+ï½ Ī
+Å ł
+ë£ ¬
+Ñ µ
+ëĸ ¡
+ëĥ Ħ
+ìĦ °
+ëĵ Ī
+ï¾ ĥ
+ëĩ ¨
+ï½ IJ
+êµ ½
+ìĹ ½
+ëĤ Ģ
+ë¬ ¶
+ï½ ·
+ìı Ł
+íĺ Ķ
+ê¼ Ī
+ëģ Ī
+ì¥ IJ
+ïº Ĺ
+Ä Į
+ëĪ ł
+ëĸ ¼
+íĢ ´
+âī ¥
+ëĭ Ń
+ì± Ļ
+ê» ı
+ë© ¤
+ìĥ ĺ
+ëį ®
+ë£ ¡
+ìĤ ½
+ãĪ ľ
+Ä ¨
+âĢ §
+ï½ º
+Ä £
+ì¦ ī
+ï¼ ¼
+Û ©
+âĪ Ļ
+ë° ı
+ë¹ ħ
+ðŁĺ Ľ
+íĪ ´
+ðŁĴ ķ
+ãĢ Ĵ
+ìŀ ĺ
+ïº ¤
+ï½ ĸ
+ë© ľ
+ë² ¼
+ëĿ Ħ
+ëļ ľ
+ï» ĺ
+ìĥ Į
+ï½ Ħ
+ì© Ķ
+ï½ Ļ
+ïº ©
+Û ŀ
+âĺ İ
+ìł ¤
+ëIJ ©
+Å Ŀ
+âŀ ¡
+ï» §
+Ð ı
+ì« ĵ
+ê³ ½
+É ij
+ãĥ ²
+ëĤ «
+ë¦ ī
+ì¢ ģ
+ë° Ń
+ðŁĺ ģ
+ë¹ µ
+ì² ©
+ì» µ
+ðŁĺ ĺ
+ë± ħ
+âī Ī
+ë¹ ļ
+ï» ľ
+ðŁĻ ı
+íģ °
+ìĦ ŀ
+ï¾ ļ
+ìĺ ¹
+ë¼ Ī
+ëĤ ¯
+ëŀ ©
+íļ ¡
+ï½ ķ
+íĥ ĵ
+ëĿ ł
+ê³ ģ
+ëĵ Ģ
+ìĹ ł
+ï¼ º
+ë§ ij
+ëĭ ¿
+ì¿ ¨
+ãİ ¡
+Ð Ĭ
+íĦ ±
+Å ¨
+ïº ³
+ï¾ ı
+âĭ ħ
+ê¼ ´
+âī ¤
+íĮ ģ
+Î ©
+ê¶ ¤
+ìĪ į
+âľ ¿
+ì½ ¤
+ëĪ ħ
+íĨ ±
+ãħ ľ
+áIJ ħ
+Å Ĵ
+ðŁij ī
+ï» ¦
+Ð ª
+ë¥ ľ
+íķ «
+ï¾ ĭ
+âĻ «
+ê¹ ľ
+ë° ¸
+ëĶ ĺ
+íĿ ī
+ï¾ ģ
+ï¾ Ľ
+ëł Ľ
+ê² ¹
+ì¿ ¼
+ï» ¬
+âŀ ¤
+ðŁĻ ģ
+ïº ł
+ëĨ ¨
+ë¯ ¹
+ê¸ ĭ
+ë» Ķ
+ê¹ ĥ
+ëij ij
+íĭ ¸
+íİ Ļ
+âŀ ĸ
+ãĥ ½
+ì§ ļ
+ï½ ¬
+ï» ¥
+íĮ ½
+âĢ Ĵ
+ì ĮĢ
+ìŃ ī
+ëļ ±
+ãĤ ŀ
+íĭ Ī
+ãĤ IJ
+ëī ĺ
+Î £
+ê³ °
+ë¹ Ĺ
+ï¾ İ
+ðŁĺ Ń
+íĿ ł
+ìĹ ¿
+ê° ļ
+ì¤ Į
+ë§ µ
+ï½ ³
+ãģ ¢
+ï» Ĺ
+âī ¦
+Ú ¤
+ë łģ
+ê¼ ½
+ï» «
+âī §
+ì´ Ľ
+ìł Ŀ
+Ạ°
+âĻ £
+ìº ĺ
+âĪ ĩ
+ê² ī
+ë° Ł
+ï» Ķ
+íĸ ĩ
+âĸ Ĵ
+ðŁij ı
+Ã ŀ
+ðŁĺ Ĩ
+ïº ¼
+âĿ Ĺ
+ìº Ķ
+ì¹ ©
+ëĸ ¤
+ëĥ ħ
+âĶ ľ
+ï½ »
+Î Ķ
+áĥ ¦
+ìŀ İ
+âĺ Ģ
+âĪ ¼
+ðŁĶ ¥
+ë° Į
+ìł ĸ
+íĹ Ľ
+Î ķ
+ïº ĥ
+ë¶ ī
+âĪ ŀ
+íĥ Ń
+Ã ĭ
+âģ Ħ
+ãħ ĩ
+ëĦ ¥
+ëĭ ®
+ëł ·
+íĮ Ŀ
+ìº ¡
+ë· Ķ
+ì© į
+íĤ ´
+ëļ «
+âĵ Ĵ
+íķ į
+âĻ Ĥ
+ï¾ Ĩ
+âĨ ©
+ìį ©
+ïº ķ
+íĿ Ļ
+Ñ ľ
+íĤ ·
+íĿ °
+íĥ ±
+ëķ IJ
+ï¾ Ĵ
+× ĥ
+ëĮ Ħ
+ìĺ ´
+ìķ µ
+ê¹ ¥
+ëŀ Ń
+ìª ¼
+ãİ Ŀ
+ðŁĺ ħ
+ëı ĭ
+ëª «
+ïº ¸
+ë® ¬
+ë² ħ
+ëij ł
+ìħ °
+ì» ·
+ëĶ ª
+ëħ Ķ
+ãħ ¡
+ìĶ »
+íķ ı
+ëį ±
+ïº ¨
+ï¾ į
+ï½ µ
+ì¢ Ģ
+íİ Į
+ï» °
+ïº £
+Æ £
+ðŁ¤ £
+ï· º
+ëĤ ļ
+âĭ Ĩ
+ë³ į
+ðŁĺ Ħ
+ìĸ Ģ
+ìĻ ł
+ëĨ Ķ
+íĹ ¨
+ï» Ľ
+ï» Ŀ
+á» ¶
+ìĸ ĺ
+ìİ Ħ
+Ú Ĩ
+ï» ŀ
+ëĢ IJ
+ê² Ķ
+ï» µ
+âĹ ¦
+íļ Ł
+ê¹ ģ
+ê° ĵ
+ëĶ ´
+ìı ĺ
+ëļ Ŀ
+á» ł
+ëŀ ´
+ëĦ ī
+âĺ ŀ
+ï½ ĺ
+Å ½
+ë¦ İ
+âĸ ¬
+ëŃ ī
+âĩ Ľ
+ìį ¬
+ïº Ł
+Ë ľ
+ë¶ ĵ
+ìĽ °
+Å ľ
+ëŃ ĩ
+á» ²
+Ë ļ
+ëķ Ģ
+âĺ ij
+ðŁı ¼
+ìĸ ½
+âĮ Ĵ
+Ð İ
+É ¾
+íĮ ¡
+ï¾ ħ
+ìŀ Ń
+ï½ ¨
+ì¹ «
+ìľ Į
+Ò Ľ
+êµ ¿
+ëĭ ¦
+âĶ Ķ
+ï¾ ij
+ì§ ĸ
+ìº Ħ
+ãĢ ĥ
+Ê ¼
+ê² Ł
+ï½ §
+Ä ¢
+íİ ł
+ë§ ·
+ê° ĩ
+ìĭ ¹
+ðŁĴ ¦
+ï¾ ľ
+ëĬ Ļ
+ë² ¡
+Å ¿
+ðŁĺ ĭ
+ðŁĴ ª
+ì¿ Ħ
+ë© ķ
+ìŃ ¤
+ëĬ Ħ
+ðŁĮ ¸
+ãĤ Ŀ
+Ç İ
+ï½ ļ
+Ä Ĺ
+ëģ ĵ
+ê¶ IJ
+áµ ī
+ãĥ Ĥ
+ê» į
+ðŁĺ ¦
+ãĢ Ŀ
+ðŁ¤ Ĺ
+Ñ Ł
+ìĹ İ
+âľ Į
+ìī IJ
+Ã Ĩ
+íĹ IJ
+ðŁİ ī
+Î ij
+ï½ Ń
+ðŁĴ Ļ
+ìĽ ¬
+íĢ ĺ
+ï» ¢
+ðŁĺ İ
+íij ¼
+íĿ ©
+ï» Ħ
+íħ Ģ
+ëł IJ
+ì¥ ¬
+Ð ĭ
+ìĥ ·
+ëľ ¬
+ðŁĺ ĥ
+ëĦ ¬
+ë¥ ¨
+ìĽ į
+ï½ Ĩ
+ï½ ´
+ãĥ ħ
+Ã ı
+ï» ª
+âĻ ł
+ëĬ ¬
+ë± Ģ
+ë° ĭ
+ìĥ Ģ
+ï½ ¾
+ëĤ ±
+ì» ¸
+ðŁĴ ĸ
+ðŁij Į
+Ñ ŀ
+ì§ ±
+Ë Ĩ
+ðŁĵ ļ
+âŃ ķ
+ï¬ Ĥ
+ï» ¡
+ëij ¬
+íĪ ¼
+âĸ ¸
+ê° ¯
+ê¹ ħ
+ï½ ®
+ëĺ ¥
+Ä ¡
+íĮ Ł
+Ð Į
+ìĨ Ł
+ïº ĵ
+ï» ¼
+Ã Ľ
+ãĥ ¾
+ëĮ ĵ
+íĴ ĭ
+ìķ ĵ
+ï½ ¹
+ëĤ ¡
+ðŁij ĩ
+Ạ¼
+ãĢ Ł
+ðŁĮ Ł
+íĥ ł
+ãĢ Ĩ
+âĢ Ł
+ë¸ IJ
+ðŁĮ ¹
+ìł ¼
+ðŁĵ Į
+ìĶ ¬
+âĹ Ģ
+ðŁĴ ĵ
+ê¹ İ
+ìĤ IJ
+ìĶ Į
+Ñ Ľ
+âĶ Ī
+ë² ³
+ãİ ŀ
+Õ ¡
+íĤ µ
+ðŁ¤ Ķ
+ëĢ Ķ
+ìĬ IJ
+íĻ ī
+âľ ¦
+ëľ ¯
+ìł ¯
+ëĶ §
+Î ¦
+Ë Ī
+ìī ¼
+âĹ Ĭ
+ëľ ©
+ëľ °
+ï¾ IJ
+ë¿ Ķ
+ìĹ ®
+ì· Į
+ïº §
+Î Ĵ
+ëµ Ļ
+ï» Ĭ
+ì° Ķ
+íİ Ħ
+ðŁĴ Ĺ
+Ạ´
+ì° ¢
+íľ ¼
+ê½ Ĥ
+ì± Ķ
+ìī ´
+âĸ ¾
+íĪ °
+ëĭ Ľ
+âĿ £
+ï½ ª
+ðŁĴ ľ
+Ë ĺ
+ãħ ¤
+âĨ Ĺ
+íĸ Ħ
+âĻ ¬
+ìķ °
+ïº ľ
+âī ¡
+ãĢ ĵ
+ìij ¥
+íĮ į
+íī ģ
+ë» Ĺ
+íľ ł
+íľ ©
+âľ Ī
+íĢ Ħ
+ìĸ ĩ
+ì¢ ĩ
+íŀ Ļ
+ëª ¹
+ãĤ Ľ
+ðŁĺ ±
+ëį Ł
+๠ħ
+êµ ¶
+Ù «
+ìĶ ģ
+âľ ª
+ï¾ Ī
+ðŁĻ Į
+âļ ¡
+Î ļ
+ì¼ Ī
+ï¾ Ķ
+ï¾ Ĥ
+êµ ī
+ïº »
+ðŁĴ ĭ
+á¹ £
+Ó Ļ
+ìĨ ľ
+ìĹ £
+âľ ©
+ìľ Ļ
+ïº °
+Ạ²
+ìŀ £
+âĿ Į
+âĺ ģ
+ìķ İ
+Ä ½
+Û ģ
+ãĦ ±
+ëŁ ¿
+íĮ ¸
+ê½ ī
+ìı ł
+ðŁį Ģ
+âĨ Ķ
+ëŃ ¡
+ï» ģ
+ï¼ Ħ
+ðŁĴ ¥
+âĺ Ľ
+íĹ ·
+ëij ¡
+Î ł
+Î ¤
+âĦ ĵ
+ïº ·
+Î Ļ
+ëı Ķ
+ì§ ¤
+âĶ ĥ
+ãĦ ·
+Ç Ĵ
+ðŁ¥ °
+ëĶ ķ
+ìļ ¥
+ì¸ Ħ
+íĽ Ķ
+ïº ĩ
+ïº ¬
+ðŁĺ ¢
+ë¹ ¡
+ìĶ ¹
+Å ³
+Ë Ŀ
+íİ ij
+ï¾ ĵ
+ðŁĴ ļ
+ëĬ ij
+êº ¾
+íĨ °
+Ã ¿
+Ð Ħ
+ëĮ IJ
+ë½ Ģ
+ì· Ħ
+ðŁ ĵį
+ðŁĻ Ī
+âĹ Ī
+ê¿ ĩ
+ì¼ Ħ
+íİ «
+ðŁĩ ·
+âĶ ĭ
+âļ ł
+ë± ī
+ì į°
+ìĻ Ī
+É ª
+ïº ĭ
+ðŁĺ ľ
+Î Ł
+ðŁ ĻĤ
+âļ ½
+Å Ī
+ë¹ Ķ
+íĮ ľ
+๠ı
+ìĸ ¹
+íĪ Ń
+ðŁ¥ ĩ
+ãĦ ´
+ëĶ ¥
+ìŃ Ī
+âĪ Ĩ
+ëĸ ³
+ë± ĥ
+ìŀ ¦
+ï» IJ
+Î ľ
+âľ §
+Ï į
+ìł ĵ
+âĹ ķ
+ëĴ Ģ
+ï» Ģ
+ðŁĶ ´
+ê½ ģ
+ëĮ Ī
+ëİ Į
+ãĤ İ
+⦠ģ
+ì½ §
+ï¯ ¾
+âĿ ¯
+ภħ
+ðŁĻ Ħ
+âĿ Ģ
+ðŁĶ ¹
+âĩ IJ
+êµ µ
+âĩ Ķ
+ë¶ IJ
+ðŁĴ Ľ
+Î ¾
+íĥ ¬
+âĿ Ħ
+Ò £
+ãĢ °
+âĪ ij
+âĺ ¼
+âī ł
+Ò ¯
+ïº ¯
+ê¿ ¨
+âľ ĸ
+Ê ĸ
+íĢ Ģ
+ê¾ Ģ
+íĹ Ŀ
+âĶ £
+ãİ ľ
+ëĶ Ľ
+ëľ ¸
+ï º«
+ê¿ °
+ðŁĩ ¹
+Ç IJ
+Û Ĵ
+ë£ »
+ïº ĸ
+Ñ ļ
+ëĬ ł
+Û ķ
+ê¹ ¡
+ë¿ ľ
+ì² ¼
+ï¨ ij
+ë¥ µ
+ìį ¸
+íħ ħ
+íij ¹
+Ö Ģ
+ï³ Į
+ãħ £
+ìij ¤
+ì½ ķ
+ëķ ł
+ðŁĮ ¿
+íĥ Ķ
+ìĽ ģ
+Î ¶
+âŀ ľ
+ìĬ ĺ
+íĽ Ĺ
+ë© §
+ìī ĺ
+Õ ¶
+á¹ ĩ
+ðŁİ ģ
+ï½ ¿
+ï¼ Ĥ
+á¼ IJ
+âľ ķ
+âŀ ¢
+ëĦ ¨
+ì» «
+ì¯ Ķ
+ì° ľ
+ðŁĴ °
+íħ Ŀ
+ãİ ı
+ë³ ¶
+Ò ĵ
+âĨ ³
+ìĥ ´
+íģ ĺ
+âĸ Ģ
+ë² Ļ
+ภĥ
+á½ ¶
+Ä ķ
+⬠ĩ
+ë¤ ĺ
+ðŁİ µ
+âľ ļ
+ïº ı
+Î ¡
+âĹ ī
+ðŁĴ «
+Ð Ī
+ìĸ Ħ
+ì§ Ļ
+ï» ĥ
+ðĿij Ĵ
+ëŃ Ħ
+âĿ ¥
+âĿ ĸ
+âĺ Ŀ
+Ê ¹
+Ḡ¥
+âĢ ¿
+ãħ ħ
+ê¸ ģ
+ëķ ¡
+ëį ¥
+âĪ ©
+ê» Ħ
+ë® Į
+Ò ±
+âĪ Ĺ
+ëł Ļ
+ïº Į
+Ë IJ
+ðŁĺ ³
+ðŁij ©
+ðŁİ ¶
+ì¿ µ
+ðŁ¤ ©
+ê· ¤
+ëĮ Ķ
+ïº IJ
+Ï İ
+ì¶ ¥
+ï½ Ĭ
+á¹ Ń
+ë¤ ¼
+âĸ «
+ì§ ł
+á¼ Ģ
+ê» ij
+ëĮ ģ
+íĢ ¸
+âĻ Ľ
+ðŁĴ ŀ
+âĸ °
+ðĿij ĸ
+ëĿ ¤
+ठ¦
+ì´ ĺ
+ðŁĺ ĩ
+ëĶ ¤
+Î Ĺ
+ðŁĻ ĩ
+Ë Ľ
+ì© ¡
+âĪ §
+Õ ¥
+Ñ Ļ
+ëIJ ¬
+ëĸ Ħ
+ðŁĮ ·
+ìĹ Į
+ðŁĺ ¥
+ëĪ ´
+ï» ļ
+É Ľ
+ïº Ħ
+ï» ı
+Å Į
+ë² ļ
+ìĭ £
+ïº Ģ
+Î ĵ
+ðŁĺ Į
+Ë Ļ
+ëŀ ı
+ðŁĶ ¸
+ðŁĵ ·
+ëģ ½
+íģ ½
+ðŁĴ ¡
+ðŁĮ ±
+ëº ı
+ìģ ł
+ìĥ IJ
+ëı Ĺ
+ì¸ °
+ëĪ ķ
+Î Ŀ
+âģ ī
+ðŁĮ ¼
+íĮ ł
+âĭ ¯
+áĥ ĺ
+âľ ¤
+ê± Ķ
+íĮ İ
+ðŁĴ ¯
+ìı Ļ
+íĹ ī
+Ù Ń
+ì½ °
+ïº ¿
+ï» ±
+ì± Į
+âĺ ķ
+ðŁİ Ģ
+Ä Ŀ
+ë° §
+ìĤ ¿
+áij ķ
+ðŁį ĥ
+âĩ ¨
+Î Ľ
+ë§ ´
+ë³ ķ
+á ijIJ
+âĸ ĵ
+ðĿ ijľ
+âĻ »
+íĤ ¥
+Õ ¸
+ãĪ ±
+ëº Ģ
+ì² ¸
+ïº Ľ
+ðŁı Ĩ
+ðŁĩ ª
+âĿ ĵ
+Ä Ģ
+ì½ ¥
+ðŁĩ §
+á½ ·
+âľ Ĥ
+ìŀ ¼
+ï§ ¡
+ðŁĵ ¸
+âĻ ¯
+É Ķ
+á½ ¸
+âĮ ª
+ï» ĸ
+ï¥ §
+âļ «
+âĶ Ĺ
+ðŁĮ Ī
+ï» ©
+ðŁĵ ²
+Ï Ī
+ðŁĺ ¡
+ðĿij İ
+ìľ ½
+ì§ ¬
+ì§ Ĭ
+á½ ³
+ìĮ ¤
+ëĤ į
+âī Ĵ
+ðŁij ¨
+âĺ ĺ
+Ó ©
+âĤ ĵ
+âĪ Ĥ
+ï¹ ģ
+ðŁĴ IJ
+íħ ĥ
+ðŁı ½
+ê· Ħ
+ðŁĺ ı
+ðŁĮ º
+ðŁĺ Ķ
+ï½ «
+âľ İ
+ëµ Ī
+ðŁĩ ¸
+âĢ £
+âŀ Ķ
+ëĺ ĺ
+ìĥ ¬
+Ê ĥ
+⬠ħ
+ì© IJ
+ðŁĻ Ĩ
+ðŁİ Ħ
+Ä ¾
+⣠¶
+áĥ IJ
+âĺ »
+ì± ķ
+ìģ ©
+ë½ ķ
+ìº £
+ðŁij Ī
+ðŁĻ ĭ
+ï¾ ĸ
+Ò ļ
+Õ «
+ìĮ Ī
+ë² §
+ðŁĩ ®
+ï½ Ŀ
+ðŁį ģ
+ìĹ ¥
+Ä ³
+ë½ IJ
+íį ½
+íĽ ij
+âĤ ¹
+ãħ ģ
+ìĶ ½
+ðŁĶ ģ
+ठ¯
+ê¾ ¹
+ëī ľ
+âĹ ¡
+íķ Į
+Î ĺ
+ë£ ¹
+ìĻ ĵ
+ðŁĩ ¦
+ðŁij Ģ
+âĶ Į
+á¿ ¦
+ëĦ Ľ
+ìĦ £
+ìŃ Ļ
+ï± ł
+Î ŀ
+Ê »
+á¿ ¶
+âĿ Ŀ
+ê± Ģ
+ëĸ ´
+ãĦ ¹
+ðŁĴ İ
+Ï ¹
+⼠ħ
+ï» ķ
+ãĥ ±
+ï½ Ľ
+ëĮ ķ
+ë¹ ½
+ì¥ Ķ
+ì¿ ¤
+ðŁĸ ¤
+Ñ Ĵ
+ê¹ į
+ëİ Ģ
+ìĭ ¯
+ë» ¤
+ðŁĵ ŀ
+ðŁĵ £
+ðŁĺ Ŀ
+ìį ¹
+ìĹ ¡
+ì° IJ
+á½ IJ
+ï» Ī
+âľ į
+Ä ı
+ðŁĮ ŀ
+âĦ ¦
+ê½ Ŀ
+ë» ĺ
+ìĪ ±
+âĶ ĺ
+ðŁĮ »
+âĤ ´
+âŀ ¨
+íIJ ģ
+ê ¶Ī
+âĺ ¢
+ðŁĺ Ī
+ï½ ©
+âĦ Ĺ
+ê° Ń
+ê° ¸
+ë» ij
+ì¥ ´
+ì» ¥
+ï¤ Ĭ
+ï» Ĵ
+ðŁĺ ķ
+âĺ Ķ
+ìĺ IJ
+ðŁļ Ĺ
+ëĹ Ħ
+ë§ ı
+Õ ½
+âĸ »
+⣠µ
+ìī °
+ï» ij
+âĻ ©
+Î ¥
+ðŁĺ £
+âĬ Ĥ
+ãħ Ĥ
+ìħ ¸
+íı Ħ
+âľ ½
+ì¦ Ļ
+âĸ £
+ê± į
+ê¿ ĭ
+ì« Ħ
+ìº ĩ
+ðŁĩ µ
+ðŁij ij
+âľ ĺ
+ðĿij Ľ
+ìį ½
+ìº ī
+ï¬ µ
+ðŁĶ º
+âĦ ®
+íĥ ¤
+ðŁĩ º
+ðŁĴ µ
+íħ ¨
+ï½ ij
+Î ¨
+ìĥ ¹
+ìĸ ķ
+ì¹ µ
+ðŁĵ ±
+ठµ
+ðŁij Ĭ
+ðŁĴ Ħ
+ðŁĴ Ŀ
+ãĮ Ķ
+ìĻ ģ
+Ð ĩ
+à® IJ
+âĸ ¹
+á´ Ľ
+âĹ ĺ
+ëº ¨
+íĥ ī
+ìĸ Į
+ðŁIJ ¶
+ãĤ ij
+Ë ĩ
+Å ı
+á½ ¹
+ìħ §
+ï¹ °
+ðĿij ¡
+ðŁĶ Ŀ
+ðŁĺ »
+ðŁĴ ĥ
+ðŁ¤ ¦
+ðŁį Ĵ
+íĢ µ
+âľ Ĩ
+ë¹ ´
+ï§ ¤
+ï» Ļ
+á´ Ĺ
+ðŁĮ ´
+Í ¾
+ëĮ ij
+ì¨ ĭ
+ìµ ¸
+ðŁİ Ī
+ðŁı ł
+á½ ±
+Û Ĩ
+á¿ ĸ
+âĢ Ľ
+ì° ¼
+íķ ¥
+íĹ ´
+ðŁĩ ¬
+ì° Ŀ
+âĪ ł
+ï¼ ĩ
+âĬ Ļ
+âĿ ij
+ëĦ ĭ
+ëŀ Ĺ
+ë° ī
+ìĹ Ĭ
+ì¢ Ĩ
+íĮ ¥
+ï° ²
+ðŁĵ ĸ
+ðŁĺ ®
+âļ ª
+ðŁĺ ļ
+âĿ ŀ
+ðĿij Ł
+ðŁİ Ĥ
+Å ķ
+áIJ Ī
+êº ½
+ì± ł
+ïº Ŀ
+ê¿ ī
+áĥ ł
+ðŁı ĥ
+ðŁĴ ¸
+âĿ ģ
+âĹ ¾
+Ú ª
+á¹ ĥ
+íĬ ¬
+ðŁĩ ±
+íİ Ń
+ðŁĺ ŀ
+ë¾ °
+á¹ Ľ
+ëĽ ¸
+âĿ Ĥ
+êĴ ³
+âĶ IJ
+íĵ °
+âŀ ł
+ê´ ĺ
+ëħ ĺ
+ë» ¥
+ì¾ ħ
+ðŁĺ IJ
+âĪ ª
+ðŁij ģ
+âĪ ´
+âĹ ģ
+ëº IJ
+ìŀ ¤
+ì± Ĺ
+ðŁı ¾
+Î §
+á½ »
+âŀ ¥
+ìŁ Ī
+ï» ī
+âĸ Į
+ãĥ ®
+ðŁ¤ ¤
+âĩ ĵ
+ì¼ ł
+á´ ı
+ë§ ¬
+ë» £
+ðŁĴ ¬
+ðŁį ĵ
+Ä ¸
+Ù ¹
+Ê ¿
+á½ °
+ëķ ľ
+ì° ¡
+ì° »
+íİ į
+ðŁİ ¯
+ðŁį Ĥ
+ðŁij §
+âĻ ¢
+áĨ ŀ
+âĻ §
+âļ ľ
+âľ ī
+ëĵ ¦
+ëŃ £
+ìĪ ı
+ìĵ ±
+Å Ń
+Ê Ĭ
+âĴ ¸
+âĩ ©
+ðŁĴ Ķ
+Õ µ
+Ð ī
+Ò »
+ë§ £
+ìĽ ľ
+ì¿ ¡
+íĽ ħ
+íĽ ¤
+ïº ¢
+âľ ĭ
+âĪ Ī
+ðŁĮ į
+Ê ľ
+ëĬ ª
+ëĴ ¹
+ïº ²
+âĸ Ħ
+ãħ Ī
+ëļ ¤
+íİ ©
+âĪ ¨
+ðŁ¤ ª
+áĥ ļ
+ê³ ¶
+íĬ ķ
+ðŁĺ ¬
+âĪ «
+ðŁij ĭ
+Ò IJ
+íĬ ¿
+ðŁĶ µ
+ðŁĴ ¨
+ðŁĮ Ļ
+ëĩ ©
+âľ ³
+ë¨ ģ
+ëº Ħ
+ìĻ ij
+ìº ħ
+íı Ī
+ðĿij Ļ
+ðŁĴ ĺ
+ãİ ¥
+âĿ ı
+âľ °
+ï¯ ¿
+ëµ IJ
+ì¼ IJ
+ïº ±
+Õ ´
+ï¬ Ģ
+âľ ´
+ðŁ¤ Ń
+ðŁij Ĩ
+⼠Ķ
+ê· ĵ
+ìĮ Į
+ðŁ¤ ·
+Û Ķ
+ðŁ§ ¡
+ðŁĺ ĵ
+Î ĸ
+âı °
+ê² ľ
+ëĭ ³
+ëİ ħ
+ë° Ī
+ï® IJ
+ðŁı ¡
+âĨ ª
+âĵ Ķ
+âľ Ĭ
+Ï ²
+Ü IJ
+ðŁĩ ³
+Ö Ĥ
+âľ ı
+ìĸ Ĺ
+ì« Ļ
+ðŁĺ ²
+Ä Ń
+âĻ Ń
+âĶ ı
+âĹ Į
+ðŁĺ ¯
+áµ Ĵ
+íĬ ł
+Ä ·
+Ê ģ
+ठŁ
+á¹ ģ
+á¼ °
+á¿ Ĩ
+â «
+â« ¸
+ëį «
+ì³ ĩ
+ì¼ ¤
+íĽ ¨
+ðŁĴ Ł
+Ê Ģ
+Ê ³
+ëĵ IJ
+âķ °
+âĿ ĩ
+Ç Ģ
+Ç Ķ
+É ´
+âĺ ļ
+âĺ ľ
+ê¶ Ĥ
+ì« Ĵ
+ì± Ī
+ðŁĩ ¨
+ðŁİ ¥
+ðŁĵ Ŀ
+Ä §
+ðĿ ijIJ
+Û Ī
+ठ¬
+ì¬ IJ
+íĹ ¥
+âĻ ¨
+ðŁį ´
+ï¹ ı
+Ë ĭ
+ðŁ¥ º
+âĸ ¨
+íĻ ĭ
+âĪ ħ
+ëģ Ļ
+ëŀ ł
+ìĨ ¥
+âĢ ĸ
+ðŁ¤ ĺ
+ðŁIJ »
+áµ ķ
+Ç Ŀ
+âĺ ı
+ïº ļ
+ï» Ĥ
+ðŁļ ©
+ìĪ Ł
+Ë Ĭ
+⤠µ
+ðŁĴ §
+ã ħį
+ë© ©
+Æ ¬
+Î ĩ
+âĩ §
+âĵ ļ
+ìĤ ¯
+ìĪ ¯
+ëĨ ĭ
+âľ ¯
+ðŁļ Ģ
+Ú ĺ
+Ú ¨
+âľ Ń
+ê² ħ
+íĮ °
+íľ Ļ
+ðŁĮ Ĭ
+ðŁİ ĵ
+ðŁĺ Ļ
+Ë ĥ
+ðŁĴ ģ
+ðŁij İ
+âĺ ¹
+ðŁĺ «
+ðŁĴ »
+ëĤ µ
+ìĿ Ĭ
+íĮ »
+Ò ³
+á½ ²
+âŀ ŀ
+ëĤ ij
+ëĿ Ī
+ì£ ¤
+ï» ¯
+ðŁĩ ©
+ðŁ¥ ³
+âĴ ¼
+ðŁ¦ ĭ
+âĺ Ĥ
+ðŁĺ °
+ðŁĻ ĥ
+ðŁĺ Ĵ
+Û İ
+Ï ķ
+Ḡ¤
+ë£ ½
+ìĬ ¥
+ðĿij ī
+É IJ
+ðŁį İ
+âķ ¯
+âķ ¹
+ຠ²
+ï¾ ł
+ë¹ ķ
+ïº Ĩ
+Ê º
+Ó §
+âĨ ł
+ëĥ ĩ
+ìİ Ī
+ìŁ ¤
+ï± ¢
+âķ ¬
+âĺ ł
+ðŁİ Ĭ
+ãį į
+ãİ İ
+âĺ °
+âľ ĥ
+ãħ ī
+ë¯ Ī
+ë¹ ¤
+ìı Ń
+ðĿij ¢
+ðŁIJ ¾
+Å ĭ
+ðŁij ¶
+âĶ Ľ
+ï¿ ¢
+áĥ ¡
+Ä ¼
+Å Ĩ
+Ñ IJ
+ìĥ Ľ
+ìĺ Į
+ì± ¤
+íħ ģ
+íļ ĥ
+ï³ Ĭ
+ðĿij Ķ
+ðŁĩ «
+âĭ °
+ðŁĺ ¨
+âĤ ©
+Õ ¬
+Ḡį
+á» ´
+âĨ ĺ
+âĺ ¯
+ãħ ı
+ìł ¬
+âĻ Ķ
+ðŁĶ Ķ
+ðŁĺ ł
+ðŁĻ Ĭ
+à® ľ
+á¹ ħ
+âĹ IJ
+âĿ Ī
+âŀ ½
+ìĥ ħ
+ðĿij ł
+Æ ¢
+âĭ Ļ
+ê° Ľ
+ëĿ µ
+ë£ Ł
+ìı ľ
+ïº ģ
+ðŁĴ Ń
+âĬ ĥ
+ðŁIJ °
+ãħ Į
+Ü ĵ
+âŀ ķ
+á½ ģ
+ìķ ³
+ðĿij Ŀ
+ðŁİ ¬
+É ¡
+ठĹ
+áIJ ī
+ì© ľ
+ì¶ §
+ï³ ī
+ï» ħ
+ðĿIJ ŀ
+ठ¶
+ðŁĵ ¢
+ðŁį ĭ
+ðŁĴ ħ
+ï¾ ķ
+⬠Ĩ
+âĪ µ
+ðŁ¤ ij
+áĥ £
+Æ Ħ
+Ñ ¹
+á¼ Ķ
+ê° ł
+ê´ Į
+ê· IJ
+ëĽ ´
+ì± ĺ
+ï® Ń
+ïº ¹
+ïº ¾
+âľ Ĺ
+âĿ ¦
+ðŁij ¦
+áĥ Ĺ
+Ù ²
+á½ ´
+âĪ ı
+âľ ®
+ê¹ °
+ë² µ
+ìĦ Ģ
+ì© Ŀ
+ïº ŀ
+ïº ½
+ðŁĩ Ń
+Ë Ĥ
+ðŁį ij
+ðŁį Į
+ðŁĶ »
+ê¹ ¬
+ìĬ Ń
+ìľ ·
+ðŁĽ ij
+Ç §
+ë¼ Ľ
+ïº ¡
+ïº º
+ðĿij ļ
+ðŁĵ ¦
+ðŁĶ İ
+ðŁĹ ĵ
+áĥ Ķ
+âľ Ĵ
+âľ ¡
+ðŁĮ µ
+âĶ ķ
+ëĢ Ŀ
+ðŁį Ĭ
+âĺ ĥ
+ìĺ ħ
+ঠ¬
+ðŁ¦ ģ
+âİ ¯
+ðŁIJ ķ
+Ñ ¿
+ॠ¤
+༠ĭ
+ê· Ī
+ì« Į
+ðŁĩ °
+âĿ ī
+ì« Ģ
+íĿ Ħ
+ðĿIJ ¢
+ðŁļ ¨
+âĻ ¤
+ðŁĺ ©
+ðŁį į
+ðŁĺ ij
+ðŁļ ļ
+Ö Ħ
+ë «
+ë« ¼
+ठı
+á¿ ·
+âĮ ©
+âĺ IJ
+âŀ £
+ê¸ ±
+ê¼ ¿
+ëĦ Ŀ
+ìı ´
+ìļ ¤
+ì¿ ±
+íİ IJ
+ðŁĴ ¢
+ì´ IJ
+âĩ ij
+âĶ ĵ
+âģ ¾
+Ü Ŀ
+ðŁ į°
+â´ °
+Æ ı
+Ï Ł
+Ú º
+Û ĥ
+áĦ Ĵ
+âĪ Ł
+âĿ į
+ãĦ ²
+ìľ ħ
+ì¤ ı
+ðŁĩ ²
+êº Ħ
+ðŁİ ¤
+âľ £
+⸠Ŀ
+ï¸ µ
+ຠ§
+áĢ Ļ
+âķ ł
+Õ ¯
+âı ©
+ðĿij £
+ðŁĴ £
+Å ĺ
+ॠIJ
+âģ ĥ
+âĮ ĺ
+ê» Į
+ìĮ Ķ
+ðĿij ĺ
+ðŁ¤ ĵ
+Õ ¿
+ठŃ
+âĮ ļ
+âľ Ŀ
+ðŁIJ ¼
+Ë Į
+âķ ļ
+ï¦ Ĺ
+âĿ ķ
+âķ £
+ðŁIJ ±
+à® ¤
+Ñ ¾
+ठļ
+ठľ
+ìĪ Ħ
+ìļ ľ
+ðŁİ ®
+É Ĵ
+Ú ·
+ຠį
+âĨ µ
+â Īĺ
+âĿ Ĭ
+ë¿ į
+ìIJ Ī
+ìļ ĺ
+ì¯ §
+íĥ ¯
+ìĸ ı
+ï¸ °
+ðŁĩ ¯
+ðŁ§ ļ
+ðŁĺ µ
+ðŁĺ ·
+ðŁĮ ³
+ຠ¥
+Ä ī
+Ä ¥
+âľ ¶
+á¿ ¾
+âĬ ±
+âĺ ¾
+ê° ī
+ê¼ °
+ëº ij
+ðŁĶ Ĭ
+ðŁĸ IJ
+Å ¤
+Ò «
+à® ®
+âĮ Ī
+âĹ Ĺ
+ëĦ µ
+ëħ ľ
+ëľ ¹
+ðĿij ¥
+ðŁĴ ¿
+ðŁĽ Ĵ
+Ê Ĵ
+áŀ ĵ
+ðŁIJ Ŀ
+ðŁ¦ Ħ
+ðŁį ·
+âĺ Ł
+ï¸ ¶
+ðŁ¤ Ł
+Ô ±
+âĨ ²
+âĪ İ
+âľ «
+ëĩ ½
+ëı IJ
+ëķ Ħ
+ï¦ ³
+ï§ Ŀ
+ïº Ļ
+ðŁij »
+ðŁĵ º
+êµ ¼
+ìĮ ©
+ðŁĮ ²
+È ±
+íĶ ķ
+ðŁĺ ¤
+ãĮ ¢
+Ê Ķ
+ठ¡
+á¼ Ī
+ëİ ĥ
+ë© ±
+ë® Ī
+ðĿIJ «
+âĬ ķ
+ëĥ ł
+ë» ¬
+íĭ Ķ
+Õ ¤
+á¼ ±
+âľ ¥
+âĺ Ħ
+âĪ ¥
+âļ ķ
+ðŁij Ħ
+ðŁİ ħ
+ຠĻ
+âĶ ¬
+á½ µ
+Õ ¾
+Ö ģ
+âĹ Ķ
+ê¿ į
+ëĸ µ
+ë© İ
+ë® ´
+ìķ ´
+áĥ ľ
+á¼ ¡
+âĶ Ĭ
+âķ ®
+âĹ ¼
+ðŁį ¾
+ðŁĽ į
+ðŁij Ĺ
+ðŁ¤ ŀ
+âľ Ħ
+Õ Ģ
+ঠ²
+Ë ī
+⣠¨
+Ä ¯
+Ï Ĭ
+á´ ľ
+ë¹ ³
+ï³ ĭ
+ï¿ ł
+Ä ª
+âĤ ¸
+âľ ±
+ê» IJ
+ëĭ »
+ë§ ¸
+ìŀ ¿
+ì© ¨
+ì ŃIJ
+ì° ¿
+íħ Ł
+ðĿIJ §
+ðĿij ij
+ðŁĮ İ
+ðŁĵ ®
+ðŁķ Ķ
+âĹ Ļ
+âĹ »
+âŀ §
+ìŁ Ŀ
+âľ ¬
+ãĥ °
+âģ Ī
+â ĵĺ
+ðŁ ĴĮ
+ï¬ ĥ
+ຠĶ
+ìĶ °
+ðŁĺ ª
+× Ģ
+ìĥ ¨
+ïŃ ĭ
+ðŁį ķ
+ðŁĺ ´
+Ï ³
+á¼ Ħ
+á½ ħ
+âĩ ¢
+âķ Ń
+ìĺ »
+íĬ ¤
+Ü ĺ
+⤠´
+âĹ į
+áŀ Ł
+ðŁį º
+áŀ ļ
+ðŁı Ĭ
+ðŁIJ ·
+Ê Į
+á½ º
+âģ »
+ê½ Į
+ëĪ Ĺ
+ë Ĺı
+ì¿ °
+íĢ ¼
+íį ħ
+ï· ²
+ðŁĮ ı
+ðŁį «
+ðŁį ³
+ðŁİ °
+ðŁij °
+ðŁĴ ²
+ᥠĻ
+ðŁIJ Ł
+ï¿ ¡
+ðŁĹ £
+ðŁį ľ
+âľ ²
+ãİ ¢
+ðŁĶ °
+á¼ ¸
+á½ ij
+Ä İ
+áĦ Ģ
+âĻ ķ
+ëł Ŀ
+ìĪ ´
+ïŃ Ń
+Ó ľ
+Ô Ģ
+ëĢ ľ
+ëĥ Ķ
+ìĬ Ľ
+ì« ij
+ìº ¥
+ìº ¬
+ðĿij ¦
+ðŁĶ ¶
+ì¾ ¨
+ðĿIJ ļ
+ðŁį »
+ðŁĴ į
+ðŁ¤ ¡
+ðŁķ Ĭ
+â½ ĩ
+âĵ IJ
+ðŁį Ń
+ðŁį ª
+ðŁĶ Ĩ
+Ò ¡
+á´ ĩ
+É Ĺ
+Ü Ķ
+âĦ İ
+âĿ ĥ
+ëĹ Ģ
+ï² Ķ
+ïº Ī
+ðĿIJ »
+ðŁĴ Ĭ
+ðŁļ «
+Ñ °
+Ñ ³
+ठ·
+âĹ ł
+ðŁij ¤
+ï¾ ĩ
+âĺ ĵ
+ðŁį µ
+ðŁ¤ ¨
+âĸ Ń
+à® ´
+Ü ¢
+Ü ¬
+à´ ®
+ðŁķ º
+Ô ¹
+Õ £
+à´ ¯
+á ´Ģ
+âĮ ī
+âľ IJ
+âŀ ¦
+ê¹ ½
+ëĮ ľ
+ðŁı ¥
+ðŁĵ ©
+Ò ¹
+Ó ĺ
+ठħ
+âĿ §
+Æ Ĺ
+âĹ ½
+ðŁij «
+ðŁİ §
+ðŁij £
+âľ »
+ðŁĻ ħ
+ðŁĺ ĸ
+ðŁĴ ®
+ຠ°
+ðŁĶ ľ
+ðŁį Ħ
+ðŁ¤ Ŀ
+á ĥĿ
+áŀ Ģ
+âĩ ¦
+Ê ¾
+Ò ®
+Õ ¼
+ठĨ
+âĹ ħ
+âļ ĵ
+âļ ĸ
+ê¿ ©
+ë¯ Ħ
+ìIJ IJ
+ìŀ °
+ì§ Ń
+íĭ ĭ
+íİ ¨
+íĻ §
+ï² ij
+ðŁİ Ĺ
+Ù ³
+ðŁij ¸
+ঠ®
+ðŁij ķ
+Ú µ
+âĢ ¾
+âŀ °
+ðŁij ¯
+ðŁİ ¼
+ðŁı ģ
+Ä º
+Ê ı
+Ú ³
+âı ±
+ê½ Ī
+ëĿ Į
+ìĮ ī
+ìĹ ·
+ìŀ ´
+íĹ ¹
+íľ ¨
+ðĿĹ ²
+ðŁĮ IJ
+ðŁİ Ļ
+ðŁı µ
+íĽ Ļ
+ðĿij ħ
+ðŁĺ ¶
+âĵ ħ
+âķ ¥
+ðŁį ı
+ï¦ İ
+Õ ©
+ðĿIJ Ħ
+Ó £
+Ú ¿
+âĻ ļ
+ðŁĶ Ĺ
+Ḡ«
+âĭ ®
+âĸ ¦
+⼠½
+âľ µ
+ãħ Ĩ
+ãħ Ĭ
+ëĦ Ļ
+ëĿ ¨
+ë¥ Ħ
+ìĦ ¦
+ì§ °
+ì§ ¹
+íī Ī
+ï§ ij
+ï» ĩ
+ðŁĮ ¾
+ðŁı ĸ
+ðŁIJ ij
+ðŁĴ ³
+ðŁĵ Ĩ
+Û ĩ
+Ü ķ
+á½ ½
+ëĦ ľ
+à´ ²
+à´ ³
+ຠŃ
+áĥ Ľ
+âĿ Ķ
+âij ħ
+áĥ ¥
+ðŁĵ ħ
+âŀ ³
+á´ µ
+ï¹ ¡
+ï¹ ¶
+Î Ĩ
+ठ¥
+áī µ
+âĿ Ļ
+âĿ ±
+ëī ł
+ëİ ł
+ëı Ľ
+ë¿ ħ
+ìĶ ¸
+íij ¯
+íŀ ī
+íŀ Ľ
+ï§ Ħ
+ïŃ ĺ
+ïº ¦
+ï» ¸
+ðĿij Ĥ
+ðĿij ı
+Ï ij
+Ú ł
+áĢ Ķ
+áŀ Ķ
+á¹ ¢
+ëĦ ¸
+ðĿIJ ¨
+ðŁĩ ´
+Õ °
+ðŁij ł
+ðŁį Ĩ
+ðŁı Ģ
+ðŁ ijIJ
+ðŁį ĩ
+ðŁIJ £
+áĪ Ń
+Ü ª
+ðŁ ĮĢ
+áŀ ĺ
+âĩ Ħ
+ðĿIJ Ģ
+Ê Ļ
+âĶ ¼
+ðŁı ¿
+Æ ·
+È ł
+Ñ ½
+âĤ ¨
+ê´ Ń
+ê¹ »
+ëĶ ¨
+ìĪ Ģ
+ì¾ °
+íĨ Ī
+ï® §
+ï¯ ½
+ðŁĶ ħ
+ðŁĶ ®
+Å ¢
+Ê °
+Ñ ¸
+ठ£
+âĬ Ĺ
+ëª Ħ
+ï¹ ·
+ïº ħ
+ðĿIJ µ
+ðŁĮ ¶
+ðŁĵ °
+ðŁĶ ·
+ðŁĸ Ĵ
+ðŁ¤ ²
+ëī ©
+ðŁİ Ĩ
+ðŁ§ IJ
+ðŁį ®
+âĨ º
+âĿ ¢
+ðŁij ª
+ðŁij ±
+âĨ ¡
+áŀ ı
+Ú ķ
+ðŁį ¹
+ðŁĴ Ģ
+Ë ®
+Ó ¨
+Ö ħ
+ठĩ
+âĤ ¡
+âĪ ķ
+âĺ ī
+ê¹ ¼
+ê¼ IJ
+ì½ ¸
+ðĿIJ ¬
+ðŁı ħ
+ðŁij Ļ
+ðŁĴ ī
+ðŁ¤ Ļ
+È ĺ
+É ³
+É ¹
+Ù º
+áĢ Ħ
+á¿ ³
+âļ ĺ
+âĿ Ĩ
+ëĨ ī
+ìĸ į
+ìĺ ĩ
+ì¥ ĺ
+íĸ ħ
+íĻ ij
+ï® Ĭ
+ï¿ Ń
+ðĿĴ IJ
+ðĿĹ ¢
+ðŁĶ ĸ
+ðŁĶ ¨
+ðŁļ ij
+ðŁļ ²
+Æ ¸
+âĹ ¥
+ðĿIJ Ń
+ðŁį ½
+âĹ ij
+âĵ ĩ
+ðŁĶ ±
+âľ ¼
+ï¹ ĥ
+âķ ±
+ãĢ Ĺ
+ðŁı ĭ
+ðŁļ ´
+ðĿIJ ®
+Ä ļ
+Õ ı
+Ä ¶
+áĥ ij
+á¹ ¬
+Ä Ī
+Ä Ĵ
+Ò °
+Ó ķ
+â IJ
+âIJ £
+âĹ ¢
+âļ Ļ
+ãħ Ĺ
+ê° ¬
+ê³ ª
+ê» Ģ
+ëĦ ´
+ëİ ģ
+ëĿ Ķ
+ë¬ ½
+ëŃ į
+ìĩ ³
+ì° ¹
+íĮ ¹
+íŀ Ŀ
+ï® ĭ
+ï ¶Ī
+ðĿĴ Ĥ
+ðŁ¥ Ģ
+ðŁ¦ ħ
+Ê ĺ
+á¼ ij
+âģ İ
+ðŁį ŀ
+âĨ ĸ
+âĨ Ļ
+ðŁİ ĥ
+âĦ ¡
+âĭ ±
+ðŁĶ į
+ಠ¨
+áµ ĥ
+âĶ «
+⦠¿
+ðŁĩ »
+Æ ¤
+Ò ı
+Ò ·
+Û ī
+à® ķ
+Ḡ³
+ï¬ ±
+ðŁĨ Ķ
+Ú Ń
+Û ¦
+áħ ¡
+âĦ ¹
+ê¿ İ
+ëķ Ķ
+ë¼ ī
+ìļ §
+ì² µ
+ì´ ¨
+íĬ Ī
+íĸ IJ
+ðĿĹ ĺ
+ðŁĩ ¿
+ðŁİ ĸ
+ðŁij ħ
+ðŁ ĵĺ
+ðŁļ Ļ
+ðŁĽ µ
+à¶ ½
+⼠µ
+ðĿIJ ³
+ðĿIJ ¸
+âļ Ķ
+ðŁij Ń
+Ó ij
+âĶ ¯
+ðŁħ ¿
+ðŁĺ ¹
+ï¿ «
+â¼ ¤
+ðŁĴ ĩ
+ðŁĵ İ
+ðŁĸ ĭ
+ঠ¸
+ðĿIJ į
+Ä ²
+Ï ĭ
+Ñ ¬
+Ú ¬
+Ü Ĵ
+á´ ¬
+ï¨ Ħ
+É £
+Ë ij
+Ï µ
+Ò Ŀ
+Û ¥
+Ü ł
+๠Ľ
+áĥ ķ
+áĬ ķ
+á¾ ¶
+âĤ ·
+âĩ ¾
+âķ ©
+âĸ IJ
+âĺ ª
+âĺ ®
+âĿ ļ
+âĿ Ń
+âŀ ±
+âµ İ
+ãı Ĭ
+ë© ĵ
+ìĹ ¾
+ìª Ħ
+íĵ Į
+íķ ¼
+ïŃ ¬
+ðĿij Ĩ
+ðĿij ŀ
+ðĿĸ Ĭ
+ðŁİ ¸
+ðŁı Ħ
+ðŁij µ
+ðŁĴ ł
+ðŁĶ ĺ
+ðŁ¥ Ĥ
+Å ª
+à· ĥ
+á´ ¼
+âĬ °
+ë³ ı
+ë´ £
+ï¥ ľ
+ðŁĵ Ī
+ðŁķ ¯
+ðŁ§ Ģ
+âĻ IJ
+ðŁĨ Ĺ
+ðŁĵ ķ
+ðŁ§ ģ
+Ü «
+âĿ IJ
+Õ ķ
+འķ
+âŀ Ŀ
+ঠķ
+ðĿIJ ¶
+É ¢
+Î Ħ
+áĨ ¢
+âĤ ±
+Õ į
+à¡ ķ
+á´ °
+Ḡ©
+⼠·
+âĿ ®
+ê¡ ĵ
+ëı ¤
+ëĹ IJ
+ëµ Į
+ìij Ī
+íı ¿
+íĹ µ
+ðĿIJ İ
+ðŁĨ ĺ
+ðŁı Ł
+É ¥
+Õ »
+à¡ Ķ
+ठĸ
+á´ ¸
+âİ Ļ
+âİ ¥
+âı ³
+ëģ ķ
+ëĬ ī
+ì¡ į
+ì¹ ¡
+ï¦ ¶
+ï¬ Ł
+ï® «
+ï® ¯
+ï± ĥ
+ï ·»
+ïº µ
+ðĿĹ Ķ
+ðĿĹ ¡
+ðŁİ ¨
+ðŁĶ Ĵ
+Ú Ľ
+ठ§
+âŀ ¹
+áĢ Ģ
+ðŁį ħ
+âĹ ¤
+ठł
+ðŁIJ ¥
+áĥ Ĵ
+ðŁı Ŀ
+ðŁį ¼
+ãĮ §
+âĿ Ľ
+ðŁIJ Ī
+ঠ¯
+áĢ ŀ
+ãĢ ĸ
+áŀ Ļ
+ঠª
+Õ Ĩ
+âĬ Ĩ
+âľ ¾
+ðŁIJ Ĺ
+ï¹ ¿
+Ä ¦
+Ü Ł
+ಠł
+ಠ¥
+áŀ ī
+á´ ¥
+á´ ©
+á½ Ģ
+á½ ¡
+âĨ ķ
+âŀ ¯
+ê¡ ij
+ëij £
+ë± Į
+ìĪ ij
+ìľ Ķ
+ìŀ ½
+ì¨ į
+ðĿij Ģ
+ðŁĮ Į
+ðŁį ¦
+ðŁį ©
+ðŁIJ ļ
+ðŁĵ Ĵ
+ðŁĵ ¹
+ðŁ¥ ij
+Ä ĭ
+Ë Ĺ
+Ñ «
+Õ ¢
+Ú °
+â ĮĢ
+âĹ Ĥ
+âĹ £
+âľ Ľ
+âĿ Ĵ
+âĿ ĺ
+âŀ Ļ
+âŀ ²
+ãİ į
+ê¡ IJ
+ëŀ ĸ
+ìĬ Ŀ
+ìĽ ¤
+ì¡ ĭ
+ì¨ °
+íĹ Ļ
+ï¥ ¸
+ï³ į
+ï» İ
+ðĿij ĵ
+ðŁĵ Ĭ
+ðŁļ ¼
+ï¦ ģ
+ðĿķ Ĵ
+ðŁ ijľ
+ðŁij ¿
+ðŁĩ ½
+à· Ħ
+âĸ ´
+ãį ī
+âĬ ĩ
+ðŁ§ ¸
+Ú ¡
+â¾ ĥ
+ðŁĹ »
+âĵ ij
+ðŁ¤ ¸
+ðŁ¤ ¯
+êĴ °
+ðĿIJ ĵ
+âĶ ´
+êĴ ±
+áĢ ĺ
+â ĽĦ
+ï¹ ¹
+Ó Ķ
+áĥ ±
+Ü ¡
+ß ŀ
+âĻ ı
+âľ ¸
+ìij ¨
+ðĿIJ Ŀ
+ðĿIJ ¥
+ðŁį ī
+ðŁij ¼
+ðŁ¥ Ŀ
+Æ Ķ
+Ý ¬
+ठ«
+ຠļ
+á´ ´
+á½ ĸ
+âĤ ¶
+âİ ¢
+âĿ ħ
+⣠«
+ãİ Ľ
+ë® ¨
+ëº Į
+ë¼ ĺ
+ìĨ Ŀ
+ìľ ³
+ìŀ Į
+ì£ Ĺ
+ìª ĺ
+ì» ¹
+ï· ¼
+ïº Ĥ
+ðĿIJ ´
+ðĿIJ ¼
+ðŁĮ ļ
+ðŁı «
+ðŁĴ ¤
+ðŁĴ ¶
+ðŁĴ ¼
+Ê ķ
+Ê ½
+â² Ł
+ãī ł
+ê¡ Ĵ
+ëľ Ģ
+ìĥ ¾
+ì¸ ¤
+ï¥ ģ
+ðĿļ Ĭ
+ðŁļ ĥ
+âŀ Ľ
+ìħ ´
+áĦ ĭ
+âĩ Ĺ
+ï§ ·
+âĺ ĸ
+ðŁIJ ¦
+⸠ľ
+ðŁĴ ´
+ðŁ¤ ļ
+ãĬ Ĺ
+âĮ Ľ
+áĪ Ľ
+༠º
+â½ ī
+ðŁı ¢
+âĵ ŀ
+âĺ ½
+ãĢ Ļ
+ðŁ¤ ®
+Å IJ
+áĥ ¬
+ðĿĹ »
+ðŁį ĸ
+Æ Ĭ
+Ê Ł
+ß ĭ
+ठĭ
+áµ Ķ
+á¿ ĥ
+âĦ ī
+âĮ ĭ
+âı ²
+âĵ Ī
+âĵ ¢
+âķ Ķ
+âļ ij
+âĿ ĭ
+âĿ İ
+â µľ
+âµ £
+ëĴ Ī
+ëľ ģ
+ë¶ ĩ
+ìį »
+ìĺ Ń
+ì§ ¢
+íĹ Ģ
+ï§ Ĭ
+ï ¬¸
+ï± ¡
+ðĿIJ º
+ðĿij §
+ðĿĺ ¦
+ðŁĵ ¥
+ðŁĺ Ł
+ðŁ¥ IJ
+Ä ĸ
+É ¨
+áĢ IJ
+áĥ ĵ
+Ạĵ
+á¼ ¶
+á½ Ħ
+âĤ ¤
+âĮ ľ
+âĮ Ł
+âİ ł
+⼠¸
+âµ į
+âµ ı
+âµ ĵ
+ãĢ ĺ
+ë ·¸
+íħ ¼
+ï¦ Į
+ïŃ Ħ
+ïŃ İ
+ðĿĻ ļ
+ðĿļ ĺ
+༠ĵ
+ëŃ ħ
+áIJ Ľ
+ãİ ¾
+ï¨ Ģ
+ðŁĹ ½
+âĻ ŀ
+Ë ĸ
+âĹ ŀ
+ðŁ¤ «
+ðŁĺ Ĺ
+ï½ ¦
+ðŁ¤ ¢
+âģ ĩ
+ãĢ µ
+ðŁį Ķ
+áĬ ł
+ðŁĺ ¼
+ðĿĹ ®
+ðŁIJ ³
+ðĿIJ ĭ
+ðŁĨ ļ
+ðŁĶ Ľ
+Ñ »
+Ü ¨
+à® ²
+âľ ŀ
+âµ Ļ
+êµ £
+ì¸ ¨
+ðĿ IJľ
+ðĿĺ °
+ðŁĶ ½
+Ç »
+Ç ¿
+Ê ĩ
+Î IJ
+Ð Ģ
+Ñ ¡
+Ñ ²
+Ò Ĵ
+Ù ¶
+ß ķ
+à¶ ±
+áIJ ģ
+âģ ŀ
+âĸ §
+⼠Ī
+âľ ľ
+âľ ¹
+⣠¹
+⤠ĩ
+ê² Ĭ
+ê¾ ľ
+ë¯ IJ
+ë³ IJ
+ìħ ©
+ìIJ ¬
+ìij ¹
+ï¤ Ķ
+ï¦ ļ
+ï¬ ł
+ïŃ Ķ
+ïº ¶
+ðĿĴ ı
+ðĿĸ Ĩ
+ðĿĹ ¶
+ðŁı Ĥ
+ðŁIJ ½
+ðŁĴ ©
+ðŁĵ ½
+ðŁĹ ¨
+ðŁĹ º
+ðŁĺ ¸
+ðŁ¥ §
+Å Ĺ
+Ê İ
+Ò Ļ
+× ²
+ठĪ
+á¼ ´
+á¿ ij
+âµ ī
+ãħ ĵ
+ì½ ´
+ðĿĸ ĵ
+ðŁĵ Ĺ
+ðŁĶ ª
+ðŁĸ į
+Ï Ĵ
+ðŁij ¬
+áĥ Ļ
+âĨ ¬
+âĶ ¤
+⼠¹
+âĻ Ł
+ðŁļ ¶
+ðŁij ¾
+âĪ ĭ
+ðŁIJ ¯
+༠İ
+âľ ·
+ï¨ Ļ
+âĶ »
+ðŁij ¹
+áĦ ī
+ຠª
+â¾ ı
+â½ ħ
+ãİ ĸ
+Ñ ´
+Õ ®
+Ú ¼
+áĢ ķ
+áĨ ¼
+ëŃ ı
+ðŁIJ ¸
+ðŁļ £
+Æ Ŀ
+Ô »
+áĥ ¢
+ðŁį ¯
+É ¦
+Õ ¦
+âĻ ĭ
+ï¬ «
+ðĿĹ ¦
+Ç ļ
+É ±
+ठī
+á´ Ħ
+âĻ ĵ
+⼠°
+⣠ª
+ëĥ ĺ
+ë¢ ¸
+ìĤ ij
+ï® Ķ
+ðĿķ ĸ
+ðĿĹ §
+ðŁĩ ¼
+ðŁĵ ĭ
+ðŁļ ľ
+ðŁ¥ ¤
+Ä ®
+Å ·
+ß Ĭ
+ॠ¥
+à® ª
+áŀ Ħ
+áµ Ģ
+Ḡħ
+á¼ ¢
+âĪ Ŀ
+âĬ ¹
+âĴ ¶
+âķ ´
+⼠±
+⼠³
+⼠º
+âŀ Ł
+ãı Ħ
+ê¸ Ķ
+ê¹ Ł
+ëĩ °
+ë¹ »
+ìĤ ¥
+ìĽ »
+ì° Ł
+íĥ °
+íĨ º
+íļ ½
+ï¤ ´
+ï¥ ¾
+ï³ Ŀ
+ðĿIJ ¦
+ðĿĴ ľ
+ðĿĴ Ł
+ðĿļ Ĺ
+ðŁİ Ń
+ðŁı ĵ
+ðŁı ³
+ðŁı º
+ðŁIJ į
+ðŁij ĥ
+ðŁĴ ı
+ðŁ¤ ĸ
+ðŁ¤ µ
+Õ ²
+âµ Ķ
+ëĺ ¬
+ï¦ £
+Ê Ĥ
+áĨ «
+áŀ ij
+ðĿĸ İ
+ðĿĹ ĸ
+áĦ ĥ
+âĩ ł
+áĢ ¡
+འĦ
+âŀ ¸
+ï¦ Ļ
+âĩ ļ
+ðŁIJ ¬
+ðŁIJ ¢
+â¾ Ĵ
+ðŁIJ ¤
+ðŁĶ «
+ãĢ ŀ
+ï¸ º
+ðŁĺ º
+â½ ´
+ðŁĨ ķ
+âģ ¿
+ðŁį ¨
+ಠķ
+ðŁļ ĺ
+áŀ ħ
+ঠħ
+áŀ ¢
+ਠľ
+â ļĮ
+ãĢ ½
+à· ´
+âĵ Ľ
+áĢ ľ
+ìĨ ¨
+Ë ©
+Ü Ĺ
+âĭ ¼
+ðŁĻ ī
+Å Ĭ
+É ĵ
+Ê ²
+Î °
+Ñ ¼
+Ô ¿
+à¡ IJ
+༠ľ
+འ¦
+á¶ ľ
+âĤ ²
+âĨ ¨
+âĬ ¥
+âķ §
+âĻ ľ
+ãĭ ¡
+ë´ ¬
+ë¶ ij
+ìī ¿
+ìİ ħ
+ìł ±
+ì° §
+ï² ¡
+ðĿĴ Ľ
+ðĿķ £
+ðĿĹ ľ
+ðŁį ²
+ðŁİ ©
+ðŁIJ IJ
+ðŁIJ ł
+ðŁij ½
+ðŁĴ ij
+ðŁĵ ľ
+ðŁķ µ
+ðŁ ļĮ
+ðŁĽ £
+Ê ĭ
+Ó ¯
+Ù ¸
+ß Ķ
+ß Ļ
+à¡ ĵ
+á´ į
+Ḡ¿
+âı º
+âĸ ¥
+ë¤ ½
+íľ ij
+ðĿIJ ¹
+ðĿĸ Ķ
+ðĿļ İ
+ðŁĵ Ħ
+ðŁ¦ ·
+Æ ĥ
+ঠŁ
+âĮ Ĥ
+âĺ Ń
+â² ļ
+ëĿ ķ
+ðŁİ £
+à® ĩ
+འĨ
+áħ µ
+áĹ ľ
+âĢ ½
+âĮ £
+âģ ½
+ðŁĵ ¬
+ðŁ¤ §
+âĩ ª
+â½ £
+âĹ Ł
+ï¨ Ĺ
+êĴ ª
+ðŁĽ Ģ
+Ç Ĥ
+ðŁ¥ ¶
+ðŁİ į
+ï¿ ©
+ðŁij Ĵ
+áµ Ī
+ï¸ ¿
+áħ ©
+â¾ ¦
+à° ¤
+á´ ĸ
+ਠ¬
+ຠĹ
+༠»
+Ñ º
+ਠª
+á´ ³
+ðĿIJ Ī
+à» Ģ
+á´ ¿
+âĤ į
+âĩ ¡
+⼠ª
+ðĿIJ Ĥ
+ðĿĴ ķ
+ðŁ IJľ
+Ê į
+Ñ ±
+འĥ
+ë® IJ
+ìĽ ¡
+ìľ ģ
+ðĿIJ ¿
+ðĿķ ł
+ðŁij Ľ
+Æ ª
+Ï º
+Ó ¬
+Ù ¿
+Ý £
+ઠī
+à® ¹
+འij
+áĨ ¯
+áµ ĩ
+âĩ ¥
+âı ª
+âĻ °
+âļ Ń
+âļ ¾
+ãħ Ħ
+êĢ °
+ê° Ĺ
+ê² ĭ
+ê² »
+ê¶ ľ
+ê¼ ĩ
+ê½ ¹
+ëĤ Ł
+ëħ Ī
+ëĭ ¢
+ë§ Ł
+ëª Ĩ
+ëµ Ģ
+ì½ ±
+íĩ ĺ
+íľ ľ
+ï§ ¾
+ï± µ
+ï² ¢
+ï² ¤
+ðĿĴ Ĭ
+ðĿĺ ¯
+ðŁį Ĺ
+ðŁı į
+ðŁIJ ĺ
+ðŁĵ ¡
+ðŁĶ ŀ
+ðŁ¤ ³
+ðŁ¥ ģ
+ðŁ¥ Ĺ
+ðŁ¦ Ĭ
+Ä µ
+Æ ¦
+Ç µ
+É ¯
+Î ı
+Õ Ħ
+Ü ¥
+འģ
+ᨠł
+âķ «
+ãİ ī
+ë· ´
+ìĨ İ
+ìİ Į
+ì£ µ
+íĽ ł
+ï§ ª
+ï³ ı
+ï» º
+ðĿij ģ
+ðĿij ĩ
+ðĿĴ Ĩ
+ðŁİ ł
+ðŁIJ Ķ
+ðŁij Ł
+Å ĸ
+ठĮ
+á¾ ½
+ê¦ Ĵ
+à® Ł
+á´ ±
+ðŁı °
+ðŁIJ ŀ
+འĢ
+áĢ ħ
+âĬ ¿
+ðŁIJ §
+ἠģ
+â¼ Ī
+âĶ ¿
+ðŁ¥ ´
+â¼ ¿
+ðŁ§ ľ
+ãħ ¿
+âĦ «
+ãĢ ³
+ãĬ Ļ
+â¼ Ģ
+ï ¦¬
+ðŁı ¬
+ðŁĵ »
+áĬ Ľ
+áĦ ħ
+ຠĬ
+ຠĽ
+áħ ³
+ðŁij ®
+à® ±
+âĺ ĩ
+ðĿIJ ı
+à´ µ
+à» ģ
+འı
+འ¢
+ᥠ±
+âĤ £
+ï¥ ¦
+ïŃ Ļ
+ï´ ©
+ï¹ Ĥ
+ðŁį £
+ðŁķ ¹
+Ï ĸ
+à¶ ¸
+ຠ¢
+áĭ Ń
+âİ Ŀ
+âĹ Ŀ
+âĻ Ī
+âĻ İ
+ê½ ¥
+ì³ Ķ
+ì¼ ij
+ï± °
+ðĿij ĥ
+ðŁĮ ª
+ðŁį ¡
+Å İ
+Ê ¦
+Ñ §
+Ó İ
+Ô ´
+Ú Ī
+ß ĵ
+ß §
+ठĶ
+áĪ «
+áĪ µ
+áĹ ©
+á´ ł
+á¼ ł
+âĢ Ĺ
+âģ ij
+âĦ ı
+âĸ ĩ
+â² £
+ãĦ ³
+ãī ®
+ê³ Ĺ
+ëĦ Ĵ
+ëĸ «
+ë¡ Ħ
+ë¹ °
+ë½ ģ
+ìĦ ģ
+ìĮ ĺ
+ìŁ Į
+ì³ ī
+ì¼ ķ
+ï¬ »
+ï³ İ
+ï¹ ¸
+ï¹ ¾
+ðĿIJ Ĩ
+ðĿij ·
+ðĿĽ ¼
+ðŁİ ı
+ðŁİ ŀ
+ðŁIJ Ļ
+ðŁij Ĥ
+ðŁĵ ģ
+ðŁĸ ±
+ðŁļ į
+ðŁļ §
+ðŁĽ ¡
+ðŁ¤ Ĵ
+ðŁ¥ ŀ
+ðŁ¥ ©
+ðŁ¦ Ģ
+ðŁ¦ ĸ
+Ë ¢
+Ü ļ
+à® µ
+áĢ ģ
+áī °
+âı Ń
+âĻ ¿
+ê³ ĺ
+ëı Ŀ
+ëķ ĥ
+ìħ Į
+ìĴ ¸
+ìĽ Ł
+íħ Ħ
+íľ «
+ï§ ĺ
+ï¿ ¬
+ðŁı ·
+ðŁĶ §
+ðŁ¥ Ī
+Æ ĸ
+áŀ ĩ
+áŀ ĸ
+âģ º
+âĹ ľ
+âŀ ©
+ê¦ Ń
+ëĻ ¤
+ïŃ ¼
+ðĿĻ ĸ
+ðĿĻ £
+ðĿĻ ¤
+ðŁĮ Ŀ
+ðŁĶ ij
+ðŁĽ ł
+ຠĩ
+âĺ £
+ãĦ ¨
+ðĿĸ Ĺ
+Ó ĵ
+âĨ £
+ðŁ¥ ī
+ðŁĮ ł
+ðŁĺ ½
+ãİ ł
+Å §
+ðŁIJ Ĵ
+ï§ IJ
+ðŁĺ ¿
+âĪ ¬
+ðŁIJ ®
+⣠±
+ಠ¡
+â¾ ¼
+à° ²
+Ë ¶
+âĸ ¿
+Õ Ī
+áŀ İ
+áħ ¥
+áŀ Ĺ
+Õ §
+ðŁ¤ IJ
+ðŁį ł
+ঠ¤
+à¶ º
+âĻ į
+ìĺ Ļ
+íĺ ĵ
+ï¹ º
+ðŁĽ ³
+Å ī
+á´ İ
+âı ľ
+âĶ ³
+ê¸ ·
+ì¡ Ķ
+ðĿĴ Ī
+ðĿĴ į
+ðĿĴ ¹
+ðĿĵ ĩ
+ðĿķ Ł
+ðĿĹ ¹
+ðŁĮ ħ
+ðŁı ´
+Ä Ķ
+Ä ¤
+Å µ
+Ç ¾
+Ï ŀ
+Ï ¶
+Ô ³
+Ü Ĩ
+ß ©
+à¡ Ĵ
+ठĺ
+à¶ ļ
+འĸ
+áģ Ĭ
+áĥ ŀ
+áĦ Ĥ
+áĭ «
+á´ º
+Ḡ£
+Ḡª
+á¹ Ĥ
+á¼ ·
+á¿ ĩ
+âĩ Į
+âı ¬
+âĻ Į
+â® Ł
+â´ »
+âµ Ł
+ê¦ ķ
+ê¦ ª
+ê¦ ®
+ê² Ħ
+ê¾ IJ
+ëĥ ij
+ëķ ĭ
+ë¡ ¸
+ë¬ Ģ
+ìĩ ¤
+ìĪ ©
+ìľ ķ
+ìŃ ĺ
+ì· °
+ì ·¸
+íľ Ģ
+ï¤ £
+ï§ į
+ï± Ħ
+ï³ ij
+ðĿIJ ¤
+ðĿĴ ĵ
+ðĿĴ ¶
+ðĿĹ ¼
+ðĿĻ Ĭ
+ðŁĩ ¾
+ðŁĮ Ľ
+ðŁĮ ®
+ðŁİ ĩ
+ðŁİ ²
+ðŁı Ľ
+ðŁij ¥
+ðŁij ´
+ðŁĴ Ĩ
+ðŁĵ Ĥ
+ðŁĵ §
+ðŁķ IJ
+ðŁĸ ķ
+ðŁĺ §
+ðŁĻ Ģ
+ðŁļ Ĵ
+ðŁĽ «
+ðŁ¤ ł
+ðŁ¥ ļ
+ðŁ¥ Ľ
+ðŁ¥ £
+Ç ¯
+È §
+Î Ĭ
+Ò ²
+× °
+Û ij
+áĥ ©
+áĦ Į
+áĪ į
+áī ¥
+áı Ĥ
+âģ ±
+âĬ ¢
+âĹ ĵ
+âĿ °
+ë¿ ¡
+ìĽ ©
+íģ Ń
+íĨ ³
+íĬ Ħ
+íĵ ¸
+ï¥ £
+ï¥ ´
+ï± IJ
+ï± ¯
+ï³ ļ
+ðĿĸ ĺ
+ðĿĺ Ģ
+ðŁIJ Ĭ
+ðŁIJ Į
+ðŁij ļ
+ðŁĵ ĥ
+ðŁļ Ľ
+ðŁļ ª
+ðŁ¤ °
+Ä ´
+áĥ ®
+áĹ ¨
+âĻ ®
+â² ŀ
+ãĪ Ķ
+ì ħį
+ãħ ĥ
+ï¥ ¡
+ຠ¡
+Õ İ
+Õ º
+⬠Ľ
+â½ ¤
+ðĿIJ ²
+âŀ µ
+áĢ Ľ
+âĶ ħ
+âĨ Ł
+â¼ Ĭ
+ðŁĮ ½
+ðŁļ ¿
+ï¦ Ĭ
+ãĦ £
+⼠©
+ï© Ľ
+ðŁį ±
+â¾ ¨
+à´ ¤
+áŀ ģ
+ຠŀ
+Ê ļ
+ðĿIJ Ĵ
+à´ ±
+áŀ ľ
+à® ©
+à° Ĺ
+à´ ļ
+âĩ £
+ï¦ ķ
+Õ ħ
+Æ ĺ
+âĤ ¦
+âĶ Ħ
+ï¦ Ł
+ï¦ «
+ðĿIJ ģ
+ðĿIJ ĥ
+ðŁį ¸
+ðŁIJ ²
+Å ¶
+É ĸ
+ß ĺ
+ภ¦
+འĶ
+áĨ ·
+âģ ķ
+âĵ Ĥ
+âĿ ľ
+ï¥ ¥
+ï¬ ®
+ðĿĹ Ŀ
+ðĿĹ ¿
+ðŁİ ¾
+ðŁĹ Ŀ
+ðŁ¦ Į
+Æ ħ
+Ç ª
+Ò Ĺ
+Ü Ľ
+ß ł
+à¡ ij
+áī £
+áĬ Ń
+á¹ ¡
+âŀ ¼
+âŀ ¾
+â´ ±
+ãī ¡
+ê³ ¯
+ë½ Ī
+ìĤ ĺ
+ìī ij
+ì «ĺ
+íĮ ĥ
+íĻ °
+ï¤ Ĺ
+ðŁĮ ¬
+ðŁĮ °
+ðŁį ¤
+Ä »
+Å ĩ
+Æ ¨
+É ķ
+Ò ¢
+Ò º
+Ö į
+× ±
+Ú ±
+Ú ½
+Û IJ
+ठĽ
+à· Ģ
+๠ļ
+ຠ«
+á´ ¹
+á ½Ķ
+á¾ ³
+âĤ Ĵ
+âĨ ´
+âĩ Ŀ
+âī ħ
+â Į¨
+âĵ ĵ
+âĸ ¢
+âļ ¬
+âŀ Ń
+â² Ĵ
+ãİ ¿
+ê¿ ´
+ëĪ ±
+ëį ¬
+ëİ IJ
+ëIJ «
+ëĶ «
+ë± ģ
+ìĥ ¥
+íĮ ¼
+ïŃ ĵ
+ï® ¥
+ï² °
+ðĿIJ ĩ
+ðĿIJ ij
+ðĿij Į
+ðĿĵ ª
+ðĿķ ļ
+ðĿĺ ª
+ðĿĺ ¼
+ðĿļ Ľ
+ðŁĩ ¶
+ðŁĮ Ħ
+ðŁĮ ķ
+ðŁĮ ¤
+ðŁĮ §
+ðŁį ¬
+ðŁİ ĭ
+ðŁİ »
+ðŁı ¨
+ðŁIJ ĩ
+ðŁij ĵ
+ðŁĵ IJ
+ðŁĵ Ļ
+ðŁĶ ¼
+ðŁķ Ĵ
+ðŁĸ ı
+ðŁĸ ¥
+ðŁ¤ ¬
+ðŁ¥ Ĭ
+ðŁ¥ Ĵ
+ß Į
+ຠĦ
+á¼ µ
+âķ ¡
+â² ¤
+â´ ¼
+âµ ¢
+ãĪ ¯
+ëĵ ¸
+ëŁ ĩ
+ëº į
+ðĿĻ §
+ðŁį Ī
+ðŁĶ ¬
+ðŁĸ Ĭ
+ðŁ¤ ¾
+Ë ¡
+Ü ©
+âĮ ¡
+âŃ ij
+â² ¦
+ë© ī
+ì¼ Ń
+ï¿ ¤
+ðĿĴ İ
+ðĿĹ ¥
+ðŁIJ µ
+ðŁķ ¶
+ðŁķ ¸
+ðŁ¤ ľ
+Õ ª
+áĪ ĭ
+ðŁ¥ µ
+ï° ģ
+áµ IJ
+âķ ĵ
+áĢ ĸ
+âĭ Ī
+É ŀ
+âŀ ®
+ॠ°
+ãĨ ģ
+ðŁĴ ±
+ðŁı Ń
+áĨ ¨
+ðŁį ļ
+ðŁ¦ IJ
+á´ »
+âĺ Į
+à´ ķ
+Õ ±
+áħ ®
+ðĿIJ Į
+Å ¦
+ຠķ
+âľ Ļ
+Ë ³
+Ô µ
+âķ Ĵ
+ðĿĹ Ĺ
+ðĿĹ ł
+Ú ļ
+ঠ§
+âĨ Ŀ
+âĻ ī
+ãĮ »
+ì¹ Ĭ
+ðĿĹ º
+ðŁ§ ĺ
+ì³ £
+ï¬ Ŀ
+ðŁij º
+Ç Ł
+Î Ī
+Î «
+Ñ ¥
+Ô ²
+Õ ¨
+Ü ¦
+ঠĨ
+ঠ¥
+áIJ ¢
+á¼ ģ
+á¼ ĺ
+á¼ ¦
+âĵ Ŀ
+ãĪ °
+ãİ Ĺ
+ê² ¡
+ë¨ Ģ
+ì£ Ķ
+ì´ ¤
+ìµ Ŀ
+ï§ ´
+ïŃ Ĭ
+ï² Ł
+ðĿIJ ·
+ðĿij ĭ
+ðĿĵ ī
+ðĿĺ µ
+ðŁĴ ·
+ðŁĽ ©
+ðŁ§ ¹
+Å Ķ
+Ê ŀ
+Ë ¥
+Î Į
+Ñ ©
+Ó IJ
+Ó ł
+Ú ij
+Ú Ĵ
+ß ¨
+ઠĪ
+áIJ ĥ
+á¹ ¯
+âĤ ĭ
+âĤ µ
+âĦ ħ
+âĦ ł
+âĪ £
+âī º
+âī »
+âĬ Ľ
+âĮ IJ
+âİ ĵ
+âĺ ¸
+âĻ Ĵ
+âļ Ĵ
+âľ ĩ
+âľ ł
+â´ ·
+âµ ĸ
+ãĦ ¸
+ãī ¢
+ãī °
+êĩ ´
+ê´ ¸
+êº ł
+ëĤ ı
+ëĤ ¢
+ëIJ Ģ
+ëº ´
+ìĥ ľ
+ìį ħ
+ì¤ «
+ì± ¦
+ìº ij
+ì¼ ģ
+ì¿ ³
+íĤ ģ
+íħ ¡
+íĴ Ĥ
+íĴ ī
+íľ Ħ
+ïŃ ª
+ï® ¬
+ï¯ ¦
+ï± ª
+ï² ı
+ï ´Ģ
+ï» Ĩ
+ï¿ ¦
+ðĿij Ĺ
+ðĿĸ Ļ
+ðŁĮ ¡
+ðŁį Ŀ
+ðŁį §
+ðŁİ «
+ðŁı ĺ
+ðŁı ª
+ðŁIJ ĭ
+ðŁIJ Ľ
+ðŁIJ º
+ðŁij ĸ
+ðŁij ŀ
+ðŁij ·
+ðŁĵ Ģ
+ðŁ ĶĦ
+ðŁĶ Į
+ðŁķ Ļ
+ðŁĻ į
+ðŁĻ İ
+ðŁ¦ į
+Ç °
+É Ł
+Ê Ĩ
+Ô ¼
+Ú ľ
+ঠ¡
+ঠ¶
+áĴ ĥ
+á¼ ©
+âĵ ķ
+â² Ī
+ê° °
+ê¹ ł
+êº ħ
+ëĦ ¹
+ë¯ ĵ
+íIJ Ī
+ï§ ¶
+ï® ij
+ï² ¨
+ðĿĴ ī
+ðĿĴ Ķ
+ðĿĹ ¨
+ðĿĻ ŀ
+ðĿļ Ĵ
+ðĿļ ķ
+ðŁIJ İ
+ðŁ¤ ķ
+ðŁ§ Ķ
+Ï °
+Ô Ŀ
+âĮ Ĭ
+âĴ ¾
+ãī £
+ïŃ ©
+ðĿļ ŀ
+Ê ij
+ঠ¦
+áĦ ĩ
+âī ĥ
+â² Ģ
+ìŁ İ
+ðĿij ¶
+ðĿĵ ²
+ðŁ İ·
+ðŁļ ¹
+ຠģ
+áł ł
+ãĦ ļ
+ðŁIJ ¿
+ἠļ
+âķ ³
+ðŁIJ Ń
+âĴ ¹
+ðĿĸ ļ
+âĻ ĸ
+ãĪ ²
+âĨ ¾
+áĦ Ĩ
+âķ Ľ
+ðŁ¤ į
+â½ ¥
+ðŁ Į¨
+âĪ ®
+ãĮ ĺ
+ãį ij
+ï¹ Ģ
+âĵ Ĺ
+âĬ Ħ
+ðŁı ¹
+Ë Ĵ
+ðŁ¤ ±
+ãı ľ
+ðŁİ Į
+ï¥ Ń
+ঠ£
+ðŁİ ¹
+ãĬ Ł
+à´ °
+ðĿIJ Ķ
+à´ ¨
+འļ
+âľ º
+Õ ·
+ðŁij ³
+ঠľ
+âĺ ĭ
+âĻ Ĭ
+ãĢ Ľ
+È ĭ
+à® °
+áĥ ¨
+âĦ ķ
+íij Ģ
+ðĿĵ ĥ
+ðŁ¦ Ķ
+Ä ¿
+Å Ģ
+Æ ³
+É ļ
+Ö ĥ
+Ü £
+ß Ł
+ঠŃ
+à§ ¡
+à¶ »
+ຠ£
+འĩ
+Ḡ¨
+á½ Ī
+â½ ¬
+ê¡ Ķ
+ì³ Ħ
+ï¨ ī
+ðĿIJ ¡
+ðĿĺ ¢
+ðŁį ¿
+ðŁİ Ł
+ðŁı ī
+ðŁĶ IJ
+ðŁļ ħ
+ðŁ¤ ½
+Æ į
+Ç «
+Ç ½
+È ļ
+Î ī
+Ó ¤
+Ó ª
+Õ Ĭ
+Ù ¼
+Ú ´
+ß Ŀ
+à¶ ľ
+á¼ ķ
+á¿ ¥
+âİ ŀ
+ãĢ ļ
+ãī ¤
+ê³ ¸
+ê· ģ
+ëĵ Ħ
+ëĵ ķ
+ì¨ Ķ
+ì± ¨
+ðĿIJ ¾
+ðĿij »
+ðĿĶ ¼
+ðĿķ Ŀ
+ðĿĺ Ń
+ðŁĨ Ļ
+ðŁĵ ¤
+ðŁĶ Ł
+ðŁĹ ¼
+Ä ľ
+Æ ģ
+Æ ¿
+Ç ³
+Ç ·
+É ĥ
+É ł
+Ê ī
+Ê §
+Ë ²
+Ï ´
+Õ ģ
+Õ ŀ
+Ö ĩ
+Û Ĥ
+Û ĵ
+ß Ĺ
+ß ¦
+ঠ¹
+à® ³
+à´ ¸
+à» Ĥ
+áĪ Ŀ
+áĪ ª
+áĭ µ
+áIJ Ĭ
+áĴ ª
+áļ ĸ
+áŀ Ľ
+á´ ¢
+áµ ı
+áµ Ń
+á¶ «
+Ḡı
+ẠĴ
+á¼ ¥
+á½ ķ
+á½ ¼
+âĤ Ĭ
+âĦ Ĥ
+âĦ ©
+âĩ ī
+âī £
+âĮ ł
+âİ Ł
+âı ®
+âķ ĺ
+âĹ ĸ
+âĺ ©
+âĻ ij
+âĻ ²
+âļ Ľ
+ãĦ Ł
+ãī ±
+ãİ ļ
+ê¡ ķ
+êª ĸ
+ê° ¹
+ê² Ĩ
+êµ Ħ
+ëĩ ¬
+ëĭ ¯
+ëı ł
+ëĴ ¬
+ëĸ Ī
+ëĸ ½
+ëĺ Ķ
+ëŀ ¸
+ë¸ ħ
+ë» ł
+ë¿ Ł
+ìĤ µ
+ìĬ ī
+ìľ °
+ìł ĭ
+ìł Ķ
+ì¥ ¡
+ìŃ Ŀ
+ì¼ ¬
+íĪ ĩ
+íī ľ
+íį Ħ
+íĽ ¾
+íĿ £
+ï¤ ©
+ï¤ ¯
+ï¦ ľ
+ï¦ §
+ï§ ľ
+ï¨ Ī
+ï¬ ª
+ï ¬´
+ïŃ ½
+ï® ī
+ï¯ ŀ
+ï° Ĵ
+ï± ĩ
+ï¿ Ħ
+ðĿIJ ħ
+ðĿij Ħ
+ðĿij º
+ðĿĴ Ĺ
+ðĿĵ ®
+ðĿķ Ľ
+ðĿķ ŀ
+ðĿĸ ij
+ðĿĺ ģ
+ðĿĺ Ĩ
+ðĿĺ ¶
+ðĿĻ ¢
+ðĿļ ľ
+ðŁĮ ĥ
+ðŁĮ ¦
+ðŁį Ł
+ðŁİ İ
+ðŁı Ļ
+ðŁIJ ©
+ðŁIJ «
+ðŁIJ ´
+ðŁij Ķ
+ðŁĵ ī
+ðŁĵ Ľ
+ðŁĶ ī
+ðŁĸ ¼
+ðŁĹ ĥ
+ðŁĹ ¯
+ðŁļ ĩ
+ðŁļ IJ
+ðŁļ µ
+ðŁ¤ ¶
+ðŁ¥ ĭ
+ðŁ¥ ĵ
+ðŁ¥ ®
+ðŁ¦ İ
+ðŁ¦ ł
+ðŁ§ Ĵ
+ðŁ§ ¨
+Æ IJ
+Ç į
+Ó Ģ
+Ô Ľ
+ಠ°
+à´ Ļ
+áĢ Ĵ
+ê² Ŀ
+ê¹ ¹
+ë© ¥
+ìĸ Ķ
+ï¤ ģ
+ï¤ ı
+ï¦ ī
+ï¦ ĵ
+ï§ ī
+ï² Ŀ
+ðĿĹ ŀ
+ðĿĹ ±
+ðŁĮ ĭ
+ðŁį ¶
+ঠļ
+ìķ ľ
+ðĿIJ ¯
+ðĿļ Ŀ
+à° ¨
+འĺ
+འł
+á¡ ¥
+á¾ °
+âģ į
+âĶ °
+⬠ľ
+ðĿIJ ł
+ðĿij ¯
+ðĿĹ Ľ
+ðĿĵ »
+ðĿĸ Ī
+âŀ »
+áŀ ł
+â¡ ±
+â» ij
+ðŁ§ µ
+ï¦ ¢
+ðŁij ĺ
+ãĤ Ķ
+â¼ Ł
+ãĬ ¤
+ï¦ Ŀ
+ãĮ ¦
+âĢ ¸
+ðŁĶ Ļ
+ã ¹
+ã¹ ¦
+ï¹ ħ
+ï© Į
+ãī ¨
+ï¸ ½
+âį ¥
+ðŁļ ī
+ðŁ¥ ľ
+âĵ ľ
+â» Ŀ
+ï¨ ľ
+ðŁĴ Ĵ
+áĦ ij
+â¾ ŀ
+ï¨ ģ
+à´ ª
+áĦ İ
+âŀ ´
+ঠ·
+áħ ¬
+áŀ §
+âĨ ¢
+âķ ¦
+âľ ij
+Ë ¬
+Õ IJ
+༠Ķ
+Ê ¤
+Ë ¨
+ठŀ
+à» ĥ
+༠ļ
+âĵ ¥
+âķ ľ
+ðŁIJ ĸ
+á¼ Ļ
+á¼ ¤
+ìĨ °
+È Ĥ
+Ê ±
+à® ļ
+áĥ §
+á´ ĭ
+á´ ®
+âĿ ¡
+âŀ ·
+ëĿ ¡
+ï§ ¢
+ï¯ ¡
+ðĿķ ķ
+ðŁħ °
+ðŁ¦ ¸
+Ç ¸
+Ó ŀ
+Ô ¶
+Ö Ĩ
+Ú ģ
+Û ĭ
+áİ ¥
+á¾ ¿
+âĶ Ń
+âĶ ®
+êĢ Ģ
+ê± ĺ
+ëIJ Ń
+ë½ Ħ
+ìĶ IJ
+ì¸ Į
+íģ ł
+íĻ ±
+ï¥ ī
+ï¨ ĸ
+ðĿij ´
+ðĿĸ Ĵ
+ðĿĺ ¨
+ðĿ ļĮ
+ðŁIJ ¡
+ðŁij ¢
+ðŁĵ Ķ
+Å ħ
+Æ İ
+È ©
+Ò ª
+Ô ĥ
+áĥ «
+Ḡĩ
+⼠Ł
+ê» Ń
+ë¨ Ħ
+ìŁ Ģ
+ì¤ ´
+íļ IJ
+ï¤ ³
+ðŁŁ ¢
+Æ §
+È ¼
+Ê Ŀ
+Ë Ħ
+Ë ħ
+Ë į
+Ë §
+Ò ¥
+Õ Ķ
+Ø ı
+Ø ¼
+ß IJ
+ß ľ
+ठĵ
+ঠĻ
+à® ĵ
+à¶ ´
+༠į
+༠Ĵ
+འ£
+áĢ Ĥ
+áĢ Ĭ
+áĦ Ħ
+á Īĺ
+áĭ Ĭ
+áĮ į
+áij ĭ
+áŀ Ĥ
+áł ¢
+á¡ Ŀ
+á´ ¦
+áµ į
+áµ ¨
+Ḡ¡
+Ḡ¯
+á¼ £
+âģ Ĥ
+âĦ ĺ
+âĦ ľ
+âĦ ³
+âĦ µ
+âĨ ¦
+âĩ Ĩ
+âĪ ·
+âĬ ļ
+âĮ «
+âĮ ¯
+âİ Ľ
+âİ ľ
+âİ ¤
+âİ ¦
+âİ ®
+âij ī
+âĶ ī
+âķ Ļ
+âĸ Ĥ
+âĹ Ń
+âĺ Ĭ
+âĺ į
+âĺ Ĵ
+âļ Ĩ
+⼠§
+⼠²
+âŀ ĺ
+⥠Ħ
+â´ ³
+â´ ½
+âµ Ī
+ãī ¯
+ãİ ij
+ã§ ¬
+êĻ ¬
+ê§ ģ
+ê³ ¬
+ê´ ŀ
+ê» ľ
+ëħ ĵ
+ëĭ ¼
+ëį ĸ
+ëĸ ±
+ëĿ °
+ë¡ ¹
+ë¢ ´
+ë£ Ģ
+ë¤ ł
+ë¨ ķ
+ëŃ ¥
+ìĦ ¶
+ìħ ¤
+ìĮ ķ
+ìį ª
+ìı ©
+ìĴ Ģ
+ìĶ ¯
+ìĿ Ķ
+ìĿ ľ
+ìł Ń
+ì§ ¦
+ì¨ ©
+ì² ¬
+ì³ ¥
+ì¼ ¯
+íĢ «
+íĢ Ń
+íĥ ¸
+íĵ ģ
+íķ ¬
+íĹ ¸
+íĽ ķ
+íľ Ń
+íĿ Ĺ
+ï¤ Į
+ï¤ ª
+ï§ ¿
+ï¬ Ħ
+ï¬ ħ
+ïŃ ij
+ïŃ «
+ïŃ º
+ï® Ĥ
+ï® ¢
+ï® ¨
+ï° İ
+ï° ł
+ï² £
+ï³ IJ
+ï³ Ĵ
+ï³ ĺ
+ï³ ľ
+ï¹ ¼
+ï¿ ¨
+ðĿIJ ©
+ðĿĴ ļ
+ðĿķ Ķ
+ðĿķ ¤
+ðĿĸ Į
+ðĿĹ £
+ðĿĹ °
+ðĿĹ ´
+ðĿĺ Ĥ
+ðĿĺ ¥
+ðĿĺ ®
+ðĿĺ ¸
+ðĿĻ Ģ
+ðĿĽ ¾
+ðĿľ ı
+ðŁĮ ģ
+ðŁĮ ľ
+ðŁĮ ¥
+ðŁĮ ¯
+ðŁį IJ
+ðŁİ Ĵ
+ðŁı Ķ
+ðŁı ķ
+ðŁı ®
+ðŁIJ Ĥ
+ðŁIJ ī
+ðŁIJ ¹
+ðŁĶ ķ
+ðŁĶ ļ
+ðŁķ ij
+ðŁķ £
+ðŁĹ ŀ
+ðŁĹ ¡
+ðŁĹ ¿
+ðŁļ Ĩ
+ðŁļ Ĭ
+ðŁļ ĵ
+ðŁļ ķ
+ðŁļ ¾
+ðŁĽ ģ
+ðŁĽ İ
+ðŁĽ ı
+ðŁ¤ ´
+ðŁ¥ ķ
+ðŁ¥ ĸ
+ðŁ¥ ł
+ðŁ¥ ¥
+ðŁ¦ Ĩ
+ðŁ¦ ī
+ðŁ¦ ļ
+ðŁ§ ij
+ðŁ§ ¥
+ðŁ§ ¿
+Å °
+Æ º
+É §
+ઠĩ
+à® £
+áĪ Ī
+áĬ ¤
+áĭ ®
+áĮ Ī
+áĮ µ
+ᥠ²
+âĵ Ł
+êĻ ³
+ê° Ĭ
+ëķ ģ
+ëķ ¨
+ìĬ ģ
+ï¦ µ
+ï¬ ²
+ðĿĸ į
+ðĿĺ Į
+ðĿĺ ³
+ðĿĻ ©
+ðŁį Ļ
+ðŁĸ ĸ
+áī ³
+áĭ ¨
+áĸ ĩ
+áŀ Į
+á¹ §
+âķ ª
+âŀ ļ
+â² ĺ
+ê ķ
+êķ ¥
+ï¤ ·
+ï® £
+ï¯ ł
+ðĿĴ ĸ
+ðĿķ ĺ
+ðĿĸ ĩ
+ðĿĹ Ł
+ðĿĹ ª
+ðĿĹ ¯
+ðĿĻ ł
+ðŁĵ ı
+ঠĹ
+âĴ »
+â² ł
+ðĿĵ µ
+Ê £
+à° ľ
+áĬ ¢
+áŀ IJ
+Ḡ·
+âĦ Ľ
+âĩ Ģ
+âĩ Ĭ
+êĴ ¦
+ê¦ ł
+ï® ¤
+ðŁį Ľ
+ðŁ¤ Ľ
+ᨠ¾
+âŀ º
+áķ ¯
+ἠı
+âĩ Ĥ
+âĶ ¹
+âĻ Ĺ
+ðŁĸ ¨
+ê¦ ı
+ઠ°
+áļ ¨
+ðŁ¤ ¥
+ðŁ§ ¢
+ãIJ Ĥ
+ãĦ ¥
+ðŁĸ Į
+â¼ Ĵ
+ãĬ §
+âį ©
+ðŁ¦ ij
+âĶ ·
+ï© IJ
+ï© ¡
+ðĵ Ī
+ðĵĪ Ĵ
+â» Ħ
+ï¨ Ĵ
+âĦ ª
+Ò §
+Ú Į
+âĢ ¶
+⺠ł
+â» ģ
+âĨ ¸
+áĦ IJ
+ãħ IJ
+à» Ħ
+áĹ ª
+âĨ ¼
+âĩ ĭ
+âĩ ĺ
+âĮ ij
+âĸ ©
+ðĿIJ Ĺ
+Ä Ĭ
+ঠī
+ìī ł
+É ¤
+ß į
+ß ı
+áµ Ĺ
+âĤ ¥
+âĵ ī
+âĶ ł
+âĶ ¨
+âķ Ħ
+ä ¤
+ä¤ Ģ
+ê» ¸
+ï® ģ
+ðĵ Ĥ
+ðĵĤ ĥ
+ðŁ¦ ķ
+Æ Ľ
+ঠĩ
+ãı ĺ
+ï® ¼
+Ú ĵ
+Ú Ŀ
+ঠĵ
+à¶ ¯
+á´ ħ
+á½ Ļ
+âģ ¼
+âĸ İ
+â¼ ©
+ä Ķ
+äĶ Ģ
+ë» ¡
+ìĽ ½
+íģ Ħ
+ï¥ ¼
+ï± ī
+ï¹ »
+ðĿĸ ĭ
+ðĿĻ Ī
+ðĿĻ ª
+ðĿ ϶
+ðŁIJ Ħ
+ðŁIJ Ĩ
+áİ ¢
+ḠĮ
+âĿ ´
+ðŁı ¸
+È Ŀ
+É ¸
+Î ħ
+Ï ľ
+Ó ¢
+Õ ¹
+à´ ħ
+ຠĪ
+áĭ °
+áij İ
+áł µ
+á¡ ł
+á´ ī
+Ḡµ
+á¿ ´
+âĵ £
+âĶ ¶
+â½ ¯
+ê² ¥
+ê¿ ĺ
+ëģ İ
+ëİ Ī
+ëĶ ¯
+ë² °
+ìĺ ¯
+ìĽ ¸
+ìŀ Ĺ
+ì§ ĺ
+ì¬ ¬
+ì· ¬
+íģ ħ
+íĵ Ķ
+íĽ Ŀ
+ï¤ ®
+ï¤ ¹
+ï¥ ²
+ï¯ ĸ
+ðĿĵ ħ
+ðĿĻ Ħ
+ðŁĵ ¶
+ðŁĹ Ĵ
+ðŁ¥ Ķ
+ðŁ¥ Ń
+Å ®
+Å ´
+Æ ī
+Æ «
+Ç ģ
+Ç £
+Ç º
+Ç ¼
+È į
+È ¯
+É ľ
+Ê ¬
+Ë ģ
+Ë ¤
+Ë µ
+Ï Ľ
+Ò ¤
+Ò ¬
+Ó ı
+Ó Ľ
+Ó ¡
+Ó ³
+Ô Į
+Ô ¬
+Õ ³
+Ù »
+Ú ī
+Ú §
+Ü ľ
+ß ª
+ठĿ
+ঠĽ
+ਠĨ
+ઠķ
+ઠ¡
+à® İ
+à° ¬
+ൠ»
+ൠ¼
+à¶ ł
+à¶ Ń
+à¶ ¶
+à· Ĩ
+༠½
+áĢ ļ
+áħ ¢
+áĨ ¸
+áĪ Ģ
+áĪ ķ
+áĪ °
+áī ¡
+áī ¤
+áĬ ¦
+áĬ «
+áĭ ĭ
+áĭ į
+áİ ¯
+áij Ń
+áķ Ĺ
+ᣠĽ
+ᥠĴ
+á© ī
+áŃ º
+á´ ¡
+áµ ĺ
+áµ Ľ
+á¶ ł
+Ḡģ
+Ḡĭ
+á¹ Ļ
+á¹ Ŀ
+á¹ ¦
+Ạħ
+á¼ Ĥ
+á½ ĥ
+á½ į
+á½ §
+á¾ ·
+âĢ µ
+âĤ İ
+âĦ Ŀ
+âħ Ģ
+âĨ ŀ
+âĨ §
+âĩ ħ
+âĪ ĥ
+âī ı
+âī ½
+âĬ ŀ
+âĬ ¡
+âĬ §
+â Ĭ¶
+âĭ Ħ
+âİ Ĵ
+âİ ¡
+âİ £
+âİ ª
+âı İ
+âĵ ĥ
+âĵ ĸ
+âĵ ¨
+âķ ĭ
+âķ ĸ
+âķ ¢
+âķ ²
+âĸ Ĩ
+âĸ Ĭ
+âĸ į
+âĸ ®
+âĺ ¡
+âĺ ¦
+âĺ ±
+âĺ ¿
+âĻ ĺ
+âĻ Ŀ
+âļ °
+⼠ij
+âŀ ª
+⤠Ŀ
+⤠¢
+⤠·
+â§ «
+⨠Ń
+⨠¯
+â± £
+â² İ
+âµ Ľ
+ãħ Ķ
+ãĪ ı
+ãī ²
+ãī ³
+ãĬ ij
+ãĭ Ľ
+ãİ IJ
+ê² ¤
+ê· ¿
+ê¹ ŀ
+ê» ¨
+ê¼ į
+ê¿ ¸
+ëĥ ¬
+ëĩ IJ
+ëĭ ł
+ëį ¯
+ëĹ Į
+ëĹ ij
+ë¥ Ģ
+ëª ĥ
+ëª ¯
+ë± ¡
+ë³ ĵ
+ë³ ½
+ë µľ
+ìĤ ³
+ìħ ¥
+ìĩ ½
+ìı ¨
+ìı ¸
+ìķ į
+ìĸ ĸ
+ìŁ ¨
+ì¢ ĥ
+ì¢ į
+ì¥ ij
+ì§ ¼
+ì© ĥ
+ì® ľ
+ì® ¸
+ì³ ij
+ì´ ¥
+ì¾ ĥ
+íħ ¦
+íĪ ¿
+íĵ ½
+íķ ³
+íĸ ı
+íĹ ł
+íĿ «
+ï¤ ĵ
+ï¤ ĺ
+ï¥ İ
+ï¥ ¶
+ï¦ ħ
+ï¦ ½
+ï§ ĩ
+ï¬ Ĩ
+ï¬ ³
+ï® ĩ
+ï® Ī
+ï® Ŀ
+ï® ©
+ï® ±
+ï¯ ĺ
+ï¯ Ļ
+ï¯ ¢
+ï¯ £
+ï¯ ¤
+ï¯ ¥
+ï± Ĥ
+ï² Ĩ
+ï² ª
+ï´ ¼
+ïº ī
+ïº Ĭ
+ïº ¥
+ðĿij ¨
+ðĿij ©
+ðĿij ²
+ðĿ ĴĮ
+ðĿĴ ª
+ðĿĴ ®
+ðĿĵ Ĥ
+ðĿĵ Ī
+ðĿĵ ¯
+ðĿĶ ¨
+ðĿķ Ģ
+ðĿķ Ĩ
+ðĿķ ¦
+ðĿķ §
+ðĿķ «
+ðĿķ ·
+ðĿĹ µ
+ðĿĹ ¸
+ðĿĺ Ħ
+ðĿĺ Ļ
+ðĿĺ ł
+ðĿĺ ¬
+ðĿĻ į
+ðĿĻ ij
+ðĿĻ ¡
+ðĿ ύ
+ðĿĻ ·
+ðĿļ į
+ðĿĽ ¿
+ðŁ ĥ
+ðŁĥ ı
+ðŁħ ĺ
+ðŁ ī
+ðŁī ij
+ðŁİ ¡
+ðŁİ ª
+ðŁİ ±
+ðŁİ ³
+ðŁİ º
+ðŁı İ
+ðŁı Ĺ
+ðŁı ļ
+ðŁı ŀ
+ðŁı ¦
+ðŁı §
+ðŁIJ ģ
+ðŁIJ ħ
+ðŁIJ ĵ
+ðŁĴ Ĥ
+ðŁĵ ij
+ðŁĵ ĵ
+ðŁĵ ¨
+ðŁĵ «
+ðŁĶ ĭ
+ðŁĶ Ń
+ðŁĶ ¯
+ðŁķ Ĺ
+ðŁļ Ĥ
+ðŁļ ¢
+ðŁļ ¦
+ðŁļ ¬
+ðŁĽ ĭ
+ðŁĽ Į
+ðŁĽ ¬
+ðŁĽ ¶
+ðŁŁ ¡
+ðŁ¥ ĺ
+ðŁ¥ Ł
+ðŁ¥ ¦
+ðŁ¦ ĩ
+ðŁ¦ Ī
+ðŁ§ Ĭ
+ðŁ§ Ĺ
+ðŁ§ ¤
+Ê ·
+Ë ¹
+á¹ ļ
+á½ ¥
+âĦ Ł
+ê² ¯
+ê» «
+ë° ·
+ìĥ Ĩ
+ìĽ Ŀ
+ì¨ ī
+ì« ı
+ï¯ ķ
+ðĿľ ĭ
+É ²
+Ò Ń
+Ó Ī
+འĽ
+áĭ ĵ
+áĻ Ń
+áł ©
+á¹ ®
+âĦ Ĵ
+âĨ »
+âµ ĥ
+ëĢ ¨
+ëł §
+ìī ¥
+ìĮ ľ
+ìĹ ¶
+ì¨ Ī
+ìª ¾
+íı ½
+íļ Ķ
+íĽ µ
+ï¤ ¸
+ï¦ IJ
+ï§ Ĺ
+ï§ ļ
+ï¬ ¯
+ðĿIJ Ĭ
+ðĿķ Ĺ
+ðĿĹ ļ
+ðĿļ ĸ
+ðŁħ ´
+È ĥ
+É Ŀ
+Ï ±
+Ó Ĺ
+ठ¢
+áħ ł
+áī ¦
+áij Į
+áĴ ¼
+áŀ ¡
+áł ¨
+áł Ń
+ᨠħ
+ᨠĶ
+á´ ĺ
+á¶ ¦
+Ḡİ
+á¼ ħ
+á¼ ¹
+âĨ ¯
+âĵ İ
+ãı Į
+ê ī
+êī Ĥ
+ëĨ §
+ëĿ ±
+ì¢ ¡
+íĪ ½
+ï¤ ĩ
+ï¤ Ľ
+ðĿIJ ķ
+ðĿĵ ¸
+ðĿĵ ¼
+ðĿĹ ķ
+ðĿĺ Ī
+ðŁı £
+ðŁı ¤
+ðŁĹ Ħ
+Ñ ·
+Ò ł
+áµ ĸ
+á¼ ¨
+ë¬ Ħ
+ï° ´
+âĪ ½
+Õ Ń
+Ú ¹
+ॠŁ
+áĢ Ĩ
+áŀ Ĵ
+ãĢ ¶
+ê¦ «
+ï¸ ĵ
+ðĿIJ Ľ
+ðĿĺ Ĺ
+ðŁı ľ
+ì« Ń
+ðŁ§ ŀ
+འĤ
+âĨ ¿
+âĩ ı
+âĵ ģ
+âĶ §
+âķ ģ
+âķ ¤
+ê¦ Ĺ
+ê¦ ¤
+ðŁı Ī
+áŀ ķ
+Ô ½
+ઠĹ
+ଠĨ
+âķ ķ
+ï½ ł
+â¼ ¦
+â¼ ¯
+â¾ ·
+âĶ ĸ
+ଠĵ
+âĺ Ĺ
+âį ĭ
+ï¨ Ŀ
+â¼ ¥
+ï¦ ª
+âĦ Ĭ
+ãĢ ´
+âį ¢
+ð¡ Ī
+ð¡Ī ½
+ï© ¨
+ãĢ »
+ãı ĥ
+ï¦ ¡
+ï¨ ĺ
+ðŁIJ ĥ
+ðŁĨ ĸ
+ðŁĹ ¾
+ãĦ ĩ
+Þ ĭ
+â¼ ¼
+ï¨ Ń
+Þ Ģ
+Þ Ħ
+Þ Ī
+Þ IJ
+âĮ Ħ
+â» ĺ
+ãŁ ¢
+á ħ§
+ðIJĮ ¿
+Ë »
+ಠĹ
+áĢ ĩ
+áŀ Ĭ
+âķ ĩ
+ãĩ ¼
+ãİ °
+Õ Ĵ
+Ü Ī
+ß ¥
+à¿ IJ
+áĢ Ł
+âĨ ¥
+âķ Į
+â½ Ģ
+â½ °
+â¾ Ĭ
+ä Ħ
+äĦ Ģ
+ðĵ IJ
+ðĵIJ į
+ðŁİ ¦
+âĤ ¯
+âĬ ĺ
+âĦ į
+Ê µ
+Ñ ¶
+Ú ĥ
+ঠĶ
+à´ ¦
+áİ ¶
+áĵ ķ
+á¹ ¨
+âĤ ł
+âĩ °
+âĹ Ĵ
+â¿ Ĭ
+ê· ±
+ì¹ ķ
+íĪ ©
+ïŃ Ģ
+ðĿĴ ¸
+ðĿĵ Ĭ
+ðĿĺ ©
+Ç ¦
+É «
+áĬ ¨
+È ¹
+Ê ¯
+Î ª
+Ú Ģ
+áĮ ¸
+áİ »
+áı ķ
+áı ´
+á² Ĥ
+á½ ¨
+âı Ŀ
+âĺ Ļ
+ëĥ ¨
+ëĦ ¼
+ëĪ Ļ
+ë£ ħ
+ìĶ ¼
+ìķ Ŀ
+ìļ ¬
+ìľ ±
+ï¥ Ĥ
+ï¦ ¹
+ï¬ ¹
+ïŃ ģ
+ï³ Ī
+ðĿĶ ħ
+ðĿĺ ¤
+ðĿĻ ı
+ðĿĻ Ļ
+ðŁķ ī
+ðŁ§ Ļ
+Ḡij
+ê´ ¼
+ëģ į
+ëĹ ´
+ëĿ ³
+ë° ŀ
+ë° ¢
+ëµ ĺ
+ìĤ Ķ
+ìĦ Ħ
+ì¼ ļ
+íĢ ł
+íĬ ±
+íĮ ĸ
+ï¤ ij
+ï¦ ´
+ï¦ ¸
+ï´ į
+ðĿĺ ·
+Ä ¬
+Å ¬
+Æ Ģ
+Æ ĭ
+Æ ľ
+Ç ij
+Ç ĺ
+Ç ŀ
+Ç ¥
+Ç ®
+É °
+É ¶
+É ·
+É ½
+Ê Ī
+Ê IJ
+Ë İ
+Ë Ł
+Ë ¦
+Ë ¯
+Ï IJ
+Ï ĵ
+Ï ¢
+Ï ¤
+Ï ª
+Ï Ń
+Ï ®
+Ï »
+Ñ ł
+Ñ Ń
+Ò ¨
+Ó Ŀ
+Ô ¡
+Ô ·
+Õ ī
+Õ ĵ
+Õ ĸ
+Õ ļ
+Õ Ŀ
+Ö İ
+Ø ¿
+Ú ħ
+Ú į
+Ú Ķ
+Û Ĭ
+Û ¾
+Ü Ļ
+Ý Ĵ
+Ý ĺ
+ß Ĵ
+ß ĸ
+ठĬ
+ठIJ
+ঠı
+ঠĸ
+à§ Ł
+ઠ®
+ઠ¹
+à® ħ
+à® Ĩ
+à° ¡
+à° °
+ಠļ
+ಠ®
+ಠ¯
+à´ Ł
+à´ ·
+ൠ¾
+à¶ ij
+à¶ ŀ
+༠¼
+འĵ
+áĢ ĵ
+áĤ ¦
+áĥ ĸ
+áĥ Ń
+áĥ ¯
+áħ ¨
+áħ ª
+áĨ °
+áĪ ģ
+áĪ İ
+áĪ ĵ
+áĪ ¥
+áĪ ²
+áĪ ´
+áĪ »
+áī ł
+áī ²
+áī ¶
+áĬ £
+áĬ ¥
+áĬ ª
+áĭ ĺ
+áĭ ²
+áĭ ¶
+áĮ £
+áį ¡
+áį £
+áİ ¬
+áİ ¾
+áIJ ¡
+áķ ķ
+áĸ ±
+áĹ IJ
+áĹ Ń
+áĺ ī
+áļ ±
+ἠŁ
+áŀ ¥
+ᣠĶ
+áł £
+áł ª
+áł °
+áł ´
+ᤠĸ
+ᥠ£
+á ®
+á® ł
+á ¯
+ᯠĻ
+á °
+á° į
+á´ Ĭ
+á´ ¾
+áµ ģ
+áµ İ
+áµ ŀ
+áµ ¤
+á¶ ħ
+á¶ ĺ
+á¶ Ł
+á¶ ¢
+á¶ ¤
+á¶ ±
+á¶ »
+Ḡī
+Ḡŀ
+Ḡº
+á¹ ĵ
+á¹ Ĺ
+á¹ ª
+ẠĬ
+Ạı
+ẠĽ
+á¼ ĥ
+á¼ Į
+á¼ ¿
+á½ Ĥ
+á½ ĵ
+á½ Ĺ
+á½ ¦
+á¾ ±
+á¾ ´
+á¿ ĺ
+á¿ Ł
+á¿ ¸
+âģ ĺ
+âĤ ij
+âĤ Ľ
+âĤ ¿
+âĦ ĩ
+âĦ ŀ
+âĦ ±
+âĩ Ł
+âĩ ²
+âĪ ¤
+âĪ ¶
+âī Ĥ
+âī ¾
+âĬ ¨
+âĬ ³
+âĬ ·
+âĭ Į
+âĭ ĺ
+âĮ ķ
+âĮ ¥
+âĮ µ
+âĮ º
+âį £
+âį ²
+âį µ
+âİ ĩ
+âı ĥ
+âı IJ
+âı ł
+âı ¤
+âı ¶
+âı ¸
+âı ¹
+âij Ĥ
+âĴ ·
+âĴ º
+âĵ ¡
+âĵ ¤
+âĶ ¾
+âĸ ĺ
+âĸ µ
+âĹ ª
+âĹ ·
+âĺ ¨
+âĺ «
+âĺ ²
+âĺ ³
+âĻ Ĩ
+âļ ¤
+âļ ¥
+⼠ĵ
+⼠´
+⼠¾
+âŀ «
+âŀ ¿
+⣠·
+⤠ij
+⤠«
+⤠¶
+⤠½
+â§ ª
+⨠Ģ
+â ©½
+⬠¡
+⬠¢
+⬠¤
+â² ĸ
+â² ª
+âµ Ģ
+⸠®
+⸠½
+ãĢ ł
+ãĢ ·
+ãĦ Į
+ãĦ ĺ
+ãħ ij
+ãĪ İ
+ãĪ IJ
+ãĬ ľ
+ãĮ ĵ
+ãĮ ł
+ãİ Ł
+ãİ ¤
+ãİ §
+㬠®
+ä Ī
+äĪ Ģ
+ä °
+ä° Ģ
+ê ħ
+êħ ī
+êĩ Ĺ
+ê Ī
+êĪ į
+ê§ Ĥ
+ê§ Ĭ
+êª Ģ
+ê² Ī
+ê² į
+ê³ Ģ
+êµ ł
+ê½ IJ
+ê¾ Ī
+ê¿ ±
+ëĥ ı
+ëĦ ij
+ëħ ¤
+ëĩ ¸
+ëĪ ¼
+ëī ħ
+ëĬ £
+ëĭ º
+ëį ŀ
+ëIJ Į
+ëķ ¸
+ëĺ ł
+ëĻ ĩ
+ëĻ Ī
+ëľ ½
+ëŀ Ķ
+ëł ľ
+ë£ IJ
+ë§ Ģ
+ë§ Ĭ
+ëª Ģ
+ë¬ Ń
+ë¯ ¾
+ë³ ľ
+ë´ Ĭ
+ëµ ī
+ë· ľ
+ë¸ Ģ
+ë¹ ĭ
+ìģ Ħ
+ìĤ £
+ìĤ »
+ìĦ µ
+ìħ Ĵ
+ìī Ī
+ìī Ķ
+ìĬ Į
+ìĬ Ļ
+ìIJ ´
+ìĵ º
+ìķ ļ
+ìķ º
+ìĸ ľ
+ìĹ ª
+ìĺ ľ
+ìĻ ¤
+ìļ Ľ
+ìļ º
+ìĿ ħ
+ìĿ ı
+ìĿ Ń
+ìĿ ¶
+ìł Ľ
+ì¡ Ī
+ì¢ ī
+ì¢ Ķ
+ì© ł
+ìŃ Į
+ì¯ ©
+ì´ £
+ì¸ ķ
+ì¹ Ł
+ì¾ ¡
+ì¿ Ļ
+íģ ĩ
+íģ ī
+íĩ Ģ
+íĪ ¶
+íĸ ij
+íĸ ¤
+íĹ ħ
+íľ ı
+íĿ Ŀ
+ï¤ Ĵ
+ï¤ ķ
+ï¤ ¬
+ï¥ ħ
+ï¥ ĩ
+ï¥ ı
+ï¥ ļ
+ï¥ Ł
+ï¦ Ħ
+ï¦ Ī
+ï¦ ¨
+ï¦ ©
+ï¦ ²
+ï§ ģ
+ï§ ĥ
+ï§ Ķ
+ï§ ł
+ï§ £
+ï§ ®
+ï ŃIJ
+ïŃ ĸ
+ïŃ ¦
+ïŃ ´
+ïŃ µ
+ïŃ ¶
+ïŃ ¸
+ï® Į
+ï® İ
+ï® ŀ
+ï® Ł
+ï® ¡
+ï® ª
+ï¯ Ķ
+ï¯ Ĺ
+ï¯ ļ
+ï¯ Ľ
+ï¯ Ŀ
+ï¯ Ł
+ï¯ §
+ï¯ ¨
+ï¯ «
+ï¯ ¯
+ï¯ °
+ï¯ ±
+ï¯ ²
+ï¯ ³
+ï¯ ´
+ï¯ µ
+ï¯ ¶
+ï° Ģ
+ï± ħ
+ï± Ķ
+ï± ´
+ï² ģ
+ï³ ķ
+ï· ½
+ï¸ ķ
+ï¸ ±
+ï¹ £
+ï¹ ½
+ï» į
+ï¾ ±
+ðĿIJ Ļ
+ðĿIJ ½
+ðĿij ¤
+ðĿij ®
+ðĿij µ
+ðĿĴ ĥ
+ðĿĴ Ħ
+ðĿĵ Ń
+ðĿĵ ·
+ðĿĶ ĸ
+ðĿĶ ŀ
+ðĿĶ ¢
+ðĿĶ ¦
+ðĿĶ ¬
+ðĿķ Ħ
+ðĿķ Ĭ
+ðĿķ İ
+ðĿķ Ļ
+ðĿķ ľ
+ðĿķ Ń
+ðĿķ ³
+ðĿķ ¸
+ðĿķ ¾
+ðĿ ĸī
+ðĿĸ ı
+ðĿĺ ĩ
+ðĿĺ ī
+ðĿĺ ĸ
+ðĿĺ Ľ
+ðĿĺ ŀ
+ðĿĺ «
+ðĿĺ ¾
+ðĿĻ ĩ
+ðĿĻ ī
+ðĿĻ ĭ
+ðĿĻ İ
+ðĿĻ ĺ
+ðĿĻ ¥
+ðĿļ ĥ
+ðĿļ IJ
+ðĿļ Ķ
+ðĿľ ĥ
+ðŁĦ ·
+ðŁħ Ŀ
+ðŁħ ¾
+ðŁĨ Ĥ
+ðŁĨ ĵ
+ðŁĮ Ĥ
+ðŁĮ Ĩ
+ðŁĮ ī
+ðŁĮ ij
+ðŁĮ ĺ
+ðŁĮ ©
+ðŁĮ «
+ðŁį ¢
+ðŁį ¥
+ðŁİ Ľ
+ðŁİ ¢
+ðŁİ ´
+ðŁij ¡
+ðŁĴ ¾
+ðŁĵ Ń
+ðŁĶ Ī
+ðŁĶ ¦
+ðŁĶ ²
+ðŁĶ ³
+ðŁķ ĵ
+ðŁķ ķ
+ðŁķ ĺ
+ðŁķ Ł
+ðŁķ ·
+ðŁĹ ³
+ðŁļ Ħ
+ðŁļ Ķ
+ðŁļ ĸ
+ðŁĽ IJ
+ðŁĽ ¤
+ðŁĽ ¸
+ðŁ ł
+ðŁł ³
+ðŁ¤ ¹
+ðŁ¥ ĥ
+ðŁ¥ ¨
+ðŁ¥ ª
+ðŁ¥ ¾
+ðŁ¦ ĥ
+ðŁ¦ Ĵ
+ðŁ¦ Ļ
+ðŁ¦ ¶
+ðŁ§ ł
+ðŁ§ ª
+ðŁ§ Ń
+ðŁ§ ²
+𣠷
+𣷠Ń
+ð¦ ĺ
+ð¦ĺ Ĵ
+Æ ij
+Ç Ļ
+È ®
+Ø ł
+Ú Ħ
+Ü Ģ
+ß ¢
+áī Ģ
+áĬ IJ
+áİ ł
+Ạŀ
+ëĪ ŀ
+ëķ Ł
+ë£ ģ
+ë¤ Ĺ
+ìĦ ¥
+ìħ ij
+ìĸ IJ
+ìĽ Ľ
+ì£ ķ
+íİ ı
+íĽ ĵ
+ï¥ º
+ï³ Ľ
+ï´ «
+ðĸ §
+ðĸ§ ·
+ðĿķ ģ
+ðŁIJ ª
+ðŁĴ Ī
+ðŁĵ ł
+ðŁķ Ľ
+ðŁķ ´
+Ñ Ŀ
+Ó Ĭ
+ॠ²
+ઠª
+áĥ ¤
+áį IJ
+á¶ °
+á¼ Ŀ
+á½ ©
+âĭ ĭ
+âĴ ½
+âĻ ¾
+â ½Ķ
+â¾ ¯
+ãĦ Ĵ
+ãħ ļ
+ëIJ į
+ë· ģ
+ìĭ Ģ
+ìļ Ŀ
+ì¥ °
+ìº ´
+íĭ ī
+íĿ ½
+ï¦ Ģ
+ï¦ ¿
+ï§ ħ
+ï§ ĵ
+ïŃ ¯
+ï® Ĩ
+ðIJ¤ ķ
+ðĿIJ Ł
+ðĿĴ ħ
+ðĿĵ ľ
+ðĿĶ °
+ðĿĶ »
+ðĿĺ į
+ðĿĻ ¯
+ðŁĦ ½
+ðŁħ Ĥ
+ðŁħ Ķ
+ðŁħ ½
+ðŁĵ ´
+ðŁ§ ĸ
+Ó Ĵ
+Ḡ²
+ëī ¼
+Ç ı
+È ĵ
+Ê ¸
+Õ Ĥ
+Û ħ
+ß ¡
+ß £
+à® ¯
+à° Ī
+ಠ¸
+ຠ®
+༠ķ
+áĢ İ
+áĨ ¡
+áIJ ĭ
+áIJ ķ
+áij ¯
+áŀ Ĩ
+ᨠķ
+á© Ī
+âģ ħ
+âĨ ļ
+âĶ İ
+âł ©
+â² Ĥ
+â² Ķ
+â² ¨
+ãĬ ļ
+íĵ ²
+ðĿij Ī
+ðĿij ¬
+ðĿij ¹
+ðĿĴ ¾
+ðĿĵ ±
+ðĿĵ ½
+ðĿķ ¯
+ðĿķ »
+ðĿĺ ½
+ðĿļ Ĩ
+ðŁĦ °
+ðŁIJ ¨
+Ò ķ
+ಠħ
+ï¨ Ĩ
+ðĿij °
+ðŁĦ ¸
+Ô İ
+Ø į
+Ù µ
+ಠ¶
+áĢ Ī
+áĺ Ĺ
+áł ¸
+á¡ ¡
+ᨠ²
+á© ģ
+á´ ·
+áµ §
+âķ ¨
+âļ ģ
+â¾ Ŀ
+ãĢ ¼
+ãĦ ı
+êĴ «
+ê¦ ¥
+ê¦ ©
+ê¦ ²
+ìĺ ¼
+íĵ IJ
+ðĵ ĩ
+ðĵĩ ¼
+ðĿķ ¿
+ðŁĽ ´
+ë¨ ľ
+ಠµ
+à´ İ
+༠Ģ
+âĩ ĸ
+ãĪ «
+âĵ Ģ
+áħ ´
+áļ ¾
+ἠŀ
+ἠ«
+ᥠ´
+âĨ Ľ
+âĨ ¶
+âĩ ¤
+âķ Ł
+âĺ ·
+âļ IJ
+ðŁ§ ´
+á¹ ³
+âĶ į
+âĶ Ĵ
+âĶ ©
+âĶ ¦
+â¾ µ
+ઠľ
+ઠ¤
+âĩ Ļ
+âĶ ±
+âķ Ģ
+â½ Ĭ
+ï½ Ł
+ଠ¡
+ðł ®
+ðł® ·
+âķ ĥ
+â° Ķ
+ãĬ ¦
+ðŁİ IJ
+ãĩ °
+â¼ Ŀ
+â¾ Ķ
+â½ Ĵ
+âł Ĵ
+ï¨ ¦
+ï© Ĵ
+ï¨ ²
+ï© ĸ
+ðĵı ¸
+ãĮ ĥ
+ðĸ ¤
+ðĸ¤ IJ
+ï¦ Ń
+âĬ ħ
+â¾ ³
+ä´ ¥
+ï© ķ
+ðŁĮ Ķ
+áŀ ĭ
+âļ į
+â¼ ĭ
+ãİ ĺ
+ðIJĮ ²
+É ©
+áİ ij
+âĨ ®
+âĩ ĥ
+âļ İ
+ãĩ ±
+ãĭ ©
+ãĮ ¶
+êĻ ª
+ëİ ¬
+ï¨ IJ
+ï¨ Ľ
+ï© Ĭ
+ï© į
+ðĵ ħ
+ðĵħ º
+Ï ¡
+È ij
+É Ĥ
+Ô ĵ
+ß İ
+à´ §
+áĢ ī
+áĢ ĭ
+áĢ ij
+áĢ ł
+áļ Ļ
+ᨠĦ
+ᨠ©
+ᨠ¹
+á© ĵ
+ᬠľ
+á´ Ļ
+áµ ij
+âĤ Ń
+âĨ °
+âľ ģ
+â½ IJ
+ãĭ ¯
+ãĮ ½
+íĨ ¢
+ï¤ ¿
+ðŁ Ĥ
+ðŁĤ »
+È Ĵ
+Í º
+Ô ¥
+Õ ij
+Ú ¶
+à§ İ
+à¶ ®
+ຠĸ
+ຠľ
+ຠ½
+áĥ »
+áħ ¯
+áĭ ŀ
+áĸ ķ
+á ´Ī
+á¶ Ĩ
+Ḡľ
+á¹ ¼
+á¿ ¨
+âĦ ĭ
+âĦ Ń
+âĪ ±
+âĮ ĵ
+âĶ ĩ
+âĶ ¢
+â± ®
+â² Ħ
+ãĩ ¾
+ãĪ ¬
+ë¸ ¡
+ìIJ ī
+íĻ Ľ
+ðĿķ ª
+Æ ¹
+Í ²
+Ó ģ
+Û ¼
+ঠ«
+áħ Ł
+áī Ĩ
+áį Ī
+Ạĸ
+á½ ī
+âĶ ¸
+â½ ©
+ê ľ
+êľ ¥
+êµ ħ
+ëĤ Ķ
+ëĦ ł
+ëĩ Ĺ
+ëĻ Ŀ
+ìļ ¯
+ìļ ·
+ìŁ Ľ
+ì· IJ
+íŁ ¬
+íŁ ®
+íŁ °
+ï¦ Ĩ
+ï¦ ±
+ï² ŀ
+ï³ ¤
+ï³ ¥
+ðIJĮ ¸
+ðĿĶ ı
+ðĿķ ®
+ðĿĺ £
+ঠĪ
+âı ı
+ãĦ ĸ
+ê² ĩ
+ëĸ ĺ
+ëľ ·
+ëŀ Ĵ
+ë¡ ĵ
+ë¢ ī
+ë£ ĥ
+ë§ ĭ
+ë² ĭ
+ìĤ ·
+ìĪ ķ
+ì Į¨
+ìĵ »
+ìĸ Ĭ
+ìĻ ¬
+ìĿ »
+ì¦ ģ
+ìµ ¤
+ì· ĥ
+íĢ ľ
+íħ ī
+íį ł
+íı ħ
+íij ±
+íķ ķ
+íĸ ł
+íĿ ķ
+Æ Ļ
+Æ ļ
+Æ ŀ
+Ç ĥ
+Ç Ĭ
+Ç ľ
+Ç ¤
+Ç Ń
+Ç ¹
+È Ģ
+È ģ
+È ħ
+È ī
+È Ĺ
+È Ł
+È ¤
+È ¥
+È ¨
+È µ
+È º
+È »
+É Į
+É ®
+Ê ħ
+Ê ¥
+Ê ¨
+Ë ĵ
+Ë Ķ
+Ë ł
+Ë £
+Ë ¸
+Í ´
+Ï Ĺ
+Ï ĺ
+Ï Ļ
+Ï ļ
+Ï Ŀ
+Ï ¨
+Ï ¬
+Ï ¾
+Ï ¿
+Ñ ª
+Ò Ģ
+Ò ľ
+Ò ¼
+Ò ½
+Ó Ĥ
+Ó ħ
+Ó ĩ
+Ó į
+Ó ĸ
+Ó Ł
+Ó «
+Ó ±
+Ô Ĩ
+Ô ĩ
+Ô º
+Õ ĭ
+Ö ī
+Ø Ī
+Ø Ĭ
+Ø ½
+Ø ¾
+Ù ·
+Ú Ĥ
+Ú Ĭ
+Ú ĸ
+Ú Ĺ
+Ú £
+Ú «
+Ú ¸
+Û Ģ
+Û į
+Û ½
+Ü ī
+Ü ¤
+Ý §
+Ý ´
+Þ ĥ
+Þ ¤
+Þ ¥
+ß ļ
+ß Ľ
+ß ¤
+àł į
+àł ĵ
+àł ³
+à¡ ¢
+ॠł
+à§ ł
+à§ º
+ਠĬ
+ਠIJ
+ਠ®
+ਠ¯
+ਠ°
+ਠ¸
+ઠĨ
+ઠ³
+ઠµ
+ઠ½
+ଠĮ
+ଠĺ
+ଠ½
+à® ĥ
+à® ¸
+à° Ĩ
+à° ķ
+à° ¦
+ಠĨ
+ಠĬ
+ಠĮ
+ಠIJ
+ಠĽ
+ಠ¤
+ಠ¦
+ಠª
+ಠ²
+ಠ¹
+à´ Ĩ
+à´ ı
+à´ Ĺ
+à´ «
+à´ ¹
+ൠº
+ൠ½
+à¶ ħ
+à¶ Ĭ
+à¶ Ķ
+à¶ §
+à¶ «
+à¶ °
+༠Ħ
+༠ħ
+༠Ĭ
+འĻ
+འ¡
+འ§
+à¿ Ģ
+à¿ Ļ
+áĢ Ŀ
+áĢ §
+áĢ ©
+áĢ ¿
+áģ µ
+áĤ ģ
+áĤ ½
+áĥ Ĥ
+áĥ ª
+áĦ Ĭ
+áĦ ¢
+áħ ¦
+áħ Ń
+áĨ ®
+áĨ ±
+áĨ »
+á ĩ
+áĩ Ĥ
+áĪ ħ
+áĪ ī
+áĪ Į
+áĪ IJ
+áĪ Ĵ
+áĪ Ļ
+áĪ ļ
+áĪ ľ
+áĪ ŀ
+áĪ ©
+áĪ ³
+áĪ º
+áĪ ½
+áī ħ
+áī ¢
+áī ±
+áī ´
+áĬ ĥ
+áĬ į
+áĬ ĸ
+áĬ ®
+áĬ ¸
+áĭ Ľ
+áĭ Ŀ
+áĭ ³
+áĮ ģ
+áĮ ħ
+áĮ ¥
+áĮ ¦
+á Į¨
+áį Ĭ
+áį į
+áį ķ
+áį ĸ
+áį ¢
+áį ¤
+áİ Ĵ
+áİ ª
+áı ģ
+áı IJ
+áı Ł
+áIJ Ĥ
+áIJ ĸ
+áIJ Ŀ
+áIJ ŀ
+áIJ Ł
+áIJ ł
+áij ĸ
+áĴ ĭ
+áĴ į
+áĴ ¡
+áĵ «
+áĶ ķ
+áķ ĭ
+áķ ij
+áķ Ļ
+áķ ļ
+áķ Ľ
+áķ ¤
+áķ ¦
+áķ ®
+áķ ¼
+áĸ ĵ
+áĹ Ĺ
+áĹ ¢
+áĹ ¯
+áĹ ·
+áĺ Ħ
+áĺ ij
+ἠĤ
+ἠĻ
+áŀ į
+áł Ĩ
+áł ¡
+áł ¦
+áł ®
+áł ¯
+áł ²
+áł ·
+á¡ į
+á¡ ŀ
+á¡ ¤
+á ¡´
+á¡ µ
+ᤠĵ
+ᥠĸ
+ᥠ°
+ᨠ¦
+ᨠ§
+ᨠ¨
+ᨠª
+ᨠ¬
+ᨠ¯
+ᨠ³
+ᨠµ
+á© ĥ
+ᬠķ
+áŃ £
+á ±
+á± ļ
+á² ł
+á´ ĵ
+á´ ¶
+áµ Ĥ
+áµ Į
+áµ ¥
+áµ ´
+á¶ ĩ
+ḠĪ
+Ḡł
+Ḡ§
+Ḡ´
+Ḡ¾
+á¹ Ģ
+á¹ ĸ
+á¹ Ł
+á¹ ł
+á¹ «
+á¹ ±
+á¹ ·
+á¹ ¿
+ẠĦ
+Ạį
+Ạij
+ẠĹ
+á¼ ī
+á¼ ĵ
+á¼ Ń
+á½ ĭ
+á½ Ĵ
+á½ ł
+á½ £
+á¾ Ħ
+á¾ ı
+á¾ ij
+á¾ Ĺ
+á¾ ¦
+á¾ §
+á¾ ¾
+á¿ Ħ
+á¿ ĵ
+á¿ ¡
+á¿ ¬
+âģ ļ
+âĤ Į
+âĦ ģ
+âĦ Ķ
+âĦ £
+âĦ §
+âĦ ¯
+âĦ °
+âĦ ´
+âħ ħ
+âĨ ľ
+âĨ «
+âĨ Ń
+âĨ ±
+âĨ ¹
+âĨ ½
+âĩ ĩ
+âĩ ľ
+âĩ µ
+âĪ ī
+âĪ Ĭ
+âĪ ĸ
+âĪ ľ
+âĪ ¾
+âī Ģ
+âī ĭ
+âī Į
+âī ĵ
+âī ľ
+âī ´
+âī ¿
+âĬ Ĭ
+âĬ ĭ
+âĬ Ķ
+âĬ ĸ
+âĬ £
+âĬ ¦
+âĭ İ
+âĭ ª
+âĭ ²
+âĮ ¦
+âĮ §
+âį º
+âİ Ī
+âİ ¨
+âİ ¬
+âİ ³
+âİ ¼
+âİ ¾
+âı Į
+âı ļ
+âı «
+âı ¯
+âı µ
+âĴ ľ
+âĴ Ŀ
+âĴ «
+âĵ Ħ
+âĵ Ĭ
+âĵ Ļ
+âĵ ©
+âĶ ij
+âĶ Ļ
+âĶ ļ
+âĶ ¥
+âķ ħ
+âķ ī
+âķ į
+âķ ı
+âķ ŀ
+âĸ ļ
+âĸ ¯
+âĹ ĥ
+âĹ ļ
+âĹ ¬
+âĹ ´
+âĺ Ī
+âĺ ¤
+âĺ ¥
+âĺ §
+âĺ ¬
+âĻ ģ
+âĻ ±
+âļ ĥ
+âļ Ħ
+âļ ħ
+âļ ı
+âļ ļ
+âļ ŀ
+âļ Ł
+âļ ±
+âļ ²
+âľ Ģ
+âľ Ł
+âľ ¢
+âĿ µ
+⣠¡
+⣠¦
+⣠§
+⣠³
+⣠¾
+⣠¿
+âł ĩ
+⤠Ħ
+⤠º
+⥠Ĥ
+⥠¹
+â§ ī
+â§ ¼
+â§ ½
+⨠į
+⬠Ĭ
+⬠Ł
+âŃ ŀ
+â® ŀ
+â® ³
+⯠Ī
+⯠ij
+â± ł
+â± ±
+â² Ń
+â´ ¹
+âµ ķ
+⸠¾
+â º«
+â¼ Ĩ
+â¼ ł
+â½ Ł
+â½ ¼
+â¾ Ľ
+â¾ §
+â¿ ĥ
+â¿ »
+ãĤ ķ
+ãĤ Ł
+ãĦ Ľ
+ãĦ ¡
+ãĦ ¶
+ãĦ º
+ãħ Ĵ
+ãħ Ł
+ãĨ Ģ
+ãĩ »
+ãĪ ij
+ãĪ Ń
+ãĪ ®
+ãĪ ³
+ãĪ ¹
+ãī ¥
+ãī ¦
+ãī ¹
+ãī ¿
+ãĬ ŀ
+ãĬ ¨
+ãĭ ij
+ãĭ ¥
+ãĭ ´
+ãĭ º
+ãİ Ħ
+ãİ ķ
+ãİ ¯
+ãı Ĥ
+ãı Ī
+ãı ĵ
+ãı ĸ
+ãı ±
+ãIJ ±
+ãŁ ģ
+ã ¢
+㢠¨
+ã ¨
+㨠³
+ã« ª
+ã« ´
+ã¶ ³
+㺠¾
+ä Ģ
+äĢ Ģ
+ä ĭ
+äĭ Į
+ä ĮĢ
+äIJ Ģ
+ä łĢ
+ä ł
+äł ¼
+ä §
+ä§ ŀ
+ä¨ °
+ä¨ º
+ä ´Ģ
+ä ·
+ä· ħ
+ä ·¸
+ê Ĥ
+êĤ «
+ê Į
+êĮ ¼
+ê į
+êį ²
+êĴ µ
+ê ĵ
+êĵ ½
+êĻ Ń
+êĿ Ľ
+êĿ ¥
+ê ŀ
+êŀ Ĭ
+ê¦ Ĩ
+ê¦ ĩ
+ê¦ Ł
+ê¦ ¨
+ê§ Ī
+ê ©
+ê© Ł
+êª ĭ
+êª ij
+êª ķ
+êª Ĺ
+êª ľ
+êª ®
+êª ±
+êª »
+êª ¼
+ê« Ģ
+ê« Ŀ
+ê° ĥ
+ê° ĺ
+ê± ľ
+ê² ĵ
+ê² ļ
+ê³ Ļ
+ê³ ¾
+ê´ Ĺ
+ê´ Ļ
+êµ Ľ
+ê¶ ĥ
+ê¶ ķ
+ê¶ ¨
+ê¸ ©
+ê¸ ¿
+ê ¹Ħ
+ê¹ Ĩ
+ê¹ ī
+ê¹ ĵ
+ê¹ ¢
+ê¹ £
+ê¹ ¸
+êº ³
+ê¿ ı
+ê¿ ķ
+ê¿ §
+ëĢ ©
+ëģ ħ
+ëĥ µ
+ëĦ ĸ
+ëĦ Ĺ
+ëĦ ¢
+ëħ Ĥ
+ëĨ IJ
+ëĩ ľ
+ëĪ ĭ
+ëĪ ļ
+ëī į
+ëī ¨
+ëĬ ļ
+ëĬ ¡
+ëĭ ľ
+ëĭ ª
+ëĮ ĺ
+ëĮ ¤
+ëĮ ¸
+ëİ Ł
+ëı ¨
+ëIJ Ħ
+ëIJ ı
+ëIJ ´
+ëIJ ¸
+ëij ģ
+ëij ¿
+ëĴ ¨
+ëĵ ·
+ëĶ ®
+ëĶ ²
+ëķ §
+ëĸ Ķ
+ëĸ ª
+ëĺ Ń
+ëļ Ģ
+ëļ ł
+ëĽ Ķ
+ëĽ ©
+ëľ ħ
+ëŀ ķ
+ëŀ °
+ëŁ IJ
+ëł ¡
+ë¡ ŀ
+ë¡ £
+ë¡ µ
+ë£ Ħ
+ë£ į
+ë¤ ³
+ë¦ į
+ë¦ ı
+ë¦ ³
+ë§ Ħ
+ë§ Ĩ
+ë§ į
+ë§ ľ
+ë§ «
+ë§ »
+ë¨ ®
+ë© Ĥ
+ë© Ń
+ëª ´
+ë¬ ľ
+ë¬ ł
+ë¬ «
+ë¬ ¾
+ëŃ ¬
+ë® ĺ
+ë® ¹
+ë¯ ķ
+ë¯ ľ
+ë° ¨
+ë° ª
+ë± Ķ
+ë² ĺ
+ë² Ľ
+ë² ±
+ë² ´
+ë´ ½
+ëµ ¤
+ëµ ¨
+ë· Ĺ
+ë· ĺ
+ë¸ ĵ
+ë¸ ľ
+ë¹ ª
+ëº ĥ
+ëº ĺ
+ëº µ
+ë» ´
+ë¼ IJ
+ë¾ Ķ
+ìģ Ń
+ìĤ ł
+ìĤ ®
+ìĥ ı
+ìĥ Ļ
+ìĦ º
+ìħ ¢
+ìĨ Ģ
+ìĨ ħ
+ìĨ ¤
+ìĨ ¦
+ìĨ ¬
+ìĩ ±
+ìĪ µ
+ìĭ ¨
+ìĭ ´
+ìĮ °
+ìį ľ
+ìİ Ĺ
+ìİ ĺ
+ìİ ¼
+ìij ī
+ìij Ŀ
+ìij »
+ìĴ Ķ
+ìĴ ¯
+ìĵ ©
+ìķ IJ
+ìķ ĸ
+ìĸ ł
+ìĸ ¾
+ìĹ ĥ
+ìĹ Ĺ
+ìĹ ľ
+ìĹ ¨
+ìĺ Ĥ
+ìĺ Ħ
+ìĺ ı
+ìĺ ¾
+ìĺ ¿
+ìľ §
+ìĿ IJ
+ìĿ ĸ
+ìĿ ·
+ìŀ į
+ìŀ ı
+ìŀ ¨
+ìŀ ª
+ìŀ ³
+ìł ¡
+ìł ´
+ìł ¹
+ì¡ Ģ
+ì¡ ª
+ì¡ µ
+ì¢ IJ
+ì¢ ¨
+ì£ Į
+ì£ Ļ
+ì£ ³
+ì¦ ij
+ì§ ¥
+ì§ ´
+ì§ ¾
+ì¨ ĵ
+ì¨ ķ
+ì© °
+ì© »
+ì© ¼
+ìª Ĺ
+ì¬ Ķ
+ì¬ ĺ
+ì® ®
+ì¯ ķ
+ì¯ ĺ
+ì° İ
+ì° ¯
+ì± ĥ
+ì± µ
+ì² §
+ì² ®
+ì² ¯
+ì³ ¬
+ì´ ĭ
+ì´ ¢
+ìµ ¥
+ì¶ £
+ì¸ Ī
+ì¸ Ļ
+ìº ¤
+ìº Ń
+ì» ½
+ì¼ Ļ
+ì½ ¬
+ì¾ Ģ
+ì¿ ħ
+ì¿ ½
+íĢ ħ
+íģ ¦
+íĤ ħ
+íĥ ¶
+íĥ ¹
+íĦ Ķ
+íħ £
+íĨ Ħ
+íĨ §
+íĨ ¹
+íĩ ¼
+íī ¤
+íĬ ½
+íĭ Ĥ
+íĭ ij
+íį Ī
+íį Ļ
+íį ¿
+íİ ¶
+íIJ Ŀ
+íĴ ľ
+íĵ Ŀ
+íĵ ª
+íĵ ±
+íĵ ·
+íĵ ¼
+íĶ Ļ
+íĶ ł
+íķ ļ
+íķ Ľ
+íķ ŀ
+íķ Ł
+íķ §
+íķ ¶
+íĸ Ĭ
+íĸ ĭ
+íĸ į
+íĸ Ķ
+íĸ ĺ
+íĸ ¡
+íĸ ¬
+íĹ £
+íĹ ¿
+íĺ ĸ
+íĺ Ń
+íļ °
+íĽ į
+íĽ ½
+íĿ Ł
+íĿ Ń
+íĿ ´
+íŀ ľ
+ï¤ ī
+ï¤ Ń
+ï¤ ²
+ï¤ µ
+ï¤ ¼
+ï¥ Ģ
+ï¥ ij
+ï¥ Ĵ
+ï¥ ķ
+ï¥ ĺ
+ï¥ Ļ
+ï¥ «
+ï¥ ¬
+ï¥ °
+ï ¥¿
+ï¦ ĭ
+ï¦ ı
+ï¦ Ķ
+ï¦ ĸ
+ï¦ ĺ
+ï¦ Ľ
+ï¦ ł
+ï¦ ®
+ï¦ ¯
+ï¦ º
+ï¦ »
+ï¦ ¾
+ï§ Ĩ
+ï§ ĸ
+ï§ Ľ
+ï§ ŀ
+ï§ Ł
+ï§ §
+ï§ ³
+ï§ º
+ï§ ½
+ï¨ ĥ
+ï¨ ļ
+ï¨ ¢
+ï© Ł
+ï¬ ¤
+ï¬ ¬
+ï¬ ¼
+ïŃ Ĵ
+ïŃ ķ
+ïŃ Ľ
+ïŃ Ŀ
+ïŃ ŀ
+ïŃ Ł
+ïŃ ¤
+ïŃ §
+ïŃ ¨
+ïŃ ®
+ïŃ °
+ïŃ ±
+ïŃ ·
+ïŃ ¹
+ïŃ »
+ï® Ģ
+ï® ĥ
+ï® Ħ
+ï® ħ
+ï® į
+ï® Ĵ
+ï® ĵ
+ï® ķ
+ï® ¦
+ï® ®
+ï® °
+ï¯ ĵ
+ï¯ ľ
+ï¯ ©
+ï¯ ª
+ï¯ ¬
+ï¯ Ń
+ï¯ ®
+ï¯ ·
+ï¯ ¹
+ï¯ »
+ï¯ ¼
+ï° ĥ
+ï° Į
+ï° IJ
+ï° ĺ
+ï° Ļ
+ï° ľ
+ï° ŀ
+ï° ¢
+ï° ®
+ï° °
+ï° ¼
+ï° ¿
+ï± Ģ
+ï± ģ
+ï± Ī
+ï± ĭ
+ï± ı
+ï± Ń
+ï² Ģ
+ï² ĩ
+ï² Ī
+ï² ĭ
+ï² İ
+ï² Ĵ
+ï² ľ
+ï² ł
+ï² ¬
+ï² »
+ï³ ĩ
+ï³ Ķ
+ï³ £
+ï³ «
+ï´ ĺ
+ï´ °
+ï´ ½
+ï ¶
+ï¶ °
+ï¸ ĸ
+ï¸ ´
+ï¸ ¹
+ï¹ į
+ï¹ Ĺ
+ï¹ ¢
+ï¹ ¤
+ï¹ ©
+ï¹ ±
+ï¾ °
+ï¿ Ĥ
+ï¿ ®
+ðIJĮ °
+ðIJĮ ¹
+ðIJĮ º
+ðIJĮ ½
+ðIJį Ĥ
+ðIJį ĥ
+ðIJį Ħ
+ðIJ İ
+ðIJİ ¹
+ðIJ¤ Ĥ
+ðIJ¤ į
+ðIJ¤ ı
+ðIJ¤ ĵ
+ðIJŃ ī
+ðIJŃ į
+ðIJ° ĩ
+ðIJ° °
+ðij Ĥ
+ðijĤ Ħ
+ðij ĺ
+ðijĺ ģ
+ðĴ Ģ
+ðĴĢ ¸
+ðĴ ģ
+ðĴģ º
+ðĴ Ħ
+ðĴĦ ·
+ðĴ Ĭ
+ðĴĬ ij
+ðĴ ĭ
+ðĴĭ Ĺ
+ð ĴĮ
+ðĴĮ ¨
+ðĵĥ ¢
+ðĵĥ °
+ðĸ ł
+ðĸł ļ
+ðĿĦ ĥ
+ðĿĦ ħ
+ðĿĦ ķ
+ðĿĦ Ļ
+ðĿĦ ±
+ðĿĦ ´
+ðĿĦ ¹
+ðĿħ İ
+ðĿħ ª
+ðĿĨ £
+ðĿĨ ³
+ðĿĨ ¹
+ðĿĩ Ĭ
+ðĿĩ Ĺ
+ðĿĩ ļ
+ðĿĩ ľ
+ðĿĩ ł
+ðĿIJ ī
+ðĿIJ ĸ
+ðĿIJ ĺ
+ðĿIJ £
+ðĿIJ ±
+ðĿij Ĭ
+ðĿij Ń
+ðĿij ¼
+ðĿij ½
+ðĿĴ °
+ðĿĴ ·
+ðĿĴ ¿
+ðĿĵ ģ
+ðĿĵ ĭ
+ðĿĵ İ
+ðĿĵ Ĵ
+ðĿ ĵĺ
+ðĿĵ ¢
+ðĿĵ ¦
+ðĿĵ «
+ðĿĵ ¿
+ðĿĶ İ
+ðĿĶ ±
+ðĿĶ ´
+ðĿĶ ·
+ðĿĶ ¸
+ðĿĶ ½
+ðĿķ Ĥ
+ðĿķ ĥ
+ðĿķ ĭ
+ðĿķ ı
+ðĿķ IJ
+ðĿķ ¥
+ðĿķ ´
+ðĿķ º
+ðĿĸ IJ
+ðĿĸ Ľ
+ðĿĸ Ŀ
+ðĿĸ ŀ
+ðĿĹ ©
+ðĿĹ ³
+ðĿĹ ½
+ðĿĺ Ĭ
+ðĿĺ ĭ
+ðĿĺ Ķ
+ðĿĺ ±
+ðĿĺ ´
+ðĿĺ ¿
+ðĿĻ Ĵ
+ðĿĻ Ŀ
+ðĿĻ Ł
+ðĿĻ ¬
+ðĿĻ Ń
+ðĿĻ »
+ðĿĻ ¾
+ðĿļ Ī
+ðĿļ ĭ
+ðĿļ ij
+ðĿļ Ł
+ðĿļ ł
+ðĿļ £
+ðĿĽ ½
+ðĿľ Ĥ
+ðĿľ Ķ
+ðĿľ Ļ
+ðŁ Ģ
+ðŁĢ Ħ
+ðŁĦ ²
+ðŁĦ ¶
+ðŁħ IJ
+ðŁħ ĸ
+ðŁħ ļ
+ðŁħ Ľ
+ðŁħ ¦
+ðŁħ ¶
+ðŁħ »
+ðŁħ ¼
+ðŁĨ ĥ
+ðŁĨ Ĩ
+ðŁĨ İ
+ðŁĪ ¯
+ðŁĪ ²
+ðŁĪ ¹
+ðŁĮ ĩ
+ðŁĮ ĵ
+ðŁį ĺ
+ðŁİ ij
+ðŁİ ¿
+ðŁı ı
+ðŁı Ĵ
+ðŁı ©
+ðŁı ¯
+ðŁIJ Ģ
+ðŁij Ŀ
+ðŁĴ ¹
+ðŁĴ º
+ðŁĵ Ł
+ðŁĵ ª
+ðŁĵ ¼
+ðŁĶ Ģ
+ðŁĶ Ĥ
+ðŁĶ ĥ
+ðŁĶ ĩ
+ðŁĶ ĵ
+ðŁĶ ¢
+ðŁĶ ¤
+ðŁĶ ©
+ðŁķ ĸ
+ðŁķ ļ
+ðŁķ ľ
+ðŁķ Ŀ
+ðŁķ ŀ
+ðŁķ ł
+ðŁķ ¢
+ðŁķ ³
+ðŁĸ ĩ
+ðŁĸ ij
+ðŁĸ ¶
+ðŁĹ ģ
+Ñ ¨
+Ú İ
+á¡ Į
+Ḡ°
+ẠĢ
+á¼ ®
+á½ Ŀ
+âĦ ¬
+âļ §
+⼠¤
+ã³ ¬
+êĻ ĭ
+ê¸ ij
+ëĶ ī
+ëĹ į
+ë¡ ij
+ë¯ ij
+ë» ħ
+ë¼ Ŀ
+ìĦ IJ
+ìī ¡
+ìĭ ²
+ìı ±
+ìĹ ¤
+ìĿ ©
+ìĿ ¿
+ìŁ Ļ
+ìł °
+ì¥ ī
+íĬ Ń
+íķ ®
+ï® ı
+ðŁħ ±
+ðŁĨ Ĵ
+ðŁķ ĭ
+É ĺ
+Ê ĵ
+Õ ĥ
+à´ ´
+འħ
+áĨ º
+áĪ Ĭ
+áĪ ¨
+áĪ ¾
+áī IJ
+áĮ ĥ
+áĮ ½
+áĶ Ń
+áł Ĥ
+áł ¬
+ᨠ¸
+á© ĭ
+á¶ ı
+á¾ Ķ
+á¿ IJ
+á¿ ļ
+âĻ Ļ
+âļ Ĥ
+âļ Ĺ
+â¡ ¢
+⤠¦
+ëĸ °
+ë¤ Ĥ
+ë§ ł
+ë± ĭ
+ë± IJ
+ìĽ ¢
+ìľ ¾
+ì³ ħ
+ì» ģ
+íģ »
+íĥ Ļ
+íĵ ĸ
+íĵ Ń
+íķ ±
+íĽ ľ
+ï¤ ħ
+ï¤ Ĩ
+ï¦ ĥ
+ï§ ©
+ï¨ Ĥ
+ðIJ¤ Ķ
+ðIJŃ ĵ
+ðIJ° ¼
+ðĿĵ ŀ
+ðĿĵ °
+ðĿĻ ľ
+ðĿļ ģ
+ðŁħ ¢
+ðŁı ĩ
+È ²
+Ê ¶
+Ô Ī
+Ô ij
+Ý ĵ
+Ý ¥
+ठij
+ॠ±
+ଠī
+à° ³
+à° µ
+ಠŁ
+áĢ ı
+áģ ¼
+áī ¨
+áĬ Ĵ
+áĭ ©
+áĮ Ħ
+áĮ Ķ
+áIJ §
+á ĴĮ
+áĶ ħ
+áĶ Ĭ
+áł Ħ
+ᨠģ
+Ḡĥ
+Ḡ»
+âĶ ŀ
+âĺ µ
+âļ £
+â² ¢
+ãĪ ª
+ä¶ µ
+ê² Ļ
+ê² ´
+ê³ Ĥ
+ë¡ ¼
+ìĨ Ĭ
+ì¼ ĩ
+íĭ į
+íĵ ¬
+íĵ ®
+íĵ ¶
+íĵ »
+ï¤ ¦
+ï¥ ł
+ï¥ ±
+ïŃ ²
+ðIJŃ Ĭ
+ðIJ ±ħ
+ðĸ ¥
+ðĸ¥ ¨
+ðĿij ³
+ðĿĵ ķ
+ðĿĵ ¬
+ðĿĵ ¹
+ðĿĵ ¾
+ðĿĶ ĵ
+ðĿķ į
+ðĿķ ¡
+ðĿķ ±
+ðĿĸ ĸ
+ðĿĺ ı
+ðĿĺ IJ
+ðĿĺ ļ
+ðĿĻ ®
+ðĿĻ °
+ðĿĻ ¸
+ðĿĻ º
+ðĿĻ ¼
+ðĿĻ ½
+ðĿĻ ¿
+ðĿļ Ħ
+ðĿļ ı
+ðŁħ ħ
+ðŁħ ĵ
+Æ Ī
+àł Į
+áĻ ³
+á ļĮ
+ἠħ
+ἠIJ
+ᤠĬ
+ḠĬ
+âĶ ½
+âķ Ĭ
+⼠ĩ
+⼠ı
+âĿ ª
+âĿ «
+⣠°
+ãĦ į
+ãĦ ĵ
+ãĦ §
+ãħ ĸ
+ãī «
+ê¦ Ķ
+ï± Ĭ
+ຠĤ
+áħ £
+ᥠĶ
+ᥠ¤
+âĨ ¤
+âĨ ·
+âĩ ŀ
+âĸ ¤
+âŀ ¶
+ãĪ ¼
+ï¨ ·
+ðĵı §
+âĶ ²
+âĢ ´
+âĴ Ł
+âĴ ¡
+â° Ĥ
+â° į
+â° İ
+â° IJ
+â° ij
+â° Ł
+â° ł
+â° ¡
+â¼ Ń
+ãĬ ¥
+âĴ ł
+â½ º
+ãĩ º
+ãĩ ½
+ï¨ Ĭ
+áķ ·
+âį ¨
+⺠Ł
+â½ Ĺ
diff --git a/mllm/models/qwen2_5omni/python_src_code/model.safetensors.index.json b/mllm/models/qwen2_5omni/python_src_code/model.safetensors.index.json
new file mode 100644
index 000000000..e8728f00d
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/model.safetensors.index.json
@@ -0,0 +1,2455 @@
+{
+ "metadata": {
+ "total_size": 22366403936
+ },
+ "weight_map": {
+ "talker.codec_head.weight": "model-00005-of-00005.safetensors",
+ "talker.model.embed_tokens.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.0.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.1.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.10.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.11.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.12.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.13.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.14.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.15.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.16.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.17.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.18.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.18.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.18.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.18.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.18.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.18.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.18.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.18.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.18.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.18.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.18.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.18.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.19.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.self_attn.k_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.self_attn.q_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.self_attn.v_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.19.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.2.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.2.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.20.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.self_attn.k_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.self_attn.q_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.self_attn.v_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.20.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.self_attn.k_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.self_attn.q_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.self_attn.v_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.21.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.self_attn.k_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.self_attn.q_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.self_attn.v_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.22.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.input_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.mlp.down_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.mlp.gate_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.mlp.up_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.post_attention_layernorm.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.self_attn.k_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.self_attn.k_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.self_attn.o_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.self_attn.q_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.self_attn.q_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.self_attn.v_proj.bias": "model-00005-of-00005.safetensors",
+ "talker.model.layers.23.self_attn.v_proj.weight": "model-00005-of-00005.safetensors",
+ "talker.model.layers.3.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.3.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.4.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.5.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.6.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.7.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.8.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.model.layers.9.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "talker.model.norm.weight": "model-00005-of-00005.safetensors",
+ "talker.thinker_to_talker_proj.bias": "model-00004-of-00005.safetensors",
+ "talker.thinker_to_talker_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.audio_tower.audio_bos_eos_token.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.conv1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.conv1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.conv2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.conv2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.0.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.1.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.10.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.11.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.12.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.13.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.14.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.15.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.16.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.17.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.18.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.19.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.2.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.20.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.21.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.22.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.23.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.24.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.25.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.26.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.27.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.28.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.29.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.3.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.30.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.31.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.4.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.5.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.6.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.7.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.8.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.fc1.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.fc1.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.fc2.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.fc2.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.final_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.final_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.self_attn.out_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.self_attn.out_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.self_attn_layer_norm.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.layers.9.self_attn_layer_norm.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.ln_post.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.ln_post.weight": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.audio_tower.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.lm_head.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.embed_tokens.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.input_layernorm.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.self_attn.k_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.0.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.input_layernorm.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.self_attn.k_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.1.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.10.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.10.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.11.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.12.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.13.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.13.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.13.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.13.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.13.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.13.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.13.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.13.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.13.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.13.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.13.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.13.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.14.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.14.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.15.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.16.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.17.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.18.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.19.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.2.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.2.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.2.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.2.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.2.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.2.self_attn.k_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.2.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.2.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.2.self_attn.q_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.2.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.2.self_attn.v_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.2.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.model.layers.20.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.20.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.21.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.22.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.input_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.23.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.24.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.24.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.24.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.24.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.24.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.24.self_attn.k_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.24.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.24.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.24.self_attn.q_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.24.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.24.self_attn.v_proj.bias": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.24.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+ "thinker.model.layers.25.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.25.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.26.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.input_layernorm.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.mlp.down_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.mlp.gate_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.mlp.up_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.post_attention_layernorm.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.self_attn.k_proj.bias": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.self_attn.k_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.self_attn.o_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.self_attn.q_proj.bias": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.self_attn.q_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.self_attn.v_proj.bias": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.27.self_attn.v_proj.weight": "model-00004-of-00005.safetensors",
+ "thinker.model.layers.3.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.3.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.4.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.5.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.6.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.7.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.8.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.input_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.self_attn.k_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.self_attn.q_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.self_attn.v_proj.bias": "model-00002-of-00005.safetensors",
+ "thinker.model.layers.9.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+ "thinker.model.norm.weight": "model-00004-of-00005.safetensors",
+ "thinker.visual.blocks.0.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.attn.proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.attn.proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.mlp.down_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.mlp.gate_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.mlp.up_proj.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.norm1.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.norm2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.merger.ln_q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.merger.mlp.0.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.merger.mlp.0.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.merger.mlp.2.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.merger.mlp.2.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.patch_embed.proj.weight": "model-00001-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.activation_post.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.activation_post.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.conv_post.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.conv_pre.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.conv_pre.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.0.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.1.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.10.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.11.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.12.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.13.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.14.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.15.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.16.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.17.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.2.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.3.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.4.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.5.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.6.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.7.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.8.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.0.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.0.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.1.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.1.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.2.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.2.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.3.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.3.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.4.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.4.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.5.act.alpha": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.activations.5.act.beta": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs1.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs1.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs1.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs1.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs2.1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs2.1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs2.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.resblocks.9.convs2.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.0.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.0.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.1.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.1.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.2.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.2.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.3.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.3.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.4.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.4.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.5.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_bigvgan_model.ups.5.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.proj.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.proj.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.norm_out.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.norm_out.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.proj_out.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.proj_out.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.rotary_embed.inv_freq": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.text_embed.codec_embed.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.time_embed.time_mlp.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.time_embed.time_mlp.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.time_embed.time_mlp.2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.time_embed.time_mlp.2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn.to_k.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn.to_k.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn.to_out.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn.to_out.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn.to_q.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn.to_q.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn.to_v.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn.to_v.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn_norm.linear.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.attn_norm.linear.weight": "model-00005-of-00005.safetensors",
+ "thinker.visual.blocks.0.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.0.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.1.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.10.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.11.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.12.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.13.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.14.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.15.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.16.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.17.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.18.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.19.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.2.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.20.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.21.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.22.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.23.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.24.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.25.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.26.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.27.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.28.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.29.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.3.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.30.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.31.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.4.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.5.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.6.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.7.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.8.attn.v.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.attn.q.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.attn.k.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.attn.v.bias": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.attn.q.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.attn.k.weight": "model-00001-of-00005.safetensors",
+ "thinker.visual.blocks.9.attn.v.weight": "model-00001-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.asp.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.asp.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.asp.tdnn.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.asp.tdnn.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.0.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.0.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.res2net_block.blocks.0.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.res2net_block.blocks.0.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.se_block.conv1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.se_block.conv1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.se_block.conv2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.se_block.conv2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.tdnn1.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.tdnn1.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.tdnn2.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.1.tdnn2.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.res2net_block.blocks.0.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.res2net_block.blocks.0.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.se_block.conv1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.se_block.conv1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.se_block.conv2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.se_block.conv2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.tdnn1.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.tdnn1.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.tdnn2.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.2.tdnn2.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.res2net_block.blocks.0.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.res2net_block.blocks.0.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.se_block.conv1.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.se_block.conv1.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.se_block.conv2.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.se_block.conv2.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.tdnn1.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.tdnn1.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.tdnn2.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.blocks.3.tdnn2.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.fc.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.fc.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.mfa.conv.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.input_embed.spk_encoder.mfa.conv.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.0.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.1.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.10.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.11.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.12.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.13.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.14.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.15.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.16.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.17.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.18.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.19.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.2.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.20.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.21.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.3.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.4.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.5.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.6.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.7.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.8.ff.ff.3.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.ff.ff.0.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.ff.ff.0.weight": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.ff.ff.3.bias": "model-00005-of-00005.safetensors",
+ "token2wav.code2wav_dit_model.transformer_blocks.9.ff.ff.3.weight": "model-00005-of-00005.safetensors"
+ }
+}
\ No newline at end of file
diff --git a/mllm/models/qwen2_5omni/python_src_code/modeling_qwen2_5_omni.py b/mllm/models/qwen2_5omni/python_src_code/modeling_qwen2_5_omni.py
new file mode 100644
index 000000000..ff4ee4d26
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/modeling_qwen2_5_omni.py
@@ -0,0 +1,4126 @@
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# This file was automatically generated from src/transformers/models/qwen2_5_omni/modular_qwen2_5_omni.py.
+# Do NOT edit this file manually as any edits will be overwritten by the generation of
+# the file from the modular. If any change should be done, please apply the change to the
+# modular_qwen2_5_omni.py file directly. One of our CI enforces this.
+# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
+# coding=utf-8
+# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+from collections.abc import Callable
+from dataclasses import dataclass
+from typing import Any, Optional, Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch.nn import Parameter
+
+from ... import initialization as init
+from ...activations import ACT2FN
+from ...cache_utils import Cache, DynamicCache
+from ...generation import GenerationMixin
+from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
+from ...modeling_flash_attention_utils import FlashAttentionKwargs
+from ...modeling_layers import GradientCheckpointingLayer
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, ModelOutput
+from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from ...processing_utils import Unpack
+from ...utils import TransformersKwargs, auto_docstring, check_torch_load_is_safe, logging
+from ...utils.deprecation import deprecate_kwarg
+from ...utils.generic import maybe_autocast
+from ...utils.hub import cached_file
+from ..qwen2.modeling_qwen2 import Qwen2RMSNorm
+from .configuration_qwen2_5_omni import (
+ Qwen2_5OmniAudioEncoderConfig,
+ Qwen2_5OmniBigVGANConfig,
+ Qwen2_5OmniConfig,
+ Qwen2_5OmniDiTConfig,
+ Qwen2_5OmniTalkerConfig,
+ Qwen2_5OmniTextConfig,
+ Qwen2_5OmniThinkerConfig,
+ Qwen2_5OmniToken2WavConfig,
+ Qwen2_5OmniVisionEncoderConfig,
+)
+
+
+logger = logging.get_logger(__name__)
+
+
+def kaiser_sinc_filter1d(cutoff, half_width, kernel_size):
+ """Generates a 1D Kaiser-windowed sinc filter.
+
+ Args:
+ cutoff (float): Normalized cutoff frequency (0 to 0.5).
+ half_width (float): Transition bandwidth.
+ kernel_size (int): Number of filter taps.
+
+ Returns:
+ torch.Tensor: A tensor of shape (1, 1, kernel_size) representing the filter.
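+
+    Example (illustrative; the exact numbers are only for demonstration):
+        >>> filt = kaiser_sinc_filter1d(cutoff=0.25, half_width=0.3, kernel_size=6)
+        >>> filt.shape
+        torch.Size([1, 1, 6])
+        >>> torch.isclose(filt.sum(), torch.tensor(1.0))  # normalized to unit sum
+        tensor(True)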
+ """
+ is_even = kernel_size % 2 == 0
+ half_size = kernel_size // 2
+
+ # Compute Kaiser window parameters
+ delta_f = 4 * half_width
+ attenuation = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
+
+ if attenuation > 50.0:
+ beta = 0.1102 * (attenuation - 8.7)
+ elif attenuation >= 21.0:
+ beta = 0.5842 * (attenuation - 21) ** 0.4 + 0.07886 * (attenuation - 21.0)
+ else:
+ beta = 0.0
+
+ kaiser_window = torch.kaiser_window(kernel_size, beta=beta, periodic=False, dtype=torch.float32)
+
+ # Compute time indices
+ if is_even:
+ time_indices = torch.arange(-half_size, half_size) + 0.5
+ else:
+ time_indices = torch.arange(kernel_size) - half_size
+
+ # Compute sinc filter
+ if cutoff == 0:
+ return torch.zeros((1, 1, kernel_size), dtype=torch.float32) # Ensures correct shape
+
+ sinc_filter = torch.sinc(2 * cutoff * time_indices)
+ normalized_filter = 2 * cutoff * kaiser_window * sinc_filter
+
+ # Normalize to ensure sum = 1 (avoid leakage of constant component)
+ normalized_filter /= normalized_filter.sum()
+
+ return normalized_filter.view(1, 1, kernel_size)
+
+
+@auto_docstring
+class Qwen2_5OmniPreTrainedModel(PreTrainedModel):
+ config: Qwen2_5OmniConfig
+ base_model_prefix = "model"
+ input_modalities = ("image", "video", "audio", "text")
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["Qwen2_5OmniDecoderLayer", "Qwen2_5OmniVisionBlock"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn = True
+ _supports_sdpa = True
+ _can_compile_fullgraph = False
+ _supports_attention_backend = True
+
+ def _init_weights(self, module):
+ super()._init_weights(module)
+ if isinstance(module, SinusoidsPositionEmbedding):
+ log_timescale_increment = np.log(module.max_timescale) / (module.channels // 2 - 1)
+ inv_timescales = torch.exp(-log_timescale_increment * torch.arange(module.channels // 2).float())
+ scaled_time = torch.arange(module.length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
+ init.copy_(module.positional_embedding, torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1))
+ elif isinstance(module, UpSample1d):
+ filter_tensor = kaiser_sinc_filter1d(0.5 / module.ratio, 0.6 / module.ratio, module.kernel_size)
+ init.copy_(module.filter, filter_tensor)
+ elif isinstance(module, DownSample1d):
+ filter_tensor = kaiser_sinc_filter1d(module.cutoff, module.half_width, module.kernel_size)
+ init.copy_(module.filter, filter_tensor)
+ elif isinstance(module, Qwen2_5_VisionRotaryEmbedding):
+ inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
+ init.copy_(module.inv_freq, inv_freq)
+
+
+class Qwen2_5OmniPreTrainedModelForConditionalGeneration(Qwen2_5OmniPreTrainedModel):
+ input_modalities = ("image", "video", "audio", "text")
+
+ def _prepare_4d_causal_attention_mask_with_cache_position(
+ self,
+ attention_mask: torch.Tensor,
+ sequence_length: int,
+ target_length: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ min_dtype: float,
+ cache_position: torch.Tensor,
+ batch_size: int,
+ ):
+ """
+        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+        `(batch_size, key_value_length)`, or returns the input `attention_mask` unchanged if it is already 4D.
+
+ Args:
+ attention_mask (`torch.Tensor`):
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
+ sequence_length (`int`):
+ The sequence length being processed.
+ target_length (`int`):
+                The target length: when generating with a static cache, the mask should be as long as the static cache to account for the zero padding, i.e. the part of the cache that is not yet filled.
+ dtype (`torch.dtype`):
+ The dtype to use for the 4D attention mask.
+ device (`torch.device`):
+ The device to place the 4D attention mask on.
+ min_dtype (`float`):
+ The minimum value representable with the dtype `dtype`.
+ cache_position (`torch.Tensor`):
+ Indices depicting the position of the input sequence tokens in the sequence.
+            batch_size (`int`):
+ Batch size.
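+
+        Example (illustrative):
+            With `sequence_length=2`, `target_length=3` and `cache_position=torch.tensor([0, 1])`,
+            each query row `i` of the returned `(batch_size, 1, 2, 3)` mask is zero (attendable)
+            at key positions `<= cache_position[i]` and `min_dtype` elsewhere.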
+ """
+ if attention_mask is not None and attention_mask.dim() == 4:
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+ causal_mask = attention_mask
+ else:
+ causal_mask = torch.full(
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+ )
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+
+ return causal_mask
+
+ def get_llm_pos_ids_for_vision(
+ self,
+ start_idx: int,
+ vision_idx: int,
+ spatial_merge_size: int,
+ t_index: list[int],
+ grid_hs: list[int],
+ grid_ws: list[int],
+ ):
+ llm_pos_ids_list = []
+ llm_grid_h = grid_hs[vision_idx] // spatial_merge_size
+ llm_grid_w = grid_ws[vision_idx] // spatial_merge_size
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(len(t_index), -1, llm_grid_w).flatten()
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(len(t_index), llm_grid_h, -1).flatten()
+ t_index = torch.Tensor(t_index).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten().long()
+ _llm_pos_ids = torch.stack([t_index, h_index, w_index])
+        llm_pos_ids_list.append(_llm_pos_ids + start_idx)
+ llm_pos_ids = torch.cat(llm_pos_ids_list, dim=1)
+ return llm_pos_ids
+
+ def get_chunked_index(
+ self, token_indices: torch.Tensor, tokens_per_chunk: int, remove_index: int
+ ) -> list[tuple[int, int]]:
+ """
+ Splits token index list into chunks based on token value ranges.
+
+ Given a list of token indices, returns a list of (start, end) index tuples representing
+        slices of the list where the token values fall within successive ranges of `tokens_per_chunk`.
+
+        For example, if `tokens_per_chunk` is 1000, the function will create chunks such that:
+ - the first chunk contains token values < 1000,
+ - the second chunk contains values >= 1000 and < 2000, and so on.
+
+ Parameters:
+ token_indices (`torch.Tensor` of shape `(seq_len, )`): A monotonically increasing list of
+ token index values.
+            tokens_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold).
+            remove_index (`int`): An index to subtract from `token_indices` before chunking.
+
+ Returns:
+ `list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive)
+ and end (exclusive) indices of a chunk in `token_indices`.
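+
+        Example (illustrative):
+            >>> self.get_chunked_index(torch.tensor([0, 1, 5, 6, 9]), tokens_per_chunk=4, remove_index=0)
+            [(0, 2), (2, 4), (4, 5)]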
+ """
+
+ def _iter():
+ i, start_idx = 0, 0 # skip bos token
+ current_chunk = 1
+ while i < len(token_indices): # skip eos token
+ if token_indices[i] - remove_index >= current_chunk * tokens_per_chunk:
+ yield (start_idx, i)
+ start_idx = i
+ current_chunk += 1
+ i += 1
+ yield (start_idx, len(token_indices))
+
+ return list(_iter())
+
+ def get_rope_index(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ image_grid_thw: Optional[torch.LongTensor] = None,
+ video_grid_thw: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ use_audio_in_video: bool = False,
+ audio_seqlens: Optional[torch.LongTensor] = None,
+ second_per_grids: Optional[torch.Tensor] = None,
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+ """
+ Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
+
+ Explanation:
+ Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
+
+ For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
+ Examples:
+ input_ids: [T T T T T], here T is for text.
+ temporal position_ids: [0, 1, 2, 3, 4]
+ height position_ids: [0, 1, 2, 3, 4]
+ width position_ids: [0, 1, 2, 3, 4]
+
+ For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
+ and 1D rotary position embedding for text part.
+ Examples:
+ Temporal (Time): 3 patches, representing different segments of the video in time.
+ Height: 2 patches, dividing each frame vertically.
+ Width: 2 patches, dividing each frame horizontally.
+ We also have some important parameters:
+ fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
+ tokens_per_second: This is a crucial parameter. It dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. In this case, we have 25 tokens per second. So each second of the video will be represented with 25 separate time points. It essentially defines the temporal granularity.
+ temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
+                interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that each temporal patch will have a difference of 50 in the temporal position IDs.
+ input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
+ vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
+ vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
+ vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
+ text temporal position_ids: [101, 102, 103, 104, 105]
+ text height position_ids: [101, 102, 103, 104, 105]
+ text width position_ids: [101, 102, 103, 104, 105]
+ Here we calculate the text start position_ids as the max vision position_ids plus 1.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+ The temporal, height and width of feature shape of each image in LLM.
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+ The temporal, height and width of feature shape of each video in LLM.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ use_audio_in_video (`bool`, *optional*):
+ If set to `True`, use the audio in video.
+ audio_seqlens (`torch.LongTensor` of shape `(num_audios)`, *optional*):
+ The length of feature shape of each audio in LLM.
+ second_per_grids (`torch.LongTensor` of shape `(num_videos)`, *optional*):
+ The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
+
+ Returns:
+ position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
+ mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
+ """
+ spatial_merge_size = self.spatial_merge_size
+ image_token_id = self.config.image_token_id
+ video_token_id = self.config.video_token_id
+ audio_token_id = self.config.audio_token_id
+ vision_start_token_id = self.config.vision_start_token_id
+ audio_start_token_id = self.config.audio_start_token_id
+ position_id_per_seconds = self.config.position_id_per_seconds
+ seconds_per_chunk = self.config.seconds_per_chunk
+
+ mrope_position_deltas = []
+ if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
+ total_input_ids = input_ids
+ if attention_mask is not None:
+ attention_mask = attention_mask == 1
+ position_ids = torch.ones(
+ 3,
+ input_ids.shape[0],
+ input_ids.shape[1],
+ dtype=input_ids.dtype,
+ device=input_ids.device,
+ )
+ image_idx, video_idx, audio_idx = 0, 0, 0
+ for i, input_ids in enumerate(total_input_ids):
+ if attention_mask is not None:
+ input_ids = input_ids[attention_mask[i]]
+ image_nums, video_nums, audio_nums = 0, 0, 0
+ vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1)
+ vision_tokens = input_ids[vision_start_indices + 1]
+ audio_nums = torch.sum(input_ids == audio_start_token_id)
+ image_nums = (vision_tokens == image_token_id).sum()
+ video_nums = (
+ (vision_tokens == audio_start_token_id).sum()
+ if use_audio_in_video
+ else (vision_tokens == video_token_id).sum()
+ )
+ input_tokens = input_ids.tolist()
+ llm_pos_ids_list: list = []
+ st = 0
+ remain_images, remain_videos, remain_audios = image_nums, video_nums, audio_nums
+ multimodal_nums = (
+ image_nums + audio_nums if use_audio_in_video else image_nums + video_nums + audio_nums
+ )
+ for _ in range(multimodal_nums):
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ if image_token_id in input_tokens and remain_images > 0:
+ ed_image = input_tokens.index(image_token_id, st)
+ else:
+ ed_image = len(input_tokens) + 1
+ if video_token_id in input_tokens and remain_videos > 0:
+ ed_video = input_tokens.index(video_token_id, st)
+ else:
+ ed_video = len(input_tokens) + 1
+ if audio_token_id in input_tokens and remain_audios > 0:
+ ed_audio = input_tokens.index(audio_token_id, st)
+ else:
+ ed_audio = len(input_tokens) + 1
+ min_ed = min(ed_image, ed_video, ed_audio)
+ if min_ed == ed_audio:
+ text_len = min_ed - st - 1
+ if text_len != 0:
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ bos_len = 1
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1
+ llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx
+ llm_pos_ids_list.append(llm_pos_ids)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ eos_len = 1
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st += text_len + bos_len + audio_len + eos_len
+ audio_idx += 1
+ remain_audios -= 1
+
+ elif min_ed == ed_image:
+ text_len = min_ed - st - 1
+ if text_len != 0:
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ bos_len = 1
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ grid_t = image_grid_thw[image_idx][0]
+ grid_hs = image_grid_thw[:, 1]
+ grid_ws = image_grid_thw[:, 2]
+ t_index = (torch.arange(grid_t) * 1 * position_id_per_seconds).long()
+ llm_pos_ids = self.get_llm_pos_ids_for_vision(
+ st_idx, image_idx, spatial_merge_size, t_index, grid_hs, grid_ws
+ )
+ image_len = image_grid_thw[image_idx].prod() // (spatial_merge_size**2)
+ llm_pos_ids_list.append(llm_pos_ids)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ eos_len = 1
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st += text_len + bos_len + image_len + eos_len
+ image_idx += 1
+ remain_images -= 1
+
+ elif min_ed == ed_video and not use_audio_in_video:
+ text_len = min_ed - st - 1
+ if text_len != 0:
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ bos_len = 1
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ grid_t = video_grid_thw[video_idx][0]
+ grid_hs = video_grid_thw[:, 1]
+ grid_ws = video_grid_thw[:, 2]
+ t_index = (
+ torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds
+ ).long()
+ llm_pos_ids = self.get_llm_pos_ids_for_vision(
+ st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws
+ )
+ video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2)
+ llm_pos_ids_list.append(llm_pos_ids)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ eos_len = 1
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st += text_len + bos_len + video_len + eos_len
+ video_idx += 1
+ remain_videos -= 1
+
+ elif min_ed == ed_video and use_audio_in_video:
+ text_len = min_ed - st - 2
+ if text_len != 0:
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ bos_len = 1
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
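+                        # The vision bos and audio bos intentionally share the same position ids;
+                        # the duplicated append is mirrored by `bos_len * 2` in the offset below.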
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1
+ audio_llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx
+ grid_t = video_grid_thw[video_idx][0]
+ grid_hs = video_grid_thw[:, 1]
+ grid_ws = video_grid_thw[:, 2]
+
+ t_index = (
+ torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds
+ ).long()
+ video_llm_pos_ids = self.get_llm_pos_ids_for_vision(
+ st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws
+ )
+
+ t_ntoken_per_chunk = int(position_id_per_seconds * seconds_per_chunk)
+ video_chunk_indexes = self.get_chunked_index(video_llm_pos_ids[0], t_ntoken_per_chunk, st_idx)
+ audio_chunk_indexes = self.get_chunked_index(audio_llm_pos_ids[0], t_ntoken_per_chunk, st_idx)
+ sub_len = 0
+ for j in range(max(len(video_chunk_indexes), len(audio_chunk_indexes))):
+ video_chunk_index = video_chunk_indexes[j] if j < len(video_chunk_indexes) else None
+ audio_chunk_index = audio_chunk_indexes[j] if j < len(audio_chunk_indexes) else None
+ if video_chunk_index is not None:
+ sub_len += video_chunk_index[1] - video_chunk_index[0]
+
+ llm_pos_ids_list.append(
+ video_llm_pos_ids[:, video_chunk_index[0] : video_chunk_index[1]]
+ )
+ if audio_chunk_index is not None:
+ sub_len += audio_chunk_index[1] - audio_chunk_index[0]
+
+ llm_pos_ids_list.append(
+ audio_llm_pos_ids[:, audio_chunk_index[0] : audio_chunk_index[1]]
+ )
+ video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ eos_len = 1
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st += text_len + bos_len * 2 + audio_len + video_len + eos_len * 2
+
+ audio_idx += 1
+ video_idx += 1
+ remain_videos -= 1
+ remain_audios -= 1
+
+ if st < len(input_tokens):
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ text_len = len(input_tokens) - st
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
+
+ if attention_mask is not None:
+ position_ids[..., i, attention_mask[i]] = llm_positions.to(position_ids.device)
+ else:
+ position_ids[..., i, :] = llm_positions.to(position_ids.device)
+ mrope_position_deltas.append(llm_positions.max() + 1 - len(input_ids))
+ mrope_position_deltas = torch.tensor(mrope_position_deltas).unsqueeze(1).to(device=input_ids.device)
+
+ return position_ids, mrope_position_deltas
+ else:
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
+ max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
+ mrope_position_deltas = max_position_ids + 1 - torch.sum(attention_mask, dim=-1, keepdim=True)
+
+ return position_ids, mrope_position_deltas
+
+
+############################
+# Start Thinker #
+############################
+
+
+@dataclass
+@auto_docstring(
+ custom_intro="""
+ Base class for Qwen2.5OmniThinker causal language model (or autoregressive) outputs.
+ """
+)
+class Qwen2_5OmniThinkerCausalLMOutputWithPast(ModelOutput):
+ r"""
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+ The rope index difference between sequence length and multimodal rope.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Cache] = None
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
+ attentions: Optional[tuple[torch.FloatTensor]] = None
+ rope_deltas: Optional[torch.LongTensor] = None
+
+
+def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+ """
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
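+
+    Example (illustrative): with `n_rep=2`, a `(1, 2, 5, 8)` input becomes `(1, 4, 5, 8)`,
+    each key/value head appearing twice along the head dimension.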
+ """
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+ if n_rep == 1:
+ return hidden_states
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+def eager_attention_forward(
+ module: nn.Module,
+ query: torch.Tensor,
+ key: torch.Tensor,
+ value: torch.Tensor,
+ attention_mask: Optional[torch.Tensor],
+ scaling: float,
+ dropout: float = 0.0,
+ **kwargs,
+):
+ key_states = repeat_kv(key, module.num_key_value_groups)
+ value_states = repeat_kv(value, module.num_key_value_groups)
+
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
+ if attention_mask is not None:
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+ attn_weights = attn_weights + causal_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
+ attn_output = torch.matmul(attn_weights, value_states)
+ attn_output = attn_output.transpose(1, 2).contiguous()
+
+ return attn_output, attn_weights
+
+
+class Qwen2_5OmniAudioAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ config: Qwen2_5OmniAudioEncoderConfig,
+ ):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.num_heads = config.encoder_attention_heads
+ self.dropout = config.attention_dropout
+ self.head_dim = self.embed_dim // self.num_heads
+ self.num_key_value_groups = 1 # needed for eager attention
+ self.config = config
+
+ if (self.head_dim * self.num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.attention_dropout = 0.0
+ self.is_decoder = False
+ self.is_causal = False
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ cu_seqlens: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ **kwargs,
+    ) -> torch.Tensor:
+ """Input shape: Batch x Time x Channel"""
+
+ seq_length, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states).reshape(seq_length, self.num_heads, -1)
+ key_states = self.k_proj(hidden_states).reshape(seq_length, self.num_heads, -1)
+ value_states = self.v_proj(hidden_states).reshape(seq_length, self.num_heads, -1)
+
+ query_states = query_states.transpose(0, 1).unsqueeze(0)
+ key_states = key_states.transpose(0, 1).unsqueeze(0)
+ value_states = value_states.transpose(0, 1).unsqueeze(0)
+ max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, _ = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask=attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ cu_seq_lens_q=cu_seqlens, # pass cu seq lens for FA2
+ cu_seq_lens_k=cu_seqlens,
+ max_length_q=max_seqlen,
+ max_length_k=max_seqlen,
+ is_causal=False,
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(seq_length, -1).contiguous()
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output
+
+
+class Qwen2_5OmniAudioEncoderLayer(GradientCheckpointingLayer):
+ def __init__(self, config: Qwen2_5OmniAudioEncoderConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.self_attn = Qwen2_5OmniAudioAttention(config)
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ cu_seqlens: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+            cu_seqlens (`torch.Tensor`): cumulative sequence lengths delimiting the blocks of tokens
+                that may attend to one another.
+            attention_mask (`torch.FloatTensor`): attention mask of size
+                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states = self.self_attn(
+ hidden_states=hidden_states,
+ cu_seqlens=cu_seqlens,
+ attention_mask=attention_mask,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = residual + hidden_states
+
+ if hidden_states.dtype == torch.float16:
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ return outputs
+
+
+class SinusoidsPositionEmbedding(nn.Module):
+ def __init__(self, length, channels, max_timescale=10000):
+ super().__init__()
+ self.length = length
+ self.channels = channels
+ self.max_timescale = max_timescale
+ if channels % 2 != 0:
+ raise ValueError("SinusoidsPositionEmbedding needs even channels input")
+ log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
+ inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float())
+ scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
+ self.register_buffer(
+ "positional_embedding",
+ torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1),
+ persistent=False,
+ )
+
+ def forward(self, seqlen: int):
+ return self.positional_embedding[:seqlen, :]
+
+
+@auto_docstring(
+ custom_intro="""
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`Qwen2_5OmniAudioEncoderLayer`].
+ """
+)
+class Qwen2_5OmniAudioEncoder(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniAudioEncoderConfig
+ main_input_name = "input_features"
+ input_modalities = "audio"
+ _no_split_modules = ["Qwen2_5OmniAudioEncoderLayer"]
+ _supports_sdpa = True
+
+ def __init__(self, config: Qwen2_5OmniAudioEncoderConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+
+ embed_dim = config.d_model
+ self.num_mel_bins = config.num_mel_bins
+ self.max_source_positions = config.max_source_positions
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+ self.n_window = config.n_window
+ self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1)
+ self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
+ self.positional_embedding = SinusoidsPositionEmbedding(self.max_source_positions, embed_dim)
+ self.audio_bos_eos_token = nn.Embedding(2, config.output_dim)
+ self.layers = nn.ModuleList([Qwen2_5OmniAudioEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.ln_post = nn.LayerNorm(config.d_model)
+ self.avg_pooler = nn.AvgPool1d(2, stride=2)
+ self.proj = nn.Linear(config.d_model, config.output_dim)
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _freeze_parameters(self):
+ for param in self.parameters():
+ param.requires_grad = False
+ self._requires_grad = False
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.conv1
+
+ def set_input_embeddings(self, value: nn.Module):
+ self.conv1 = value
+
+ def _prepare_attention_mask(self, inputs_tensor: torch.Tensor, cu_seqlens: torch.Tensor) -> torch.Tensor:
+ # Flash Attention 2 doesn't need a 4D mask and relies on `cu_seqlens/max_seqlen`
+        # NOTE: the created attention mask only approximates the ragged FA2 attention by
+        # allowing bidirectional attention within `cu_seqlens` blocks and not attending between
+        # blocks, so it will not be a 100% match for FA2's `varlen` path.
+ if self.config._attn_implementation == "flash_attention_2":
+ return None
+
+ seq_length = inputs_tensor.shape[0]
+ attention_mask = torch.full(
+ [1, 1, seq_length, seq_length],
+ torch.finfo(inputs_tensor.dtype).min,
+ device=inputs_tensor.device,
+ dtype=inputs_tensor.dtype,
+ )
+ for i in range(1, len(cu_seqlens)):
+ attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0
+ return attention_mask
+
+ @auto_docstring
+ def forward(
+ self,
+ input_features,
+ feature_lens=None,
+ aftercnn_lens=None,
+ **kwargs,
+ ):
+ r"""
+        feature_lens (`torch.LongTensor` of shape `(batch_size,)`):
+            Number of mel feature frames for each audio.
+        aftercnn_lens (`torch.LongTensor` of shape `(batch_size,)`):
+            Number of frames remaining for each audio after the convolutional downsampling.
+ """
+ chunk_num = torch.ceil(feature_lens / (self.n_window * 2)).long()
+
+ chunk_lengths = torch.tensor(
+ [self.n_window * 2] * chunk_num.sum(),
+ dtype=torch.long,
+ device=feature_lens.device,
+ )
+ tail_chunk_index = F.pad(chunk_num, (1, 0), value=-1).cumsum(0)[1:]
+ chunk_lengths[tail_chunk_index] = feature_lens % (self.n_window * 2)
+ chunk_lengths = torch.where(chunk_lengths == 0, self.n_window * 2, chunk_lengths)
+
+ chunk_list = input_features.split(chunk_lengths.tolist(), dim=1)
+ padded_feature, padded_mask, padded_mask_after_cnn = self.padded_and_mask_function(
+ chunk_list, chunk_lengths, padding_value=0, padding_side="right"
+ )
+ padded_embed = nn.functional.gelu(self.conv1(padded_feature)) * padded_mask
+ padded_embed = nn.functional.gelu(self.conv2(padded_embed)).transpose(1, 2)
+
+ padded_embed = padded_embed + self.positional_embedding.positional_embedding[
+ : padded_embed.shape[1], :
+ ].unsqueeze(0).to(padded_embed.dtype)
+ hidden_states = padded_embed[padded_mask_after_cnn]
+ cu_seqlens = torch.cat(
+ (
+ torch.zeros(1, device=padded_mask_after_cnn.device, dtype=torch.int32),
+ padded_mask_after_cnn.sum(1).cumsum(0),
+ )
+ ).to(torch.int32)
+ attention_mask = self._prepare_attention_mask(hidden_states, cu_seqlens)
+
+ for encoder_layer in self.layers:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ cu_seqlens=cu_seqlens,
+ attention_mask=attention_mask,
+ **kwargs,
+ )
+ hidden_states = layer_outputs[0]
+
+ hidden_states_list = hidden_states.split(aftercnn_lens.tolist(), dim=0)
+ token_audio_list = []
+ for each_audio_states in hidden_states_list:
+ each_audio_states = self.avg_pooler(each_audio_states.transpose(0, 1)).transpose_(0, 1)
+ each_audio_states = self.ln_post(each_audio_states)
+ each_audio_states = self.proj(each_audio_states)
+ token_audio_list.append(each_audio_states)
+ token_audio = torch.cat(token_audio_list, dim=0)
+ return BaseModelOutput(last_hidden_state=token_audio)
+
+ def padded_and_mask_function(self, tensor_list, tensor_len, padding_value=0, padding_side="right"):
+ """
+        Pads a sequence of tensors to their maximum length on the indicated `padding_side`,
+        then prepares a mask so that pad tokens are not attended to.
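+
+        Example (illustrative): chunk lengths `[3, 5]` with feature dim `d` yield a
+        `(2, d, 5)` padded tensor, a `(2, 1, 5)` long mask, and a `(2, 3)` boolean
+        post-CNN mask via the stride-2 length formula `(len - 1) // 2 + 1`.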
+ """
+ max_len = tensor_len.max()
+ dim = tensor_list[0].shape[0]
+ padded_tensor = torch.full(
+ size=(len(tensor_list), dim, max_len),
+ fill_value=padding_value,
+ dtype=self.dtype,
+ device=tensor_list[0].device,
+ )
+
+ batch_mask = torch.zeros(
+ (len(tensor_len), max_len),
+ dtype=torch.long,
+ device=padded_tensor.device,
+ )
+ for i, length in enumerate(tensor_len):
+ batch_mask[i, :length] = 1
+ padded_tensor[i, :, :length] = tensor_list[i]
+
+ feature_lens_after_cnn = (tensor_len - 1) // 2 + 1
+ max_len_after_cnn = feature_lens_after_cnn.max()
+ batch_mask_after_cnn = torch.zeros(
+ (len(tensor_len), max_len_after_cnn),
+ dtype=torch.long,
+ device=padded_tensor.device,
+ )
+ for i, length in enumerate(feature_lens_after_cnn):
+ batch_mask_after_cnn[i, :length] = 1
+ return (
+ padded_tensor,
+ batch_mask.unsqueeze(1),
+ batch_mask_after_cnn.bool(),
+ )
+
+ # Ignore copy
+ def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
+ """
+ Computes the output length of the convolutional layers and the output length of the audio encoder
+ """
+ input_lengths = (input_lengths - 1) // 2 + 1
+ output_lengths = (input_lengths - 2) // 2 + 1
+ return input_lengths, output_lengths
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb_vision(tensor: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
+ orig_dtype = tensor.dtype
+ tensor = tensor.float()
+ cos = freqs.cos()
+ sin = freqs.sin()
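+    # Tile the (seq_len, dim // 2) angles to the full head dim so they line up with
+    # rotate_half's half-split layout, then broadcast over the batch and head axes.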
+ cos = cos.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
+ sin = sin.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
+ output = (tensor * cos) + (rotate_half(tensor) * sin)
+ output = output.to(orig_dtype)
+ return output
+
+
+class Qwen2_5OmniVisionAttention(nn.Module):
+ def __init__(self, config: Qwen2_5OmniVisionEncoderConfig = None) -> None:
+ super().__init__()
+ self.dim = config.hidden_size
+ self.num_heads = config.num_heads
+ self.head_dim = self.dim // self.num_heads
+ self.q = nn.Linear(self.dim, self.dim, bias=True)
+ self.k = nn.Linear(self.dim, self.dim, bias=True)
+ self.v = nn.Linear(self.dim, self.dim, bias=True)
+ self.proj = nn.Linear(self.dim, self.dim)
+ self.scaling = self.head_dim**-0.5
+ self.num_key_value_groups = 1 # needed for eager attention
+ self.config = config
+ self.attention_dropout = 0.0
+ self.is_causal = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ cu_seqlens: torch.Tensor,
+ rotary_pos_emb: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ seq_length = hidden_states.shape[0]
+ query_states = self.q(hidden_states).reshape(seq_length, self.num_heads, -1)
+ key_states = self.k(hidden_states).reshape(seq_length, self.num_heads, -1)
+ value_states = self.v(hidden_states).reshape(seq_length, self.num_heads, -1)
+ query_states = apply_rotary_pos_emb_vision(query_states.unsqueeze(0), rotary_pos_emb).squeeze(0)
+ key_states = apply_rotary_pos_emb_vision(key_states.unsqueeze(0), rotary_pos_emb).squeeze(0)
+
+ query_states = query_states.transpose(0, 1).unsqueeze(0)
+ key_states = key_states.transpose(0, 1).unsqueeze(0)
+ value_states = value_states.transpose(0, 1).unsqueeze(0)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ if self.config._attn_implementation == "flash_attention_2":
+ # Flash Attention 2: Use cu_seqlens for variable length attention
+ max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
+ attn_output, _ = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask=None,
+ scaling=self.scaling,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ cu_seq_lens_q=cu_seqlens,
+ cu_seq_lens_k=cu_seqlens,
+ max_length_q=max_seqlen,
+ max_length_k=max_seqlen,
+ is_causal=False,
+ **kwargs,
+ )
+ else:
+ # Other implementations: Process each chunk separately
+ lengths = cu_seqlens[1:] - cu_seqlens[:-1]
+ splits = [
+ torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
+ ]
+
+ attn_outputs = [
+ attention_interface(
+ self,
+ q,
+ k,
+ v,
+ attention_mask=None,
+ scaling=self.scaling,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ is_causal=False,
+ **kwargs,
+ )[0]
+ for q, k, v in zip(*splits)
+ ]
+ attn_output = torch.cat(attn_outputs, dim=1)
+
+ attn_output = attn_output.reshape(seq_length, -1).contiguous()
+ attn_output = self.proj(attn_output)
+ return attn_output
+
+
+class Qwen2_5OmniMLP(nn.Module):
+ def __init__(self, config, bias: bool = False):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=bias)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_state):
+ return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
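+
+# Note: this is the standard gated MLP (SwiGLU-style when `hidden_act` is "silu"):
+# down_proj(act(gate_proj(x)) * up_proj(x)), hidden_size -> intermediate_size -> hidden_size.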
+
+
+class Qwen2_5OmniVisionBlock(GradientCheckpointingLayer):
+ def __init__(self, config: Qwen2_5OmniVisionEncoderConfig) -> None:
+ super().__init__()
+ self.norm1 = Qwen2RMSNorm(config.hidden_size, eps=1e-6)
+ self.norm2 = Qwen2RMSNorm(config.hidden_size, eps=1e-6)
+ self.attn = Qwen2_5OmniVisionAttention(config=config)
+ self.mlp = Qwen2_5OmniMLP(config, bias=True)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ cu_seqlens: torch.Tensor,
+ rotary_pos_emb: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ hidden_states = hidden_states + self.attn(
+ self.norm1(hidden_states),
+ cu_seqlens=cu_seqlens,
+ rotary_pos_emb=rotary_pos_emb,
+ **kwargs,
+ )
+ hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
+ return hidden_states
+
+
+class Qwen2_5_VisionRotaryEmbedding(nn.Module):
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
+
+ def __init__(self, dim: int, theta: float = 10000.0) -> None:
+ super().__init__()
+ self.dim = dim
+ self.theta = theta
+ inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ def forward(self, seqlen: int) -> torch.Tensor:
+ seq = torch.arange(seqlen, device=self.inv_freq.device, dtype=self.inv_freq.dtype)
+ freqs = torch.outer(seq, self.inv_freq)
+ return freqs
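+
+# `forward(seqlen)` returns the outer product of positions [0, seqlen) with `inv_freq`,
+# i.e. a (seqlen, dim // 2) table of rotary angles that callers index per patch position.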
+
+
+class Qwen2_5_VisionPatchEmbed(nn.Module):
+ def __init__(
+ self,
+ patch_size: int = 14,
+ temporal_patch_size: int = 2,
+ in_channels: int = 3,
+ embed_dim: int = 1152,
+ ) -> None:
+ super().__init__()
+ self.patch_size = patch_size
+ self.temporal_patch_size = temporal_patch_size
+ self.in_channels = in_channels
+ self.embed_dim = embed_dim
+
+ kernel_size = [temporal_patch_size, patch_size, patch_size]
+ self.proj = nn.Conv3d(in_channels, embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ target_dtype = self.proj.weight.dtype
+ hidden_states = hidden_states.view(
+ -1, self.in_channels, self.temporal_patch_size, self.patch_size, self.patch_size
+ )
+ hidden_states = self.proj(hidden_states.to(dtype=target_dtype)).view(-1, self.embed_dim)
+ return hidden_states
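+
+# Shape sketch (with the default arguments): each input row is a flattened patch of length
+# in_channels * temporal_patch_size * patch_size * patch_size = 3 * 2 * 14 * 14; the view
+# restores (-1, 3, 2, 14, 14) and the stride == kernel Conv3d collapses each patch to a
+# single vector, yielding (num_patches, embed_dim).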
+
+
+class Qwen2_5OmniPatchMerger(nn.Module):
+ def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2) -> None:
+ super().__init__()
+ self.hidden_size = context_dim * (spatial_merge_size**2)
+ self.ln_q = Qwen2RMSNorm(context_dim, eps=1e-6)
+ self.mlp = nn.Sequential(
+ nn.Linear(self.hidden_size, self.hidden_size),
+ nn.GELU(),
+ nn.Linear(self.hidden_size, dim),
+ )
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ x = self.mlp(self.ln_q(x).view(-1, self.hidden_size))
+ return x
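+
+# Merging sketch: with spatial_merge_size=2, `ln_q` normalizes per-patch features of width
+# context_dim, the view packs each 2x2 cell into one row of width 4 * context_dim, and the
+# MLP projects it to `dim`, so the sequence length shrinks by a factor of 4.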
+
+
+class Qwen2_5OmniVisionEncoder(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniVisionEncoderConfig
+ _no_split_modules = ["Qwen2_5OmniVisionBlock"]
+ _input_embed_layer = "patch_embed"
+ input_modalities = ("image", "video")
+
+ def __init__(self, config: Qwen2_5OmniVisionEncoderConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+ self.spatial_merge_size = config.spatial_merge_size
+ self.patch_size = config.patch_size
+ self.fullatt_block_indexes = config.fullatt_block_indexes
+ self.window_size = config.window_size
+ self.spatial_merge_unit = self.spatial_merge_size * self.spatial_merge_size
+
+ self.patch_embed = Qwen2_5_VisionPatchEmbed(
+ patch_size=config.patch_size,
+ temporal_patch_size=config.temporal_patch_size,
+ in_channels=config.in_channels,
+ embed_dim=config.hidden_size,
+ )
+
+ head_dim = config.hidden_size // config.num_heads
+ self.rotary_pos_emb = Qwen2_5_VisionRotaryEmbedding(head_dim // 2)
+ self.blocks = nn.ModuleList([Qwen2_5OmniVisionBlock(config) for _ in range(config.depth)])
+ self.merger = Qwen2_5OmniPatchMerger(
+ dim=config.out_hidden_size,
+ context_dim=config.hidden_size,
+ spatial_merge_size=config.spatial_merge_size,
+ )
+ self.gradient_checkpointing = False
+
+ self.post_init()
+
+ def rot_pos_emb(self, grid_thw):
+ pos_ids = []
+ for t, h, w in grid_thw:
+ hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w)
+ hpos_ids = hpos_ids.reshape(
+ h // self.spatial_merge_size,
+ self.spatial_merge_size,
+ w // self.spatial_merge_size,
+ self.spatial_merge_size,
+ )
+ hpos_ids = hpos_ids.permute(0, 2, 1, 3)
+ hpos_ids = hpos_ids.flatten()
+
+ wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1)
+ wpos_ids = wpos_ids.reshape(
+ h // self.spatial_merge_size,
+ self.spatial_merge_size,
+ w // self.spatial_merge_size,
+ self.spatial_merge_size,
+ )
+ wpos_ids = wpos_ids.permute(0, 2, 1, 3)
+ wpos_ids = wpos_ids.flatten()
+ pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1))
+ pos_ids = torch.cat(pos_ids, dim=0)
+ max_grid_size = grid_thw[:, 1:].max()
+ rotary_pos_emb_full = self.rotary_pos_emb(max_grid_size)
+ rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1)
+ return rotary_pos_emb
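+
+ # Worked example: for a 4x4 grid with spatial_merge_size=2, the first four (h, w) id pairs
+ # come out as (0, 0), (0, 1), (1, 0), (1, 1), i.e. one 2x2 merge cell, so patches that get
+ # merged together stay contiguous in the reordered sequence.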
+
+ def get_window_index(self, grid_thw):
+ window_index: list = []
+ cu_window_seqlens: list = [0]
+ window_index_id = 0
+ vit_merger_window_size = self.window_size // self.spatial_merge_size // self.patch_size
+
+ for grid_t, grid_h, grid_w in grid_thw:
+ llm_grid_h, llm_grid_w = (
+ grid_h // self.spatial_merge_size,
+ grid_w // self.spatial_merge_size,
+ )
+ index = torch.arange(grid_t * llm_grid_h * llm_grid_w).reshape(grid_t, llm_grid_h, llm_grid_w)
+ pad_h = vit_merger_window_size - llm_grid_h % vit_merger_window_size
+ pad_w = vit_merger_window_size - llm_grid_w % vit_merger_window_size
+ num_windows_h = (llm_grid_h + pad_h) // vit_merger_window_size
+ num_windows_w = (llm_grid_w + pad_w) // vit_merger_window_size
+ index_padded = F.pad(index, (0, pad_w, 0, pad_h), "constant", -100)
+ index_padded = index_padded.reshape(
+ grid_t,
+ num_windows_h,
+ vit_merger_window_size,
+ num_windows_w,
+ vit_merger_window_size,
+ )
+ index_padded = index_padded.permute(0, 1, 3, 2, 4).reshape(
+ grid_t,
+ num_windows_h * num_windows_w,
+ vit_merger_window_size,
+ vit_merger_window_size,
+ )
+ seqlens = (index_padded != -100).sum([2, 3]).reshape(-1)
+ index_padded = index_padded.reshape(-1)
+ index_new = index_padded[index_padded != -100]
+ window_index.append(index_new + window_index_id)
+ cu_seqlens_tmp = seqlens.cumsum(0) * self.spatial_merge_unit + cu_window_seqlens[-1]
+ cu_window_seqlens.extend(cu_seqlens_tmp.tolist())
+ window_index_id += (grid_t * llm_grid_h * llm_grid_w).item()
+ window_index = torch.cat(window_index, dim=0)
+
+ return window_index, cu_window_seqlens
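+
+ # Example, assuming the Qwen2.5-Omni defaults window_size=112, spatial_merge_size=2 and
+ # patch_size=14 (so vit_merger_window_size=4): a 16x16 merged grid is padded to a multiple
+ # of 4 per side, cut into 4x4 windows, and `window_index` reorders patches window by window
+ # while `cu_window_seqlens` accumulates per-window patch counts in raw (pre-merge) units.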
+
+ def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.Tensor` of shape `(seq_len, in_channels * temporal_patch_size * patch_size * patch_size)`):
+ The flattened image patch inputs to be embedded and encoded.
+ grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
+ The temporal, height and width of feature shape of each image in LLM.
+
+ Returns:
+ `torch.Tensor`: hidden_states.
+ """
+ hidden_states = self.patch_embed(hidden_states)
+ rotary_pos_emb = self.rot_pos_emb(grid_thw)
+
+ window_index, cu_window_seqlens = self.get_window_index(grid_thw)
+ cu_window_seqlens = torch.tensor(
+ cu_window_seqlens,
+ device=hidden_states.device,
+ dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
+ )
+ cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
+
+ seq_len, _ = hidden_states.size()
+ hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
+ hidden_states = hidden_states[window_index, :, :]
+ hidden_states = hidden_states.reshape(seq_len, -1)
+ rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
+ rotary_pos_emb = rotary_pos_emb[window_index, :, :]
+ rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
+
+ cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
+ dim=0,
+ # Select dtype based on the following factors:
+ # - FA2 requires that cu_seqlens_q must have dtype int32
+ # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
+ # See https://github.com/huggingface/transformers/pull/34852 for more information
+ dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
+ )
+ cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
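+
+ # Worked example: grid_thw = [[2, 4, 4]] gives h * w = 16 repeated t = 2 times, so the
+ # cumsum yields [16, 32] and the pad prepends 0, producing cu_seqlens = [0, 16, 32]:
+ # one full-attention chunk per frame.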
+
+ # Alternate between full attention and windowed attention depending on the block index
+ for layer_num, blk in enumerate(self.blocks):
+ if layer_num in self.fullatt_block_indexes:
+ cu_seqlens_now = cu_seqlens
+ else:
+ cu_seqlens_now = cu_window_seqlens
+
+ hidden_states = blk(
+ hidden_states,
+ cu_seqlens=cu_seqlens_now,
+ rotary_pos_emb=rotary_pos_emb,
+ **kwargs,
+ )
+ hidden_states = self.merger(hidden_states)
+ reverse_indices = torch.argsort(window_index)
+ hidden_states = hidden_states[reverse_indices, :]
+
+ return hidden_states
+
+
+class Qwen2_5OmniRotaryEmbedding(nn.Module):
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
+
+ def __init__(self, config: Qwen2_5OmniThinkerConfig, device=None):
+ super().__init__()
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+
+ self.rope_type = self.config.rope_parameters["rope_type"]
+ rope_init_fn: Callable = self.compute_default_rope_parameters
+ if self.rope_type != "default":
+ rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+ inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
+
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
+
+ @staticmethod
+ def compute_default_rope_parameters(
+ config: Optional[Qwen2_5OmniConfig] = None,
+ device: Optional["torch.device"] = None,
+ seq_len: Optional[int] = None,
+ ) -> tuple["torch.Tensor", float]:
+ """
+ Computes the inverse frequencies according to the original RoPE implementation
+ Args:
+ config ([`~transformers.PretrainedConfig`]):
+ The model configuration.
+ device (`torch.device`):
+ The device to use for initialization of the inverse frequencies.
+ seq_len (`int`, *optional*):
+ The current sequence length. Unused for this type of RoPE.
+ Returns:
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
+ """
+ base = config.rope_parameters["rope_theta"]
+ dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
+
+ attention_factor = 1.0 # Unused in this type of RoPE
+
+ # Compute the inverse frequencies
+ inv_freq = 1.0 / (
+ base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
+ )
+ return inv_freq, attention_factor
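+
+ # Numeric sketch: with rope_theta=10000 and dim=4 this returns
+ # inv_freq = [10000**0, 10000**(-0.5)] = [1.0, 0.01], the usual RoPE frequency ladder.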
+
+ # Ignore copy
+ def forward(self, x, position_ids):
+ # In contrast to other models, Qwen2_5Omni has different position ids for the grids
+ # So we expand the inv_freq to shape (3, ...)
+ inv_freq_expanded = self.inv_freq[None, None, :, None].float().expand(3, position_ids.shape[1], -1, 1)
+ position_ids_expanded = position_ids[:, :, None, :].float() # shape (3, bs, 1, positions)
+
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+ with maybe_autocast(device_type=device_type, enabled=False): # Force float32
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(2, 3)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos() * self.attention_scaling
+ sin = emb.sin() * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+def apply_multimodal_rotary_pos_emb(q, k, cos, sin, mrope_section, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding with Multimodal Sections to the query and key tensors (https://qwenlm.github.io/blog/qwen2-vl/).
+
+ Explanation:
+ Multimodal 3D rotary position embedding is an extension to 1D rotary position embedding. The input embedding
+ sequence contains vision (images / videos) embedding and text embedding or just contains text embedding. For
+ vision embedding part, we apply rotary position embedding on temporal, height and width dimension separately.
+ Here we split the channel dimension to 3 chunks for the temporal, height and width rotary position embedding.
+ For text embedding part, we just apply 1D rotary position embedding. The three rotary position index (temporal,
+ height and width) of text embedding is always the same, so the text embedding rotary position embedding has no
+ difference with modern LLMs.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ mrope_section (`list[int]`):
+ Multimodal rope section is for channel dimension of temporal, height and width in rope calculation.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ mrope_section = mrope_section * 2
+ cos = torch.cat([m[i % 3] for i, m in enumerate(cos.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
+ unsqueeze_dim
+ )
+ sin = torch.cat([m[i % 3] for i, m in enumerate(sin.split(mrope_section, dim=-1))], dim=-1).unsqueeze(
+ unsqueeze_dim
+ )
+
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
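+
+# Example of the multimodal split, assuming mrope_section = [16, 24, 24] and head_dim = 128
+# (16 + 24 + 24 = 64 = head_dim // 2): the doubled section [16, 24, 24, 16, 24, 24] cuts the
+# 128 cos/sin channels into six chunks, and `i % 3` routes each chunk to the temporal (0),
+# height (1) or width (2) position grid before the usual RoPE rotation.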
+
+
+class Qwen2_5OmniAttention(nn.Module):
+ """
+ Multi-headed attention from the 'Attention Is All You Need' paper, modified to optionally use sliding window
+ attention in the spirit of Longformer and "Generating Long Sequences with Sparse Transformers".
+ """
+
+ def __init__(self, config: Qwen2_5OmniConfig, layer_idx: Optional[int] = None):
+ super().__init__()
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads)
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.is_causal = True
+ self.attention_dropout = config.attention_dropout
+ self.rope_parameters = config.rope_parameters
+ self.scaling = self.head_dim**-0.5
+
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+ self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
+ self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states)
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
+ key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
+ value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
+
+ cos, sin = position_embeddings
+ query_states, key_states = apply_multimodal_rotary_pos_emb(
+ query_states, key_states, cos, sin, self.config.rope_parameters["mrope_section"]
+ )
+
+ if past_key_values is not None:
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, attn_weights = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ sliding_window=self.sliding_window,
+ position_ids=position_ids, # pass positions for FA2
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
+ attn_output = self.o_proj(attn_output)
+ return attn_output, attn_weights
+
+
+class Qwen2MLP(nn.Module):
+ def __init__(self, config, bias: bool = False):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.intermediate_size = config.intermediate_size
+ self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias)
+ self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=bias)
+ self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=bias)
+ self.act_fn = ACT2FN[config.hidden_act]
+
+ def forward(self, hidden_state):
+ return self.down_proj(self.act_fn(self.gate_proj(hidden_state)) * self.up_proj(hidden_state))
+
+
+class Qwen2_5OmniDecoderLayer(GradientCheckpointingLayer):
+ def __init__(self, config: Qwen2_5OmniTextConfig, layer_idx: int):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+
+ if config.use_sliding_window and config._attn_implementation != "flash_attention_2":
+ logger.warning_once(
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
+ "unexpected results may be encountered."
+ )
+ self.self_attn = Qwen2_5OmniAttention(config, layer_idx)
+
+ self.mlp = Qwen2MLP(config)
+ self.input_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.attention_type = config.layer_types[layer_idx]
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ cache_position: Optional[torch.LongTensor] = None,
+ position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, sequence_length)` where padding elements are indicated by 0.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_values (`Cache`, *optional*): cached past key and value projection states
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
+ Indices depicting the position of the input sequence tokens in the sequence.
+ position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
+ with `head_dim` being the embedding dimension of each attention head.
+ kwargs (`dict`, *optional*):
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
+ into the model
+ """
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ position_embeddings=position_embeddings,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ return outputs
+
+
+@auto_docstring
+class Qwen2_5OmniThinkerTextModel(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniTextConfig
+ input_modalities = ("text",)
+ _no_split_modules = ["Qwen2_5OmniDecoderLayer"]
+
+ def __init__(self, config: Qwen2_5OmniTextConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [Qwen2_5OmniDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self._attn_implementation = config._attn_implementation
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.has_sliding_layers = "sliding_attention" in self.config.layer_types
+ self.rotary_emb = Qwen2_5OmniRotaryEmbedding(config=config)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Union[tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # torch.jit.trace() doesn't support cache objects in the output
+ if use_cache and past_key_values is None and not torch.jit.is_tracing():
+ past_key_values = DynamicCache(config=self.config)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ # the hard coded `3` is for temporal, height and width.
+ if position_ids is None:
+ position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
+ elif position_ids.ndim == 2:
+ position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
+
+ # NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions
+ # where each dim indicates visual spatial positions for temporal/height/width grids.
+ # There are two scenarios when FA2-like packed masking might be activated.
+ # 1. User specifically passed packed `position_ids` and no attention mask.
+ # In this case we expect the user to create correct position ids for all 3 grids
+ # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
+ # 2. User runs forward with no attention mask and no position ids. In this case, position ids
+ # are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are
+ # prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass
+ # text-only positions will cause incorrect mask construction; do not change `prepare_inputs_for_generation`
+ if position_ids.ndim == 3 and position_ids.shape[0] == 4:
+ text_position_ids = position_ids[0]
+ position_ids = position_ids[1:]
+ else:
+ # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
+ text_position_ids = None
+
+ # It may already have been prepared by e.g. `generate`
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
+ # Prepare mask arguments
+ mask_kwargs = {
+ "config": self.config,
+ "input_embeds": inputs_embeds,
+ "attention_mask": attention_mask,
+ "cache_position": cache_position,
+ "past_key_values": past_key_values,
+ "position_ids": text_position_ids,
+ }
+ # Create the masks
+ causal_mask_mapping = {
+ "full_attention": create_causal_mask(**mask_kwargs),
+ }
+ # The sliding window alternating layers are not always activated depending on the config
+ if self.has_sliding_layers:
+ causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
+
+ hidden_states = inputs_embeds
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask_mapping[decoder_layer.attention_type],
+ position_embeddings=position_embeddings,
+ position_ids=text_position_ids,
+ past_key_values=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None
+ )
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+
+@auto_docstring(
+ custom_intro="""
+ The Qwen2.5OmniThinker model, which consists of an audio backbone, a vision backbone and a language model.
+ """
+)
+class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin):
+ config: Qwen2_5OmniThinkerConfig
+ base_model_prefix = "thinker"
+ _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
+ _no_split_modules = ["Qwen2_5OmniAudioEncoder", "Qwen2_5OmniVisionEncoder"]
+
+ def __init__(self, config: Qwen2_5OmniThinkerConfig):
+ super().__init__(config)
+ self.audio_tower = Qwen2_5OmniAudioEncoder._from_config(config.audio_config)
+ self.visual = Qwen2_5OmniVisionEncoder._from_config(config.vision_config)
+ self.vocab_size = config.text_config.vocab_size
+ self.model = Qwen2_5OmniThinkerTextModel._from_config(config.text_config)
+ self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
+ self.spatial_merge_size = config.vision_config.spatial_merge_size
+ self.rope_deltas = None
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.model.set_input_embeddings(value)
+
+ def get_video_features(
+ self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
+ ):
+ """
+ Encodes videos into continuous embeddings that can be forwarded to the language model.
+
+ Args:
+ pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+ The tensors corresponding to the input videos.
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+ The temporal, height and width of feature shape of each video in LLM.
+ """
+ pixel_values_videos = pixel_values_videos.type(self.visual.dtype)
+ video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)
+ return video_embeds
+
+ def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
+ """
+ Encodes images into continuous embeddings that can be forwarded to the language model.
+
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+ The tensors corresponding to the input images.
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+ The temporal, height and width of feature shape of each image in LLM.
+ """
+ pixel_values = pixel_values.type(self.visual.dtype)
+ image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
+ return image_embeds
+
+ def get_audio_features(
+ self,
+ input_features: torch.FloatTensor,
+ feature_attention_mask: Optional[torch.LongTensor] = None,
+ audio_feature_lengths: Optional[torch.LongTensor] = None,
+ ):
+ """
+ Encodes audios into continuous embeddings that can be forwarded to the language model.
+
+ Args:
+ input_features (`torch.FloatTensor`):
+ The tensors corresponding to the input audios.
+ feature_attention_mask (`torch.LongTensor`, *optional*):
+ Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
+ audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
+ The length of feature shape of each audio in LLM.
+ """
+ if feature_attention_mask is not None:
+ audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)
+ input_features = input_features.permute(0, 2, 1)[feature_attention_mask.bool()].permute(1, 0)
+ # If no attention mask is given, fall back to any caller-provided `audio_feature_lengths`.
+
+ audio_feat_lengths, audio_output_lengths = self.audio_tower._get_feat_extract_output_lengths(
+ audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1)
+ )
+ feature_lens = audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1)
+ audio_outputs = self.audio_tower(
+ input_features,
+ feature_lens=feature_lens,
+ aftercnn_lens=audio_feat_lengths,
+ )
+ audio_features = audio_outputs.last_hidden_state
+
+ if audio_features.shape[0] != sum(audio_output_lengths.tolist()):
+ raise ValueError("length of audio_features should match audio_output_lengths")
+
+ return audio_features
+
+ def get_placeholder_mask(
+ self,
+ input_ids: torch.LongTensor,
+ inputs_embeds: torch.FloatTensor,
+ image_features: Optional[torch.FloatTensor] = None,
+ video_features: Optional[torch.FloatTensor] = None,
+ ):
+ """
+ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
+ equal to the length of multimodal features. If the lengths are different, an error is raised.
+ """
+ if input_ids is None:
+ special_image_mask = inputs_embeds == self.get_input_embeddings()(
+ torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
+ )
+ special_image_mask = special_image_mask.all(-1)
+ special_video_mask = inputs_embeds == self.get_input_embeddings()(
+ torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
+ )
+ special_video_mask = special_video_mask.all(-1)
+ special_audio_mask = (
+ inputs_embeds
+ == self.get_input_embeddings()(
+ torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device)
+ )
+ ).all(-1)
+ else:
+ special_image_mask = input_ids == self.config.image_token_id
+ special_video_mask = input_ids == self.config.video_token_id
+ special_audio_mask = input_ids == self.config.audio_token_id
+
+ n_image_tokens = special_image_mask.sum()
+ special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
+ if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
+ raise ValueError(
+ f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
+ )
+
+ n_video_tokens = special_video_mask.sum()
+ special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
+ if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
+ raise ValueError(
+ f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
+ )
+
+ special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
+ return special_image_mask, special_video_mask, special_audio_mask
+
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ input_features: Optional[torch.FloatTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ pixel_values_videos: Optional[torch.FloatTensor] = None,
+ image_grid_thw: Optional[torch.LongTensor] = None,
+ video_grid_thw: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ feature_attention_mask: Optional[torch.Tensor] = None,
+ audio_feature_lengths: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ rope_deltas: Optional[torch.LongTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ use_audio_in_video: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ video_second_per_grid: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> Union[tuple, Qwen2_5OmniThinkerCausalLMOutputWithPast]:
+ r"""
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+ The temporal, height and width of feature shape of each image in LLM.
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+ The temporal, height and width of feature shape of each video in LLM.
+ feature_attention_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
+ The length of feature shape of each audio in LLM.
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+ The rope index difference between sequence length and multimodal rope.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_audio_in_video (`bool`, *optional*):
+ Whether or not to use the audio track in the video; should be the same as the parameter in `process_audio_info`.
+ video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*):
+ Number of seconds per grid for each video, used for temporal feature mapping.
+
+ Example:
+
+ ```python
+ >>> from io import BytesIO
+ >>> from urllib.request import urlopen
+ >>> import librosa
+ >>> from qwen_vl_utils import process_vision_info
+ >>> from transformers import Qwen2_5OmniProcessor, Qwen2_5OmniThinkerForConditionalGeneration
+
+ >>> thinker = Qwen2_5OmniThinkerForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-Omni-7B")
+ >>> processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")
+
+ >>> conversations = [
+ >>> {'role': 'system', 'content': 'You are a helpful voice chat bot, and please respond to me in a casual conversation manner using random voice.'},
+ >>> {"role": "user", "content": [
+ >>> {"type": "image", "image_url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
+ >>> {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"},
+ >>> ]},
+ >>> ]
+
+ >>> text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
+ >>> audios = [ librosa.load(BytesIO(urlopen( conversations[1]['content'][1]['audio_url'] ).read()), sr=self.processor.feature_extractor.sampling_rate) ]
+ >>> images, videos = process_vision_info(conversations)
+ >>> inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors="pt", padding=True)
+
+ >>> # Generate
+ >>> inputs['use_audio_in_video'] = `True` or `False`
+ >>> generation = thinker.generate(**inputs, max_new_tokens=2048)
+ >>> generate_ids = generation[:, inputs.input_ids.size(1):]
+
+ >>> response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if inputs_embeds is None:
+ # 1. Extract the input embeddings
+ inputs_embeds = self.get_input_embeddings()(input_ids)
+
+ # 2. Merge text, audio, image and video features
+ if input_features is not None:
+ audio_features = self.get_audio_features(
+ input_features,
+ feature_attention_mask=feature_attention_mask,
+ audio_feature_lengths=audio_feature_lengths,
+ )
+ audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype)
+ _, _, audio_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
+ inputs_embeds = inputs_embeds.masked_scatter(audio_mask, audio_features)
+
+ if pixel_values is not None:
+ image_embeds = self.get_image_features(pixel_values, image_grid_thw)
+ image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
+ image_mask, _, _ = self.get_placeholder_mask(
+ input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
+ )
+ inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
+
+ if pixel_values_videos is not None:
+ video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
+ video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
+ _, video_mask, _ = self.get_placeholder_mask(
+ input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
+ )
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
+
+ if feature_attention_mask is not None:
+ audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)
+ else:
+ audio_feature_lengths = None
+
+ if attention_mask is not None and position_ids is None:
+ past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length()
+ if past_key_values_length == 0 or self.rope_deltas is None:
+ delta0 = (1 - attention_mask).sum(dim=-1).unsqueeze(1)
+ position_ids, rope_deltas = self.get_rope_index(
+ input_ids,
+ image_grid_thw,
+ video_grid_thw,
+ attention_mask,
+ use_audio_in_video,
+ audio_feature_lengths,
+ video_second_per_grid,
+ )
+ rope_deltas = rope_deltas - delta0
+ self.rope_deltas = rope_deltas
+ else:
+ batch_size, seq_length = input_ids.shape
+ delta = (past_key_values_length + self.rope_deltas).to(input_ids.device)
+ position_ids = torch.arange(seq_length, device=input_ids.device)
+ position_ids = position_ids.view(1, -1).expand(batch_size, -1)
+ position_ids = position_ids.add(delta)
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
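+
+ # Cached-decoding sketch: after prefill, the stored `rope_deltas` (offset between the text
+ # length and the multimodal rope index) lets each new token's position be computed as
+ # past_length + arange(seq_length) + delta, broadcast identically over the t/h/w grids.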
+
+ outputs = self.model(
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(
+ logits=logits, labels=labels, vocab_size=self.config.get_text_config().vocab_size
+ )
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return Qwen2_5OmniThinkerCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ rope_deltas=self.rope_deltas,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ cache_position=None,
+ position_ids=None,
+ use_cache=True,
+ pixel_values=None,
+ pixel_values_videos=None,
+ image_grid_thw=None,
+ video_grid_thw=None,
+ input_features=None,
+ feature_attention_mask=None,
+ use_audio_in_video=False,
+ video_second_per_grid=None,
+ is_first_iteration=False,
+ **kwargs,
+ ):
+ model_inputs = super().prepare_inputs_for_generation(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ cache_position=cache_position,
+ position_ids=position_ids,
+ use_cache=use_cache,
+ pixel_values=pixel_values,
+ pixel_values_videos=pixel_values_videos,
+ image_grid_thw=image_grid_thw,
+ video_grid_thw=video_grid_thw,
+ input_features=input_features,
+ feature_attention_mask=feature_attention_mask,
+ use_audio_in_video=use_audio_in_video,
+ video_second_per_grid=video_second_per_grid,
+ is_first_iteration=is_first_iteration,
+ **kwargs,
+ )
+
+ model_inputs["position_ids"] = None
+
+ if not is_first_iteration and use_cache:
+ model_inputs["pixel_values"] = None
+ model_inputs["pixel_values_videos"] = None
+ model_inputs["input_features"] = None
+
+ return model_inputs
+
+
+############################
+# Start Talker #
+############################
+
+
+@dataclass
+@auto_docstring(
+ custom_intro="""
+ Base class for Qwen2.5OmniTalker causal language model (or autoregressive) outputs.
+ """
+)
+class Qwen2_5OmniTalkerCausalLMOutputWithPast(ModelOutput):
+ r"""
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+ The rope index difference between sequence length and multimodal rope.
+ thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Hidden states from the thinker model that are used as input for the talker model. These represent the encoded
+ response that the talker model will use to generate speech tokens.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Cache] = None
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
+ attentions: Optional[tuple[torch.FloatTensor]] = None
+ rope_deltas: Optional[torch.LongTensor] = None
+ thinker_reply_part: Optional[torch.FloatTensor] = None
+
+
+@auto_docstring
+class Qwen2_5OmniTalkerModel(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniTalkerConfig
+ input_modalities = ("image", "video", "audio", "text")
+
+ _no_split_modules = ["Qwen2_5OmniTalkerDecoderLayer"]
+
+ def __init__(self, config: Qwen2_5OmniTalkerConfig):
+ super().__init__(config)
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.embedding_size, self.padding_idx)
+ self.layers = nn.ModuleList(
+ [Qwen2_5OmniDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
+ )
+ self._attn_implementation = config._attn_implementation
+ self.norm = Qwen2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.has_sliding_layers = "sliding_attention" in self.config.layer_types
+ self.rotary_emb = Qwen2_5OmniRotaryEmbedding(config=config)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[FlashAttentionKwargs],
+ ) -> Union[tuple, BaseModelOutputWithPast]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if (input_ids is None) ^ (inputs_embeds is not None):
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # torch.jit.trace() doesn't support cache objects in the output
+ if use_cache and past_key_values is None and not torch.jit.is_tracing():
+ past_key_values = DynamicCache(config=self.config)
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+
+ if cache_position is None:
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
+ cache_position = torch.arange(
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
+ )
+
+ # the hard coded `3` is for temporal, height and width.
+ if position_ids is None:
+ position_ids = cache_position.view(1, 1, -1).expand(3, inputs_embeds.shape[0], -1)
+ elif position_ids.ndim == 2:
+ position_ids = position_ids[None, ...].expand(3, position_ids.shape[0], -1)
+
+ # NOTE: we need to pass text position ids for packing. Qwen2-VL uses 3D positions
+ # where each dim indicates visual spatial positions for temporal/height/width grids.
+ # There are two scenarios when FA2-like packed masking might be activated.
+ # 1. User specifically passed packed `position_ids` and no attention mask.
+ # In this case we expect the user to create correct position ids for all 3 grids
+ # and prepend text-only position ids to it. The final tensor will be [4, bs, seq-len]
+ # 2. User runs forward with no attention mask and no position ids. In this case, position ids
+ # are prepared by the model (`get_rope_index`) as `[4, bs, seq-len]` tensor. Text-only positions are
+ # prepended by us when creating positions so that the mask is constructed correctly. NOTE: failing to pass
+ # text-only positions will cause incorrect mask construction; do not change `prepare_inputs_for_generation`
+ if position_ids.ndim == 3 and position_ids.shape[0] == 4:
+ text_position_ids = position_ids[0]
+ position_ids = position_ids[1:]
+ else:
+ # If inputs are not packed (usual 3D positions), do not prepare mask from position_ids
+ text_position_ids = None
+
+ # It may already have been prepared by e.g. `generate`
+ if not isinstance(causal_mask_mapping := attention_mask, dict):
+ # Prepare mask arguments
+ mask_kwargs = {
+ "config": self.config,
+ "input_embeds": inputs_embeds,
+ "attention_mask": attention_mask,
+ "cache_position": cache_position,
+ "past_key_values": past_key_values,
+ "position_ids": text_position_ids,
+ }
+ # Create the masks
+ causal_mask_mapping = {
+ "full_attention": create_causal_mask(**mask_kwargs),
+ }
+ # The sliding window alternating layers are not always activated depending on the config
+ if self.has_sliding_layers:
+ causal_mask_mapping["sliding_attention"] = create_sliding_window_causal_mask(**mask_kwargs)
+
+ hidden_states = inputs_embeds
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=causal_mask_mapping[decoder_layer.attention_type],
+ position_embeddings=position_embeddings,
+ position_ids=text_position_ids,
+ past_key_values=past_key_values,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns] if v is not None
+ )
+ return BaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=past_key_values,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ )
+
+
+class Qwen2_5OmniTalkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin):
+ config: Qwen2_5OmniTalkerConfig
+ base_model_prefix = "talker"
+ output_modalities = ("audio",)
+
+ def __init__(self, config: Qwen2_5OmniTalkerConfig):
+ super().__init__(config)
+
+ self.thinker_to_talker_proj = nn.Linear(config.embedding_size, config.hidden_size)
+
+ self.model = Qwen2_5OmniTalkerModel(config)
+ self.codebook_size = config.vocab_size
+ self.codec_head = nn.Linear(config.hidden_size, self.codebook_size, bias=False)
+
+ self.codec_bos_token = config.tts_codec_start_token_id
+ self.codec_eos_token = config.tts_codec_end_token_id
+ self.codec_pad_token = config.tts_codec_pad_token_id
+ self.codec_mask_token = config.tts_codec_mask_token_id
+
+ self.text_bos_token = config.tts_text_start_token_id
+ self.text_eos_token = config.tts_text_end_token_id
+ self.text_pad_token = config.tts_text_pad_token_id
+
+ self.spatial_merge_size = self.config.spatial_merge_size
+ self.rope_deltas = None
+
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.model.set_input_embeddings(value)
+
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ thinker_reply_part: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ rope_deltas: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ input_text_ids: Optional[torch.LongTensor] = None,
+ image_grid_thw: Optional[torch.LongTensor] = None,
+ video_grid_thw: Optional[torch.LongTensor] = None,
+ use_audio_in_video: Optional[bool] = None,
+ audio_feature_lengths: Optional[torch.LongTensor] = None,
+ video_second_per_grid: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[tuple, Qwen2_5OmniTalkerCausalLMOutputWithPast]:
+ r"""
+ thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Hidden states from the thinker model's output that represent the text reply part to be processed.
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+ The rope index difference between sequence length and multimodal rope.
+ input_text_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Input token IDs for text-only content, used for position calculation in multimodal contexts.
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+ The temporal, height and width of feature shape of each image in LLM.
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+ The temporal, height and width of feature shape of each video in LLM.
+ use_audio_in_video (`bool`, *optional*):
+ Whether or not to use the audio track in the video; should be the same as the parameter in `process_audio_info`.
+ audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
+ The length of feature shape of each audio in LLM.
+ video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*):
+ Number of seconds per grid for each video, used for temporal feature mapping.
+
+ Example:
+
+ ```python
+ >>> from io import BytesIO
+ >>> from urllib.request import urlopen
+ >>> import librosa
+ >>> from transformers import AutoProcessor, Qwen2_5OmniTalkerForConditionalGeneration
+
+ >>> model = Qwen2_5OmniTalkerForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B")
+ >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B")
+
+ >>> prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>Generate the caption in English:"
+ >>> url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"
+ >>> audio, _ = librosa.load(BytesIO(urlopen(url).read()), sr=self.processor.feature_extractor.sampling_rate)
+
+ >>> inputs = processor(text=prompt, audio=audio, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(**inputs, max_length=30)
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Generate the caption in English: Glass is breaking."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if attention_mask is not None and position_ids is None:
+ past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length()
+ if past_key_values_length == 0 or self.rope_deltas is None:
+ position_ids, rope_deltas = self.get_rope_index(
+ input_text_ids,
+ image_grid_thw,
+ video_grid_thw,
+ attention_mask,
+ use_audio_in_video,
+ audio_feature_lengths,
+ video_second_per_grid,
+ )
+
+ inputs_embeds[:, -1, :] += self.get_input_embeddings()(
+ torch.tensor([self.codec_bos_token], dtype=torch.long, device=inputs_embeds.device)
+ )
+ inputs_embeds[:, -2, :] += self.get_input_embeddings()(
+ torch.tensor([self.codec_pad_token], dtype=torch.long, device=inputs_embeds.device)
+ )
+ self.rope_deltas = rope_deltas
+
+ else:
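+                # Decoding with a warm cache: reuse the rope delta computed during
+                # prefill and offset plain `arange` positions by it.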
+ batch_size, seq_length, _ = inputs_embeds.shape
+ delta = (past_key_values_length + self.rope_deltas).to(input_ids.device)
+ position_ids = torch.arange(seq_length, device=input_ids.device)
+ position_ids = position_ids.view(1, -1).expand(batch_size, -1)
+ position_ids = position_ids.add(delta)
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
+
+ if inputs_embeds is None:
+            # 1. Decoding steps after the prefill: fuse codec token embeddings with the thinker reply stream
+ codec_embeds = self.get_input_embeddings()(input_ids)
+ inputs_embeds = codec_embeds + thinker_reply_part[:, :1, :]
+ if thinker_reply_part.shape[1] > 1:
+ thinker_reply_part = thinker_reply_part[:, 1:, :]
+
+ talker_lm_input = self.thinker_to_talker_proj(inputs_embeds)
+
+ if attention_mask is not None:
+ attention_mask = attention_mask.to(inputs_embeds.device)
+
+ outputs = self.model(
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=talker_lm_input,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.codec_head(hidden_states)
+ logits = logits.float()
+
+ loss = None
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return Qwen2_5OmniTalkerCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=hidden_states,
+ attentions=outputs.attentions,
+ rope_deltas=self.rope_deltas,
+ thinker_reply_part=thinker_reply_part,
+ )
+
+ def _get_initial_cache_position(self, seq_length, device, model_kwargs):
+ # Talker needs to calculate cache_position with input_ids, so pop inputs_embeds temporarily
+ inputs_embeds = model_kwargs.pop("inputs_embeds")
+ model_kwargs = super()._get_initial_cache_position(seq_length, device, model_kwargs)
+ model_kwargs["inputs_embeds"] = inputs_embeds
+ return model_kwargs
+
+ # prepare inputs for talker lm generation
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ input_text_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ thinker_reply_part=None,
+ cache_position=None,
+ position_ids=None,
+ use_cache=True,
+ pixel_values=None,
+ pixel_values_videos=None,
+ image_grid_thw=None,
+ video_grid_thw=None,
+ input_audio_features=None,
+ audio_feature_attention_mask=None,
+ audio_feature_lengths=None,
+ use_audio_in_video=False,
+ video_second_per_grid=None,
+ **kwargs,
+ ):
+ model_inputs = super().prepare_inputs_for_generation(
+ input_ids,
+ past_key_values,
+ attention_mask,
+ inputs_embeds,
+ cache_position,
+ use_cache=use_cache,
+ thinker_reply_part=thinker_reply_part,
+ input_text_ids=input_text_ids,
+ image_grid_thw=image_grid_thw,
+ video_grid_thw=video_grid_thw,
+ use_audio_in_video=use_audio_in_video,
+ audio_feature_lengths=audio_feature_lengths,
+ video_second_per_grid=video_second_per_grid,
+ **kwargs,
+ )
+
+ model_inputs["position_ids"] = None
+
+ return model_inputs
+
+ def _update_model_kwargs_for_generation(
+ self,
+ outputs: ModelOutput,
+ model_kwargs: dict[str, Any],
+ is_encoder_decoder: bool = False,
+ num_new_tokens: int = 1,
+ ) -> dict[str, Any]:
+ model_kwargs = super()._update_model_kwargs_for_generation(
+ outputs, model_kwargs, is_encoder_decoder, num_new_tokens
+ )
+
+ if getattr(outputs, "thinker_reply_part", None) is not None:
+ model_kwargs["thinker_reply_part"] = outputs.thinker_reply_part
+
+ return model_kwargs
+
+
+class Qwen2_5OmniDiTRotaryEmbedding(nn.Module):
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
+
+ def __init__(self, config: Qwen2_5OmniDiTConfig, device=None):
+ super().__init__()
+ self.max_seq_len_cached = config.max_position_embeddings
+ self.original_max_seq_len = config.max_position_embeddings
+
+ self.config = config
+
+ self.rope_type = self.config.rope_parameters["rope_type"]
+ rope_init_fn: Callable = self.compute_default_rope_parameters
+ if self.rope_type != "default":
+ rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+ inv_freq, self.attention_scaling = rope_init_fn(self.config, device)
+
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+ self.register_buffer("original_inv_freq", inv_freq.clone(), persistent=False)
+
+ @staticmethod
+ def compute_default_rope_parameters(
+ config: Optional[Qwen2_5OmniDiTConfig] = None,
+ device: Optional["torch.device"] = None,
+ seq_len: Optional[int] = None,
+ ) -> tuple["torch.Tensor", float]:
+ """
+ Computes the inverse frequencies according to the original RoPE implementation
+ Args:
+ config ([`~transformers.PreTrainedConfig`]):
+ The model configuration.
+ device (`torch.device`):
+ The device to use for initialization of the inverse frequencies.
+ seq_len (`int`, *optional*):
+ The current sequence length. Unused for this type of RoPE.
+ Returns:
+ Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the
+ post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).
+ """
+ base = config.rope_parameters["rope_theta"]
+ dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
+
+ attention_factor = 1.0 # Unused in this type of RoPE
+
+ # Compute the inverse frequencies
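+        # inv_freq[j] = 1 / base^(2j / dim) for j in [0, dim/2); e.g. with
+        # base=10000 and dim=64 the frequencies span 1.0 down to ~1.3e-4.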
+ inv_freq = 1.0 / (
+ base ** (torch.arange(0, dim, 2, dtype=torch.int64).to(device=device, dtype=torch.float) / dim)
+ )
+ return inv_freq, attention_factor
+
+ @torch.no_grad()
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
+ def forward(self, x, position_ids):
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
+ position_ids_expanded = position_ids[:, None, :].float()
+
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
+ with maybe_autocast(device_type=device_type, enabled=False): # Force float32
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+ emb = torch.cat((freqs, freqs), dim=-1)
+ cos = emb.cos() * self.attention_scaling
+ sin = emb.sin() * self.attention_scaling
+
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+class TimeDelayNetBlock(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ dilation,
+ ):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ dilation=dilation,
+ padding="same",
+ padding_mode="reflect",
+ )
+ self.activation = nn.ReLU()
+
+ def forward(self, hidden_states: torch.Tensor):
+ return self.activation(self.conv(hidden_states))
+
+
+class Res2NetBlock(torch.nn.Module):
+ def __init__(self, in_channels, out_channels, scale=8, kernel_size=3, dilation=1):
+ super().__init__()
+
+ in_channel = in_channels // scale
+ hidden_channel = out_channels // scale
+
+ self.blocks = nn.ModuleList(
+ [
+ TimeDelayNetBlock(
+ in_channel,
+ hidden_channel,
+ kernel_size=kernel_size,
+ dilation=dilation,
+ )
+                for _ in range(scale - 1)
+ ]
+ )
+ self.scale = scale
+
+ def forward(self, hidden_states):
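+        # Res2Net multi-scale processing: split channels into `scale` groups;
+        # group 0 is an identity path, and each subsequent group is convolved
+        # after summing with the previous group's output.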
+ outputs = []
+ for i, hidden_part in enumerate(torch.chunk(hidden_states, self.scale, dim=1)):
+ if i == 0:
+ output_part = hidden_part
+ elif i == 1:
+ output_part = self.blocks[i - 1](hidden_part)
+ else:
+ output_part = self.blocks[i - 1](hidden_part + output_part)
+ outputs.append(output_part)
+ output = torch.cat(outputs, dim=1)
+ return output
+
+
+class SqueezeExcitationBlock(nn.Module):
+ def __init__(self, in_channels, se_channels, out_channels):
+ super().__init__()
+
+ self.conv1 = nn.Conv1d(
+ in_channels=in_channels,
+ out_channels=se_channels,
+ kernel_size=1,
+ padding="same",
+ padding_mode="reflect",
+ )
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv1d(
+ in_channels=se_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ padding="same",
+ padding_mode="reflect",
+ )
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, hidden_states):
+ hidden_states_mean = hidden_states.mean(dim=2, keepdim=True)
+
+ hidden_states_mean = self.relu(self.conv1(hidden_states_mean))
+ hidden_states_mean = self.sigmoid(self.conv2(hidden_states_mean))
+
+ return hidden_states * hidden_states_mean
+
+
+class AttentiveStatisticsPooling(nn.Module):
+ """This class implements an attentive statistic pooling layer for each channel.
+ It returns the concatenated mean and std of the input tensor.
+ """
+
+ def __init__(self, channels, attention_channels=128):
+ super().__init__()
+
+ self.eps = 1e-12
+ self.tdnn = TimeDelayNetBlock(channels * 3, attention_channels, 1, 1)
+ self.tanh = nn.Tanh()
+ self.conv = nn.Conv1d(
+ in_channels=attention_channels,
+ out_channels=channels,
+ kernel_size=1,
+ padding="same",
+ padding_mode="reflect",
+ )
+
+ def _length_to_mask(self, length, max_len=None, dtype=None, device=None):
+ """Creates a binary mask for each sequence.
+
+ Reference: https://discuss.pytorch.org/t/how-to-generate-variable-length-mask/23397/3
+
+ Arguments
+ ---------
+ length : torch.LongTensor
+ Containing the length of each sequence in the batch. Must be 1D.
+ max_len : int
+ Max length for the mask, also the size of the second dimension.
+ dtype : torch.dtype, default: None
+ The dtype of the generated mask.
+ device: torch.device, default: None
+ The device to put the mask variable.
+
+ Returns
+ -------
+ mask : tensor
+ The binary mask.
+ """
+
+ if max_len is None:
+            max_len = length.max().long().item()
+        # use arange broadcasting against the per-sequence lengths to build the mask
+        mask = torch.arange(max_len, device=length.device, dtype=length.dtype).expand(
+            len(length), max_len
+        ) < length.unsqueeze(1)
+
+ mask = torch.as_tensor(mask, dtype=dtype, device=device)
+ return mask
+
+ def _compute_statistics(self, x, m, dim=2):
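+        # `m` holds normalized weights that sum to one along `dim`, so these are
+        # weighted first and second moments; the clamp guards the sqrt.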
+ mean = (m * x).sum(dim)
+ std = torch.sqrt((m * (x - mean.unsqueeze(dim)).pow(2)).sum(dim).clamp(self.eps))
+ return mean, std
+
+ def forward(self, hidden_states):
+ seq_length = hidden_states.shape[-1]
+ lengths = torch.ones(hidden_states.shape[0], device=hidden_states.device)
+
+ # Make binary mask of shape [N, 1, L]
+ mask = self._length_to_mask(
+ lengths * seq_length, max_len=seq_length, dtype=hidden_states.dtype, device=hidden_states.device
+ )
+ mask = mask.unsqueeze(1)
+
+ # Expand the temporal context of the pooling layer by allowing the
+ # self-attention to look at global properties of the utterance.
+ total = mask.sum(dim=2, keepdim=True)
+
+ mean, std = self._compute_statistics(hidden_states, mask / total)
+ mean = mean.unsqueeze(2).repeat(1, 1, seq_length)
+ std = std.unsqueeze(2).repeat(1, 1, seq_length)
+ attention = torch.cat([hidden_states, mean, std], dim=1)
+
+ # Apply layers
+ attention = self.conv(self.tanh(self.tdnn(attention)))
+
+ # Filter out zero-paddings
+ attention = attention.masked_fill(mask == 0, float("-inf"))
+
+ attention = F.softmax(attention, dim=2)
+ mean, std = self._compute_statistics(hidden_states, attention)
+ # Append mean and std of the batch
+ pooled_stats = torch.cat((mean, std), dim=1)
+ pooled_stats = pooled_stats.unsqueeze(2)
+
+ return pooled_stats
+
+
+class SqueezeExcitationRes2NetBlock(nn.Module):
+    """An implementation of a building block in ECAPA-TDNN, i.e.,
+ TDNN-Res2Net-TDNN-SqueezeExcitationBlock.
+ """
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ res2net_scale=8,
+ se_channels=128,
+ kernel_size=1,
+ dilation=1,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.tdnn1 = TimeDelayNetBlock(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ dilation=1,
+ )
+ self.res2net_block = Res2NetBlock(out_channels, out_channels, res2net_scale, kernel_size, dilation)
+ self.tdnn2 = TimeDelayNetBlock(
+ out_channels,
+ out_channels,
+ kernel_size=1,
+ dilation=1,
+ )
+ self.se_block = SqueezeExcitationBlock(out_channels, se_channels, out_channels)
+
+ def forward(self, hidden_state):
+ residual = hidden_state
+
+ hidden_state = self.tdnn1(hidden_state)
+ hidden_state = self.res2net_block(hidden_state)
+ hidden_state = self.tdnn2(hidden_state)
+ hidden_state = self.se_block(hidden_state)
+
+ return hidden_state + residual
+
+
+class ECAPA_TimeDelayNet(torch.nn.Module):
+    """An implementation of the speaker embedding model proposed in the paper
+ "ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in
+ TDNN Based Speaker Verification" (https://huggingface.co/papers/2005.07143).
+ """
+
+ def __init__(self, config: Qwen2_5OmniDiTConfig):
+ super().__init__()
+ if len(config.enc_channels) != len(config.enc_kernel_sizes) or len(config.enc_channels) != len(
+ config.enc_dilations
+ ):
+ raise ValueError("enc_channels, enc_kernel_sizes and enc_dilations should have same length")
+ self.channels = config.enc_channels
+ self.blocks = nn.ModuleList()
+
+ # The initial TDNN layer
+ self.blocks.append(
+ TimeDelayNetBlock(
+ config.mel_dim,
+ config.enc_channels[0],
+ config.enc_kernel_sizes[0],
+ config.enc_dilations[0],
+ )
+ )
+
+ # SE-Res2Net layers
+ for i in range(1, len(config.enc_channels) - 1):
+ self.blocks.append(
+ SqueezeExcitationRes2NetBlock(
+ config.enc_channels[i - 1],
+ config.enc_channels[i],
+ res2net_scale=config.enc_res2net_scale,
+ se_channels=config.enc_se_channels,
+ kernel_size=config.enc_kernel_sizes[i],
+ dilation=config.enc_dilations[i],
+ )
+ )
+
+ # Multi-layer feature aggregation
+ self.mfa = TimeDelayNetBlock(
+ config.enc_channels[-1],
+ config.enc_channels[-1],
+ config.enc_kernel_sizes[-1],
+ config.enc_dilations[-1],
+ )
+
+ # Attentive Statistical Pooling
+ self.asp = AttentiveStatisticsPooling(
+ config.enc_channels[-1],
+ attention_channels=config.enc_attention_channels,
+ )
+
+ # Final linear transformation
+ self.fc = nn.Conv1d(
+ in_channels=config.enc_channels[-1] * 2,
+ out_channels=config.enc_dim,
+ kernel_size=1,
+ padding="same",
+ padding_mode="reflect",
+ )
+
+ def forward(self, hidden_states):
+ # Minimize transpose for efficiency
+ hidden_states = hidden_states.transpose(1, 2)
+
+ hidden_states_list = []
+ for layer in self.blocks:
+ hidden_states = layer(hidden_states)
+ hidden_states_list.append(hidden_states)
+
+ # Multi-layer feature aggregation
+ hidden_states = torch.cat(hidden_states_list[1:], dim=1)
+ hidden_states = self.mfa(hidden_states)
+
+ # Attentive Statistical Pooling
+ hidden_states = self.asp(hidden_states)
+
+ # Final linear transformation
+ hidden_states = self.fc(hidden_states)
+
+ hidden_states = hidden_states.squeeze(-1)
+ return hidden_states
+
+
+class DiTInputEmbedding(nn.Module):
+ def __init__(self, config: Qwen2_5OmniDiTConfig):
+ super().__init__()
+ self.proj = nn.Linear(
+ config.mel_dim + config.enc_dim + config.enc_emb_dim + config.emb_dim,
+ config.hidden_size,
+ )
+ self.spk_encoder = ECAPA_TimeDelayNet(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ speaker_embedding: torch.Tensor,
+ condition_vector: torch.Tensor,
+ code_embed: torch.Tensor,
+ drop_audio_cond: Optional[bool] = False,
+ code_embed_uncond: Optional[bool] = None,
+ apply_cfg: Optional[bool] = True,
+ ):
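+        # For classifier-free guidance the batch is doubled: the first half keeps
+        # the conditioning inputs, the second half receives zeroed (null) speaker
+        # and condition vectors plus the unconditional code embedding.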
+ if apply_cfg:
+ hidden_states = torch.cat([hidden_states, hidden_states], dim=0)
+ speaker_embedding = torch.cat([speaker_embedding, torch.zeros_like(speaker_embedding)], dim=0)
+ condition_vector = torch.cat([condition_vector, torch.zeros_like(condition_vector)], dim=0)
+ code_embed = torch.cat([code_embed, code_embed_uncond], dim=0)
+ elif drop_audio_cond: # cfg for cond audio
+ condition_vector = torch.zeros_like(condition_vector)
+ speaker_embedding = torch.zeros_like(speaker_embedding)
+ condition_vector = self.spk_encoder(condition_vector).unsqueeze(1).repeat(1, hidden_states.size(1), 1)
+ hidden_states = self.proj(torch.cat((hidden_states, condition_vector, code_embed, speaker_embedding), dim=-1))
+
+ return hidden_states
+
+
+# Transformer backbone using DiT blocks
+class DiTCodecEmbedding(nn.Module):
+ def __init__(self, codec_num_embeds, codec_dim, repeats):
+ super().__init__()
+ self.repeats = repeats
+ self.codec_embed = nn.Embedding(codec_num_embeds + 1, codec_dim)
+
+ def forward(self, code, drop_code=False):
+ if drop_code:
+ code = torch.zeros_like(code)
+ code_embed = self.codec_embed(code)
+
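+        # Each codec frame spans `self.repeats` mel frames, so tile the code
+        # embeddings along the time axis to match the mel frame rate.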
+ code_embed = torch.repeat_interleave(code_embed, repeats=self.repeats, dim=1)
+ return code_embed
+
+
+# AdaLayerNormZero
+# returns the modulated x for the attention input, plus parameters for the later MLP modulation
+class Qwen2_5_OmniAdaLayerNormZero(nn.Module):
+ def __init__(self, dim):
+ super().__init__()
+
+ self.silu = nn.SiLU()
+ self.linear = nn.Linear(dim, dim * 6)
+
+ self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
+
+ def forward(self, hidden_states, emb=None):
+ emb = self.linear(self.silu(emb))
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = torch.chunk(emb, 6, dim=1)
+
+ hidden_states = self.norm(hidden_states) * (1 + scale_msa[:, None]) + shift_msa[:, None]
+ return hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp
+
+
+# AdaLayerNormZero for the final layer
+# returns only the modulated x for the attention input, since there is no further MLP modulation
+class Qwen2_5_OmniAdaLayerNormZero_Final(nn.Module):
+ def __init__(self, dim):
+ super().__init__()
+
+ self.silu = nn.SiLU()
+ self.linear = nn.Linear(dim, dim * 2)
+
+ self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
+
+ def forward(self, hidden_states, emb):
+ emb = self.linear(self.silu(emb))
+ scale, shift = torch.chunk(emb, 2, dim=1)
+
+ hidden_states = self.norm(hidden_states) * (1 + scale)[:, None, :] + shift[:, None, :]
+ return hidden_states
+
+
+# FeedForward
+class DiTMLP(nn.Module):
+ def __init__(self, dim, mult=4, dropout=0.0):
+ super().__init__()
+ inner_dim = int(dim * mult)
+
+ self.ff = nn.ModuleList(
+ [
+ nn.Linear(dim, inner_dim),
+ nn.GELU(approximate="tanh"),
+ nn.Dropout(dropout),
+ nn.Linear(inner_dim, dim),
+ ]
+ )
+
+ def forward(self, hidden_states):
+ for layer in self.ff:
+ hidden_states = layer(hidden_states)
+ return hidden_states
+
+
+# Modified from Llama with a different rotate function; will be fixed in the next release
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+
+ def rotate_half_codec(x):
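+        # Interleaved rotation: pairs adjacent dims (x0, x1) -> (-x1, x0), unlike
+        # Llama's rotate_half, which splits the head dim into two contiguous halves.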
+ # x = rearrange(x, "... (d r) -> ... d r", r=2)
+ x = x.reshape(*x.shape[:-1], -1, 2)
+ x1, x2 = x.unbind(dim=-1)
+ x = torch.stack((-x2, x1), dim=-1)
+ return x.reshape(*x.shape[:-2], -1)
+
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half_codec(q) * sin)
+ k_embed = (k * cos) + (rotate_half_codec(k) * sin)
+ return q_embed, k_embed
+
+
+class DiTAttention(nn.Module):
+ def __init__(self, config: Qwen2_5OmniDiTConfig):
+ super().__init__()
+
+ self.config = config
+ self.dim = config.hidden_size
+ self.heads = config.num_attention_heads
+ self.inner_dim = config.head_dim * config.num_attention_heads
+ self.dropout = config.dropout
+ self.is_causal = False
+
+ self.to_q = nn.Linear(config.hidden_size, self.inner_dim)
+ self.to_k = nn.Linear(config.hidden_size, self.inner_dim)
+ self.to_v = nn.Linear(config.hidden_size, self.inner_dim)
+
+ self.to_out = nn.ModuleList([nn.Linear(self.inner_dim, config.hidden_size), nn.Dropout(config.dropout)])
+
+ def forward(
+ self,
+ hidden_states, # noised input x
+ position_embeddings=None, # rotary position embedding for x
+ attention_mask=None,
+ ) -> torch.Tensor:
+ batch_size = hidden_states.shape[0]
+
+ # `sample` projections.
+ query = self.to_q(hidden_states)
+ key = self.to_k(hidden_states)
+ value = self.to_v(hidden_states)
+
+ # attention
+ inner_dim = key.shape[-1]
+ head_dim = inner_dim // self.heads
+ query = query.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
+ key = key.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
+ value = value.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
+
+ # apply rotary position embedding
+        # Due to the training process, RoPE is applied only to the first head; will be fixed in the next release
+ cos, sin = position_embeddings
+ query[:, :1], key[:, :1] = apply_rotary_pos_emb(query[:, :1], key[:, :1], cos, sin)
+
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+ attention_weights, _ = attention_interface(
+ self,
+ query,
+ key,
+ value,
+ attention_mask=attention_mask,
+ is_causal=False,
+ )
+
+ # mask. e.g. inference got a batch with different target durations, mask out the padding
+ attention_weights = attention_weights.reshape(batch_size, -1, self.heads * head_dim)
+ attention_weights = attention_weights.to(query.dtype)
+
+ # linear proj
+ attention_output = self.to_out[0](attention_weights)
+ attention_output = self.to_out[1](attention_output)
+
+ return attention_output
+
+
+# time step conditioning embedding
+class SinusPositionEmbedding(nn.Module):
+ def __init__(self, dim):
+ super().__init__()
+ self.dim = dim
+
+ def forward(self, hidden_states, scale=1000):
+ device = hidden_states.device
+ half_dim = self.dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb)
+ emb = scale * hidden_states.unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+ return emb.type_as(hidden_states)
+
+
+class DiTTimestepEmbedding(nn.Module):
+ def __init__(self, dim, freq_embed_dim=256):
+ super().__init__()
+ self.time_embed = SinusPositionEmbedding(freq_embed_dim)
+ self.time_mlp = nn.ModuleList([nn.Linear(freq_embed_dim, dim), nn.SiLU(), nn.Linear(dim, dim)])
+
+ def forward(self, timestep):
+ time_hidden = self.time_embed(timestep)
+ time_hidden = time_hidden.to(timestep.dtype)
+ for layer in self.time_mlp:
+ time_hidden = layer(time_hidden) # b d
+ return time_hidden
+
+
+class DiTDecoderLayer(nn.Module):
+ def __init__(self, config: Qwen2_5OmniDiTConfig, look_ahead_block=0, look_backward_block=0):
+ super().__init__()
+ self.attn_norm = Qwen2_5_OmniAdaLayerNormZero(config.hidden_size)
+
+ self.attn = DiTAttention(config)
+ self.look_ahead_block = look_ahead_block
+ self.look_backward_block = look_backward_block
+ self.ff_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False, eps=1e-6)
+ self.ff = DiTMLP(dim=config.hidden_size, mult=config.ff_mult, dropout=config.dropout)
+
+ def forward(
+ self, hidden_states, timestep, position_embeddings=None, block_diff=None
+ ): # x: noised input, t: time embedding
+ # pre-norm & modulation for attention input
+ norm, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.attn_norm(hidden_states, emb=timestep)
+
+ # attention
+ attn_output = self.attn(
+ hidden_states=norm,
+ position_embeddings=position_embeddings,
+ attention_mask=(block_diff >= -float(self.look_backward_block))
+ & (block_diff <= float(self.look_ahead_block)),
+ )
+
+ # process attention output for input x
+ hidden_states = hidden_states + gate_msa.unsqueeze(1) * attn_output
+
+ norm = self.ff_norm(hidden_states) * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
+ ff_output = self.ff(norm)
+ hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output
+
+ return hidden_states
+
+
+class SnakeBeta(nn.Module):
+ """
+ A modified Snake function which uses separate parameters for the magnitude of the periodic components
+ Shape:
+ - Input: (B, C, T)
+ - Output: (B, C, T), same shape as the input
+ Parameters:
+ - alpha - trainable parameter that controls frequency
+ - beta - trainable parameter that controls magnitude
+ References:
+ - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
+ https://huggingface.co/papers/2006.08195
+ """
+
+ def __init__(self, in_features, alpha=1.0):
+ super().__init__()
+ self.in_features = in_features
+
+ # initialize alpha
+ self.alpha = Parameter(torch.zeros(in_features) * alpha)
+ self.beta = Parameter(torch.zeros(in_features) * alpha)
+
+ self.no_div_by_zero = 0.000000001
+
+ def forward(self, hidden_states):
+ """
+ Forward pass of the function.
+ Applies the function to the input elementwise.
+        SnakeBeta := x + 1/beta * sin^2(alpha * x)
+ """
+ alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
+ beta = self.beta.unsqueeze(0).unsqueeze(-1)
+ alpha = torch.exp(alpha)
+ beta = torch.exp(beta)
+ hidden_states = hidden_states + (1.0 / (beta + self.no_div_by_zero)) * torch.pow(
+ torch.sin(hidden_states * alpha), 2
+ )
+
+ return hidden_states
+
+
+class UpSample1d(nn.Module):
+ def __init__(self, ratio=2, kernel_size=None):
+ super().__init__()
+ self.ratio = ratio
+ self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
+ self.stride = ratio
+ self.pad = self.kernel_size // ratio - 1
+ self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
+ self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
+
+ filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size)
+ self.register_buffer("filter", filter, persistent=False)
+
+ def forward(self, hidden_states):
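+        # Anti-aliased upsampling: replicate-pad, apply a transposed conv with a
+        # Kaiser-windowed sinc low-pass filter (scaled by `ratio` to preserve
+        # gain), then trim the filter transients on both edges.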
+ channels = hidden_states.shape[1]
+
+ hidden_states = F.pad(hidden_states, (self.pad, self.pad), mode="replicate")
+ hidden_states = self.ratio * F.conv_transpose1d(
+ hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels
+ )
+ hidden_states = hidden_states[..., self.pad_left : -self.pad_right]
+
+ return hidden_states
+
+
+class DownSample1d(nn.Module):
+ def __init__(self, ratio=2, kernel_size=None):
+ super().__init__()
+ cutoff = 0.5 / ratio
+ half_width = 0.6 / ratio
+ self.cutoff = cutoff
+ self.half_width = half_width
+ self.kernel_size = kernel_size
+
+ if cutoff < 0.0:
+ raise ValueError("Minimum cutoff must be larger than zero.")
+ if cutoff > 0.5:
+ raise ValueError("A cutoff above 0.5 does not make sense.")
+
+ self.even = kernel_size % 2 == 0
+ self.pad_left = kernel_size // 2 - int(self.even)
+ self.pad_right = kernel_size // 2
+ self.stride = ratio
+ filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
+ self.register_buffer("filter", filter, persistent=False)
+
+ def forward(self, hidden_states):
+ channels = hidden_states.shape[1]
+ hidden_states = F.pad(hidden_states, (self.pad_left, self.pad_right), mode="replicate")
+ out = F.conv1d(hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels)
+ return out
+
+
+class TorchActivation1d(nn.Module):
+ def __init__(
+ self,
+ activation,
+ up_ratio: int = 2,
+ down_ratio: int = 2,
+ up_kernel_size: int = 12,
+ down_kernel_size: int = 12,
+ ):
+ super().__init__()
+ if not callable(activation):
+ raise TypeError("Activation function must be callable")
+ self.act = activation
+ self.upsample = UpSample1d(up_ratio, up_kernel_size)
+ self.downsample = DownSample1d(down_ratio, down_kernel_size)
+
+ def forward(self, hidden_states):
+ hidden_states = self.upsample(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.downsample(hidden_states)
+
+ return hidden_states
+
+
+class AMPBlock(torch.nn.Module):
+ def __init__(
+ self,
+ channels,
+ kernel_size=3,
+ dilation=(1, 3, 5),
+ ):
+ super().__init__()
+
+ self.convs1 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[0],
+ padding=self._get_padding(kernel_size, dilation[0]),
+ ),
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[1],
+ padding=self._get_padding(kernel_size, dilation[1]),
+ ),
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[2],
+ padding=self._get_padding(kernel_size, dilation[2]),
+ ),
+ ]
+ )
+
+ self.convs2 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=self._get_padding(kernel_size, 1),
+ ),
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=self._get_padding(kernel_size, 1),
+ ),
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=self._get_padding(kernel_size, 1),
+ ),
+ ]
+ )
+
+ self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers
+
+ self.activations = nn.ModuleList(
+ [TorchActivation1d(activation=SnakeBeta(channels)) for _ in range(self.num_layers)]
+ )
+
+ def _get_padding(self, kernel_size, dilation=1):
+ return int((kernel_size * dilation - dilation) / 2)
+
+ def forward(self, hidden_states):
+ acts1, acts2 = self.activations[::2], self.activations[1::2]
+ for conv1, conv2, act1, act2 in zip(self.convs1, self.convs2, acts1, acts2):
+ residual = hidden_states
+ hidden_states = act1(hidden_states)
+ hidden_states = conv1(hidden_states)
+ hidden_states = act2(hidden_states)
+ hidden_states = conv2(hidden_states)
+ hidden_states = residual + hidden_states
+
+ return hidden_states
+
+
+@auto_docstring(
+ custom_intro="""
+    The full Qwen2.5Omni Token2WavBigVGAN model, which takes a mel spectrogram as input and predicts a waveform.
+ """
+)
+class Qwen2_5OmniToken2WavBigVGANModel(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniBigVGANConfig
+ input_modalities = "audio"
+
+ def __init__(self, config: Qwen2_5OmniBigVGANConfig):
+ super().__init__(config)
+ self.num_residual_blocks = len(config.resblock_kernel_sizes)
+ self.num_upsample_layers = len(config.upsample_rates)
+
+ self.conv_pre = nn.Conv1d(config.mel_dim, config.upsample_initial_channel, 7, 1, padding=3)
+
+ # Removing extra ModuleList breaks official state dict
+ ups = [
+ nn.ModuleList(
+ [
+ nn.ConvTranspose1d(
+ config.upsample_initial_channel // (2**layer_idx),
+ config.upsample_initial_channel // (2 ** (layer_idx + 1)),
+ kernel_size,
+ stride,
+ padding=(kernel_size - stride) // 2,
+ )
+ ]
+ )
+ for layer_idx, (stride, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes))
+ ]
+ self.ups = nn.ModuleList(ups)
+
+ self.resblocks = nn.ModuleList(
+ [
+ AMPBlock(config.upsample_initial_channel // (2 ** (layer_idx + 1)), kernel_size, dilation)
+ for layer_idx in range(self.num_upsample_layers)
+ for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes)
+ ]
+ )
+
+ self.activation_post = TorchActivation1d(
+ activation=SnakeBeta(config.upsample_initial_channel // (2**self.num_upsample_layers))
+ )
+ self.conv_post = nn.Conv1d(
+ config.upsample_initial_channel // (2**self.num_upsample_layers), 1, 7, 1, padding=3, bias=False
+ )
+
+ self.post_init()
+
+ def normalize_spectrogram(self, spectrogram, max_value, min_db):
+ return torch.clamp((2 * max_value) * ((spectrogram - min_db) / (-min_db)) - max_value, -max_value, max_value)
+
+ def amplitude_to_db(self, amplitude, min_db_level):
+ min_level = torch.exp(
+ torch.tensor(min_db_level / 20.0 * np.log(10), device=amplitude.device, dtype=amplitude.dtype)
+ )
+ return 20 * torch.log10(torch.clamp(amplitude, min=min_level))
+
+ def process_mel_spectrogram(self, mel_spectrogram):
+ amplitude_spectrum = torch.exp(mel_spectrogram)
+ decibel_spectrum = self.amplitude_to_db(amplitude_spectrum, -115) - 20
+ return self.normalize_spectrogram(decibel_spectrum, 1, -115)
+
+ def forward(self, mel_spectrogram, **kwargs):
+ processed_spectrogram = self.process_mel_spectrogram(mel_spectrogram)
+ hidden_representation = self.conv_pre(processed_spectrogram)
+
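+        # BigVGAN stack: each upsampling stage feeds a bank of AMP residual
+        # blocks whose outputs are averaged (multi-receptive-field fusion).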
+ for layer_index in range(self.num_upsample_layers):
+ hidden_representation = self.ups[layer_index][0](hidden_representation)
+ residual_output = sum(
+ self.resblocks[layer_index * self.num_residual_blocks + block_index](hidden_representation)
+ for block_index in range(self.num_residual_blocks)
+ )
+ residual_output = residual_output / self.num_residual_blocks
+ hidden_representation = residual_output
+
+ hidden_representation = self.activation_post(hidden_representation)
+ output_waveform = self.conv_post(hidden_representation)
+ return torch.clamp(output_waveform, min=-1.0, max=1.0).squeeze().cpu()
+
+
+class RungeKutta4ODESolver:
+ def __init__(self, function, initial_value):
+ self.function = function
+ self.initial_value = initial_value
+
+ self._one_third = 1 / 3
+ self._two_thirds = 2 / 3
+
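+    # A single step of the fourth-order "3/8 rule" Runge-Kutta scheme:
+    # y_{n+1} = y_n + h/8 * (k1 + 3*k2 + 3*k3 + k4), with nodes at 0, 1/3, 2/3, 1.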
+ def _rk4_step(self, function, time_start, time_step, time_end, value_start, function_value_start=None):
+ k1 = function_value_start if function_value_start is not None else function(time_start, value_start)
+ k2 = function(time_start + time_step * self._one_third, value_start + time_step * k1 * self._one_third)
+ k3 = function(time_start + time_step * self._two_thirds, value_start + time_step * (k2 - k1 * self._one_third))
+ k4 = function(time_end, value_start + time_step * (k1 - k2 + k3))
+ return (k1 + 3 * (k2 + k3) + k4) * time_step / 8
+
+ def _compute_step(self, function, time_start, time_step, time_end, value_start):
+ function_value_start = function(time_start, value_start)
+ return self._rk4_step(
+ function, time_start, time_step, time_end, value_start, function_value_start=function_value_start
+ ), function_value_start
+
+ def _linear_interpolation(self, time_start, time_end, value_start, value_end, time_point):
+ if time_point == time_start:
+ return value_start
+ if time_point == time_end:
+ return value_end
+ weight = (time_point - time_start) / (time_end - time_start)
+ return value_start + weight * (value_end - value_start)
+
+ def integrate(self, time_points):
+ solution = torch.empty(
+ len(time_points),
+ *self.initial_value.shape,
+ dtype=self.initial_value.dtype,
+ device=self.initial_value.device,
+ )
+ solution[0] = self.initial_value
+
+ current_index = 1
+ current_value = self.initial_value
+ for time_start, time_end in zip(time_points[:-1], time_points[1:]):
+ time_step = time_end - time_start
+ delta_value, _ = self._compute_step(self.function, time_start, time_step, time_end, current_value)
+ next_value = current_value + delta_value
+
+ while current_index < len(time_points) and time_end >= time_points[current_index]:
+ solution[current_index] = self._linear_interpolation(
+ time_start, time_end, current_value, next_value, time_points[current_index]
+ )
+ current_index += 1
+
+ current_value = next_value
+
+ return solution
+
+
+@auto_docstring(
+ custom_intro="""
+    The full Qwen2.5Omni Token2WavDiT model, which takes speech tokens as input and predicts a mel spectrogram.
+ """
+)
+class Qwen2_5OmniToken2WavDiTModel(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniDiTConfig
+ input_modalities = "audio"
+ _no_split_modules = ["DiTDecoderLayer"]
+
+ def __init__(self, config: Qwen2_5OmniDiTConfig):
+ super().__init__(config)
+ self.mel_dim = config.mel_dim
+ self.repeats = config.repeats
+ self.time_embed = DiTTimestepEmbedding(config.hidden_size)
+
+ self.text_embed = DiTCodecEmbedding(config.num_embeds, config.emb_dim, config.repeats)
+ self.input_embed = DiTInputEmbedding(config)
+
+ self.rotary_embed = Qwen2_5OmniDiTRotaryEmbedding(config=config)
+
+ self.hidden_size = config.hidden_size
+ self.layers = config.num_hidden_layers
+ self.block_size = config.block_size
+ self.num_attention_heads = config.num_attention_heads
+
+ self.transformer_blocks = nn.ModuleList()
+ for i in range(config.num_hidden_layers):
+ self.transformer_blocks.append(
+ DiTDecoderLayer(
+ config,
+ look_ahead_block=1 if i in config.look_ahead_layers else 0,
+ look_backward_block=1 if i in config.look_backward_layers else 0,
+ )
+ )
+
+ self.norm_out = Qwen2_5_OmniAdaLayerNormZero_Final(config.hidden_size) # final modulation
+ self.proj_out = nn.Linear(config.hidden_size, config.mel_dim)
+
+ self.post_init()
+
+ def _create_block_diff(self, hidden_states):
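+        # Positions are grouped into blocks of `block_size` frames; the returned
+        # (block_j - block_i) offsets let each layer attend a bounded number of
+        # blocks ahead/behind (see look_ahead_block/look_backward_block in DiTDecoderLayer).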
+ batch, seq_len = hidden_states.shape[0], hidden_states.shape[1]
+ block_indices = torch.arange(seq_len, device=hidden_states.device) // self.block_size # [seq_length]
+
+ block_i = block_indices.unsqueeze(1) # [seq_length, 1]
+ block_j = block_indices.unsqueeze(0) # [1, seq_length]
+ block_diff = block_j - block_i # (n, n)
+
+ return block_diff.expand(batch, self.num_attention_heads, seq_len, seq_len)
+
+ def forward(
+ self,
+ hidden_states,
+ condition_vector,
+ speaker_embedding,
+ quantized_code,
+ time_step,
+ drop_audio_conditioning=False,
+ drop_code=False,
+ apply_cfg=True,
+ **kwargs,
+ ):
+ batch_size = hidden_states.shape[0]
+ if time_step.ndim == 0:
+ time_step = time_step.repeat(batch_size)
+
+ # Compute embeddings
+ time_embedding = self.time_embed(time_step)
+ text_embedding = self.text_embed(quantized_code, drop_code=False if apply_cfg else drop_code)
+ text_embedding_unconditioned = self.text_embed(quantized_code, drop_code=True) if apply_cfg else None
+
+ hidden_states = self.input_embed(
+ hidden_states,
+ speaker_embedding,
+ condition_vector,
+ text_embedding,
+ drop_audio_cond=drop_audio_conditioning,
+ code_embed_uncond=text_embedding_unconditioned,
+ apply_cfg=apply_cfg,
+ )
+
+ # Compute positional encodings
+ position_ids = torch.arange(hidden_states.shape[1], device=hidden_states.device)
+ position_ids = position_ids[None, :].repeat(batch_size, 1)
+ position_embeddings = self.rotary_embed(hidden_states, position_ids)
+ blockwise_difference = self._create_block_diff(hidden_states)
+
+ # Transformer blocks
+ for transformer_block in self.transformer_blocks:
+ hidden_states = transformer_block(
+ hidden_states,
+ time_embedding,
+ position_embeddings=position_embeddings,
+ block_diff=blockwise_difference,
+ )
+
+ hidden_states = self.norm_out(hidden_states, time_embedding)
+ output = self.proj_out(hidden_states)
+
+ return output
+
+ @torch.no_grad()
+ def sample(
+ self,
+ conditioning_vector,
+ reference_mel_spectrogram,
+ quantized_code,
+ num_steps=10,
+ guidance_scale=0.5,
+ sway_coefficient=-1.0,
+ ):
+ noise_initialization = torch.randn([1, 30000, self.mel_dim], dtype=reference_mel_spectrogram.dtype)
+ maximum_duration = quantized_code.shape[1] * self.repeats
+ initial_state = noise_initialization[:, :maximum_duration].to(quantized_code.device)
+ batch_size = reference_mel_spectrogram.shape[0]
+ conditioning_vector = conditioning_vector.unsqueeze(1).repeat(1, maximum_duration, 1)
+
+ if batch_size != 1:
+ raise ValueError("Only batch size = 1 is currently supported")
+
+ def ode_function(time_step, hidden_states):
+ if guidance_scale < 1e-5:
+ prediction = self(
+ hidden_states=hidden_states,
+ speaker_embedding=conditioning_vector,
+ condition_vector=reference_mel_spectrogram,
+ quantized_code=quantized_code,
+ time_step=time_step,
+ drop_audio_conditioning=False,
+ drop_code=False,
+ )
+ return prediction
+
+ model_output = self(
+ hidden_states=hidden_states,
+ quantized_code=quantized_code,
+ speaker_embedding=conditioning_vector,
+ condition_vector=reference_mel_spectrogram,
+ time_step=time_step,
+ apply_cfg=True,
+ )
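+            # Classifier-free guidance: the forward pass doubled the batch into
+            # conditional and null halves; extrapolate away from the null branch.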
+ guided_prediction, null_prediction = torch.chunk(model_output, 2, dim=0)
+ return guided_prediction + (guided_prediction - null_prediction) * guidance_scale
+
+ initial_time = 0
+ time_embedding = torch.linspace(
+ initial_time, 1, num_steps, device=quantized_code.device, dtype=conditioning_vector.dtype
+ )
+
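+        # "Sway"-style time-step scheduling: with a negative coefficient the
+        # uniform grid is warped so integration points cluster near t=0, where
+        # the flow typically changes fastest.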
+ if sway_coefficient is not None:
+ time_embedding += sway_coefficient * (torch.cos(torch.pi / 2 * time_embedding) - 1 + time_embedding)
+
+ ode_solver = RungeKutta4ODESolver(function=ode_function, initial_value=initial_state)
+ solution_trajectory = ode_solver.integrate(time_embedding)
+
+ generated_waveform = solution_trajectory[-1]
+ generated_mel_spectrogram = generated_waveform.permute(0, 2, 1)
+ return generated_mel_spectrogram
+
+
+@auto_docstring(
+ custom_intro="""
+    The full Qwen2.5Omni Token2Wav model. It consists of a DiT model that takes speech tokens as input and predicts a mel spectrogram, and a BigVGAN vocoder that takes the mel spectrogram as input and predicts a waveform.
+ """
+)
+class Qwen2_5OmniToken2WavModel(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniToken2WavConfig
+ base_model_prefix = "model"
+ input_modalities = "audio"
+ _no_split_modules = ["Qwen2_5OmniToken2WavDiTModel", "Qwen2_5OmniToken2WavBigVGANModel"]
+
+ def __init__(self, config: Qwen2_5OmniToken2WavConfig):
+ super().__init__(config)
+ attn_impl = config._attn_implementation
+ if config._attn_implementation == "flash_attention_2":
+ logger.warning_once(
+                "Qwen2_5OmniToken2WavModel must run inference in fp32, but flash_attention_2 only supports fp16 and bf16; "
+                "the attention implementation of Qwen2_5OmniToken2WavModel will fall back to sdpa."
+ )
+ attn_impl = "sdpa"
+ elif config._attn_implementation == "eager":
+ logger.warning_once(
+                "Qwen2_5OmniToken2WavModel does not support the eager attention implementation; falling back to sdpa"
+ )
+ attn_impl = "sdpa"
+ self.code2wav_dit_model = Qwen2_5OmniToken2WavDiTModel._from_config(
+ config.dit_config, attn_implementation=attn_impl
+ )
+ self.code2wav_bigvgan_model = Qwen2_5OmniToken2WavBigVGANModel._from_config(
+ config.bigvgan_config, attn_implementation=attn_impl
+ )
+
+ self.post_init()
+
+ def forward(
+ self,
+ code,
+ conditioning,
+ reference_mel,
+ num_steps=10,
+ guidance_scale=0.5,
+ sway_coefficient=-1.0,
+ **kwargs,
+ ):
+ """Generates a waveform from input code and conditioning parameters."""
+
+ mel_spectrogram = self.code2wav_dit_model.sample(
+ conditioning,
+ reference_mel,
+ code,
+ num_steps=num_steps,
+ guidance_scale=guidance_scale,
+ sway_coefficient=sway_coefficient,
+ )
+
+ waveform = self.code2wav_bigvgan_model(mel_spectrogram)
+
+ return waveform
+
+
+############################
+# Start Qwen2.5Omni #
+############################
+
+
+@auto_docstring(
+ custom_intro="""
+ The full Qwen2.5Omni model, a multimodal model composed of 3 sub-models:
+    - [`Qwen2_5OmniThinkerForConditionalGeneration`]:
+        a causal auto-regressive transformer that takes text, audio, image and video as input and predicts text tokens.
+    - [`Qwen2_5OmniTalkerForConditionalGeneration`]:
+        a causal auto-regressive transformer that takes the thinker's hidden states and response as input and predicts speech tokens.
+    - [`Qwen2_5OmniToken2WavModel`]:
+        a DiT model that takes speech tokens as input and predicts a mel spectrogram, followed by a BigVGAN vocoder that takes the mel spectrogram as input and predicts a waveform.
+ """
+)
+class Qwen2_5OmniForConditionalGeneration(Qwen2_5OmniPreTrainedModel, GenerationMixin):
+ config: Qwen2_5OmniConfig
+ output_modalities = ("audio", "text")
+ _no_split_modules = [
+ "Qwen2_5OmniTalkerForConditionalGeneration",
+ "Qwen2_5OmniToken2WavModel",
+ ]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.thinker = Qwen2_5OmniThinkerForConditionalGeneration(config.thinker_config)
+
+ self.has_talker = config.enable_audio_output
+ self.speaker_map = {}
+ if config.enable_audio_output:
+ self.enable_talker()
+ self.post_init()
+
+ def enable_talker(self):
+ self.talker = Qwen2_5OmniTalkerForConditionalGeneration(self.config.talker_config)
+ self.token2wav = Qwen2_5OmniToken2WavModel(self.config.token2wav_config)
+ self.token2wav.float()
+ self.has_talker = True
+
+ def load_speakers(self, path):
+ check_torch_load_is_safe()
+ for key, value in torch.load(path, weights_only=True).items():
+ self.speaker_map[key] = value
+ logger.info(f"Speaker {list(self.speaker_map.keys())} loaded")
+
+ def disable_talker(self):
+ if hasattr(self, "talker"):
+ del self.talker
+ if hasattr(self, "token2wav"):
+ del self.token2wav
+ self.has_talker = False
+
+ @classmethod
+ def from_pretrained(
+ cls,
+ pretrained_model_name_or_path,
+ *model_args,
+ config=None,
+ cache_dir=None,
+ ignore_mismatched_sizes=False,
+ force_download=False,
+ local_files_only=False,
+ token=None,
+ revision="main",
+ use_safetensors=None,
+ weights_only=True,
+ **kwargs,
+ ):
+ model = super().from_pretrained(
+ pretrained_model_name_or_path,
+ *model_args,
+ config=config,
+ cache_dir=cache_dir,
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
+ force_download=force_download,
+ local_files_only=local_files_only,
+ token=token,
+ revision=revision,
+ use_safetensors=use_safetensors,
+ weights_only=weights_only,
+ **kwargs,
+ )
+ spk_path = cached_file(
+ pretrained_model_name_or_path,
+ "spk_dict.pt",
+ subfolder=kwargs.pop("subfolder", None),
+ cache_dir=kwargs.pop("cache_dir", None),
+ force_download=kwargs.pop("force_download", False),
+ proxies=kwargs.pop("proxies", None),
+ local_files_only=kwargs.pop("local_files_only", False),
+ token=token,
+ revision=kwargs.pop("revision", None),
+ )
+ if spk_path is None:
+            raise ValueError(f"{pretrained_model_name_or_path}/spk_dict.pt does not exist")
+ model.load_speakers(spk_path)
+
+ return model
+
+ @torch.no_grad()
+ @deprecate_kwarg("return_audio", version="v5", new_name="generation_mode")
+ # TODO: raushan, defaults should be saved in generation config
+ def generate(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ speaker: str = "Chelsie",
+ use_audio_in_video: bool = False,
+ thinker_max_new_tokens: int = 1024,
+ talker_max_new_tokens: int = 4096,
+ talker_do_sample: bool = True,
+ talker_top_k: int = 40,
+ talker_top_p: float = 0.8,
+ talker_temperature: float = 0.9,
+ talker_eos_token_id: list[int] = [8292, 8294],
+ talker_repetition_penalty: float = 1.05,
+ **kwargs,
+ ):
+ r"""
+ Generate text response and audio from input.
+
+ Args:
+ input_ids (`Optional[torch.Tensor]`, *optional*):
+                Input ids, which should be obtained from the processor.
+            speaker (`str`, defaults to "Chelsie"):
+                Which speaker to use for the audio response.
+ use_audio_in_video (`bool`, defaults to False):
+                Whether or not to use the audio track in the video; should match the parameter passed to `process_audio_info`.
+            generation_mode (`Optional[str]`, *optional*):
+                Whether or not to return the response in audio format; when unset, audio output follows `config.enable_audio_output`.
+ kwargs (*optional*):
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model.
+                - With a *thinker_*, *talker_*, or *token2wav_* prefix, they are passed to the `generate` method of the
+                  thinker, talker and token2wav respectively. Prefixed keywords take priority over those without a prefix.
+ Returns:
+            When audio output is disabled (e.g. `generation_mode="text"`):
+            - **Text** (`torch.Tensor`): Generated text token sequence.
+            When audio output is enabled:
+            - **Text** (`torch.Tensor`): Generated text token sequence.
+            - **Audio waveform** (`torch.Tensor`): Generated audio waveform.
+ """
+        # check against `False` on purpose because the parameter can be `str`/`bool`. This is needed for BC
+ generation_mode = kwargs.pop("generation_mode", None)
+ return_audio = generation_mode != "text" and generation_mode is not False
+
+ if speaker not in self.speaker_map:
+ raise ValueError(f"{speaker} is not available, available speakers: {self.speaker_map.keys()}")
+ if return_audio and not self.has_talker:
+            raise ValueError(
+                "Cannot use the talker when the talker module is not initialized. Call the `enable_talker` "
+                "method or set `enable_audio_output` in the config to enable the talker."
+            )
+ if return_audio is None:
+ return_audio = self.has_talker
+ if input_ids.shape[0] != 1 and return_audio:
+ raise NotImplementedError("Qwen2.5-Omni currently does not support batched inference with audio output")
+
+ shared_kwargs = {"use_audio_in_video": use_audio_in_video}
+ thinker_kwargs = {
+ "max_new_tokens": thinker_max_new_tokens,
+ }
+ talker_kwargs = {
+ "max_new_tokens": talker_max_new_tokens,
+ "do_sample": talker_do_sample,
+ "top_k": talker_top_k,
+ "top_p": talker_top_p,
+ "temperature": talker_temperature,
+ "eos_token_id": talker_eos_token_id,
+ "repetition_penalty": talker_repetition_penalty,
+ }
+ token2wav_kwargs = {}
+
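+        # Route prefixed kwargs to the matching sub-model: e.g. passing
+        # `thinker_do_sample=False` would reach only `self.thinker.generate`.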
+ for key, value in kwargs.items():
+ if key.startswith("thinker_"):
+ thinker_kwargs[key[len("thinker_") :]] = value
+ elif key.startswith("talker_"):
+ talker_kwargs[key[len("talker_") :]] = value
+ elif key.startswith("token2wav_"):
+ token2wav_kwargs[key[len("token2wav_") :]] = value
+ # Process special input values
+ elif key == "feature_attention_mask":
+ thinker_kwargs[key] = value
+ talker_kwargs["audio_feature_lengths"] = torch.sum(value, dim=1)
+ elif key == "input_features" or key == "attention_mask":
+ thinker_kwargs[key] = value
+ # Put other key to shared kwargs
+ else:
+ shared_kwargs[key] = value
+
+ # Merge kwargs
+ for key, value in shared_kwargs.items():
+ if key not in thinker_kwargs:
+ thinker_kwargs[key] = value
+ if key not in talker_kwargs:
+ talker_kwargs[key] = value
+ if key not in token2wav_kwargs:
+ token2wav_kwargs[key] = value
+ speaker_params = self.speaker_map[speaker]
+
+ # 1. Generate from thinker module
+ generate_audio = return_audio and self.has_talker
+ if generate_audio:
+ thinker_kwargs["output_hidden_states"] = True
+ thinker_kwargs["return_dict_in_generate"] = True
+
+ thinker_result = self.thinker.generate(input_ids=input_ids, **thinker_kwargs)
+
+ if not generate_audio:
+ return thinker_result
+
+ # 2. Generate speech tokens from talker module
+ embeds_to_talker = thinker_result.hidden_states[0][0].clone().to(input_ids.device)
+ if thinker_kwargs.get("input_features") is not None:
+ audio_ids_mask = input_ids == self.config.thinker_config.audio_token_index
+ audio_mask = audio_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker)
+ audio_mask_tensor = torch.zeros(
+ [audio_ids_mask.sum(), embeds_to_talker.shape[-1]],
+ dtype=embeds_to_talker.dtype,
+ device=input_ids.device,
+ )
+ embeds_to_talker.masked_scatter_(audio_mask, audio_mask_tensor)
+ if thinker_kwargs.get("pixel_values") is not None:
+ image_ids_mask = input_ids == self.config.thinker_config.image_token_index
+ image_mask = image_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker)
+ image_mask_tensor = torch.zeros(
+ [image_ids_mask.sum(), embeds_to_talker.shape[-1]],
+ dtype=embeds_to_talker.dtype,
+ device=input_ids.device,
+ )
+ embeds_to_talker.masked_scatter_(image_mask, image_mask_tensor)
+ if thinker_kwargs.get("pixel_values_videos") is not None:
+ video_ids_mask = input_ids == self.config.thinker_config.video_token_index
+ video_mask = video_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker)
+ video_mask_tensor = torch.zeros(
+ [video_ids_mask.sum(), embeds_to_talker.shape[-1]],
+ dtype=embeds_to_talker.dtype,
+ device=input_ids.device,
+ )
+ embeds_to_talker.masked_scatter_(video_mask, video_mask_tensor)
+
+ processed_thinker_hidden = (
+ (embeds_to_talker,) + thinker_result.hidden_states[0][1:],
+ ) + thinker_result.hidden_states[1:]
+ thinker_generate_ids = thinker_result.sequences[:, input_ids.size(1) :].to(input_ids.device)
+ thinker_token_embeds = [
+ token_hidden_states[0].to(input_ids.device) for token_hidden_states in processed_thinker_hidden
+ ]
+ thinker_hidden_states = [
+ token_hidden_states[-1].to(input_ids.device) for token_hidden_states in processed_thinker_hidden
+ ]
+
+ talker_text_bos_token = speaker_params["bos_token"]
+ talker_input_text_ids = torch.cat(
+ [
+ input_ids,
+ torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device),
+ thinker_generate_ids[:, :1],
+ ],
+ dim=-1,
+ )
+
+ talker_input_ids = torch.cat(
+ [
+ torch.full_like(input_ids, fill_value=self.talker.codec_mask_token),
+ torch.tensor([[self.talker.codec_pad_token]], dtype=torch.long, device=input_ids.device),
+ torch.tensor([[self.talker.codec_bos_token]], dtype=torch.long, device=input_ids.device),
+ ],
+ dim=1,
+ )
+
+ thinker_embed_tokens = self.thinker.get_input_embeddings()
+ thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1)
+ talker_inputs_embeds = thinker_hidden_states[0] + thinker_token_embeds[0]
+ talker_text_bos_token = torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device)
+ talker_text_bos_embed = thinker_embed_tokens(talker_text_bos_token).to(input_ids.device)
+ talker_inputs_embeds = torch.cat(
+ [
+ talker_inputs_embeds,
+ talker_text_bos_embed,
+ thinker_reply_part[:, :1, :],
+ ],
+ dim=1,
+ )
+
+ eos_token = torch.tensor([[self.talker.text_eos_token]], dtype=torch.long, device=input_ids.device)
+ eos_embedding = thinker_embed_tokens(eos_token).to(input_ids.device)
+
+ pad_token = torch.tensor([[self.talker.text_pad_token]], dtype=torch.long, device=input_ids.device)
+ pad_embedding = thinker_embed_tokens(pad_token).to(input_ids.device)
+
+ thinker_reply_part = torch.cat(
+ [
+ thinker_reply_part[:, 1:, :],
+ eos_embedding,
+ pad_embedding,
+ ],
+ dim=1,
+ )
+
+ talker_attention_mask = None
+ if "attention_mask" in kwargs:
+ talker_attention_mask = torch.cat(
+ [kwargs["attention_mask"], kwargs["attention_mask"].new_ones((1, 2))], dim=1
+ ).to(input_ids.device)
+
+ talker_result = self.talker.generate(
+ input_ids=talker_input_ids,
+ input_text_ids=talker_input_text_ids,
+ thinker_reply_part=thinker_reply_part,
+ inputs_embeds=talker_inputs_embeds,
+ attention_mask=talker_attention_mask,
+ suppress_tokens=[self.talker.codec_bos_token],
+ **{k: (v.to(input_ids.device) if torch.is_tensor(v) else v) for k, v in talker_kwargs.items()},
+ )
+ talker_generate_codes = talker_result[:, talker_input_ids.shape[1] : -1]
+
+ # 3. Generate wavs from code
+ if self.token2wav.dtype != torch.float:
+ self.token2wav.float()
+
+ wav = self.token2wav(
+ talker_generate_codes.to(input_ids.device),
+ conditioning=speaker_params["cond"].to(input_ids.device).float(),
+ reference_mel=speaker_params["ref_mel"].to(input_ids.device).float(),
+ **token2wav_kwargs,
+ )
+
+ return thinker_result.sequences, wav.float()
+
+
+__all__ = [
+ "Qwen2_5OmniForConditionalGeneration",
+ "Qwen2_5OmniThinkerTextModel",
+ "Qwen2_5OmniThinkerForConditionalGeneration",
+ "Qwen2_5OmniTalkerModel",
+ "Qwen2_5OmniTalkerForConditionalGeneration",
+ "Qwen2_5OmniToken2WavDiTModel",
+ "Qwen2_5OmniToken2WavBigVGANModel",
+ "Qwen2_5OmniToken2WavModel",
+ "Qwen2_5OmniPreTrainedModel",
+ "Qwen2_5OmniPreTrainedModelForConditionalGeneration",
+]
diff --git a/mllm/models/qwen2_5omni/python_src_code/modular_qwen2_5_omni.py b/mllm/models/qwen2_5omni/python_src_code/modular_qwen2_5_omni.py
new file mode 100644
index 000000000..6d0c3f9bc
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/modular_qwen2_5_omni.py
@@ -0,0 +1,4289 @@
+# coding=utf-8
+# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch Qwen2.5Omni model (Audio, Image, Video)."""
+
+import math
+from collections.abc import Callable
+from dataclasses import dataclass
+from typing import Any, Optional, Union
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch.nn import Parameter
+
+from ... import initialization as init
+from ...cache_utils import Cache
+from ...configuration_utils import PreTrainedConfig, layer_type_validation
+from ...generation import GenerationMixin
+from ...modeling_outputs import BaseModelOutput, ModelOutput
+from ...modeling_rope_utils import RopeParameters
+from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
+from ...processing_utils import Unpack
+from ...utils import (
+ TransformersKwargs,
+ auto_docstring,
+ check_torch_load_is_safe,
+ logging,
+)
+from ...utils.deprecation import deprecate_kwarg
+from ...utils.hub import cached_file
+from ..llama.modeling_llama import LlamaRotaryEmbedding, rotate_half
+from ..qwen2_5_vl.configuration_qwen2_5_vl import Qwen2_5_VLVisionConfig
+from ..qwen2_5_vl.modeling_qwen2_5_vl import (
+ Qwen2_5_VisionRotaryEmbedding,
+ Qwen2_5_VisionTransformerPretrainedModel,
+ Qwen2_5_VLAttention,
+ Qwen2_5_VLMLP,
+ Qwen2_5_VLPreTrainedModel,
+ Qwen2_5_VLTextModel,
+ Qwen2_5_VLVisionBlock,
+ eager_attention_forward,
+)
+from ..qwen2_audio.configuration_qwen2_audio import Qwen2AudioEncoderConfig
+from ..qwen2_audio.modeling_qwen2_audio import Qwen2AudioEncoderLayer
+from ..qwen2_vl.modeling_qwen2_vl import Qwen2VLRotaryEmbedding
+
+
+logger = logging.get_logger(__name__)
+
+
+class Qwen2_5OmniVisionEncoderConfig(Qwen2_5_VLVisionConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`Qwen2_5OmniVisionEncoder`]. It is used to instantiate a
+    Qwen2.5-Omni vision encoder according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the vision encoder of the Qwen2.5-Omni
+    architecture.
+
+ e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)
+
+ Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PreTrainedConfig`] for more information.
+
+    Args:
+        depth (`int`, *optional*, defaults to 32):
+            Number of layers (depth) in the model.
+        hidden_size (`int`, *optional*, defaults to 3584):
+            The size of the hidden layers.
+        hidden_act (`str`, *optional*, defaults to `"silu"`):
+            The non-linear activation function used in the model.
+        intermediate_size (`int`, *optional*, defaults to 3420):
+            Dimension of the MLP representations.
+        num_heads (`int`, *optional*, defaults to 16):
+            Number of attention heads for each attention layer.
+        in_channels (`int`, *optional*, defaults to 3):
+            Number of input channels.
+        patch_size (`int`, *optional*, defaults to 14):
+            The size of the patches extracted from the input.
+        spatial_merge_size (`int`, *optional*, defaults to 2):
+            The size used for merging spatial dimensions.
+        temporal_patch_size (`int`, *optional*, defaults to 2):
+            The size used for patches along the temporal dimension.
+        window_size (`int`, *optional*, defaults to 112):
+            The window size used by the windowed attention blocks.
+        out_hidden_size (`int`, *optional*, defaults to 3584):
+            The output hidden size of the vision encoder.
+        fullatt_block_indexes (`list[int]`, *optional*, defaults to `[7, 15, 23, 31]`):
+            The indexes of the blocks that use full attention instead of windowed attention.
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+
+ Example:
+
+ ```python
+ >>> from transformers import Qwen2_5OmniVisionEncoderConfig, Qwen2_5OmniVisionEncoder
+
+ >>> # Initializing a Qwen2_5OmniVisionEncoderConfig
+ >>> configuration = Qwen2_5OmniVisionEncoderConfig()
+
+ >>> # Initializing a Qwen2_5OmniVisionEncoder (with random weights)
+ >>> model = Qwen2_5OmniVisionEncoder(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "qwen2_5_omni_vision_encoder"
+
+ def __init__(
+ self,
+ depth=32,
+ hidden_size=3584,
+ hidden_act="silu",
+ intermediate_size=3420,
+ num_heads=16,
+ in_channels=3,
+ patch_size=14,
+ spatial_merge_size=2,
+ temporal_patch_size=2,
+ window_size=112,
+ out_hidden_size=3584,
+ fullatt_block_indexes=[7, 15, 23, 31],
+ initializer_range=0.02,
+ **kwargs,
+ ):
+ super().__init__(
+ depth,
+ hidden_size,
+ hidden_act,
+ intermediate_size,
+ num_heads,
+ in_channels,
+ patch_size,
+ spatial_merge_size,
+ temporal_patch_size,
+ window_size,
+ out_hidden_size,
+ fullatt_block_indexes,
+ initializer_range=initializer_range,
+ **kwargs,
+ )
+ del self.tokens_per_second
+
+
+class Qwen2_5OmniAudioEncoderConfig(Qwen2AudioEncoderConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Qwen2_5OmniAudioEncoder`]. It is used to instantiate a
+ Qwen2.5-Omni-Thinker audio encoder according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the audio encoder of the Qwen2-Audio
+ architecture.
+
+ e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)
+
+ Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PreTrainedConfig`] for more information.
+
+ Args:
+ num_mel_bins (`int`, *optional*, defaults to 128):
+            Number of mel features used per input feature. Should correspond to the value used in the
+ `Qwen2_5OmniProcessor` class.
+ encoder_layers (`int`, *optional*, defaults to 32):
+ Number of encoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 20):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 5120):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
+ d_model (`int`, *optional*, defaults to 1280):
+ Dimensionality of the layers.
+ dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_function (`str`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ scale_embedding (`bool`, *optional*, defaults to `False`):
+            Scale embeddings by dividing by sqrt(d_model).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ max_source_positions (`int`, *optional*, defaults to 1500):
+ The maximum sequence length of log-mel filter-bank features that this model might ever be used with.
+ n_window (`int`, *optional*, defaults to 100):
+            The chunk size used for convolution and flash attention in the AudioEncoder.
+ output_dim (`int`, *optional*, defaults to 3584):
+ The output dimension of AudioEncoder.
+
+ Example:
+
+ ```python
+ >>> from transformers import Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniAudioEncoder
+
+ >>> # Initializing a Qwen2_5OmniAudioEncoderConfig
+ >>> configuration = Qwen2_5OmniAudioEncoderConfig()
+
+ >>> # Initializing a Qwen2_5OmniAudioEncoder (with random weights)
+ >>> model = Qwen2_5OmniAudioEncoder(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "qwen2_5_omni_audio_encoder"
+
+ def __init__(
+ self,
+ num_mel_bins=128,
+ encoder_layers=32,
+ encoder_attention_heads=20,
+ encoder_ffn_dim=5120,
+ d_model=1280,
+ dropout=0,
+ attention_dropout=0,
+ activation_function="gelu",
+ activation_dropout=0,
+ scale_embedding=False,
+ initializer_range=0.02,
+ max_source_positions=1500,
+ n_window=100,
+ output_dim=3584,
+ **kwargs,
+ ):
+ super().__init__(
+ num_mel_bins,
+ encoder_layers,
+ encoder_attention_heads,
+ encoder_ffn_dim,
+ d_model,
+ dropout,
+ attention_dropout,
+ activation_function,
+ activation_dropout,
+ scale_embedding,
+ initializer_range,
+ max_source_positions,
+ **kwargs,
+ )
+ self.n_window = n_window
+ self.output_dim = output_dim
+ del self.encoder_layerdrop
+
+
+class Qwen2_5OmniTextConfig(PreTrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`Qwen2_5OmniThinkerTextModel`]. It is used to instantiate a
+    Qwen2.5-Omni text model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the text backbone of the Qwen2.5-Omni-Thinker.
+
+ e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)
+
+ Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PreTrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 152064):
+            Vocabulary size of the Qwen2.5-Omni text model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`Qwen2_5OmniThinkerTextModel`].
+ hidden_size (`int`, *optional*, defaults to 3584):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 18944):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 28):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 28):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_key_value_heads (`int`, *optional*, defaults to 4):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
+ The maximum sequence length that this model might ever be used with.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the rms normalization layers.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
+ Whether to use sliding window attention.
+ sliding_window (`int`, *optional*, defaults to 32768):
+            Sliding window attention (SWA) window size. Only used when `use_sliding_window` is `True`.
+ max_window_layers (`int`, *optional*, defaults to 28):
+ The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any
+ additional layer afterwards will use SWA (Sliding Window Attention).
+ layer_types (`list`, *optional*):
+ Attention pattern for each layer.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ rope_parameters (`RopeParameters`, *optional*):
+ Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
+ a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
+ with longer `max_position_embeddings`.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+
+ Example:
+
+ ```python
+    >>> from transformers import Qwen2_5OmniTextConfig, Qwen2_5OmniThinkerTextModel
+
+    >>> # Initializing a Qwen2_5OmniText configuration
+    >>> configuration = Qwen2_5OmniTextConfig()
+
+    >>> # Initializing a text model (with random weights) from the configuration
+    >>> model = Qwen2_5OmniThinkerTextModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+ ```"""
+
+ model_type = "qwen2_5_omni_text"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ default_theta = 1000000.0
+
+ # Default tensor parallel plan for base model `Qwen25OmniText`
+ base_model_tp_plan = {
+ "layers.*.self_attn.q_proj": "colwise",
+ "layers.*.self_attn.k_proj": "colwise",
+ "layers.*.self_attn.v_proj": "colwise",
+ "layers.*.self_attn.o_proj": "rowwise",
+ "layers.*.mlp.gate_proj": "colwise",
+ "layers.*.mlp.up_proj": "colwise",
+ "layers.*.mlp.down_proj": "rowwise",
+ }
+ base_model_pp_plan = {
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+ "norm": (["hidden_states"], ["hidden_states"]),
+ }
+
+ def __init__(
+ self,
+ vocab_size: Optional[int] = 152064,
+ hidden_size: Optional[int] = 3584,
+ intermediate_size: Optional[int] = 18944,
+ num_hidden_layers: Optional[int] = 28,
+ num_attention_heads: Optional[int] = 28,
+ num_key_value_heads: Optional[int] = 4,
+ hidden_act: Optional[str] = "silu",
+ max_position_embeddings: Optional[int] = 32768,
+ initializer_range: Optional[float] = 0.02,
+        rms_norm_eps: Optional[float] = 1e-6,
+ use_cache: Optional[bool] = True,
+ tie_word_embeddings: Optional[bool] = False,
+ rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
+ use_sliding_window: Optional[bool] = False,
+ sliding_window: Optional[int] = 32768,
+ max_window_layers: Optional[int] = 28,
+ layer_types: Optional[list[str]] = None,
+ attention_dropout: Optional[float] = 0.0,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.use_sliding_window = use_sliding_window
+ self.sliding_window = sliding_window if self.use_sliding_window else None
+ self.max_window_layers = max_window_layers
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
+ self.attention_dropout = attention_dropout
+
+ self.layer_types = layer_types
+ if self.layer_types is None:
+ self.layer_types = [
+ "sliding_attention"
+ if self.sliding_window is not None and i >= self.max_window_layers
+ else "full_attention"
+ for i in range(self.num_hidden_layers)
+ ]
+ layer_type_validation(self.layer_types, self.num_hidden_layers)
+
+ self.rope_parameters = rope_parameters
+ super().__init__(
+ tie_word_embeddings=tie_word_embeddings,
+ ignore_keys_at_rope_validation={"mrope_section"},
+ **kwargs,
+ )
+
+
+class Qwen2_5OmniThinkerConfig(PreTrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`Qwen2_5OmniThinkerForConditionalGeneration`]. It is used to instantiate a
+ Qwen2.5-Omni-Thinker model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the Qwen2.5-Omni-Thinker.
+
+ e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)
+
+ Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PreTrainedConfig`] for more information.
+
+ Args:
+ audio_config (`dict`, *optional*):
+ The config dictionary of the audio backbone.
+ vision_config (`dict`, *optional*):
+ The config dictionary of the vision backbone.
+ text_config (`dict`, *optional*):
+ The config dictionary of the text backbone.
+ audio_token_index (`int`, *optional*, defaults to 151646):
+ The audio token index to encode the audio prompt.
+ image_token_index (`int`, *optional*, defaults to 151655):
+ The image token index to encode the image prompt.
+ video_token_index (`int`, *optional*, defaults to 151656):
+ The video token index to encode the video prompt.
+ position_id_per_seconds (`int`, *optional*, defaults to 25):
+ The increment of position id per second.
+ seconds_per_chunk (`int`, *optional*, defaults to 2):
+ The duration in seconds of the chunk of audio and video data.
+ audio_start_token_id (`int`, *optional*, defaults to 151647):
+ The audio start token index to encode the audio prompt.
+ audio_end_token_id (`int`, *optional*, defaults to 151648):
+ The audio end token index to encode the audio prompt.
+        user_token_id (`int`, *optional*, defaults to 872):
+ The user token index to encode the user token.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+
+ Example:
+
+ ```python
+ >>> from transformers import Qwen2_5OmniThinkerForConditionalGeneration, Qwen2_5OmniThinkerConfig, Qwen2_5OmniAudioEncoderConfig, Qwen2_5OmniVisionEncoderConfig
+
+ >>> # Initializing a Qwen2_5OmniAudioEncoder config
+ >>> audio_config = Qwen2_5OmniAudioEncoderConfig()
+
+ >>> # Initializing a Qwen2_5OmniVisionEncoder config
+ >>> vision_config = Qwen2_5OmniVisionEncoderConfig()
+
+ >>> # Initializing a Qwen2_5OmniTextConfig config
+ >>> text_config = Qwen2_5OmniTextConfig()
+
+ >>> # Initializing a Qwen2.5OmniThinker configuration
+ >>> configuration = Qwen2_5OmniThinkerConfig(audio_config, vision_config, text_config)
+
+ >>> # Initializing a model from the Qwen-Omni style configuration
+ >>> model = Qwen2_5OmniThinkerForConditionalGeneration(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "qwen2_5_omni_thinker"
+ attribute_map = {
+ "image_token_id": "image_token_index",
+ "video_token_id": "video_token_index",
+ "audio_token_id": "audio_token_index",
+ }
+ sub_configs = {
+ "audio_config": Qwen2_5OmniAudioEncoderConfig,
+ "vision_config": Qwen2_5OmniVisionEncoderConfig,
+ "text_config": Qwen2_5OmniTextConfig,
+ }
+
+ def __init__(
+ self,
+ audio_config=None,
+ vision_config=None,
+ text_config=None,
+ audio_token_index=151646,
+ image_token_index=151655,
+ video_token_index=151656,
+ position_id_per_seconds=25,
+ seconds_per_chunk=2,
+ audio_start_token_id=151647,
+ audio_end_token_id=151648,
+ user_token_id=872,
+ initializer_range=0.02,
+ **kwargs,
+ ):
+ self.audio_token_index = audio_token_index
+ self.image_token_index = image_token_index
+ self.video_token_index = video_token_index
+ self.user_token_id = user_token_id
+ self.position_id_per_seconds = position_id_per_seconds
+ self.seconds_per_chunk = seconds_per_chunk
+ self.audio_start_token_id = audio_start_token_id
+ self.audio_end_token_id = audio_end_token_id
+ self.initializer_range = initializer_range
+
+ if isinstance(vision_config, dict):
+ vision_config = Qwen2_5OmniVisionEncoderConfig(**vision_config)
+ elif vision_config is None:
+ vision_config = Qwen2_5OmniVisionEncoderConfig()
+ self.vision_config = vision_config
+
+ if isinstance(audio_config, dict):
+ audio_config = Qwen2_5OmniAudioEncoderConfig(**audio_config)
+ elif audio_config is None:
+ audio_config = Qwen2_5OmniAudioEncoderConfig()
+ self.audio_config = audio_config
+
+ if isinstance(text_config, dict):
+ text_config = Qwen2_5OmniTextConfig(**text_config)
+ elif text_config is None:
+ text_config = Qwen2_5OmniTextConfig()
+ self.text_config = text_config
+
+ super().__init__(**kwargs)
+
+
+class Qwen2_5OmniTalkerConfig(PreTrainedConfig):
+ r"""
+    This is the configuration class to store the configuration of a [`Qwen2_5OmniTalkerForConditionalGeneration`]. It is used to instantiate a
+    Qwen2.5-Omni-Talker model according to the specified arguments, defining the model architecture. Instantiating a configuration
+    with the defaults will yield a similar configuration to that of the Qwen2.5-Omni-Talker.
+
+ e.g. [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B)
+
+ Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PreTrainedConfig`] for more information.
+
+ Args:
+ audio_token_index (`int`, *optional*, defaults to 151646):
+ The audio token index to encode the audio prompt.
+ image_token_index (`int`, *optional*, defaults to 151655):
+ The image token index to encode the image prompt.
+ video_token_index (`int`, *optional*, defaults to 151656):
+ The video token index to encode the video prompt.
+ vocab_size (`int`, *optional*, defaults to 8448):
+            Vocabulary size of the Qwen2.5-Omni talker model. Defines the number of different tokens that can be represented by the
+            `input_ids` passed when calling [`Qwen2_5OmniTalkerModel`].
+ tts_text_start_token_id (`int`, *optional*, defaults to 151860):
+ The tts text start token index to encode the start of tts text.
+ tts_text_end_token_id (`int`, *optional*, defaults to 151861):
+ The tts text end token index to encode the end of tts text.
+ tts_text_pad_token_id (`int`, *optional*, defaults to 151859):
+ The tts text pad token index to encode the pad of tts text.
+ tts_codec_start_token_id (`int`, *optional*, defaults to 8293):
+ The tts codec start token index to encode the start of tts codec.
+ tts_codec_end_token_id (`int`, *optional*, defaults to 8294):
+ The tts codec end token index to encode the end of tts codec.
+ tts_codec_pad_token_id (`int`, *optional*, defaults to 8292):
+ The tts codec pad token index to encode the pad of tts codec.
+ tts_codec_mask_token_id (`int`, *optional*, defaults to 8296):
+ The tts codec mask token index to encode the mask of tts codec.
+ vision_start_token_id (`int`, *optional*, defaults to 151652):
+ The tts vision start token index to encode the start of vision.
+ vision_end_token_id (`int`, *optional*, defaults to 151653):
+ The tts vision end token index to encode the end of vision.
+ embedding_size (`int`, *optional*, defaults to 3584):
+ Dimension of the embedding representations.
+ hidden_size (`int`, *optional*, defaults to 3584):
+ Dimension of the hidden representations.
+ intermediate_size (`int`, *optional*, defaults to 18944):
+ Dimension of the MLP representations.
+ num_hidden_layers (`int`, *optional*, defaults to 28):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 28):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ num_key_value_heads (`int`, *optional*, defaults to 4):
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+ by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `num_attention_heads`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+ The non-linear activation function (function or string) in the decoder.
+ max_position_embeddings (`int`, *optional*, defaults to 32768):
+ The maximum sequence length that this model might ever be used with.
+ rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+ The epsilon used by the rms normalization layers.
+ head_dim (`int`, *optional*, defaults to 128):
+ The dimension of each attention head.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether the model's input and output word embeddings should be tied.
+ use_sliding_window (`bool`, *optional*, defaults to `False`):
+ Whether to use sliding window attention.
+ sliding_window (`int`, *optional*, defaults to 32768):
+            Sliding window attention (SWA) window size. Only used when `use_sliding_window` is `True`.
+ max_window_layers (`int`, *optional*, defaults to 28):
+ The number of layers using full attention. The first `max_window_layers` layers will use full attention, while any
+ additional layer afterwards will use SWA (Sliding Window Attention).
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ rope_parameters (`RopeParameters`, *optional*):
+ Dictionary containing the configuration parameters for the RoPE embeddings. The dictionary should contain
+ a value for `rope_theta` and optionally parameters used for scaling in case you want to use RoPE
+ with longer `max_position_embeddings`.
+ position_id_per_seconds (`int`, *optional*, defaults to 25):
+ The increment of position id per second.
+ seconds_per_chunk (`int`, *optional*, defaults to 2):
+ The duration in seconds of the chunk of audio and video data.
+ audio_start_token_id (`int`, *optional*, defaults to 151647):
+ The audio start token index to encode the audio prompt.
+ audio_end_token_id (`int`, *optional*, defaults to 151648):
+ The audio end token index to encode the audio prompt.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ spatial_merge_size (`int`, *optional*, defaults to 2):
+ The size used for merging spatial dimensions.
+ layer_types (`list`, *optional*):
+ Attention pattern for each layer.
+
+ Example:
+
+ ```python
+    >>> from transformers import Qwen2_5OmniTalkerForConditionalGeneration, Qwen2_5OmniTalkerConfig
+
+    >>> # Initializing a Qwen2_5OmniTalker configuration
+    >>> configuration = Qwen2_5OmniTalkerConfig()
+
+    >>> # Initializing a model (with random weights) from the configuration
+    >>> model = Qwen2_5OmniTalkerForConditionalGeneration(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+ ```"""
+
+ model_type = "qwen2_5_omni_talker"
+ default_theta = 1000000.0
+ attribute_map = {
+ "image_token_id": "image_token_index",
+ "video_token_id": "video_token_index",
+ "audio_token_id": "audio_token_index",
+ }
+
+ def __init__(
+ self,
+ audio_token_index=151646,
+ image_token_index=151655,
+ video_token_index=151656,
+ vocab_size=8448,
+ tts_text_start_token_id=151860,
+ tts_text_end_token_id=151861,
+ tts_text_pad_token_id=151859,
+ tts_codec_start_token_id=8293,
+ tts_codec_end_token_id=8294,
+ tts_codec_pad_token_id=8292,
+ tts_codec_mask_token_id=8296,
+ vision_start_token_id=151652,
+ vision_end_token_id=151653,
+ embedding_size=3584,
+ hidden_size=3584,
+ intermediate_size=18944,
+ num_hidden_layers=28,
+ num_attention_heads=28,
+ num_key_value_heads=4,
+ hidden_act="silu",
+ max_position_embeddings=32768,
+ rms_norm_eps=1e-06,
+ head_dim=128,
+ use_cache=True,
+ tie_word_embeddings=False,
+ use_sliding_window=False,
+ sliding_window=32768,
+ max_window_layers=28,
+ attention_dropout=0.0,
+ rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
+ position_id_per_seconds=25,
+ seconds_per_chunk=2,
+ audio_start_token_id=151647,
+ audio_end_token_id=151648,
+ initializer_range=0.02,
+ spatial_merge_size=2,
+ layer_types=None,
+ **kwargs,
+ ):
+ self.audio_token_index = audio_token_index
+ self.image_token_index = image_token_index
+ self.video_token_index = video_token_index
+
+ self.tts_text_start_token_id = tts_text_start_token_id
+ self.tts_text_end_token_id = tts_text_end_token_id
+ self.tts_text_pad_token_id = tts_text_pad_token_id
+ self.tts_codec_start_token_id = tts_codec_start_token_id
+ self.tts_codec_end_token_id = tts_codec_end_token_id
+ self.tts_codec_pad_token_id = tts_codec_pad_token_id
+
+ self.tts_codec_mask_token_id = tts_codec_mask_token_id
+
+ self.vision_start_token_id = vision_start_token_id
+ self.vision_end_token_id = vision_end_token_id
+
+ self.vocab_size = vocab_size
+ self.head_dim = head_dim
+ self.embedding_size = embedding_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.use_sliding_window = use_sliding_window
+ self.sliding_window = sliding_window if self.use_sliding_window else None
+ self.max_window_layers = max_window_layers
+
+ # for backward compatibility
+ if num_key_value_heads is None:
+ num_key_value_heads = num_attention_heads
+
+ self.num_key_value_heads = num_key_value_heads
+ self.hidden_act = hidden_act
+ self.rms_norm_eps = rms_norm_eps
+ self.use_cache = use_cache
+ self.attention_dropout = attention_dropout
+        self.position_id_per_seconds = position_id_per_seconds
+        self.seconds_per_chunk = seconds_per_chunk
+        self.audio_start_token_id = audio_start_token_id
+        self.audio_end_token_id = audio_end_token_id
+
+ self.initializer_range = initializer_range
+ self.spatial_merge_size = spatial_merge_size
+
+ self.layer_types = layer_types
+ if self.layer_types is None:
+ self.layer_types = [
+ "sliding_attention"
+ if self.sliding_window is not None and i >= self.max_window_layers
+ else "full_attention"
+ for i in range(self.num_hidden_layers)
+ ]
+ layer_type_validation(self.layer_types, self.num_hidden_layers)
+
+ self.rope_parameters = rope_parameters
+ super().__init__(
+ tie_word_embeddings=tie_word_embeddings, ignore_keys_at_rope_validation={"mrope_section"}, **kwargs
+ )
+
+
+class Qwen2_5OmniDiTConfig(PreTrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of the Qwen2_5OmniToken2WavDiT used in the Qwen2.5-Omni-Token2Wav model.
+ It defines the architecture of the DiT model, which is used for generating mel-spectrograms from tokens.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 1024):
+ The dimension of the model.
+ num_hidden_layers (`int`, *optional*, defaults to 22):
+ The number of transformer blocks in the DiT model.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ The number of attention heads in each transformer block.
+ ff_mult (`int`, *optional*, defaults to 2):
+ The multiplier for the feedforward layer in each transformer block.
+ emb_dim (`int`, *optional*, defaults to 512):
+ The dimension of the embedding layer.
+        head_dim (`int`, *optional*, defaults to 64):
+            The dimension of each attention head.
+        rope_parameters (`RopeParameters`, *optional*):
+            Dictionary containing the configuration parameters for the RoPE embeddings, e.g. a value for `rope_theta`.
+        max_position_embeddings (`int`, *optional*, defaults to 32768):
+            The maximum sequence length that this model might ever be used with.
+        block_size (`int`, *optional*, defaults to 24):
+            The block size used by the DiT's block-wise attention mask.
+        look_ahead_layers (`list[int]`, *optional*, defaults to `[10]`):
+            The indexes of layers whose attention may look one block ahead.
+        look_backward_layers (`list[int]`, *optional*, defaults to `[0, 20]`):
+            The indexes of layers whose attention may look one block backward.
+        repeats (`int`, *optional*, defaults to 2):
+            The number of times the codec embeddings are repeated.
+ num_embeds (`int`, *optional*, defaults to 8193):
+ The number of unique embeddings in the codec.
+ mel_dim (`int`, *optional*, defaults to 80):
+ The dimension of the mel-spectrogram.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout rate for the transformer blocks.
+
+ enc_emb_dim (`int`, *optional*, defaults to 192):
+ The dimension of the pre-trained speaker embedding.
+ enc_dim (`int`, *optional*, defaults to 128):
+ The dimension of the encoder output.
+ enc_channels (`list[int]`, *optional*, defaults to `[256, 256, 256, 256, 768]`):
+ A list of output channels for each TDNN/SERes2Net layer in the encoder.
+ enc_kernel_sizes (`list[int]`, *optional*, defaults to `[5, 3, 3, 3, 1]`):
+ A list of kernel sizes for each layer in the encoder.
+ enc_dilations (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 1]`):
+ A list of dilations for each layer in the encoder.
+ enc_attention_channels (`int`, *optional*, defaults to 64):
+ The number of attention channels in the SqueezeExcitationBlock.
+ enc_res2net_scale (`int`, *optional*, defaults to 2):
+ The scale of the Res2Net block in the encoder.
+ enc_se_channels (`int`, *optional*, defaults to 64):
+ The number of output channels after squeeze in the SqueezeExcitationBlock.
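+
+    Example (a minimal sketch; assumes `Qwen2_5OmniDiTConfig` is exported by `transformers`):
+
+    ```python
+    >>> from transformers import Qwen2_5OmniDiTConfig
+
+    >>> # Initializing a DiT configuration with default values
+    >>> configuration = Qwen2_5OmniDiTConfig()
+
+    >>> # Accessing a configuration field
+    >>> configuration.hidden_size
+    1024
+    ```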
+ """
+
+ model_type = "qwen2_5_omni_dit"
+
+ def __init__(
+ self,
+ hidden_size=1024,
+ num_hidden_layers=22,
+ num_attention_heads=16,
+ ff_mult=2,
+ emb_dim=512,
+ head_dim=64,
+ rope_parameters: Optional[RopeParameters | dict[str, RopeParameters]] = None,
+ max_position_embeddings=32768,
+ block_size=24,
+ look_ahead_layers=[10],
+ look_backward_layers=[0, 20],
+ repeats=2,
+ num_embeds=8193,
+ mel_dim=80,
+ dropout=0.1,
+ enc_emb_dim=192,
+ enc_dim=128,
+ enc_channels=[256, 256, 256, 256, 768],
+ enc_kernel_sizes=[5, 3, 3, 3, 1],
+ enc_dilations=[1, 2, 3, 4, 1],
+ enc_attention_channels=64,
+ enc_res2net_scale=2,
+ enc_se_channels=64,
+ **kwargs,
+ ):
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.ff_mult = ff_mult
+ self.emb_dim = emb_dim
+ self.head_dim = head_dim
+ self.max_position_embeddings = max_position_embeddings
+ self.block_size = block_size
+ self.look_ahead_layers = look_ahead_layers
+ self.look_backward_layers = look_backward_layers
+ self.repeats = repeats
+ self.num_embeds = num_embeds
+ self.mel_dim = mel_dim
+ self.dropout = dropout
+ self.enc_emb_dim = enc_emb_dim
+ self.enc_dim = enc_dim
+ self.enc_channels = enc_channels
+ self.enc_kernel_sizes = enc_kernel_sizes
+ self.enc_dilations = enc_dilations
+ self.enc_attention_channels = enc_attention_channels
+ self.enc_res2net_scale = enc_res2net_scale
+ self.enc_se_channels = enc_se_channels
+ self.rope_parameters = rope_parameters
+
+ super().__init__(**kwargs)
+
+
+class Qwen2_5OmniBigVGANConfig(PreTrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of the Qwen2_5OmniToken2WavBigVGAN module used in the Qwen2.5-Omni-Token2Wav model.
+ It defines the architecture of the BigVGAN model, which is used for converting mel-spectrograms to waveforms.
+
+ Args:
+ mel_dim (`int`, *optional*, defaults to 80):
+ The dimension of the mel-spectrogram.
+ upsample_initial_channel (`int`, *optional*, defaults to 1536):
+ The number of channels in the initial upsampling layer.
+ resblock_kernel_sizes (`list[int]`, *optional*, defaults to `[3, 7, 11]`):
+ A list of kernel sizes for each residual block.
+ resblock_dilation_sizes (`list[list[int]]`, *optional*, defaults to `[[1, 3, 5], [1, 3, 5], [1, 3, 5]]`):
+ A list of dilation sizes for each residual block.
+ upsample_rates (`list[int]`, *optional*, defaults to `[5, 3, 2, 2, 2, 2]`):
+ A list of upsampling rates for each upsampling layer.
+ upsample_kernel_sizes (`list[int]`, *optional*, defaults to `[11, 7, 4, 4, 4, 4]`):
+ A list of kernel sizes for each upsampling layer.
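+
+    Example (a minimal sketch; assumes `Qwen2_5OmniBigVGANConfig` is exported by `transformers`):
+
+    ```python
+    >>> from transformers import Qwen2_5OmniBigVGANConfig
+
+    >>> # Initializing a BigVGAN configuration with default values
+    >>> configuration = Qwen2_5OmniBigVGANConfig()
+
+    >>> # The total upsampling factor is the product of the per-layer rates
+    >>> import math
+    >>> math.prod(configuration.upsample_rates)
+    240
+    ```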
+ """
+
+ model_type = "qwen2_5_omni_bigvgan"
+
+ def __init__(
+ self,
+ mel_dim=80,
+ upsample_initial_channel=1536,
+ resblock_kernel_sizes=[3, 7, 11],
+ resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+ upsample_rates=[5, 3, 2, 2, 2, 2],
+ upsample_kernel_sizes=[11, 7, 4, 4, 4, 4],
+ **kwargs,
+ ):
+ self.mel_dim = mel_dim
+ self.upsample_initial_channel = upsample_initial_channel
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.upsample_rates = upsample_rates
+ self.upsample_kernel_sizes = upsample_kernel_sizes
+ super().__init__(**kwargs)
+
+
+class Qwen2_5OmniToken2WavConfig(PreTrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`Qwen2_5OmniToken2WavModel`].
+ It is used to instantiate the Qwen2.5-Omni-Token2Wav model which combines a Diffusion Transformer (DiT) for mel-spectrogram generation with a BigVGAN model for waveform synthesis. The configuration contains sub-configurations for both components.
+
+ Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PreTrainedConfig`] for more information.
+
+ Args:
+        dit_config (`dict`, *optional*):
+            The config dictionary of the Diffusion Transformer (DiT) module responsible for generating mel-spectrograms.
+        bigvgan_config (`dict`, *optional*):
+            The config dictionary of the BigVGAN module responsible for converting mel-spectrograms to waveforms.
+
+    Example:
+
+ ```python
+    >>> from transformers import Qwen2_5OmniToken2WavConfig, Qwen2_5OmniToken2WavModel
+
+    >>> # Initialize the DiT sub-configuration
+    >>> dit_config = {
+    ...     "hidden_size": 1024,
+    ...     "num_hidden_layers": 22,
+    ...     "num_attention_heads": 16,
+    ...     "ff_mult": 2,
+    ... }
+
+    >>> # Initialize the BigVGAN sub-configuration
+    >>> bigvgan_config = {
+    ...     "mel_dim": 80,
+    ...     "upsample_rates": [5, 3, 2, 2, 2, 2],
+    ... }
+
+    >>> # Initialize the main configuration
+    >>> config = Qwen2_5OmniToken2WavConfig(dit_config, bigvgan_config)
+
+    >>> # Initialize the model with the config
+    >>> model = Qwen2_5OmniToken2WavModel(config)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+ ```
+ """
+
+ model_type = "qwen2_5_omni_token2wav"
+ sub_configs = {
+ "dit_config": Qwen2_5OmniDiTConfig,
+ "bigvgan_config": Qwen2_5OmniBigVGANConfig,
+ }
+
+ def __init__(self, dit_config=None, bigvgan_config=None, **kwargs):
+ if dit_config is None:
+ dit_config = {}
+ if bigvgan_config is None:
+ bigvgan_config = {}
+ self.dit_config = Qwen2_5OmniDiTConfig(**dit_config)
+ self.bigvgan_config = Qwen2_5OmniBigVGANConfig(**bigvgan_config)
+ super().__init__(**kwargs)
+
+
+class Qwen2_5OmniConfig(PreTrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`Qwen2_5OmniForConditionalGeneration`]. It is used to instantiate a Qwen2.5Omni
+    model according to the specified sub-model configurations, defining the model architecture.
+
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the
+ [Qwen/Qwen2.5-Omni-7B](https://huggingface.co/Qwen/Qwen2.5-Omni-7B) architecture.
+
+ Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PreTrainedConfig`] for more information.
+
+ Args:
+ thinker_config (`dict`, *optional*): Configuration of the underlying thinker sub-model.
+ talker_config (`dict`, *optional*): Configuration of the underlying talker sub-model.
+ token2wav_config (`dict`, *optional*): Configuration of the underlying codec sub-model.
+        enable_audio_output (`bool`, *optional*, defaults to `True`): Whether to enable audio output and load the talker and token2wav modules.
+
+ Example:
+
+ ```python
+ >>> from transformers import (
+ ... Qwen2_5OmniThinkerConfig,
+ ... Qwen2_5OmniTalkerConfig,
+ ... Qwen2_5OmniToken2WavConfig,
+ ... Qwen2_5OmniForConditionalGeneration,
+ ... Qwen2_5OmniConfig,
+ ... )
+
+ >>> # Initializing sub-modules configurations.
+ >>> thinker_config = Qwen2_5OmniThinkerConfig()
+ >>> talker_config = Qwen2_5OmniTalkerConfig()
+ >>> token2wav_config = Qwen2_5OmniToken2WavConfig()
+
+
+ >>> # Initializing a module style configuration
+ >>> configuration = Qwen2_5OmniConfig(
+ ... thinker_config, talker_config, token2wav_config
+ ... )
+
+ >>> # Initializing a model (with random weights)
+ >>> model = Qwen2_5OmniForConditionalGeneration(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```
+ """
+
+ model_type = "qwen2_5_omni"
+ sub_configs = {
+ "thinker_config": Qwen2_5OmniThinkerConfig,
+ "talker_config": Qwen2_5OmniTalkerConfig,
+ "token2wav_config": Qwen2_5OmniToken2WavConfig,
+ }
+
+ def __init__(
+ self,
+ thinker_config=None,
+ talker_config=None,
+ token2wav_config=None,
+ enable_audio_output: bool = True,
+ **kwargs,
+ ):
+ if thinker_config is None:
+ thinker_config = {}
+ logger.info("thinker_config is None. Initializing thinker model with default values")
+
+ if talker_config is None:
+ talker_config = {}
+ logger.info("talker_config is None. Initializing talker model with default values")
+
+ if token2wav_config is None:
+ token2wav_config = {}
+ logger.info("token2wav_config is None. Initializing token2wav model with default values")
+
+ self.thinker_config = Qwen2_5OmniThinkerConfig(**thinker_config)
+ self.talker_config = Qwen2_5OmniTalkerConfig(**talker_config)
+ self.token2wav_config = Qwen2_5OmniToken2WavConfig(**token2wav_config)
+ self.enable_audio_output = enable_audio_output
+
+ super().__init__(**kwargs)
+
+ def get_text_config(self, *args, **kwargs):
+ """
+ Returns the config that is meant to be used with text IO. On most models, it is the original config instance
+ itself. On specific composite models, it is under a set of valid names.
+
+ Args:
+ decoder (`Optional[bool]`, *optional*, defaults to `False`):
+ If set to `True`, then only search for decoder config names.
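+
+        Example (illustrative; assumes default sub-configurations):
+
+        ```python
+        >>> config = Qwen2_5OmniConfig()
+        >>> config.get_text_config().model_type
+        'qwen2_5_omni_text'
+        ```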
+ """
+        # Overridden for deeply nested configs like Qwen2.5-Omni, whose text config lives under
+        # `thinker_config`. This has to be generalized if more deeply nested configs are added.
+        # NOTE: this method is currently used only by vLLM.
+ return self.thinker_config.get_text_config(*args, **kwargs)
+
+
+class Qwen2_5OmniPreTrainedModel(Qwen2_5_VLPreTrainedModel):
+ config: Qwen2_5OmniConfig
+ input_modalities = ("image", "video", "audio", "text")
+ _can_compile_fullgraph = False
+
+ def _init_weights(self, module):
+ PreTrainedModel._init_weights(self, module)
+ if isinstance(module, SinusoidsPositionEmbedding):
+ log_timescale_increment = np.log(module.max_timescale) / (module.channels // 2 - 1)
+ inv_timescales = torch.exp(-log_timescale_increment * torch.arange(module.channels // 2).float())
+ scaled_time = torch.arange(module.length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
+ init.copy_(module.positional_embedding, torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1))
+ elif isinstance(module, UpSample1d):
+ filter_tensor = kaiser_sinc_filter1d(0.5 / module.ratio, 0.6 / module.ratio, module.kernel_size)
+ init.copy_(module.filter, filter_tensor)
+ elif isinstance(module, DownSample1d):
+ filter_tensor = kaiser_sinc_filter1d(module.cutoff, module.half_width, module.kernel_size)
+ init.copy_(module.filter, filter_tensor)
+ elif isinstance(module, Qwen2_5_VisionRotaryEmbedding):
+ inv_freq = 1.0 / (module.theta ** (torch.arange(0, module.dim, 2, dtype=torch.float) / module.dim))
+ init.copy_(module.inv_freq, inv_freq)
+
+
+class Qwen2_5OmniPreTrainedModelForConditionalGeneration(Qwen2_5OmniPreTrainedModel):
+ input_modalities = ("image", "video", "audio", "text")
+
+ def _prepare_4d_causal_attention_mask_with_cache_position(
+ self,
+ attention_mask: torch.Tensor,
+ sequence_length: int,
+ target_length: int,
+ dtype: torch.dtype,
+ device: torch.device,
+ min_dtype: float,
+ cache_position: torch.Tensor,
+ batch_size: int,
+ ):
+ """
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
+
+ Args:
+ attention_mask (`torch.Tensor`):
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
+ sequence_length (`int`):
+ The sequence length being processed.
+ target_length (`int`):
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
+ dtype (`torch.dtype`):
+ The dtype to use for the 4D attention mask.
+ device (`torch.device`):
+ The device to place the 4D attention mask on.
+ min_dtype (`float`):
+ The minimum value representable with the dtype `dtype`.
+ cache_position (`torch.Tensor`):
+ Indices depicting the position of the input sequence tokens in the sequence.
+            batch_size (`int`):
+                Batch size.
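+
+        Example (illustrative; a 1-sequence batch with a 2-token query over a 4-slot cache whose first key is padding):
+
+        ```python
+        >>> mask_2d = torch.tensor([[0, 1, 1, 1]])
+        >>> causal_4d = self._prepare_4d_causal_attention_mask_with_cache_position(
+        ...     mask_2d, sequence_length=2, target_length=4, dtype=torch.float32,
+        ...     device=mask_2d.device, min_dtype=torch.finfo(torch.float32).min,
+        ...     cache_position=torch.tensor([2, 3]), batch_size=1,
+        ... )
+        >>> causal_4d.shape
+        torch.Size([1, 1, 2, 4])
+        ```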
+ """
+ if attention_mask is not None and attention_mask.dim() == 4:
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
+ causal_mask = attention_mask
+ else:
+ causal_mask = torch.full(
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
+ )
+ if sequence_length != 1:
+ causal_mask = torch.triu(causal_mask, diagonal=1)
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
+ if attention_mask is not None:
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
+ mask_length = attention_mask.shape[-1]
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
+ padding_mask = padding_mask == 0
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
+ padding_mask, min_dtype
+ )
+
+ return causal_mask
+
+ def get_llm_pos_ids_for_vision(
+ self,
+ start_idx: int,
+ vision_idx: int,
+ spatial_merge_size: int,
+ t_index: list[int],
+ grid_hs: list[int],
+ grid_ws: list[int],
+ ):
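+        """
+        Compute 3D (temporal, height, width) position ids for the vision input at `vision_idx`, offset by `start_idx`.
+
+        The temporal ids come from `t_index`, while the height/width ids enumerate the spatially merged grid of size
+        `grid_hs[vision_idx] // spatial_merge_size` by `grid_ws[vision_idx] // spatial_merge_size`. Returns a tensor of
+        shape `(3, len(t_index) * llm_grid_h * llm_grid_w)`.
+        """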
+ llm_pos_ids_list = []
+ llm_grid_h = grid_hs[vision_idx] // spatial_merge_size
+ llm_grid_w = grid_ws[vision_idx] // spatial_merge_size
+ h_index = torch.arange(llm_grid_h).view(1, -1, 1).expand(len(t_index), -1, llm_grid_w).flatten()
+ w_index = torch.arange(llm_grid_w).view(1, 1, -1).expand(len(t_index), llm_grid_h, -1).flatten()
+ t_index = torch.Tensor(t_index).view(-1, 1).expand(-1, llm_grid_h * llm_grid_w).flatten().long()
+ _llm_pos_ids = torch.stack([t_index, h_index, w_index])
+        llm_pos_ids_list.append(_llm_pos_ids + start_idx)
+ llm_pos_ids = torch.cat(llm_pos_ids_list, dim=1)
+ return llm_pos_ids
+
+ def get_chunked_index(
+ self, token_indices: torch.Tensor, tokens_per_chunk: int, remove_index: int
+ ) -> list[tuple[int, int]]:
+ """
+ Splits token index list into chunks based on token value ranges.
+
+ Given a list of token indices, returns a list of (start, end) index tuples representing
+        slices of the list where the token values fall within successive ranges of `tokens_per_chunk`.
+
+        For example, if `tokens_per_chunk` is 1000, the function will create chunks such that:
+        - the first chunk contains token values < 1000,
+        - the second chunk contains values >= 1000 and < 2000, and so on.
+
+        Parameters:
+            token_indices (`torch.Tensor` of shape `(seq_len, )`): A monotonically increasing list of
+                token index values.
+            tokens_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold).
+            remove_index (`int`): An index to subtract from `token_indices` before chunking.
+
+ Returns:
+ `list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive)
+ and end (exclusive) indices of a chunk in `token_indices`.
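+
+        Example (a minimal sketch; the index values are illustrative):
+
+        ```python
+        >>> token_indices = torch.tensor([0, 1, 2, 1000, 1001, 2000])
+        >>> self.get_chunked_index(token_indices, tokens_per_chunk=1000, remove_index=0)
+        [(0, 3), (3, 5), (5, 6)]
+        ```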
+ """
+
+ def _iter():
+ i, start_idx = 0, 0 # skip bos token
+ current_chunk = 1
+ while i < len(token_indices): # skip eos token
+ if token_indices[i] - remove_index >= current_chunk * tokens_per_chunk:
+ yield (start_idx, i)
+ start_idx = i
+ current_chunk += 1
+ i += 1
+ yield (start_idx, len(token_indices))
+
+ return list(_iter())
+
+ def get_rope_index(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ image_grid_thw: Optional[torch.LongTensor] = None,
+ video_grid_thw: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ use_audio_in_video: bool = False,
+ audio_seqlens: Optional[torch.LongTensor] = None,
+ second_per_grids: Optional[torch.Tensor] = None,
+ ) -> tuple[torch.Tensor, torch.Tensor]:
+ """
+ Calculate the 3D rope index based on image and video's temporal, height and width in LLM.
+
+ Explanation:
+ Each embedding sequence contains vision embedding and text embedding or just contains text embedding.
+
+ For pure text embedding sequence, the rotary position embedding has no difference with modern LLMs.
+ Examples:
+ input_ids: [T T T T T], here T is for text.
+ temporal position_ids: [0, 1, 2, 3, 4]
+ height position_ids: [0, 1, 2, 3, 4]
+ width position_ids: [0, 1, 2, 3, 4]
+
+ For vision and text embedding sequence, we calculate 3D rotary position embedding for vision part
+ and 1D rotary position embedding for text part.
+ Examples:
+ Temporal (Time): 3 patches, representing different segments of the video in time.
+ Height: 2 patches, dividing each frame vertically.
+ Width: 2 patches, dividing each frame horizontally.
+ We also have some important parameters:
+ fps (Frames Per Second): The video's frame rate, set to 1. This means one frame is processed each second.
+                tokens_per_second: A crucial parameter that dictates how many "time-steps" or "temporal tokens" are conceptually packed into a one-second interval of the video. Here we have 25 tokens per second, so each second of the video is represented with 25 separate time points; it essentially defines the temporal granularity.
+                temporal_patch_size: The number of frames that compose one temporal patch. Here, it's 2 frames.
+                interval: The step size for the temporal position IDs, calculated as tokens_per_second * temporal_patch_size / fps. In this case, 25 * 2 / 1 = 50. This means that consecutive temporal patches differ by 50 in their temporal position IDs.
+ input_ids: [V V V V V V V V V V V V T T T T T], here V is for vision.
+ vision temporal position_ids: [0, 0, 0, 0, 50, 50, 50, 50, 100, 100, 100, 100]
+ vision height position_ids: [0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1]
+ vision width position_ids: [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
+ text temporal position_ids: [101, 102, 103, 104, 105]
+ text height position_ids: [101, 102, 103, 104, 105]
+ text width position_ids: [101, 102, 103, 104, 105]
+ Here we calculate the text start position_ids as the max vision position_ids plus 1.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+ The temporal, height and width of feature shape of each image in LLM.
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+ The temporal, height and width of feature shape of each video in LLM.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ use_audio_in_video (`bool`, *optional*):
+ If set to `True`, use the audio in video.
+ audio_seqlens (`torch.LongTensor` of shape `(num_audios)`, *optional*):
+ The length of feature shape of each audio in LLM.
+ second_per_grids (`torch.LongTensor` of shape `(num_videos)`, *optional*):
+ The time interval (in seconds) for each grid along the temporal dimension in the 3D position IDs.
+
+ Returns:
+ position_ids (`torch.LongTensor` of shape `(3, batch_size, sequence_length)`)
+ mrope_position_deltas (`torch.Tensor` of shape `(batch_size)`)
+ """
+ spatial_merge_size = self.spatial_merge_size
+ image_token_id = self.config.image_token_id
+ video_token_id = self.config.video_token_id
+ audio_token_id = self.config.audio_token_id
+ vision_start_token_id = self.config.vision_start_token_id
+ audio_start_token_id = self.config.audio_start_token_id
+ position_id_per_seconds = self.config.position_id_per_seconds
+ seconds_per_chunk = self.config.seconds_per_chunk
+
+ mrope_position_deltas = []
+ if input_ids is not None and (image_grid_thw is not None or video_grid_thw is not None):
+ total_input_ids = input_ids
+ if attention_mask is not None:
+ attention_mask = attention_mask == 1
+ position_ids = torch.ones(
+ 3,
+ input_ids.shape[0],
+ input_ids.shape[1],
+ dtype=input_ids.dtype,
+ device=input_ids.device,
+ )
+ image_idx, video_idx, audio_idx = 0, 0, 0
+ for i, input_ids in enumerate(total_input_ids):
+ if attention_mask is not None:
+ input_ids = input_ids[attention_mask[i]]
+ image_nums, video_nums, audio_nums = 0, 0, 0
+ vision_start_indices = torch.argwhere(input_ids == vision_start_token_id).squeeze(1)
+ vision_tokens = input_ids[vision_start_indices + 1]
+ audio_nums = torch.sum(input_ids == audio_start_token_id)
+ image_nums = (vision_tokens == image_token_id).sum()
+ video_nums = (
+ (vision_tokens == audio_start_token_id).sum()
+ if use_audio_in_video
+ else (vision_tokens == video_token_id).sum()
+ )
+ input_tokens = input_ids.tolist()
+ llm_pos_ids_list: list = []
+ st = 0
+ remain_images, remain_videos, remain_audios = image_nums, video_nums, audio_nums
+ multimodal_nums = (
+ image_nums + audio_nums if use_audio_in_video else image_nums + video_nums + audio_nums
+ )
+ for _ in range(multimodal_nums):
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ if image_token_id in input_tokens and remain_images > 0:
+ ed_image = input_tokens.index(image_token_id, st)
+ else:
+ ed_image = len(input_tokens) + 1
+ if video_token_id in input_tokens and remain_videos > 0:
+ ed_video = input_tokens.index(video_token_id, st)
+ else:
+ ed_video = len(input_tokens) + 1
+ if audio_token_id in input_tokens and remain_audios > 0:
+ ed_audio = input_tokens.index(audio_token_id, st)
+ else:
+ ed_audio = len(input_tokens) + 1
+ min_ed = min(ed_image, ed_video, ed_audio)
+ if min_ed == ed_audio:
+ text_len = min_ed - st - 1
+ if text_len != 0:
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ bos_len = 1
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+                        # Audio encoder output length: a stride-2 conv ((L - 1) // 2 + 1) followed by a kernel-2, stride-2 pooling
+                        audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1
+ llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx
+ llm_pos_ids_list.append(llm_pos_ids)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ eos_len = 1
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st += text_len + bos_len + audio_len + eos_len
+ audio_idx += 1
+ remain_audios -= 1
+
+ elif min_ed == ed_image:
+ text_len = min_ed - st - 1
+ if text_len != 0:
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ bos_len = 1
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ grid_t = image_grid_thw[image_idx][0]
+ grid_hs = image_grid_thw[:, 1]
+ grid_ws = image_grid_thw[:, 2]
+ t_index = (torch.arange(grid_t) * 1 * position_id_per_seconds).long()
+ llm_pos_ids = self.get_llm_pos_ids_for_vision(
+ st_idx, image_idx, spatial_merge_size, t_index, grid_hs, grid_ws
+ )
+ image_len = image_grid_thw[image_idx].prod() // (spatial_merge_size**2)
+ llm_pos_ids_list.append(llm_pos_ids)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ eos_len = 1
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st += text_len + bos_len + image_len + eos_len
+ image_idx += 1
+ remain_images -= 1
+
+ elif min_ed == ed_video and not use_audio_in_video:
+ text_len = min_ed - st - 1
+ if text_len != 0:
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ bos_len = 1
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ grid_t = video_grid_thw[video_idx][0]
+ grid_hs = video_grid_thw[:, 1]
+ grid_ws = video_grid_thw[:, 2]
+ t_index = (
+ torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds
+ ).long()
+ llm_pos_ids = self.get_llm_pos_ids_for_vision(
+ st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws
+ )
+ video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2)
+ llm_pos_ids_list.append(llm_pos_ids)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ eos_len = 1
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st += text_len + bos_len + video_len + eos_len
+ video_idx += 1
+ remain_videos -= 1
+
+ elif min_ed == ed_video and use_audio_in_video:
+ text_len = min_ed - st - 2
+ if text_len != 0:
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ bos_len = 1
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
+ llm_pos_ids_list.append(torch.arange(bos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ audio_len = ((audio_seqlens[audio_idx] - 1) // 2 + 1 - 2) // 2 + 1
+ audio_llm_pos_ids = torch.arange(audio_len).view(1, -1).expand(3, -1) + st_idx
+ grid_t = video_grid_thw[video_idx][0]
+ grid_hs = video_grid_thw[:, 1]
+ grid_ws = video_grid_thw[:, 2]
+
+ t_index = (
+ torch.arange(grid_t) * second_per_grids[video_idx].cpu().float() * position_id_per_seconds
+ ).long()
+ video_llm_pos_ids = self.get_llm_pos_ids_for_vision(
+ st_idx, video_idx, spatial_merge_size, t_index, grid_hs, grid_ws
+ )
+
+ t_ntoken_per_chunk = int(position_id_per_seconds * seconds_per_chunk)
+ video_chunk_indexes = self.get_chunked_index(video_llm_pos_ids[0], t_ntoken_per_chunk, st_idx)
+ audio_chunk_indexes = self.get_chunked_index(audio_llm_pos_ids[0], t_ntoken_per_chunk, st_idx)
+ sub_len = 0
+ for j in range(max(len(video_chunk_indexes), len(audio_chunk_indexes))):
+ video_chunk_index = video_chunk_indexes[j] if j < len(video_chunk_indexes) else None
+ audio_chunk_index = audio_chunk_indexes[j] if j < len(audio_chunk_indexes) else None
+ if video_chunk_index is not None:
+ sub_len += video_chunk_index[1] - video_chunk_index[0]
+
+ llm_pos_ids_list.append(
+ video_llm_pos_ids[:, video_chunk_index[0] : video_chunk_index[1]]
+ )
+ if audio_chunk_index is not None:
+ sub_len += audio_chunk_index[1] - audio_chunk_index[0]
+
+ llm_pos_ids_list.append(
+ audio_llm_pos_ids[:, audio_chunk_index[0] : audio_chunk_index[1]]
+ )
+ video_len = video_grid_thw[video_idx].prod() // (spatial_merge_size**2)
+
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ eos_len = 1
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+ llm_pos_ids_list.append(torch.arange(eos_len).view(1, -1).expand(3, -1) + st_idx)
+
+ st += text_len + bos_len * 2 + audio_len + video_len + eos_len * 2
+
+ audio_idx += 1
+ video_idx += 1
+ remain_videos -= 1
+ remain_audios -= 1
+
+ if st < len(input_tokens):
+ st_idx = llm_pos_ids_list[-1].max() + 1 if len(llm_pos_ids_list) > 0 else 0
+ text_len = len(input_tokens) - st
+ llm_pos_ids_list.append(torch.arange(text_len).view(1, -1).expand(3, -1) + st_idx)
+
+ llm_positions = torch.cat(llm_pos_ids_list, dim=1).reshape(3, -1)
+
+ if attention_mask is not None:
+ position_ids[..., i, attention_mask[i]] = llm_positions.to(position_ids.device)
+ else:
+ position_ids[..., i, :] = llm_positions.to(position_ids.device)
+ mrope_position_deltas.append(llm_positions.max() + 1 - len(input_ids))
+ mrope_position_deltas = torch.tensor(mrope_position_deltas).unsqueeze(1).to(device=input_ids.device)
+
+ return position_ids, mrope_position_deltas
+ else:
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1).to(attention_mask.device)
+ max_position_ids = position_ids.max(0, keepdim=False)[0].max(-1, keepdim=True)[0]
+ mrope_position_deltas = max_position_ids + 1 - torch.sum(attention_mask, dim=-1, keepdim=True)
+
+ return position_ids, mrope_position_deltas
+
+
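+# A minimal, hypothetical sketch (not part of the model) of the text-only
+# fallback above: position ids are the cumulative sum of the attention mask,
+# so left padding collapses to a constant while real tokens count up from 0.
+def _demo_text_only_position_ids():
+    attention_mask = torch.tensor([[0, 0, 1, 1, 1]])  # one left-padded sequence
+    position_ids = attention_mask.long().cumsum(-1) - 1
+    position_ids.masked_fill_(attention_mask == 0, 1)
+    return position_ids  # tensor([[1, 1, 0, 1, 2]])
+
+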
+############################
+# Start Thinker #
+############################
+
+
+@dataclass
+@auto_docstring(
+ custom_intro="""
+ Base class for Qwen2.5OmniThinker causal language model (or autoregressive) outputs.
+ """
+)
+class Qwen2_5OmniThinkerCausalLMOutputWithPast(ModelOutput):
+ r"""
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`, *optional*):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+ The rope index difference between sequence length and multimodal rope.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Cache] = None
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
+ attentions: Optional[tuple[torch.FloatTensor]] = None
+ rope_deltas: Optional[torch.LongTensor] = None
+
+
+class Qwen2_5OmniAudioAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ config: Qwen2_5OmniAudioEncoderConfig,
+ ):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.num_heads = config.encoder_attention_heads
+ self.dropout = config.attention_dropout
+ self.head_dim = self.embed_dim // self.num_heads
+ self.num_key_value_groups = 1 # needed for eager attention
+ self.config = config
+
+ if (self.head_dim * self.num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.attention_dropout = 0.0
+ self.is_decoder = False
+ self.is_causal = False
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ cu_seqlens: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ seq_length, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states).reshape(seq_length, self.num_heads, -1)
+ key_states = self.k_proj(hidden_states).reshape(seq_length, self.num_heads, -1)
+ value_states = self.v_proj(hidden_states).reshape(seq_length, self.num_heads, -1)
+
+ query_states = query_states.transpose(0, 1).unsqueeze(0)
+ key_states = key_states.transpose(0, 1).unsqueeze(0)
+ value_states = value_states.transpose(0, 1).unsqueeze(0)
+ max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ attn_output, _ = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask=attention_mask,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ scaling=self.scaling,
+ cu_seq_lens_q=cu_seqlens, # pass cu seq lens for FA2
+ cu_seq_lens_k=cu_seqlens,
+ max_length_q=max_seqlen,
+ max_length_k=max_seqlen,
+ is_causal=False,
+ **kwargs,
+ )
+
+ attn_output = attn_output.reshape(seq_length, -1).contiguous()
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output
+
+
+class Qwen2_5OmniAudioEncoderLayer(Qwen2AudioEncoderLayer):
+ def __init__(self, config: Qwen2_5OmniAudioEncoderConfig):
+ super().__init__(config)
+ self.self_attn = Qwen2_5OmniAudioAttention(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ cu_seqlens: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states = self.self_attn(
+ hidden_states=hidden_states,
+ cu_seqlens=cu_seqlens,
+ attention_mask=attention_mask,
+ **kwargs,
+ )
+ hidden_states = residual + hidden_states
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = residual + hidden_states
+
+ if hidden_states.dtype == torch.float16:
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ return outputs
+
+
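+# Hedged illustration (toy values, unused by the model) of the fp16 clamp in
+# the encoder layer above: float16 saturates to inf past ~65504, so activations
+# are clamped just inside the finite range before the residual stream overflows.
+def _demo_fp16_clamp():
+    hidden = torch.tensor([70000.0, -70000.0, 1.0], dtype=torch.float16)
+    clamp_value = torch.finfo(torch.float16).max - 1000
+    return torch.clamp(hidden, min=-clamp_value, max=clamp_value)
+
+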
+class SinusoidsPositionEmbedding(nn.Module):
+ def __init__(self, length, channels, max_timescale=10000):
+ super().__init__()
+ self.length = length
+ self.channels = channels
+ self.max_timescale = max_timescale
+ if channels % 2 != 0:
+ raise ValueError("SinusoidsPositionEmbedding needs even channels input")
+ log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1)
+ inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2).float())
+ scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
+ self.register_buffer(
+ "positional_embedding",
+ torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1),
+ persistent=False,
+ )
+
+ def forward(self, seqlen: int):
+ return self.positional_embedding[:seqlen, :]
+
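+
+# Hedged sanity check (illustration only, unused by the model) of the sinusoid
+# table above: row t holds [sin(t * s_0..s_{c/2-1}), cos(t * s_0..s_{c/2-1})],
+# so row 0 is four zeros followed by four ones for channels=8.
+def _demo_sinusoids_position_embedding():
+    table = SinusoidsPositionEmbedding(length=16, channels=8)(4)  # shape (4, 8)
+    assert table.shape == (4, 8)
+    assert torch.allclose(table[0], torch.tensor([0.0] * 4 + [1.0] * 4))
+    return table
+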
+
+@auto_docstring(
+ custom_intro="""
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`Qwen2_5OmniAudioEncoderLayer`].
+ """
+)
+class Qwen2_5OmniAudioEncoder(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniAudioEncoderConfig
+ main_input_name = "input_features"
+ input_modalities = "audio"
+ _no_split_modules = ["Qwen2_5OmniAudioEncoderLayer"]
+ _supports_sdpa = True
+
+ def __init__(self, config: Qwen2_5OmniAudioEncoderConfig):
+ super().__init__(config)
+ self.dropout = config.dropout
+
+ embed_dim = config.d_model
+ self.num_mel_bins = config.num_mel_bins
+ self.max_source_positions = config.max_source_positions
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+ self.n_window = config.n_window
+ self.conv1 = nn.Conv1d(self.num_mel_bins, embed_dim, kernel_size=3, padding=1)
+ self.conv2 = nn.Conv1d(embed_dim, embed_dim, kernel_size=3, stride=2, padding=1)
+ self.positional_embedding = SinusoidsPositionEmbedding(self.max_source_positions, embed_dim)
+ self.audio_bos_eos_token = nn.Embedding(2, config.output_dim)
+ self.layers = nn.ModuleList([Qwen2_5OmniAudioEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.ln_post = nn.LayerNorm(config.d_model)
+ self.avg_pooler = nn.AvgPool1d(2, stride=2)
+ self.proj = nn.Linear(config.d_model, config.output_dim)
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _freeze_parameters(self):
+ for param in self.parameters():
+ param.requires_grad = False
+ self._requires_grad = False
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.conv1
+
+ def set_input_embeddings(self, value: nn.Module):
+ self.conv1 = value
+
+ def _prepare_attention_mask(self, inputs_tensor: torch.Tensor, cu_seqlens: torch.Tensor) -> torch.Tensor:
+ # Flash Attention 2 doesn't need a 4D mask and relies on `cu_seqlens/max_seqlen`
+        # NOTE: the created attention mask only approximates the ragged FA2 attention by
+        # allowing bidirectional attention within `cu_seqlens` blocks and not attending
+        # between blocks, so it will not be a 100% match for FA2's `varlen` path
+ if self.config._attn_implementation == "flash_attention_2":
+ return None
+
+ seq_length = inputs_tensor.shape[0]
+ attention_mask = torch.full(
+ [1, 1, seq_length, seq_length],
+ torch.finfo(inputs_tensor.dtype).min,
+ device=inputs_tensor.device,
+ dtype=inputs_tensor.dtype,
+ )
+ for i in range(1, len(cu_seqlens)):
+ attention_mask[..., cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = 0
+ return attention_mask
+
+ @auto_docstring
+ def forward(
+ self,
+ input_features,
+ feature_lens=None,
+ aftercnn_lens=None,
+ **kwargs,
+ ):
+ r"""
+        feature_lens (`torch.LongTensor` of shape `(batch_size,)`):
+            Mel-spectrogram length of each audio.
+        aftercnn_lens (`torch.LongTensor` of shape `(batch_size,)`):
+            Mel length of each audio after the convolutional front-end.
+ """
+ chunk_num = torch.ceil(feature_lens / (self.n_window * 2)).long()
+
+ chunk_lengths = torch.tensor(
+ [self.n_window * 2] * chunk_num.sum(),
+ dtype=torch.long,
+ device=feature_lens.device,
+ )
+ tail_chunk_index = F.pad(chunk_num, (1, 0), value=-1).cumsum(0)[1:]
+ chunk_lengths[tail_chunk_index] = feature_lens % (self.n_window * 2)
+ chunk_lengths = torch.where(chunk_lengths == 0, self.n_window * 2, chunk_lengths)
+
+ chunk_list = input_features.split(chunk_lengths.tolist(), dim=1)
+ padded_feature, padded_mask, padded_mask_after_cnn = self.padded_and_mask_function(
+ chunk_list, chunk_lengths, padding_value=0, padding_side="right"
+ )
+ padded_embed = nn.functional.gelu(self.conv1(padded_feature)) * padded_mask
+ padded_embed = nn.functional.gelu(self.conv2(padded_embed)).transpose(1, 2)
+
+ padded_embed = padded_embed + self.positional_embedding.positional_embedding[
+ : padded_embed.shape[1], :
+ ].unsqueeze(0).to(padded_embed.dtype)
+ hidden_states = padded_embed[padded_mask_after_cnn]
+ cu_seqlens = torch.cat(
+ (
+ torch.zeros(1, device=padded_mask_after_cnn.device, dtype=torch.int32),
+ padded_mask_after_cnn.sum(1).cumsum(0),
+ )
+ ).to(torch.int32)
+ attention_mask = self._prepare_attention_mask(hidden_states, cu_seqlens)
+
+ for encoder_layer in self.layers:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ cu_seqlens=cu_seqlens,
+ attention_mask=attention_mask,
+ **kwargs,
+ )
+ hidden_states = layer_outputs[0]
+
+ hidden_states_list = hidden_states.split(aftercnn_lens.tolist(), dim=0)
+ token_audio_list = []
+ for each_audio_states in hidden_states_list:
+ each_audio_states = self.avg_pooler(each_audio_states.transpose(0, 1)).transpose_(0, 1)
+ each_audio_states = self.ln_post(each_audio_states)
+ each_audio_states = self.proj(each_audio_states)
+ token_audio_list.append(each_audio_states)
+ token_audio = torch.cat(token_audio_list, dim=0)
+ return BaseModelOutput(last_hidden_state=token_audio)
+
+ def padded_and_mask_function(self, tensor_list, tensor_len, padding_value=0, padding_side="right"):
+ """
+ Pads a sequence of tensors to their maximum length on indicated `padding_side`.
+ Then prepares a mask so that pad tokens are not attended to.
+ """
+ max_len = tensor_len.max()
+ dim = tensor_list[0].shape[0]
+ padded_tensor = torch.full(
+ size=(len(tensor_list), dim, max_len),
+ fill_value=padding_value,
+ dtype=self.dtype,
+ device=tensor_list[0].device,
+ )
+
+ batch_mask = torch.zeros(
+ (len(tensor_len), max_len),
+ dtype=torch.long,
+ device=padded_tensor.device,
+ )
+ for i, length in enumerate(tensor_len):
+ batch_mask[i, :length] = 1
+ padded_tensor[i, :, :length] = tensor_list[i]
+
+ feature_lens_after_cnn = (tensor_len - 1) // 2 + 1
+ max_len_after_cnn = feature_lens_after_cnn.max()
+ batch_mask_after_cnn = torch.zeros(
+ (len(tensor_len), max_len_after_cnn),
+ dtype=torch.long,
+ device=padded_tensor.device,
+ )
+ for i, length in enumerate(feature_lens_after_cnn):
+ batch_mask_after_cnn[i, :length] = 1
+ return (
+ padded_tensor,
+ batch_mask.unsqueeze(1),
+ batch_mask_after_cnn.bool(),
+ )
+
+ # Ignore copy
+ def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
+ """
+ Computes the output length of the convolutional layers and the output length of the audio encoder
+ """
+ input_lengths = (input_lengths - 1) // 2 + 1
+ output_lengths = (input_lengths - 2) // 2 + 1
+ return input_lengths, output_lengths
+
+
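+# Minimal sketch (assumed toy lengths, not model code) of the length arithmetic
+# above: the stride-2 conv halves the mel length and the average pooler halves
+# it again, so a mel length L yields ((L - 1) // 2 + 1 - 2) // 2 + 1 tokens --
+# the same `audio_len` formula used in `get_rope_index`.
+def _demo_audio_output_lengths():
+    mel_lens = torch.tensor([100, 101, 30])
+    after_cnn = (mel_lens - 1) // 2 + 1    # after the stride-2 conv2
+    after_pool = (after_cnn - 2) // 2 + 1  # after AvgPool1d(2, stride=2)
+    return after_cnn, after_pool           # ([50, 51, 15], [25, 25, 7])
+
+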
+def apply_rotary_pos_emb_vision(tensor: torch.Tensor, freqs: torch.Tensor) -> torch.Tensor:
+ orig_dtype = tensor.dtype
+ tensor = tensor.float()
+ cos = freqs.cos()
+ sin = freqs.sin()
+ cos = cos.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
+ sin = sin.unsqueeze(1).repeat(1, 1, 2).unsqueeze(0).float()
+ output = (tensor * cos) + (rotate_half(tensor) * sin)
+ output = output.to(orig_dtype)
+ return output
+
+
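+# Illustrative only (dummy shapes assumed): with `freqs` of shape
+# (seq_len, head_dim // 2), the rotation above pairs each feature with its
+# partner half a head_dim away, so per-vector L2 norms are preserved.
+def _demo_vision_rope_preserves_norm():
+    tensor = torch.randn(1, 4, 2, 8)  # (1, seq_len, num_heads, head_dim)
+    freqs = torch.randn(4, 4)         # (seq_len, head_dim // 2)
+    rotated = apply_rotary_pos_emb_vision(tensor, freqs)
+    assert torch.allclose(tensor.norm(dim=-1), rotated.norm(dim=-1), atol=1e-5)
+    return rotated
+
+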
+class Qwen2_5OmniVisionAttention(nn.Module):
+ def __init__(self, config: Qwen2_5OmniVisionEncoderConfig = None) -> None:
+ super().__init__()
+ self.dim = config.hidden_size
+ self.num_heads = config.num_heads
+ self.head_dim = self.dim // self.num_heads
+ self.q = nn.Linear(self.dim, self.dim, bias=True)
+ self.k = nn.Linear(self.dim, self.dim, bias=True)
+ self.v = nn.Linear(self.dim, self.dim, bias=True)
+ self.proj = nn.Linear(self.dim, self.dim)
+ self.scaling = self.head_dim**-0.5
+ self.num_key_value_groups = 1 # needed for eager attention
+ self.config = config
+ self.attention_dropout = 0.0
+ self.is_causal = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ cu_seqlens: torch.Tensor,
+ rotary_pos_emb: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ seq_length = hidden_states.shape[0]
+ query_states = self.q(hidden_states).reshape(seq_length, self.num_heads, -1)
+ key_states = self.k(hidden_states).reshape(seq_length, self.num_heads, -1)
+ value_states = self.v(hidden_states).reshape(seq_length, self.num_heads, -1)
+ query_states = apply_rotary_pos_emb_vision(query_states.unsqueeze(0), rotary_pos_emb).squeeze(0)
+ key_states = apply_rotary_pos_emb_vision(key_states.unsqueeze(0), rotary_pos_emb).squeeze(0)
+
+ query_states = query_states.transpose(0, 1).unsqueeze(0)
+ key_states = key_states.transpose(0, 1).unsqueeze(0)
+ value_states = value_states.transpose(0, 1).unsqueeze(0)
+
+ attention_interface: Callable = eager_attention_forward
+ if self.config._attn_implementation != "eager":
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+
+ if self.config._attn_implementation == "flash_attention_2":
+ # Flash Attention 2: Use cu_seqlens for variable length attention
+ max_seqlen = (cu_seqlens[1:] - cu_seqlens[:-1]).max()
+ attn_output, _ = attention_interface(
+ self,
+ query_states,
+ key_states,
+ value_states,
+ attention_mask=None,
+ scaling=self.scaling,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ cu_seq_lens_q=cu_seqlens,
+ cu_seq_lens_k=cu_seqlens,
+ max_length_q=max_seqlen,
+ max_length_k=max_seqlen,
+ is_causal=False,
+ **kwargs,
+ )
+ else:
+ # Other implementations: Process each chunk separately
+ lengths = cu_seqlens[1:] - cu_seqlens[:-1]
+ splits = [
+ torch.split(tensor, lengths.tolist(), dim=2) for tensor in (query_states, key_states, value_states)
+ ]
+
+ attn_outputs = [
+ attention_interface(
+ self,
+ q,
+ k,
+ v,
+ attention_mask=None,
+ scaling=self.scaling,
+ dropout=0.0 if not self.training else self.attention_dropout,
+ is_causal=False,
+ **kwargs,
+ )[0]
+ for q, k, v in zip(*splits)
+ ]
+ attn_output = torch.cat(attn_outputs, dim=1)
+
+ attn_output = attn_output.reshape(seq_length, -1).contiguous()
+ attn_output = self.proj(attn_output)
+ return attn_output
+
+
+class Qwen2_5OmniVisionBlock(Qwen2_5_VLVisionBlock):
+ def __init__(self, config: Qwen2_5OmniVisionEncoderConfig) -> None:
+ super().__init__(config, config._attn_implementation)
+ self.attn = Qwen2_5OmniVisionAttention(config=config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ cu_seqlens: torch.Tensor,
+ rotary_pos_emb: Optional[torch.Tensor] = None,
+ **kwargs,
+ ) -> torch.Tensor:
+ hidden_states = hidden_states + self.attn(
+ self.norm1(hidden_states),
+ cu_seqlens=cu_seqlens,
+ rotary_pos_emb=rotary_pos_emb,
+ **kwargs,
+ )
+ hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
+ return hidden_states
+
+
+class Qwen2_5_VisionRotaryEmbedding(Qwen2_5_VisionRotaryEmbedding):
+ pass
+
+
+class Qwen2_5OmniVisionEncoder(Qwen2_5_VisionTransformerPretrainedModel):
+ config: Qwen2_5OmniVisionEncoderConfig
+ input_modalities = ("image", "video")
+ _no_split_modules = ["Qwen2_5OmniVisionBlock"]
+ _input_embed_layer = "patch_embed"
+
+ def __init__(self, config: Qwen2_5OmniVisionEncoderConfig, *inputs, **kwargs) -> None:
+ super().__init__(config, *inputs, **kwargs)
+ self.blocks = nn.ModuleList([Qwen2_5OmniVisionBlock(config) for _ in range(config.depth)])
+
+ def forward(self, hidden_states: torch.Tensor, grid_thw: torch.Tensor, **kwargs) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.Tensor` of shape `(seq_len, hidden_size)`):
+                The packed, flattened pixel patches to be embedded and encoded.
+ grid_thw (`torch.Tensor` of shape `(num_images_or_videos, 3)`):
+ The temporal, height and width of feature shape of each image in LLM.
+
+ Returns:
+ `torch.Tensor`: hidden_states.
+ """
+ hidden_states = self.patch_embed(hidden_states)
+ rotary_pos_emb = self.rot_pos_emb(grid_thw)
+
+ window_index, cu_window_seqlens = self.get_window_index(grid_thw)
+ cu_window_seqlens = torch.tensor(
+ cu_window_seqlens,
+ device=hidden_states.device,
+ dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
+ )
+ cu_window_seqlens = torch.unique_consecutive(cu_window_seqlens)
+
+ seq_len, _ = hidden_states.size()
+ hidden_states = hidden_states.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
+ hidden_states = hidden_states[window_index, :, :]
+ hidden_states = hidden_states.reshape(seq_len, -1)
+ rotary_pos_emb = rotary_pos_emb.reshape(seq_len // self.spatial_merge_unit, self.spatial_merge_unit, -1)
+ rotary_pos_emb = rotary_pos_emb[window_index, :, :]
+ rotary_pos_emb = rotary_pos_emb.reshape(seq_len, -1)
+
+ cu_seqlens = torch.repeat_interleave(grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]).cumsum(
+ dim=0,
+ # Select dtype based on the following factors:
+ # - FA2 requires that cu_seqlens_q must have dtype int32
+ # - torch.onnx.export requires that cu_seqlens_q must have same dtype as grid_thw
+ # See https://github.com/huggingface/transformers/pull/34852 for more information
+ dtype=grid_thw.dtype if torch.jit.is_tracing() else torch.int32,
+ )
+ cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0)
+
+        # Alternate attention granularity per layer: full-attention layers use the
+        # per-image `cu_seqlens`, all other layers the window-level `cu_window_seqlens`.
+ for layer_num, blk in enumerate(self.blocks):
+ if layer_num in self.fullatt_block_indexes:
+ cu_seqlens_now = cu_seqlens
+ else:
+ cu_seqlens_now = cu_window_seqlens
+
+ hidden_states = blk(
+ hidden_states,
+ cu_seqlens=cu_seqlens_now,
+ rotary_pos_emb=rotary_pos_emb,
+ **kwargs,
+ )
+ hidden_states = self.merger(hidden_states)
+ reverse_indices = torch.argsort(window_index)
+ hidden_states = hidden_states[reverse_indices, :]
+
+ return hidden_states
+
+
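+# Hedged illustration (dummy grids) of the `cu_seqlens` construction above:
+# each (t, h, w) grid contributes t frames of h * w patches, and the padded
+# cumulative sum gives int32 boundaries between attention blocks.
+def _demo_vision_cu_seqlens():
+    grid_thw = torch.tensor([[2, 4, 4], [1, 2, 2]])  # (num_images_or_videos, 3)
+    cu_seqlens = torch.repeat_interleave(
+        grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0]
+    ).cumsum(dim=0, dtype=torch.int32)
+    return F.pad(cu_seqlens, (1, 0), value=0)  # tensor([0, 16, 32, 36])
+
+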
+class Qwen2_5OmniRotaryEmbedding(Qwen2VLRotaryEmbedding):
+ def __init__(self, config: Qwen2_5OmniThinkerConfig, device=None):
+ super().__init__(config, device)
+
+
+# It's the same as `Qwen2_5_VLAttention`, but the talker model's hidden_size isn't divisible
+# by num_heads, so the value error is removed as a workaround.
+class Qwen2_5OmniAttention(Qwen2_5_VLAttention):
+ def __init__(self, config: Qwen2_5OmniConfig, layer_idx: Optional[int] = None):
+ nn.Module.__init__(self)
+ self.config = config
+ self.layer_idx = layer_idx
+ if layer_idx is None:
+ logger.warning_once(
+ f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
+ "to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
+ "when creating this class."
+ )
+
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads)
+ self.num_key_value_heads = config.num_key_value_heads
+ self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+ self.is_causal = True
+ self.attention_dropout = config.attention_dropout
+ self.rope_parameters = config.rope_parameters
+ self.scaling = self.head_dim**-0.5
+
+ self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=True)
+ self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=True)
+ self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+ self.layer_type = config.layer_types[layer_idx] if hasattr(config, "layer_types") else None
+ self.sliding_window = config.sliding_window if self.layer_type == "sliding_attention" else None
+
+
+class Qwen2MLP(Qwen2_5_VLMLP):
+ pass
+
+
+class Qwen2_5OmniThinkerTextModel(Qwen2_5_VLTextModel):
+ config: Qwen2_5OmniTextConfig
+ _no_split_modules = ["Qwen2_5OmniDecoderLayer"]
+
+ def __init__(self, config: Qwen2_5OmniTextConfig):
+ super().__init__(config)
+
+
+@auto_docstring(
+ custom_intro="""
+    The Qwen2.5OmniThinker model, which consists of an audio backbone, a vision backbone and a language model.
+ """
+)
+class Qwen2_5OmniThinkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin):
+ config: Qwen2_5OmniThinkerConfig
+ base_model_prefix = "thinker"
+ _tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
+ _no_split_modules = ["Qwen2_5OmniAudioEncoder", "Qwen2_5OmniVisionEncoder"]
+
+ def __init__(self, config: Qwen2_5OmniThinkerConfig):
+ super().__init__(config)
+ self.audio_tower = Qwen2_5OmniAudioEncoder._from_config(config.audio_config)
+ self.visual = Qwen2_5OmniVisionEncoder._from_config(config.vision_config)
+ self.vocab_size = config.text_config.vocab_size
+ self.model = Qwen2_5OmniThinkerTextModel._from_config(config.text_config)
+ self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
+ self.spatial_merge_size = config.vision_config.spatial_merge_size
+ self.rope_deltas = None
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.model.set_input_embeddings(value)
+
+ def get_video_features(
+ self, pixel_values_videos: torch.FloatTensor, video_grid_thw: Optional[torch.LongTensor] = None
+ ):
+ """
+ Encodes videos into continuous embeddings that can be forwarded to the language model.
+
+ Args:
+ pixel_values_videos (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+ The tensors corresponding to the input videos.
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+ The temporal, height and width of feature shape of each video in LLM.
+ """
+ pixel_values_videos = pixel_values_videos.type(self.visual.dtype)
+ video_embeds = self.visual(pixel_values_videos, grid_thw=video_grid_thw)
+ return video_embeds
+
+ def get_image_features(self, pixel_values: torch.FloatTensor, image_grid_thw: Optional[torch.LongTensor] = None):
+ """
+ Encodes images into continuous embeddings that can be forwarded to the language model.
+
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+ The tensors corresponding to the input images.
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+ The temporal, height and width of feature shape of each image in LLM.
+ """
+ pixel_values = pixel_values.type(self.visual.dtype)
+ image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw)
+ return image_embeds
+
+ def get_audio_features(
+ self,
+ input_features: torch.FloatTensor,
+ feature_attention_mask: Optional[torch.LongTensor] = None,
+ audio_feature_lengths: Optional[torch.LongTensor] = None,
+ ):
+ """
+ Encodes audios into continuous embeddings that can be forwarded to the language model.
+
+ Args:
+ input_features (`torch.FloatTensor`):
+ The tensors corresponding to the input audios.
+            feature_attention_mask (`torch.LongTensor`, *optional*):
+                Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
+
+                - 1 for tokens that are **not masked**,
+                - 0 for tokens that are **masked**.
+ audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
+ The length of feature shape of each audio in LLM.
+ """
+ if feature_attention_mask is not None:
+ audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)
+ input_features = input_features.permute(0, 2, 1)[feature_attention_mask.bool()].permute(1, 0)
+ else:
+ audio_feature_lengths = None
+
+ audio_feat_lengths, audio_output_lengths = self.audio_tower._get_feat_extract_output_lengths(
+ audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1)
+ )
+ feature_lens = audio_feature_lengths if audio_feature_lengths is not None else feature_attention_mask.sum(-1)
+ audio_outputs = self.audio_tower(
+ input_features,
+ feature_lens=feature_lens,
+ aftercnn_lens=audio_feat_lengths,
+ )
+ audio_features = audio_outputs.last_hidden_state
+
+ if audio_features.shape[0] != sum(audio_output_lengths.tolist()):
+ raise ValueError("length of audio_features should match audio_output_lengths")
+
+ return audio_features
+
+ def get_placeholder_mask(
+ self,
+ input_ids: torch.LongTensor,
+ inputs_embeds: torch.FloatTensor,
+ image_features: Optional[torch.FloatTensor] = None,
+ video_features: Optional[torch.FloatTensor] = None,
+ ):
+ """
+ Obtains multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
+ equal to the length of multimodal features. If the lengths are different, an error is raised.
+ """
+ if input_ids is None:
+ special_image_mask = inputs_embeds == self.get_input_embeddings()(
+ torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
+ )
+ special_image_mask = special_image_mask.all(-1)
+ special_video_mask = inputs_embeds == self.get_input_embeddings()(
+ torch.tensor(self.config.video_token_id, dtype=torch.long, device=inputs_embeds.device)
+ )
+ special_video_mask = special_video_mask.all(-1)
+ special_audio_mask = (
+ inputs_embeds
+ == self.get_input_embeddings()(
+ torch.tensor(self.config.audio_token_id, dtype=torch.long, device=inputs_embeds.device)
+ )
+ ).all(-1)
+ else:
+ special_image_mask = input_ids == self.config.image_token_id
+ special_video_mask = input_ids == self.config.video_token_id
+ special_audio_mask = input_ids == self.config.audio_token_id
+
+ n_image_tokens = special_image_mask.sum()
+ special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
+ if image_features is not None and inputs_embeds[special_image_mask].numel() != image_features.numel():
+ raise ValueError(
+ f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {image_features.shape[0]}"
+ )
+
+ n_video_tokens = special_video_mask.sum()
+ special_video_mask = special_video_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
+ if video_features is not None and inputs_embeds[special_video_mask].numel() != video_features.numel():
+ raise ValueError(
+ f"Videos features and video tokens do not match: tokens: {n_video_tokens}, features {video_features.shape[0]}"
+ )
+
+ special_audio_mask = special_audio_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
+ return special_image_mask, special_video_mask, special_audio_mask
+
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ input_features: Optional[torch.FloatTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ pixel_values_videos: Optional[torch.FloatTensor] = None,
+ image_grid_thw: Optional[torch.LongTensor] = None,
+ video_grid_thw: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ feature_attention_mask: Optional[torch.Tensor] = None,
+ audio_feature_lengths: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ rope_deltas: Optional[torch.LongTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ use_audio_in_video: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ video_second_per_grid: Optional[torch.LongTensor] = None,
+ **kwargs: Unpack[TransformersKwargs],
+ ) -> Union[tuple, Qwen2_5OmniThinkerCausalLMOutputWithPast]:
+ r"""
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+ The temporal, height and width of feature shape of each image in LLM.
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+ The temporal, height and width of feature shape of each video in LLM.
+ feature_attention_mask (`torch.Tensor` of shape `(batch_size, feature_sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding feature indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+ audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
+ The length of feature shape of each audio in LLM.
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+ The rope index difference between sequence length and multimodal rope.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_audio_in_video (`bool`, *optional*):
+            Whether or not to use the audio track in the video; should be the same as the parameter in `process_audio_info`.
+ video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*):
+ Number of seconds per grid for each video, used for temporal feature mapping.
+
+ Example:
+
+ ```python
+ >>> from io import BytesIO
+ >>> from urllib.request import urlopen
+ >>> import librosa
+ >>> from qwen_vl_utils import process_vision_info
+ >>> from transformers import Qwen2_5OmniProcessor, Qwen2_5OmniThinkerForConditionalGeneration
+
+ >>> thinker = Qwen2_5OmniThinkerForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-Omni-7B")
+ >>> processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")
+
+    >>> conversations = [
+    ...     {'role': 'system', 'content': 'You are a helpful voice chat bot, and please respond to me in a casual conversation manner using random voice.'},
+    ...     {"role": "user", "content": [
+    ...         {"type": "image", "image_url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
+    ...         {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"},
+    ...     ]},
+    ... ]
+
+    >>> text = processor.apply_chat_template(conversations, add_generation_prompt=True, tokenize=False)
+    >>> audios = [librosa.load(BytesIO(urlopen(conversations[1]['content'][1]['audio_url']).read()), sr=processor.feature_extractor.sampling_rate)[0]]
+ >>> images, videos = process_vision_info(conversations)
+ >>> inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors="pt", padding=True)
+
+ >>> # Generate
+    >>> inputs['use_audio_in_video'] = True  # or False
+ >>> generation = thinker.generate(**inputs, max_new_tokens=2048)
+ >>> generate_ids = generation[:, inputs.input_ids.size(1):]
+
+ >>> response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if inputs_embeds is None:
+ # 1. Extract the input embeddings
+ inputs_embeds = self.get_input_embeddings()(input_ids)
+
+            # 2. Merge text, audio, image and video
+ if input_features is not None:
+ audio_features = self.get_audio_features(
+ input_features,
+ feature_attention_mask=feature_attention_mask,
+ audio_feature_lengths=audio_feature_lengths,
+ )
+ audio_features = audio_features.to(inputs_embeds.device, inputs_embeds.dtype)
+ _, _, audio_mask = self.get_placeholder_mask(input_ids, inputs_embeds=inputs_embeds)
+ inputs_embeds = inputs_embeds.masked_scatter(audio_mask, audio_features)
+
+ if pixel_values is not None:
+ image_embeds = self.get_image_features(pixel_values, image_grid_thw)
+ image_embeds = image_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
+ image_mask, _, _ = self.get_placeholder_mask(
+ input_ids, inputs_embeds=inputs_embeds, image_features=image_embeds
+ )
+ inputs_embeds = inputs_embeds.masked_scatter(image_mask, image_embeds)
+
+ if pixel_values_videos is not None:
+ video_embeds = self.get_video_features(pixel_values_videos, video_grid_thw)
+ video_embeds = video_embeds.to(inputs_embeds.device, inputs_embeds.dtype)
+ _, video_mask, _ = self.get_placeholder_mask(
+ input_ids, inputs_embeds=inputs_embeds, video_features=video_embeds
+ )
+ inputs_embeds = inputs_embeds.masked_scatter(video_mask, video_embeds)
+
+ if feature_attention_mask is not None:
+ audio_feature_lengths = torch.sum(feature_attention_mask, dim=1)
+ else:
+ audio_feature_lengths = None
+
+ if attention_mask is not None and position_ids is None:
+ past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length()
+ if past_key_values_length == 0 or self.rope_deltas is None:
+ delta0 = (1 - attention_mask).sum(dim=-1).unsqueeze(1)
+ position_ids, rope_deltas = self.get_rope_index(
+ input_ids,
+ image_grid_thw,
+ video_grid_thw,
+ attention_mask,
+ use_audio_in_video,
+ audio_feature_lengths,
+ video_second_per_grid,
+ )
+ rope_deltas = rope_deltas - delta0
+ self.rope_deltas = rope_deltas
+ else:
+ batch_size, seq_length = input_ids.shape
+ delta = (past_key_values_length + self.rope_deltas).to(input_ids.device)
+ position_ids = torch.arange(seq_length, device=input_ids.device)
+ position_ids = position_ids.view(1, -1).expand(batch_size, -1)
+ position_ids = position_ids.add(delta)
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
+
+ outputs = self.model(
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ cache_position=cache_position,
+ **kwargs,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ loss = self.loss_function(
+ logits=logits, labels=labels, vocab_size=self.config.get_text_config().vocab_size
+ )
+
+ if not return_dict:
+            output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return Qwen2_5OmniThinkerCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ rope_deltas=self.rope_deltas,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ cache_position=None,
+ position_ids=None,
+ use_cache=True,
+ pixel_values=None,
+ pixel_values_videos=None,
+ image_grid_thw=None,
+ video_grid_thw=None,
+ input_features=None,
+ feature_attention_mask=None,
+ use_audio_in_video=False,
+ video_second_per_grid=None,
+ is_first_iteration=False,
+ **kwargs,
+ ):
+ model_inputs = super().prepare_inputs_for_generation(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ cache_position=cache_position,
+ position_ids=position_ids,
+ use_cache=use_cache,
+ pixel_values=pixel_values,
+ pixel_values_videos=pixel_values_videos,
+ image_grid_thw=image_grid_thw,
+ video_grid_thw=video_grid_thw,
+ input_features=input_features,
+ feature_attention_mask=feature_attention_mask,
+ use_audio_in_video=use_audio_in_video,
+ video_second_per_grid=video_second_per_grid,
+ is_first_iteration=is_first_iteration,
+ **kwargs,
+ )
+
+ model_inputs["position_ids"] = None
+
+ if not is_first_iteration and use_cache:
+ model_inputs["pixel_values"] = None
+ model_inputs["pixel_values_videos"] = None
+ model_inputs["input_features"] = None
+
+ return model_inputs
+
+
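+# A minimal, self-contained sketch (toy sizes, hypothetical placeholder id 7)
+# of the `masked_scatter` merge in `forward` above: embedding rows at
+# placeholder positions are overwritten, in order, by the encoder outputs.
+def _demo_masked_scatter_merge():
+    input_ids = torch.tensor([[1, 7, 7, 2]])  # 7 stands in for the audio token id
+    inputs_embeds = torch.zeros(1, 4, 3)
+    audio_features = torch.ones(2, 3)         # one row per placeholder token
+    mask = (input_ids == 7).unsqueeze(-1).expand_as(inputs_embeds)
+    return inputs_embeds.masked_scatter(mask, audio_features)
+
+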
+############################
+# Start Talker #
+############################
+
+
+@dataclass
+@auto_docstring(
+ custom_intro="""
+ Base class for Qwen2.5OmniTalker causal language model (or autoregressive) outputs.
+ """
+)
+class Qwen2_5OmniTalkerCausalLMOutputWithPast(ModelOutput):
+ r"""
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+ The rope index difference between sequence length and multimodal rope.
+ thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Hidden states from the thinker model that are used as input for the talker model. These represent the encoded
+ response that the talker model will use to generate speech tokens.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: Optional[torch.FloatTensor] = None
+ past_key_values: Optional[Cache] = None
+ hidden_states: Optional[tuple[torch.FloatTensor]] = None
+ attentions: Optional[tuple[torch.FloatTensor]] = None
+ rope_deltas: Optional[torch.LongTensor] = None
+ thinker_reply_part: Optional[torch.FloatTensor] = None
+
+
+class Qwen2_5OmniTalkerModel(Qwen2_5_VLTextModel):
+ config: Qwen2_5OmniTalkerConfig
+ input_modalities = ("image", "video", "audio", "text")
+
+ _no_split_modules = ["Qwen2_5OmniTalkerDecoderLayer"]
+
+ def __init__(self, config: Qwen2_5OmniTalkerConfig):
+ super().__init__(config)
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.embedding_size, self.padding_idx)
+
+
+class Qwen2_5OmniTalkerForConditionalGeneration(Qwen2_5OmniPreTrainedModelForConditionalGeneration, GenerationMixin):
+ config: Qwen2_5OmniTalkerConfig
+ base_model_prefix = "talker"
+ output_modalities = ("audio",)
+
+ def __init__(self, config: Qwen2_5OmniTalkerConfig):
+ super().__init__(config)
+
+ self.thinker_to_talker_proj = nn.Linear(config.embedding_size, config.hidden_size)
+
+ self.model = Qwen2_5OmniTalkerModel(config)
+ self.codebook_size = config.vocab_size
+ self.codec_head = nn.Linear(config.hidden_size, self.codebook_size, bias=False)
+
+ self.codec_bos_token = config.tts_codec_start_token_id
+ self.codec_eos_token = config.tts_codec_end_token_id
+ self.codec_pad_token = config.tts_codec_pad_token_id
+ self.codec_mask_token = config.tts_codec_mask_token_id
+
+ self.text_bos_token = config.tts_text_start_token_id
+ self.text_eos_token = config.tts_text_end_token_id
+ self.text_pad_token = config.tts_text_pad_token_id
+
+ self.spatial_merge_size = self.config.spatial_merge_size
+ self.rope_deltas = None
+
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.model.set_input_embeddings(value)
+
+ @auto_docstring
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Cache] = None,
+ thinker_reply_part: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ rope_deltas: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ cache_position: Optional[torch.LongTensor] = None,
+ input_text_ids: Optional[torch.LongTensor] = None,
+ image_grid_thw: Optional[torch.LongTensor] = None,
+ video_grid_thw: Optional[torch.LongTensor] = None,
+ use_audio_in_video: Optional[bool] = None,
+ audio_feature_lengths: Optional[torch.LongTensor] = None,
+ video_second_per_grid: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[tuple, Qwen2_5OmniTalkerCausalLMOutputWithPast]:
+ r"""
+ thinker_reply_part (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Hidden states from the thinker model's output that represent the text reply part to be processed.
+ rope_deltas (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+ The rope index difference between sequence length and multimodal rope.
+ input_text_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Input token IDs for text-only content, used for position calculation in multimodal contexts.
+ image_grid_thw (`torch.LongTensor` of shape `(num_images, 3)`, *optional*):
+ The temporal, height and width of feature shape of each image in LLM.
+ video_grid_thw (`torch.LongTensor` of shape `(num_videos, 3)`, *optional*):
+ The temporal, height and width of feature shape of each video in LLM.
+ use_audio_in_video (`bool`, *optional*):
+            Whether or not to use the audio track in the video; should be the same as the parameter in `process_audio_info`.
+ audio_feature_lengths (`torch.LongTensor` of shape `(num_audios)`, *optional*):
+ The length of feature shape of each audio in LLM.
+ video_second_per_grid (`torch.LongTensor` of shape `(num_videos)`, *optional*):
+ Number of seconds per grid for each video, used for temporal feature mapping.
+
+ Example:
+
+ ```python
+ >>> from io import BytesIO
+ >>> from urllib.request import urlopen
+ >>> import librosa
+ >>> from transformers import AutoProcessor, Qwen2_5OmniTalkerForConditionalGeneration
+
+ >>> model = Qwen2_5OmniTalkerForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B")
+ >>> processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B")
+
+ >>> prompt = "<|audio_bos|><|AUDIO|><|audio_eos|>Generate the caption in English:"
+ >>> url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"
+    >>> audio, _ = librosa.load(BytesIO(urlopen(url).read()), sr=processor.feature_extractor.sampling_rate)
+
+ >>> inputs = processor(text=prompt, audio=audio, return_tensors="pt")
+
+ >>> # Generate
+ >>> generate_ids = model.generate(**inputs, max_length=30)
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ "Generate the caption in English: Glass is breaking."
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if attention_mask is not None and position_ids is None:
+ past_key_values_length = 0 if past_key_values is None else past_key_values.get_seq_length()
+ if past_key_values_length == 0 or self.rope_deltas is None:
+ position_ids, rope_deltas = self.get_rope_index(
+ input_text_ids,
+ image_grid_thw,
+ video_grid_thw,
+ attention_mask,
+ use_audio_in_video,
+ audio_feature_lengths,
+ video_second_per_grid,
+ )
+
+ inputs_embeds[:, -1, :] += self.get_input_embeddings()(
+ torch.tensor([self.codec_bos_token], dtype=torch.long, device=inputs_embeds.device)
+ )
+ inputs_embeds[:, -2, :] += self.get_input_embeddings()(
+ torch.tensor([self.codec_pad_token], dtype=torch.long, device=inputs_embeds.device)
+ )
+ self.rope_deltas = rope_deltas
+
+ else:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ delta = (past_key_values_length + self.rope_deltas).to(input_ids.device)
+ position_ids = torch.arange(seq_length, device=input_ids.device)
+ position_ids = position_ids.view(1, -1).expand(batch_size, -1)
+ position_ids = position_ids.add(delta)
+ position_ids = position_ids.unsqueeze(0).expand(3, -1, -1)
+
+ if inputs_embeds is None:
+            # 1. Decoding steps after the prefill: combine the codec token embedding
+            #    with the next chunk of the thinker's reply hidden states
+ codec_embeds = self.get_input_embeddings()(input_ids)
+ inputs_embeds = codec_embeds + thinker_reply_part[:, :1, :]
+ if thinker_reply_part.shape[1] > 1:
+ thinker_reply_part = thinker_reply_part[:, 1:, :]
+
+ talker_lm_input = self.thinker_to_talker_proj(inputs_embeds)
+
+ if attention_mask is not None:
+ attention_mask = attention_mask.to(inputs_embeds.device)
+
+ outputs = self.model(
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=talker_lm_input,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.codec_head(hidden_states)
+ logits = logits.float()
+
+ loss = None
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return Qwen2_5OmniTalkerCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=hidden_states,
+ attentions=outputs.attentions,
+ rope_deltas=self.rope_deltas,
+ thinker_reply_part=thinker_reply_part,
+ )
+
+ def _get_initial_cache_position(self, seq_length, device, model_kwargs):
+ # Talker needs to calculate cache_position with input_ids, so pop inputs_embeds temporarily
+ inputs_embeds = model_kwargs.pop("inputs_embeds")
+ model_kwargs = super()._get_initial_cache_position(seq_length, device, model_kwargs)
+ model_kwargs["inputs_embeds"] = inputs_embeds
+ return model_kwargs
+
+ # prepare inputs for talker lm generation
+ def prepare_inputs_for_generation(
+ self,
+ input_ids,
+ input_text_ids,
+ past_key_values=None,
+ attention_mask=None,
+ inputs_embeds=None,
+ thinker_reply_part=None,
+ cache_position=None,
+ position_ids=None,
+ use_cache=True,
+ pixel_values=None,
+ pixel_values_videos=None,
+ image_grid_thw=None,
+ video_grid_thw=None,
+ input_audio_features=None,
+ audio_feature_attention_mask=None,
+ audio_feature_lengths=None,
+ use_audio_in_video=False,
+ video_second_per_grid=None,
+ **kwargs,
+ ):
+ model_inputs = super().prepare_inputs_for_generation(
+ input_ids,
+ past_key_values,
+ attention_mask,
+ inputs_embeds,
+ cache_position,
+ use_cache=use_cache,
+ thinker_reply_part=thinker_reply_part,
+ input_text_ids=input_text_ids,
+ image_grid_thw=image_grid_thw,
+ video_grid_thw=video_grid_thw,
+ use_audio_in_video=use_audio_in_video,
+ audio_feature_lengths=audio_feature_lengths,
+ video_second_per_grid=video_second_per_grid,
+ **kwargs,
+ )
+
+ model_inputs["position_ids"] = None
+
+ return model_inputs
+
+ def _update_model_kwargs_for_generation(
+ self,
+ outputs: ModelOutput,
+ model_kwargs: dict[str, Any],
+ is_encoder_decoder: bool = False,
+ num_new_tokens: int = 1,
+ ) -> dict[str, Any]:
+ model_kwargs = super()._update_model_kwargs_for_generation(
+ outputs, model_kwargs, is_encoder_decoder, num_new_tokens
+ )
+
+ if getattr(outputs, "thinker_reply_part", None) is not None:
+ model_kwargs["thinker_reply_part"] = outputs.thinker_reply_part
+
+ return model_kwargs
+
+
+############################
+# Start Token2Wav #
+############################
+
+
+class Qwen2_5OmniDiTRotaryEmbedding(LlamaRotaryEmbedding):
+ def __init__(self, config: Qwen2_5OmniDiTConfig, device=None):
+ super().__init__(config, device=device)
+
+ @staticmethod
+ def compute_default_rope_parameters(
+ config: Optional[Qwen2_5OmniDiTConfig] = None,
+ device: Optional["torch.device"] = None,
+ seq_len: Optional[int] = None,
+ ) -> tuple["torch.Tensor", float]:
+ return super().compute_default_rope_parameters(
+ config,
+ device=device,
+ seq_len=seq_len,
+ )
+
+
+# Modified from Llama with a different rotate function; will be fixed in a future release
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`, *optional*):
+ Deprecated and unused.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+
+ def rotate_half_codec(x):
+ # x = rearrange(x, "... (d r) -> ... d r", r=2)
+ x = x.reshape(*x.shape[:-1], -1, 2)
+ x1, x2 = x.unbind(dim=-1)
+ x = torch.stack((-x2, x1), dim=-1)
+ return x.reshape(*x.shape[:-2], -1)
+
+ cos = cos.unsqueeze(unsqueeze_dim)
+ sin = sin.unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half_codec(q) * sin)
+ k_embed = (k * cos) + (rotate_half_codec(k) * sin)
+ return q_embed, k_embed
+
+
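+# Hedged illustration (dummy shapes) of the interleaved rotation above: unlike
+# the half-split `rotate_half`, `rotate_half_codec` pairs features (0, 1),
+# (2, 3), ..., and with cos/sin repeated pairwise the per-vector norm of q and
+# k is preserved.
+def _demo_codec_rope_preserves_norm():
+    q = k = torch.randn(1, 2, 4, 8)  # (batch, heads, seq_len, head_dim)
+    angles = torch.randn(1, 4, 4)    # (batch, seq_len, head_dim // 2)
+    cos = angles.cos().repeat_interleave(2, dim=-1)
+    sin = angles.sin().repeat_interleave(2, dim=-1)
+    q_embed, _ = apply_rotary_pos_emb(q, k, cos, sin)
+    assert torch.allclose(q.norm(dim=-1), q_embed.norm(dim=-1), atol=1e-5)
+    return q_embed
+
+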
+class TimeDelayNetBlock(nn.Module):
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ kernel_size,
+ dilation,
+ ):
+ super().__init__()
+ self.conv = nn.Conv1d(
+ in_channels=in_channels,
+ out_channels=out_channels,
+ kernel_size=kernel_size,
+ dilation=dilation,
+ padding="same",
+ padding_mode="reflect",
+ )
+ self.activation = nn.ReLU()
+
+ def forward(self, hidden_states: torch.Tensor):
+ return self.activation(self.conv(hidden_states))
+
+
+class Res2NetBlock(torch.nn.Module):
+ def __init__(self, in_channels, out_channels, scale=8, kernel_size=3, dilation=1):
+ super().__init__()
+
+ in_channel = in_channels // scale
+ hidden_channel = out_channels // scale
+
+ self.blocks = nn.ModuleList(
+ [
+ TimeDelayNetBlock(
+ in_channel,
+ hidden_channel,
+ kernel_size=kernel_size,
+ dilation=dilation,
+ )
+ for i in range(scale - 1)
+ ]
+ )
+ self.scale = scale
+
+ def forward(self, hidden_states):
+ outputs = []
+ for i, hidden_part in enumerate(torch.chunk(hidden_states, self.scale, dim=1)):
+ if i == 0:
+ output_part = hidden_part
+ elif i == 1:
+ output_part = self.blocks[i - 1](hidden_part)
+ else:
+ output_part = self.blocks[i - 1](hidden_part + output_part)
+ outputs.append(output_part)
+ output = torch.cat(outputs, dim=1)
+ return output
+
+
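+# Toy sketch (assumed sizes) of the Res2Net flow above: the input is chunked
+# into `scale` channel groups; group 0 passes through, and each later group is
+# convolved after adding the previous group's output, nesting receptive fields
+# within a single block while preserving the overall shape.
+def _demo_res2net_chunking():
+    block = Res2NetBlock(in_channels=16, out_channels=16, scale=4)
+    x = torch.randn(2, 16, 50)  # (batch, channels, time)
+    return block(x).shape       # torch.Size([2, 16, 50])
+
+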
+class SqueezeExcitationBlock(nn.Module):
+ def __init__(self, in_channels, se_channels, out_channels):
+ super().__init__()
+
+ self.conv1 = nn.Conv1d(
+ in_channels=in_channels,
+ out_channels=se_channels,
+ kernel_size=1,
+ padding="same",
+ padding_mode="reflect",
+ )
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = nn.Conv1d(
+ in_channels=se_channels,
+ out_channels=out_channels,
+ kernel_size=1,
+ padding="same",
+ padding_mode="reflect",
+ )
+ self.sigmoid = nn.Sigmoid()
+
+ def forward(self, hidden_states):
+ hidden_states_mean = hidden_states.mean(dim=2, keepdim=True)
+
+ hidden_states_mean = self.relu(self.conv1(hidden_states_mean))
+ hidden_states_mean = self.sigmoid(self.conv2(hidden_states_mean))
+
+ return hidden_states * hidden_states_mean
+
+
+class AttentiveStatisticsPooling(nn.Module):
+ """This class implements an attentive statistic pooling layer for each channel.
+ It returns the concatenated mean and std of the input tensor.
+ """
+
+ def __init__(self, channels, attention_channels=128):
+ super().__init__()
+
+ self.eps = 1e-12
+ self.tdnn = TimeDelayNetBlock(channels * 3, attention_channels, 1, 1)
+ self.tanh = nn.Tanh()
+ self.conv = nn.Conv1d(
+ in_channels=attention_channels,
+ out_channels=channels,
+ kernel_size=1,
+ padding="same",
+ padding_mode="reflect",
+ )
+
+ def _length_to_mask(self, length, max_len=None, dtype=None, device=None):
+ """Creates a binary mask for each sequence.
+
+ Reference: https://discuss.pytorch.org/t/how-to-generate-variable-length-mask/23397/3
+
+ Arguments
+ ---------
+ length : torch.LongTensor
+ Containing the length of each sequence in the batch. Must be 1D.
+ max_len : int
+ Max length for the mask, also the size of the second dimension.
+ dtype : torch.dtype, default: None
+ The dtype of the generated mask.
+ device: torch.device, default: None
+ The device to put the mask variable.
+
+ Returns
+ -------
+ mask : tensor
+ The binary mask.
+ """
+
+ if max_len is None:
+            max_len = length.max().long().item()
+        # use arange against each length to generate the binary mask
+ mask = torch.arange(max_len, device=length.device, dtype=length.dtype).expand(
+ len(length), max_len
+ ) < length.unsqueeze(1)
+
+ mask = torch.as_tensor(mask, dtype=dtype, device=device)
+ return mask
+
+ def _compute_statistics(self, x, m, dim=2):
+ mean = (m * x).sum(dim)
+ std = torch.sqrt((m * (x - mean.unsqueeze(dim)).pow(2)).sum(dim).clamp(self.eps))
+ return mean, std
+
+ def forward(self, hidden_states):
+ seq_length = hidden_states.shape[-1]
+ lengths = torch.ones(hidden_states.shape[0], device=hidden_states.device)
+
+ # Make binary mask of shape [N, 1, L]
+ mask = self._length_to_mask(
+ lengths * seq_length, max_len=seq_length, dtype=hidden_states.dtype, device=hidden_states.device
+ )
+ mask = mask.unsqueeze(1)
+
+ # Expand the temporal context of the pooling layer by allowing the
+ # self-attention to look at global properties of the utterance.
+ total = mask.sum(dim=2, keepdim=True)
+
+ mean, std = self._compute_statistics(hidden_states, mask / total)
+ mean = mean.unsqueeze(2).repeat(1, 1, seq_length)
+ std = std.unsqueeze(2).repeat(1, 1, seq_length)
+ attention = torch.cat([hidden_states, mean, std], dim=1)
+
+ # Apply layers
+ attention = self.conv(self.tanh(self.tdnn(attention)))
+
+ # Filter out zero-paddings
+ attention = attention.masked_fill(mask == 0, float("-inf"))
+
+ attention = F.softmax(attention, dim=2)
+ mean, std = self._compute_statistics(hidden_states, attention)
+ # Append mean and std of the batch
+ pooled_stats = torch.cat((mean, std), dim=1)
+ pooled_stats = pooled_stats.unsqueeze(2)
+
+ return pooled_stats
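+
+
+# Illustration only: attentive statistics pooling maps a [batch, channels, time]
+# feature map to attention-weighted per-channel mean and std, concatenated into
+# a [batch, 2 * channels, 1] summary vector.
+def _demo_attentive_statistics_pooling():
+    pooling = AttentiveStatisticsPooling(channels=64, attention_channels=16)
+    x = torch.randn(2, 64, 100)
+    assert pooling(x).shape == (2, 128, 1)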
+
+
+class SqueezeExcitationRes2NetBlock(nn.Module):
+ """An implementation of building block in ECAPA-TDNN, i.e.,
+ TDNN-Res2Net-TDNN-SqueezeExcitationBlock.
+ """
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ res2net_scale=8,
+ se_channels=128,
+ kernel_size=1,
+ dilation=1,
+ ):
+ super().__init__()
+ self.out_channels = out_channels
+ self.tdnn1 = TimeDelayNetBlock(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ dilation=1,
+ )
+ self.res2net_block = Res2NetBlock(out_channels, out_channels, res2net_scale, kernel_size, dilation)
+ self.tdnn2 = TimeDelayNetBlock(
+ out_channels,
+ out_channels,
+ kernel_size=1,
+ dilation=1,
+ )
+ self.se_block = SqueezeExcitationBlock(out_channels, se_channels, out_channels)
+
+ def forward(self, hidden_state):
+ residual = hidden_state
+
+ hidden_state = self.tdnn1(hidden_state)
+ hidden_state = self.res2net_block(hidden_state)
+ hidden_state = self.tdnn2(hidden_state)
+ hidden_state = self.se_block(hidden_state)
+
+ return hidden_state + residual
+
+
+class ECAPA_TimeDelayNet(torch.nn.Module):
+ """An implementation of the speaker embedding model in a paper.
+ "ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in
+ TDNN Based Speaker Verification" (https://huggingface.co/papers/2005.07143).
+ """
+
+ def __init__(self, config: Qwen2_5OmniDiTConfig):
+ super().__init__()
+ if len(config.enc_channels) != len(config.enc_kernel_sizes) or len(config.enc_channels) != len(
+ config.enc_dilations
+ ):
+ raise ValueError("enc_channels, enc_kernel_sizes and enc_dilations should have same length")
+ self.channels = config.enc_channels
+ self.blocks = nn.ModuleList()
+
+ # The initial TDNN layer
+ self.blocks.append(
+ TimeDelayNetBlock(
+ config.mel_dim,
+ config.enc_channels[0],
+ config.enc_kernel_sizes[0],
+ config.enc_dilations[0],
+ )
+ )
+
+ # SE-Res2Net layers
+ for i in range(1, len(config.enc_channels) - 1):
+ self.blocks.append(
+ SqueezeExcitationRes2NetBlock(
+ config.enc_channels[i - 1],
+ config.enc_channels[i],
+ res2net_scale=config.enc_res2net_scale,
+ se_channels=config.enc_se_channels,
+ kernel_size=config.enc_kernel_sizes[i],
+ dilation=config.enc_dilations[i],
+ )
+ )
+
+ # Multi-layer feature aggregation
+ self.mfa = TimeDelayNetBlock(
+ config.enc_channels[-1],
+ config.enc_channels[-1],
+ config.enc_kernel_sizes[-1],
+ config.enc_dilations[-1],
+ )
+
+ # Attentive Statistical Pooling
+ self.asp = AttentiveStatisticsPooling(
+ config.enc_channels[-1],
+ attention_channels=config.enc_attention_channels,
+ )
+
+ # Final linear transformation
+ self.fc = nn.Conv1d(
+ in_channels=config.enc_channels[-1] * 2,
+ out_channels=config.enc_dim,
+ kernel_size=1,
+ padding="same",
+ padding_mode="reflect",
+ )
+
+ def forward(self, hidden_states):
+ # Minimize transpose for efficiency
+ hidden_states = hidden_states.transpose(1, 2)
+
+ hidden_states_list = []
+ for layer in self.blocks:
+ hidden_states = layer(hidden_states)
+ hidden_states_list.append(hidden_states)
+
+ # Multi-layer feature aggregation
+ hidden_states = torch.cat(hidden_states_list[1:], dim=1)
+ hidden_states = self.mfa(hidden_states)
+
+ # Attentive Statistical Pooling
+ hidden_states = self.asp(hidden_states)
+
+ # Final linear transformation
+ hidden_states = self.fc(hidden_states)
+
+ hidden_states = hidden_states.squeeze(-1)
+ return hidden_states
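+
+
+# Shape sketch (illustration, assuming the reference ECAPA layout with
+# enc_channels = [512, 512, 512, 512, 1536]): the [batch, time, mel_dim] input
+# is transposed to [batch, mel_dim, time]; the three SE-Res2Net blocks each emit
+# 512 channels, so their concatenation (3 * 512 = 1536) matches the MFA layer's
+# expected input, and attentive pooling plus the final 1x1 conv produce a
+# [batch, enc_dim] speaker embedding after the trailing squeeze.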
+
+
+class DiTInputEmbedding(nn.Module):
+ def __init__(self, config: Qwen2_5OmniDiTConfig):
+ super().__init__()
+ self.proj = nn.Linear(
+ config.mel_dim + config.enc_dim + config.enc_emb_dim + config.emb_dim,
+ config.hidden_size,
+ )
+ self.spk_encoder = ECAPA_TimeDelayNet(config)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ speaker_embedding: torch.Tensor,
+ condition_vector: torch.Tensor,
+ code_embed: torch.Tensor,
+ drop_audio_cond: Optional[bool] = False,
+ code_embed_uncond: Optional[bool] = None,
+ apply_cfg: Optional[bool] = True,
+ ):
+ if apply_cfg:
+ hidden_states = torch.cat([hidden_states, hidden_states], dim=0)
+ speaker_embedding = torch.cat([speaker_embedding, torch.zeros_like(speaker_embedding)], dim=0)
+ condition_vector = torch.cat([condition_vector, torch.zeros_like(condition_vector)], dim=0)
+ code_embed = torch.cat([code_embed, code_embed_uncond], dim=0)
+ elif drop_audio_cond: # cfg for cond audio
+ condition_vector = torch.zeros_like(condition_vector)
+ speaker_embedding = torch.zeros_like(speaker_embedding)
+ condition_vector = self.spk_encoder(condition_vector).unsqueeze(1).repeat(1, hidden_states.size(1), 1)
+ hidden_states = self.proj(torch.cat((hidden_states, condition_vector, code_embed, speaker_embedding), dim=-1))
+
+ return hidden_states
+
+
+# Codec (speech token) embedding feeding the DiT transformer backbone
+class DiTCodecEmbedding(nn.Module):
+ def __init__(self, codec_num_embeds, codec_dim, repeats):
+ super().__init__()
+ self.repeats = repeats
+ self.codec_embed = nn.Embedding(codec_num_embeds + 1, codec_dim)
+
+ def forward(self, code, drop_code=False):
+ if drop_code:
+ code = torch.zeros_like(code)
+ code_embed = self.codec_embed(code)
+
+ code_embed = torch.repeat_interleave(code_embed, repeats=self.repeats, dim=1)
+ return code_embed
+
+
+# AdaLayerNormZero
+# returns the modulated x for attention input, plus parameters for later MLP modulation
+class Qwen2_5_OmniAdaLayerNormZero(nn.Module):
+ def __init__(self, dim):
+ super().__init__()
+
+ self.silu = nn.SiLU()
+ self.linear = nn.Linear(dim, dim * 6)
+
+ self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
+
+ def forward(self, hidden_states, emb=None):
+ emb = self.linear(self.silu(emb))
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = torch.chunk(emb, 6, dim=1)
+
+ hidden_states = self.norm(hidden_states) * (1 + scale_msa[:, None]) + shift_msa[:, None]
+ return hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp
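+
+
+# Illustration only: a minimal sketch showing how the conditioning embedding is
+# projected into six chunks that shift/scale the pre-attention norm and gate the
+# attention and MLP residual branches.
+def _demo_ada_layer_norm_zero():
+    norm = Qwen2_5_OmniAdaLayerNormZero(dim=32)
+    x, emb = torch.randn(2, 10, 32), torch.randn(2, 32)
+    modulated, gate_msa, shift_mlp, scale_mlp, gate_mlp = norm(x, emb=emb)
+    assert modulated.shape == (2, 10, 32) and gate_msa.shape == (2, 32)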
+
+
+# AdaLayerNormZero for final layer
+# returns only the modulated x, since the final layer has no further MLP modulation
+class Qwen2_5_OmniAdaLayerNormZero_Final(nn.Module):
+ def __init__(self, dim):
+ super().__init__()
+
+ self.silu = nn.SiLU()
+ self.linear = nn.Linear(dim, dim * 2)
+
+ self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
+
+ def forward(self, hidden_states, emb):
+ emb = self.linear(self.silu(emb))
+ scale, shift = torch.chunk(emb, 2, dim=1)
+
+ hidden_states = self.norm(hidden_states) * (1 + scale)[:, None, :] + shift[:, None, :]
+ return hidden_states
+
+
+# FeedForward
+class DiTMLP(nn.Module):
+ def __init__(self, dim, mult=4, dropout=0.0):
+ super().__init__()
+ inner_dim = int(dim * mult)
+
+ self.ff = nn.ModuleList(
+ [
+ nn.Linear(dim, inner_dim),
+ nn.GELU(approximate="tanh"),
+ nn.Dropout(dropout),
+ nn.Linear(inner_dim, dim),
+ ]
+ )
+
+ def forward(self, hidden_states):
+ for layer in self.ff:
+ hidden_states = layer(hidden_states)
+ return hidden_states
+
+
+class DiTAttention(nn.Module):
+ def __init__(self, config: Qwen2_5OmniDiTConfig):
+ super().__init__()
+
+ self.config = config
+ self.dim = config.hidden_size
+ self.heads = config.num_attention_heads
+ self.inner_dim = config.head_dim * config.num_attention_heads
+ self.dropout = config.dropout
+ self.is_causal = False
+
+ self.to_q = nn.Linear(config.hidden_size, self.inner_dim)
+ self.to_k = nn.Linear(config.hidden_size, self.inner_dim)
+ self.to_v = nn.Linear(config.hidden_size, self.inner_dim)
+
+ self.to_out = nn.ModuleList([nn.Linear(self.inner_dim, config.hidden_size), nn.Dropout(config.dropout)])
+
+ def forward(
+ self,
+ hidden_states, # noised input x
+ position_embeddings=None, # rotary position embedding for x
+ attention_mask=None,
+ ) -> torch.Tensor:
+ batch_size = hidden_states.shape[0]
+
+ # `sample` projections.
+ query = self.to_q(hidden_states)
+ key = self.to_k(hidden_states)
+ value = self.to_v(hidden_states)
+
+ # attention
+ inner_dim = key.shape[-1]
+ head_dim = inner_dim // self.heads
+ query = query.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
+ key = key.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
+ value = value.view(batch_size, -1, self.heads, head_dim).transpose(1, 2)
+
+ # apply rotary position embedding
+        # Due to the training process, RoPE is applied only to the first head; this will be fixed in a future release
+ cos, sin = position_embeddings
+ query[:, :1], key[:, :1] = apply_rotary_pos_emb(query[:, :1], key[:, :1], cos, sin)
+
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
+ attention_weights, _ = attention_interface(
+ self,
+ query,
+ key,
+ value,
+ attention_mask=attention_mask,
+ is_causal=False,
+ )
+
+        # mask, e.g. when inference receives a batch with different target durations, the padding is masked out
+ attention_weights = attention_weights.reshape(batch_size, -1, self.heads * head_dim)
+ attention_weights = attention_weights.to(query.dtype)
+
+ # linear proj
+ attention_output = self.to_out[0](attention_weights)
+ attention_output = self.to_out[1](attention_output)
+
+ return attention_output
+
+
+# time step conditioning embedding
+class SinusPositionEmbedding(nn.Module):
+ def __init__(self, dim):
+ super().__init__()
+ self.dim = dim
+
+ def forward(self, hidden_states, scale=1000):
+ device = hidden_states.device
+ half_dim = self.dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb)
+ emb = scale * hidden_states.unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
+ return emb.type_as(hidden_states)
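+
+
+# Illustration only: diffusion timesteps in [0, 1] are scaled by 1000 and mapped
+# to a [batch, dim] sin/cos embedding with log-spaced frequencies.
+def _demo_sinus_position_embedding():
+    embed = SinusPositionEmbedding(dim=256)
+    timesteps = torch.rand(4)  # one timestep per batch element
+    assert embed(timesteps).shape == (4, 256)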
+
+
+class DiTTimestepEmbedding(nn.Module):
+ def __init__(self, dim, freq_embed_dim=256):
+ super().__init__()
+ self.time_embed = SinusPositionEmbedding(freq_embed_dim)
+ self.time_mlp = nn.ModuleList([nn.Linear(freq_embed_dim, dim), nn.SiLU(), nn.Linear(dim, dim)])
+
+ def forward(self, timestep):
+ time_hidden = self.time_embed(timestep)
+ time_hidden = time_hidden.to(timestep.dtype)
+ for layer in self.time_mlp:
+ time_hidden = layer(time_hidden) # b d
+ return time_hidden
+
+
+class DiTDecoderLayer(nn.Module):
+ def __init__(self, config: Qwen2_5OmniDiTConfig, look_ahead_block=0, look_backward_block=0):
+ super().__init__()
+ self.attn_norm = Qwen2_5_OmniAdaLayerNormZero(config.hidden_size)
+
+ self.attn = DiTAttention(config)
+ self.look_ahead_block = look_ahead_block
+ self.look_backward_block = look_backward_block
+ self.ff_norm = nn.LayerNorm(config.hidden_size, elementwise_affine=False, eps=1e-6)
+ self.ff = DiTMLP(dim=config.hidden_size, mult=config.ff_mult, dropout=config.dropout)
+
+ def forward(
+ self, hidden_states, timestep, position_embeddings=None, block_diff=None
+ ): # x: noised input, t: time embedding
+ # pre-norm & modulation for attention input
+ norm, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.attn_norm(hidden_states, emb=timestep)
+
+ # attention
+ attn_output = self.attn(
+ hidden_states=norm,
+ position_embeddings=position_embeddings,
+ attention_mask=(block_diff >= -float(self.look_backward_block))
+ & (block_diff <= float(self.look_ahead_block)),
+ )
+
+ # process attention output for input x
+ hidden_states = hidden_states + gate_msa.unsqueeze(1) * attn_output
+
+ norm = self.ff_norm(hidden_states) * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
+ ff_output = self.ff(norm)
+ hidden_states = hidden_states + gate_mlp.unsqueeze(1) * ff_output
+
+ return hidden_states
+
+
+class SnakeBeta(nn.Module):
+ """
+ A modified Snake function which uses separate parameters for the magnitude of the periodic components
+ Shape:
+ - Input: (B, C, T)
+ - Output: (B, C, T), same shape as the input
+ Parameters:
+ - alpha - trainable parameter that controls frequency
+ - beta - trainable parameter that controls magnitude
+ References:
+ - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda:
+ https://huggingface.co/papers/2006.08195
+ """
+
+ def __init__(self, in_features, alpha=1.0):
+ super().__init__()
+ self.in_features = in_features
+
+        # initialize alpha and beta to zero (log scale), so exp() yields 1 in forward
+        self.alpha = Parameter(torch.zeros(in_features) * alpha)
+        self.beta = Parameter(torch.zeros(in_features) * alpha)
+
+ self.no_div_by_zero = 0.000000001
+
+ def forward(self, hidden_states):
+ """
+ Forward pass of the function.
+ Applies the function to the input elementwise.
+ SnakeBeta ∶= x + 1/b * sin^2 (xa)
+ """
+ alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T]
+ beta = self.beta.unsqueeze(0).unsqueeze(-1)
+ alpha = torch.exp(alpha)
+ beta = torch.exp(beta)
+ hidden_states = hidden_states + (1.0 / (beta + self.no_div_by_zero)) * torch.pow(
+ torch.sin(hidden_states * alpha), 2
+ )
+
+ return hidden_states
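+
+
+# Illustration only: alpha and beta start at zero, so exp() gives frequency and
+# magnitude 1 and the activation begins as x + sin^2(x) before any training.
+def _demo_snake_beta():
+    act = SnakeBeta(in_features=8)
+    x = torch.randn(2, 8, 50)  # [batch, channels, time]
+    assert act(x).shape == x.shape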
+
+
+def kaiser_sinc_filter1d(cutoff, half_width, kernel_size):
+ """Generates a 1D Kaiser-windowed sinc filter.
+
+ Args:
+ cutoff (float): Normalized cutoff frequency (0 to 0.5).
+ half_width (float): Transition bandwidth.
+ kernel_size (int): Number of filter taps.
+
+ Returns:
+ torch.Tensor: A tensor of shape (1, 1, kernel_size) representing the filter.
+ """
+ is_even = kernel_size % 2 == 0
+ half_size = kernel_size // 2
+
+ # Compute Kaiser window parameters
+ delta_f = 4 * half_width
+ attenuation = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
+
+ if attenuation > 50.0:
+ beta = 0.1102 * (attenuation - 8.7)
+ elif attenuation >= 21.0:
+ beta = 0.5842 * (attenuation - 21) ** 0.4 + 0.07886 * (attenuation - 21.0)
+ else:
+ beta = 0.0
+
+ kaiser_window = torch.kaiser_window(kernel_size, beta=beta, periodic=False, dtype=torch.float32)
+
+ # Compute time indices
+ if is_even:
+ time_indices = torch.arange(-half_size, half_size) + 0.5
+ else:
+ time_indices = torch.arange(kernel_size) - half_size
+
+ # Compute sinc filter
+ if cutoff == 0:
+ return torch.zeros((1, 1, kernel_size), dtype=torch.float32) # Ensures correct shape
+
+ sinc_filter = torch.sinc(2 * cutoff * time_indices)
+ normalized_filter = 2 * cutoff * kaiser_window * sinc_filter
+
+ # Normalize to ensure sum = 1 (avoid leakage of constant component)
+ normalized_filter /= normalized_filter.sum()
+
+ return normalized_filter.view(1, 1, kernel_size)
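+
+
+# Illustration only: the low-pass prototype used by UpSample1d(ratio=2), i.e.
+# cutoff = 0.5 / 2 and half_width = 0.6 / 2, normalized to unit DC gain.
+def _demo_kaiser_sinc_filter1d():
+    taps = kaiser_sinc_filter1d(cutoff=0.25, half_width=0.3, kernel_size=12)
+    assert taps.shape == (1, 1, 12)
+    assert torch.allclose(taps.sum(), torch.tensor(1.0), atol=1e-5)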
+
+
+class UpSample1d(nn.Module):
+ def __init__(self, ratio=2, kernel_size=None):
+ super().__init__()
+ self.ratio = ratio
+ self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size
+ self.stride = ratio
+ self.pad = self.kernel_size // ratio - 1
+ self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2
+ self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2
+
+ filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, half_width=0.6 / ratio, kernel_size=self.kernel_size)
+ self.register_buffer("filter", filter, persistent=False)
+
+ def forward(self, hidden_states):
+ channels = hidden_states.shape[1]
+
+ hidden_states = F.pad(hidden_states, (self.pad, self.pad), mode="replicate")
+ hidden_states = self.ratio * F.conv_transpose1d(
+ hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels
+ )
+ hidden_states = hidden_states[..., self.pad_left : -self.pad_right]
+
+ return hidden_states
+
+
+class DownSample1d(nn.Module):
+ def __init__(self, ratio=2, kernel_size=None):
+ super().__init__()
+ cutoff = 0.5 / ratio
+ half_width = 0.6 / ratio
+ self.cutoff = cutoff
+ self.half_width = half_width
+ self.kernel_size = kernel_size
+
+ if cutoff < 0.0:
+ raise ValueError("Minimum cutoff must be larger than zero.")
+ if cutoff > 0.5:
+ raise ValueError("A cutoff above 0.5 does not make sense.")
+
+ self.even = kernel_size % 2 == 0
+ self.pad_left = kernel_size // 2 - int(self.even)
+ self.pad_right = kernel_size // 2
+ self.stride = ratio
+ filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size)
+ self.register_buffer("filter", filter, persistent=False)
+
+ def forward(self, hidden_states):
+ channels = hidden_states.shape[1]
+ hidden_states = F.pad(hidden_states, (self.pad_left, self.pad_right), mode="replicate")
+ out = F.conv1d(hidden_states, self.filter.expand(channels, -1, -1), stride=self.stride, groups=channels)
+ return out
+
+
+class TorchActivation1d(nn.Module):
+ def __init__(
+ self,
+ activation,
+ up_ratio: int = 2,
+ down_ratio: int = 2,
+ up_kernel_size: int = 12,
+ down_kernel_size: int = 12,
+ ):
+ super().__init__()
+ if not callable(activation):
+ raise TypeError("Activation function must be callable")
+ self.act = activation
+ self.upsample = UpSample1d(up_ratio, up_kernel_size)
+ self.downsample = DownSample1d(down_ratio, down_kernel_size)
+
+ def forward(self, hidden_states):
+ hidden_states = self.upsample(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.downsample(hidden_states)
+
+ return hidden_states
+
+
+class AMPBlock(torch.nn.Module):
+ def __init__(
+ self,
+ channels,
+ kernel_size=3,
+ dilation=(1, 3, 5),
+ ):
+ super().__init__()
+
+ self.convs1 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[0],
+ padding=self._get_padding(kernel_size, dilation[0]),
+ ),
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[1],
+ padding=self._get_padding(kernel_size, dilation[1]),
+ ),
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=dilation[2],
+ padding=self._get_padding(kernel_size, dilation[2]),
+ ),
+ ]
+ )
+
+ self.convs2 = nn.ModuleList(
+ [
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=self._get_padding(kernel_size, 1),
+ ),
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=self._get_padding(kernel_size, 1),
+ ),
+ nn.Conv1d(
+ channels,
+ channels,
+ kernel_size,
+ 1,
+ dilation=1,
+ padding=self._get_padding(kernel_size, 1),
+ ),
+ ]
+ )
+
+ self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers
+
+ self.activations = nn.ModuleList(
+ [TorchActivation1d(activation=SnakeBeta(channels)) for _ in range(self.num_layers)]
+ )
+
+ def _get_padding(self, kernel_size, dilation=1):
+ return int((kernel_size * dilation - dilation) / 2)
+
+ def forward(self, hidden_states):
+ acts1, acts2 = self.activations[::2], self.activations[1::2]
+ for conv1, conv2, act1, act2 in zip(self.convs1, self.convs2, acts1, acts2):
+ residual = hidden_states
+ hidden_states = act1(hidden_states)
+ hidden_states = conv1(hidden_states)
+ hidden_states = act2(hidden_states)
+ hidden_states = conv2(hidden_states)
+ hidden_states = residual + hidden_states
+
+ return hidden_states
+
+
+@auto_docstring(
+ custom_intro="""
+    The full Qwen2.5Omni Token2WavBigVGAN model, which takes a mel spectrogram as input and predicts a waveform.
+ """
+)
+class Qwen2_5OmniToken2WavBigVGANModel(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniBigVGANConfig
+ input_modalities = "audio"
+
+ def __init__(self, config: Qwen2_5OmniBigVGANConfig):
+ super().__init__(config)
+ self.num_residual_blocks = len(config.resblock_kernel_sizes)
+ self.num_upsample_layers = len(config.upsample_rates)
+
+ self.conv_pre = nn.Conv1d(config.mel_dim, config.upsample_initial_channel, 7, 1, padding=3)
+
+ # Removing extra ModuleList breaks official state dict
+ ups = [
+ nn.ModuleList(
+ [
+ nn.ConvTranspose1d(
+ config.upsample_initial_channel // (2**layer_idx),
+ config.upsample_initial_channel // (2 ** (layer_idx + 1)),
+ kernel_size,
+ stride,
+ padding=(kernel_size - stride) // 2,
+ )
+ ]
+ )
+ for layer_idx, (stride, kernel_size) in enumerate(zip(config.upsample_rates, config.upsample_kernel_sizes))
+ ]
+ self.ups = nn.ModuleList(ups)
+
+ self.resblocks = nn.ModuleList(
+ [
+ AMPBlock(config.upsample_initial_channel // (2 ** (layer_idx + 1)), kernel_size, dilation)
+ for layer_idx in range(self.num_upsample_layers)
+ for kernel_size, dilation in zip(config.resblock_kernel_sizes, config.resblock_dilation_sizes)
+ ]
+ )
+
+ self.activation_post = TorchActivation1d(
+ activation=SnakeBeta(config.upsample_initial_channel // (2**self.num_upsample_layers))
+ )
+ self.conv_post = nn.Conv1d(
+ config.upsample_initial_channel // (2**self.num_upsample_layers), 1, 7, 1, padding=3, bias=False
+ )
+
+ self.post_init()
+
+ def normalize_spectrogram(self, spectrogram, max_value, min_db):
+ return torch.clamp((2 * max_value) * ((spectrogram - min_db) / (-min_db)) - max_value, -max_value, max_value)
+
+ def amplitude_to_db(self, amplitude, min_db_level):
+ min_level = torch.exp(
+ torch.tensor(min_db_level / 20.0 * np.log(10), device=amplitude.device, dtype=amplitude.dtype)
+ )
+ return 20 * torch.log10(torch.clamp(amplitude, min=min_level))
+
+ def process_mel_spectrogram(self, mel_spectrogram):
+ amplitude_spectrum = torch.exp(mel_spectrogram)
+ decibel_spectrum = self.amplitude_to_db(amplitude_spectrum, -115) - 20
+ return self.normalize_spectrogram(decibel_spectrum, 1, -115)
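+    # Worked example (illustration): an amplitude of 1.0 gives 0 dB, shifted by
+    # the 20 dB reference to -20 dB, which the [-1, 1] rescaling maps to
+    # 2 * (-20 + 115) / 115 - 1 ≈ 0.652, with everything below -115 dB clamped.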
+
+ def forward(self, mel_spectrogram, **kwargs):
+ processed_spectrogram = self.process_mel_spectrogram(mel_spectrogram)
+ hidden_representation = self.conv_pre(processed_spectrogram)
+
+ for layer_index in range(self.num_upsample_layers):
+ hidden_representation = self.ups[layer_index][0](hidden_representation)
+ residual_output = sum(
+ self.resblocks[layer_index * self.num_residual_blocks + block_index](hidden_representation)
+ for block_index in range(self.num_residual_blocks)
+ )
+ residual_output = residual_output / self.num_residual_blocks
+ hidden_representation = residual_output
+
+ hidden_representation = self.activation_post(hidden_representation)
+ output_waveform = self.conv_post(hidden_representation)
+ return torch.clamp(output_waveform, min=-1.0, max=1.0).squeeze().cpu()
+
+
+class RungeKutta4ODESolver:
+ def __init__(self, function, initial_value):
+ self.function = function
+ self.initial_value = initial_value
+
+ self._one_third = 1 / 3
+ self._two_thirds = 2 / 3
+
+ def _rk4_step(self, function, time_start, time_step, time_end, value_start, function_value_start=None):
+ k1 = function_value_start if function_value_start is not None else function(time_start, value_start)
+ k2 = function(time_start + time_step * self._one_third, value_start + time_step * k1 * self._one_third)
+ k3 = function(time_start + time_step * self._two_thirds, value_start + time_step * (k2 - k1 * self._one_third))
+ k4 = function(time_end, value_start + time_step * (k1 - k2 + k3))
+ return (k1 + 3 * (k2 + k3) + k4) * time_step / 8
+
+ def _compute_step(self, function, time_start, time_step, time_end, value_start):
+ function_value_start = function(time_start, value_start)
+ return self._rk4_step(
+ function, time_start, time_step, time_end, value_start, function_value_start=function_value_start
+ ), function_value_start
+
+ def _linear_interpolation(self, time_start, time_end, value_start, value_end, time_point):
+ if time_point == time_start:
+ return value_start
+ if time_point == time_end:
+ return value_end
+ weight = (time_point - time_start) / (time_end - time_start)
+ return value_start + weight * (value_end - value_start)
+
+ def integrate(self, time_points):
+ solution = torch.empty(
+ len(time_points),
+ *self.initial_value.shape,
+ dtype=self.initial_value.dtype,
+ device=self.initial_value.device,
+ )
+ solution[0] = self.initial_value
+
+ current_index = 1
+ current_value = self.initial_value
+ for time_start, time_end in zip(time_points[:-1], time_points[1:]):
+ time_step = time_end - time_start
+ delta_value, _ = self._compute_step(self.function, time_start, time_step, time_end, current_value)
+ next_value = current_value + delta_value
+
+ while current_index < len(time_points) and time_end >= time_points[current_index]:
+ solution[current_index] = self._linear_interpolation(
+ time_start, time_end, current_value, next_value, time_points[current_index]
+ )
+ current_index += 1
+
+ current_value = next_value
+
+ return solution
+
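+
+# Illustration only: the stepper above is Kutta's 3/8-rule RK4 variant, with
+# update h * (k1 + 3*k2 + 3*k3 + k4) / 8. Integrating dy/dt = -y from y(0) = 1
+# over [0, 1] should closely track exp(-t).
+def _demo_rk4_solver():
+    y0 = torch.tensor([1.0])
+    solver = RungeKutta4ODESolver(function=lambda t, y: -y, initial_value=y0)
+    trajectory = solver.integrate(torch.linspace(0.0, 1.0, 11))
+    assert torch.allclose(trajectory[-1], torch.exp(torch.tensor([-1.0])), atol=1e-4)
+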
+
+@auto_docstring(
+ custom_intro="""
+    The full Qwen2.5Omni Token2WavDiT model, which takes speech tokens as input and predicts a mel spectrogram.
+ """
+)
+class Qwen2_5OmniToken2WavDiTModel(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniDiTConfig
+ input_modalities = "audio"
+ _no_split_modules = ["DiTDecoderLayer"]
+
+ def __init__(self, config: Qwen2_5OmniDiTConfig):
+ super().__init__(config)
+ self.mel_dim = config.mel_dim
+ self.repeats = config.repeats
+ self.time_embed = DiTTimestepEmbedding(config.hidden_size)
+
+ self.text_embed = DiTCodecEmbedding(config.num_embeds, config.emb_dim, config.repeats)
+ self.input_embed = DiTInputEmbedding(config)
+
+ self.rotary_embed = Qwen2_5OmniDiTRotaryEmbedding(config=config)
+
+ self.hidden_size = config.hidden_size
+ self.layers = config.num_hidden_layers
+ self.block_size = config.block_size
+ self.num_attention_heads = config.num_attention_heads
+
+ self.transformer_blocks = nn.ModuleList()
+ for i in range(config.num_hidden_layers):
+ self.transformer_blocks.append(
+ DiTDecoderLayer(
+ config,
+ look_ahead_block=1 if i in config.look_ahead_layers else 0,
+ look_backward_block=1 if i in config.look_backward_layers else 0,
+ )
+ )
+
+ self.norm_out = Qwen2_5_OmniAdaLayerNormZero_Final(config.hidden_size) # final modulation
+ self.proj_out = nn.Linear(config.hidden_size, config.mel_dim)
+
+ self.post_init()
+
+ def _create_block_diff(self, hidden_states):
+ batch, seq_len = hidden_states.shape[0], hidden_states.shape[1]
+ block_indices = torch.arange(seq_len, device=hidden_states.device) // self.block_size # [seq_length]
+
+ block_i = block_indices.unsqueeze(1) # [seq_length, 1]
+ block_j = block_indices.unsqueeze(0) # [1, seq_length]
+ block_diff = block_j - block_i # (n, n)
+
+ return block_diff.expand(batch, self.num_attention_heads, seq_len, seq_len)
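+
+    # Example (illustration): with block_size = 2 and seq_len = 4, positions fall
+    # into blocks [0, 0, 1, 1] and block_diff[i, j] = block(j) - block(i) is
+    #     [[ 0,  0,  1,  1],
+    #      [ 0,  0,  1,  1],
+    #      [-1, -1,  0,  0],
+    #      [-1, -1,  0,  0]]
+    # so a layer with look_backward_block = 1 and look_ahead_block = 0 keeps the
+    # entries in [-1, 0], i.e. each block attends to itself and one block back.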
+
+ def forward(
+ self,
+ hidden_states,
+ condition_vector,
+ speaker_embedding,
+ quantized_code,
+ time_step,
+ drop_audio_conditioning=False,
+ drop_code=False,
+ apply_cfg=True,
+ **kwargs,
+ ):
+ batch_size = hidden_states.shape[0]
+ if time_step.ndim == 0:
+ time_step = time_step.repeat(batch_size)
+
+ # Compute embeddings
+ time_embedding = self.time_embed(time_step)
+ text_embedding = self.text_embed(quantized_code, drop_code=False if apply_cfg else drop_code)
+ text_embedding_unconditioned = self.text_embed(quantized_code, drop_code=True) if apply_cfg else None
+
+ hidden_states = self.input_embed(
+ hidden_states,
+ speaker_embedding,
+ condition_vector,
+ text_embedding,
+ drop_audio_cond=drop_audio_conditioning,
+ code_embed_uncond=text_embedding_unconditioned,
+ apply_cfg=apply_cfg,
+ )
+
+ # Compute positional encodings
+ position_ids = torch.arange(hidden_states.shape[1], device=hidden_states.device)
+ position_ids = position_ids[None, :].repeat(batch_size, 1)
+ position_embeddings = self.rotary_embed(hidden_states, position_ids)
+ blockwise_difference = self._create_block_diff(hidden_states)
+
+ # Transformer blocks
+ for transformer_block in self.transformer_blocks:
+ hidden_states = transformer_block(
+ hidden_states,
+ time_embedding,
+ position_embeddings=position_embeddings,
+ block_diff=blockwise_difference,
+ )
+
+ hidden_states = self.norm_out(hidden_states, time_embedding)
+ output = self.proj_out(hidden_states)
+
+ return output
+
+ @torch.no_grad()
+ def sample(
+ self,
+ conditioning_vector,
+ reference_mel_spectrogram,
+ quantized_code,
+ num_steps=10,
+ guidance_scale=0.5,
+ sway_coefficient=-1.0,
+ ):
+ noise_initialization = torch.randn([1, 30000, self.mel_dim], dtype=reference_mel_spectrogram.dtype)
+ maximum_duration = quantized_code.shape[1] * self.repeats
+ initial_state = noise_initialization[:, :maximum_duration].to(quantized_code.device)
+ batch_size = reference_mel_spectrogram.shape[0]
+ conditioning_vector = conditioning_vector.unsqueeze(1).repeat(1, maximum_duration, 1)
+
+ if batch_size != 1:
+ raise ValueError("Only batch size = 1 is currently supported")
+
+ def ode_function(time_step, hidden_states):
+ if guidance_scale < 1e-5:
+ prediction = self(
+ hidden_states=hidden_states,
+ speaker_embedding=conditioning_vector,
+ condition_vector=reference_mel_spectrogram,
+ quantized_code=quantized_code,
+ time_step=time_step,
+ drop_audio_conditioning=False,
+ drop_code=False,
+ )
+ return prediction
+
+ model_output = self(
+ hidden_states=hidden_states,
+ quantized_code=quantized_code,
+ speaker_embedding=conditioning_vector,
+ condition_vector=reference_mel_spectrogram,
+ time_step=time_step,
+ apply_cfg=True,
+ )
+ guided_prediction, null_prediction = torch.chunk(model_output, 2, dim=0)
+ return guided_prediction + (guided_prediction - null_prediction) * guidance_scale
+
+ initial_time = 0
+ time_embedding = torch.linspace(
+ initial_time, 1, num_steps, device=quantized_code.device, dtype=conditioning_vector.dtype
+ )
+
+ if sway_coefficient is not None:
+ time_embedding += sway_coefficient * (torch.cos(torch.pi / 2 * time_embedding) - 1 + time_embedding)
+
+ ode_solver = RungeKutta4ODESolver(function=ode_function, initial_value=initial_state)
+ solution_trajectory = ode_solver.integrate(time_embedding)
+
+ generated_waveform = solution_trajectory[-1]
+ generated_mel_spectrogram = generated_waveform.permute(0, 2, 1)
+ return generated_mel_spectrogram
+
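+# Sway-sampling sketch (illustration): with sway_coefficient = -1.0 the uniform
+# grid t is warped to t + s * (cos(pi / 2 * t) - 1 + t), which keeps the
+# endpoints 0 and 1 fixed while concentrating ODE steps near t = 0, where the
+# flow changes fastest. For example, t = 0.5 maps to
+# 0.5 - (cos(pi / 4) - 1 + 0.5) ≈ 0.293.
+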
+
+@auto_docstring(
+ custom_intro="""
+    The full Qwen2.5Omni Token2Wav model. Consists of a DiT model that takes speech tokens as input and predicts a mel spectrogram, and a BigVGAN vocoder that takes the mel spectrogram as input and predicts the waveform.
+ """
+)
+class Qwen2_5OmniToken2WavModel(Qwen2_5OmniPreTrainedModel):
+ config: Qwen2_5OmniToken2WavConfig
+ base_model_prefix = "model"
+ input_modalities = "audio"
+ _no_split_modules = ["Qwen2_5OmniToken2WavDiTModel", "Qwen2_5OmniToken2WavBigVGANModel"]
+
+ def __init__(self, config: Qwen2_5OmniToken2WavConfig):
+ super().__init__(config)
+ attn_impl = config._attn_implementation
+ if config._attn_implementation == "flash_attention_2":
+ logger.warning_once(
+ "Qwen2_5OmniToken2WavModel must inference with fp32, but flash_attention_2 only supports fp16 and bf16, "
+ "attention implementation of Qwen2_5OmniToken2WavModel will fallback to sdpa."
+ )
+ attn_impl = "sdpa"
+ elif config._attn_implementation == "eager":
+ logger.warning_once(
+ "Qwen2_5OmniToken2WavModel does not support eager attention implementation, fall back to sdpa"
+ )
+ attn_impl = "sdpa"
+ self.code2wav_dit_model = Qwen2_5OmniToken2WavDiTModel._from_config(
+ config.dit_config, attn_implementation=attn_impl
+ )
+ self.code2wav_bigvgan_model = Qwen2_5OmniToken2WavBigVGANModel._from_config(
+ config.bigvgan_config, attn_implementation=attn_impl
+ )
+
+ self.post_init()
+
+ def forward(
+ self,
+ code,
+ conditioning,
+ reference_mel,
+ num_steps=10,
+ guidance_scale=0.5,
+ sway_coefficient=-1.0,
+ **kwargs,
+ ):
+ """Generates a waveform from input code and conditioning parameters."""
+
+ mel_spectrogram = self.code2wav_dit_model.sample(
+ conditioning,
+ reference_mel,
+ code,
+ num_steps=num_steps,
+ guidance_scale=guidance_scale,
+ sway_coefficient=sway_coefficient,
+ )
+
+ waveform = self.code2wav_bigvgan_model(mel_spectrogram)
+
+ return waveform
+
+
+############################
+# Start Qwen2.5Omni #
+############################
+
+
+@auto_docstring(
+ custom_intro="""
+ The full Qwen2.5Omni model, a multimodal model composed of 3 sub-models:
+ - [`Qwen2_5OmniThinkerForConditionalGeneration`]:
+        a causal auto-regressive transformer that takes text, audio, image and video as input and predicts text tokens.
+ - [`Qwen2_5OmniTalkerForConditionalGeneration`]:
+        a causal auto-regressive transformer that takes the thinker's hidden states and response as input and predicts speech tokens.
+ - [`Qwen2_5OmniToken2WavModel`]:
+        a DiT model that takes speech tokens as input and predicts a mel spectrogram, plus a BigVGAN vocoder that takes the mel spectrogram as input and predicts the waveform.
+ """
+)
+class Qwen2_5OmniForConditionalGeneration(Qwen2_5OmniPreTrainedModel, GenerationMixin):
+ config: Qwen2_5OmniConfig
+ output_modalities = ("audio", "text")
+ _no_split_modules = [
+ "Qwen2_5OmniTalkerForConditionalGeneration",
+ "Qwen2_5OmniToken2WavModel",
+ ]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.thinker = Qwen2_5OmniThinkerForConditionalGeneration(config.thinker_config)
+
+ self.has_talker = config.enable_audio_output
+ self.speaker_map = {}
+ if config.enable_audio_output:
+ self.enable_talker()
+ self.post_init()
+
+ def enable_talker(self):
+ self.talker = Qwen2_5OmniTalkerForConditionalGeneration(self.config.talker_config)
+ self.token2wav = Qwen2_5OmniToken2WavModel(self.config.token2wav_config)
+ self.token2wav.float()
+ self.has_talker = True
+
+ def load_speakers(self, path):
+ check_torch_load_is_safe()
+ for key, value in torch.load(path, weights_only=True).items():
+ self.speaker_map[key] = value
+ logger.info(f"Speaker {list(self.speaker_map.keys())} loaded")
+
+ def disable_talker(self):
+ if hasattr(self, "talker"):
+ del self.talker
+ if hasattr(self, "token2wav"):
+ del self.token2wav
+ self.has_talker = False
+
+ @classmethod
+ def from_pretrained(
+ cls,
+ pretrained_model_name_or_path,
+ *model_args,
+ config=None,
+ cache_dir=None,
+ ignore_mismatched_sizes=False,
+ force_download=False,
+ local_files_only=False,
+ token=None,
+ revision="main",
+ use_safetensors=None,
+ weights_only=True,
+ **kwargs,
+ ):
+ model = super().from_pretrained(
+ pretrained_model_name_or_path,
+ *model_args,
+ config=config,
+ cache_dir=cache_dir,
+ ignore_mismatched_sizes=ignore_mismatched_sizes,
+ force_download=force_download,
+ local_files_only=local_files_only,
+ token=token,
+ revision=revision,
+ use_safetensors=use_safetensors,
+ weights_only=weights_only,
+ **kwargs,
+ )
+ spk_path = cached_file(
+ pretrained_model_name_or_path,
+ "spk_dict.pt",
+ subfolder=kwargs.pop("subfolder", None),
+ cache_dir=kwargs.pop("cache_dir", None),
+ force_download=kwargs.pop("force_download", False),
+ proxies=kwargs.pop("proxies", None),
+ local_files_only=kwargs.pop("local_files_only", False),
+ token=token,
+ revision=kwargs.pop("revision", None),
+ )
+ if spk_path is None:
+ raise ValueError(f"""{pretrained_model_name_or_path}/{spk_path} not exists""")
+ model.load_speakers(spk_path)
+
+ return model
+
+ @torch.no_grad()
+ @deprecate_kwarg("return_audio", version="v5", new_name="generation_mode")
+ # TODO: raushan, defaults should be saved in generation config
+ def generate(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ speaker: str = "Chelsie",
+ use_audio_in_video: bool = False,
+ thinker_max_new_tokens: int = 1024,
+ talker_max_new_tokens: int = 4096,
+ talker_do_sample: bool = True,
+ talker_top_k: int = 40,
+ talker_top_p: float = 0.8,
+ talker_temperature: float = 0.9,
+ talker_eos_token_id: list[int] = [8292, 8294],
+ talker_repetition_penalty: float = 1.05,
+ **kwargs,
+ ):
+ r"""
+ Generate text response and audio from input.
+
+ Args:
+ input_ids (`Optional[torch.Tensor]`, *optional*):
+                Input ids, which should be obtained from the processor.
+ speaker (`str` , defaults to "Chelsie"):
+                Which speaker to use for the audio response.
+ use_audio_in_video (`bool`, defaults to False):
+                Whether or not to use the audio track in the video; should match the parameter passed to `process_audio_info`.
+ generation_mode (`Optional[str]`, *optional*):
+                Whether or not to return the response in audio format. Using `generation_mode="audio"` requires audio output to be enabled via `config.enable_audio_output`.
+ kwargs (*optional*):
+ - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model.
+ - With a *thinker_*, *talker_*, *token2wav_* prefix, they will be input for the `generate` method of the
+ thinker, talker and token2wav respectively. It has the priority over the keywords without a prefix.
+ Returns:
+            When `generation_mode="text"`:
+ - **Text** (`torch.Tensor`): Generated text token sequence.
+            Otherwise:
+ - **Text** (`torch.Tensor`): Generated text token sequence.
+ - **Audio waveform** (`torch.Tensor`): Generated audio waveform.
+ """
+        # check `False` on purpose because the parameter can be `str/bool`. This is needed for BC
+        generation_mode = kwargs.pop("generation_mode", None)
+        if generation_mode is None:
+            return_audio = self.has_talker
+        else:
+            return_audio = generation_mode != "text" and generation_mode is not False
+
+ if speaker not in self.speaker_map:
+ raise ValueError(f"{speaker} is not available, available speakers: {self.speaker_map.keys()}")
+ if return_audio and not self.has_talker:
+ raise ValueError(
+ "Cannot use talker when talker module not initialized. Use `enable_talker` method or set enable_talker in config to enable talker."
+ )
+ if input_ids.shape[0] != 1 and return_audio:
+ raise NotImplementedError("Qwen2.5-Omni currently does not support batched inference with audio output")
+
+ shared_kwargs = {"use_audio_in_video": use_audio_in_video}
+ thinker_kwargs = {
+ "max_new_tokens": thinker_max_new_tokens,
+ }
+ talker_kwargs = {
+ "max_new_tokens": talker_max_new_tokens,
+ "do_sample": talker_do_sample,
+ "top_k": talker_top_k,
+ "top_p": talker_top_p,
+ "temperature": talker_temperature,
+ "eos_token_id": talker_eos_token_id,
+ "repetition_penalty": talker_repetition_penalty,
+ }
+ token2wav_kwargs = {}
+
+ for key, value in kwargs.items():
+ if key.startswith("thinker_"):
+ thinker_kwargs[key[len("thinker_") :]] = value
+ elif key.startswith("talker_"):
+ talker_kwargs[key[len("talker_") :]] = value
+ elif key.startswith("token2wav_"):
+ token2wav_kwargs[key[len("token2wav_") :]] = value
+ # Process special input values
+ elif key == "feature_attention_mask":
+ thinker_kwargs[key] = value
+ talker_kwargs["audio_feature_lengths"] = torch.sum(value, dim=1)
+ elif key == "input_features" or key == "attention_mask":
+ thinker_kwargs[key] = value
+ # Put other key to shared kwargs
+ else:
+ shared_kwargs[key] = value
+
+ # Merge kwargs
+ for key, value in shared_kwargs.items():
+ if key not in thinker_kwargs:
+ thinker_kwargs[key] = value
+ if key not in talker_kwargs:
+ talker_kwargs[key] = value
+ if key not in token2wav_kwargs:
+ token2wav_kwargs[key] = value
+ speaker_params = self.speaker_map[speaker]
+
+ # 1. Generate from thinker module
+ generate_audio = return_audio and self.has_talker
+ if generate_audio:
+ thinker_kwargs["output_hidden_states"] = True
+ thinker_kwargs["return_dict_in_generate"] = True
+
+ thinker_result = self.thinker.generate(input_ids=input_ids, **thinker_kwargs)
+
+ if not generate_audio:
+ return thinker_result
+
+ # 2. Generate speech tokens from talker module
+ embeds_to_talker = thinker_result.hidden_states[0][0].clone().to(input_ids.device)
+ if thinker_kwargs.get("input_features") is not None:
+ audio_ids_mask = input_ids == self.config.thinker_config.audio_token_index
+ audio_mask = audio_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker)
+ audio_mask_tensor = torch.zeros(
+ [audio_ids_mask.sum(), embeds_to_talker.shape[-1]],
+ dtype=embeds_to_talker.dtype,
+ device=input_ids.device,
+ )
+ embeds_to_talker.masked_scatter_(audio_mask, audio_mask_tensor)
+ if thinker_kwargs.get("pixel_values") is not None:
+ image_ids_mask = input_ids == self.config.thinker_config.image_token_index
+ image_mask = image_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker)
+ image_mask_tensor = torch.zeros(
+ [image_ids_mask.sum(), embeds_to_talker.shape[-1]],
+ dtype=embeds_to_talker.dtype,
+ device=input_ids.device,
+ )
+ embeds_to_talker.masked_scatter_(image_mask, image_mask_tensor)
+ if thinker_kwargs.get("pixel_values_videos") is not None:
+ video_ids_mask = input_ids == self.config.thinker_config.video_token_index
+ video_mask = video_ids_mask.unsqueeze(-1).expand_as(embeds_to_talker)
+ video_mask_tensor = torch.zeros(
+ [video_ids_mask.sum(), embeds_to_talker.shape[-1]],
+ dtype=embeds_to_talker.dtype,
+ device=input_ids.device,
+ )
+ embeds_to_talker.masked_scatter_(video_mask, video_mask_tensor)
+
+ processed_thinker_hidden = (
+ (embeds_to_talker,) + thinker_result.hidden_states[0][1:],
+ ) + thinker_result.hidden_states[1:]
+ thinker_generate_ids = thinker_result.sequences[:, input_ids.size(1) :].to(input_ids.device)
+ thinker_token_embeds = [
+ token_hidden_states[0].to(input_ids.device) for token_hidden_states in processed_thinker_hidden
+ ]
+ thinker_hidden_states = [
+ token_hidden_states[-1].to(input_ids.device) for token_hidden_states in processed_thinker_hidden
+ ]
+
+ talker_text_bos_token = speaker_params["bos_token"]
+ talker_input_text_ids = torch.cat(
+ [
+ input_ids,
+ torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device),
+ thinker_generate_ids[:, :1],
+ ],
+ dim=-1,
+ )
+
+ talker_input_ids = torch.cat(
+ [
+ torch.full_like(input_ids, fill_value=self.talker.codec_mask_token),
+ torch.tensor([[self.talker.codec_pad_token]], dtype=torch.long, device=input_ids.device),
+ torch.tensor([[self.talker.codec_bos_token]], dtype=torch.long, device=input_ids.device),
+ ],
+ dim=1,
+ )
+
+ thinker_embed_tokens = self.thinker.get_input_embeddings()
+ thinker_reply_part = torch.cat(thinker_hidden_states[1:], dim=1) + torch.cat(thinker_token_embeds[1:], dim=1)
+ talker_inputs_embeds = thinker_hidden_states[0] + thinker_token_embeds[0]
+ talker_text_bos_token = torch.tensor([[talker_text_bos_token]], dtype=torch.long, device=input_ids.device)
+ talker_text_bos_embed = thinker_embed_tokens(talker_text_bos_token).to(input_ids.device)
+ talker_inputs_embeds = torch.cat(
+ [
+ talker_inputs_embeds,
+ talker_text_bos_embed,
+ thinker_reply_part[:, :1, :],
+ ],
+ dim=1,
+ )
+
+ eos_token = torch.tensor([[self.talker.text_eos_token]], dtype=torch.long, device=input_ids.device)
+ eos_embedding = thinker_embed_tokens(eos_token).to(input_ids.device)
+
+ pad_token = torch.tensor([[self.talker.text_pad_token]], dtype=torch.long, device=input_ids.device)
+ pad_embedding = thinker_embed_tokens(pad_token).to(input_ids.device)
+
+ thinker_reply_part = torch.cat(
+ [
+ thinker_reply_part[:, 1:, :],
+ eos_embedding,
+ pad_embedding,
+ ],
+ dim=1,
+ )
+
+ talker_attention_mask = None
+ if "attention_mask" in kwargs:
+ talker_attention_mask = torch.cat(
+ [kwargs["attention_mask"], kwargs["attention_mask"].new_ones((1, 2))], dim=1
+ ).to(input_ids.device)
+
+ talker_result = self.talker.generate(
+ input_ids=talker_input_ids,
+ input_text_ids=talker_input_text_ids,
+ thinker_reply_part=thinker_reply_part,
+ inputs_embeds=talker_inputs_embeds,
+ attention_mask=talker_attention_mask,
+ suppress_tokens=[self.talker.codec_bos_token],
+ **{k: (v.to(input_ids.device) if torch.is_tensor(v) else v) for k, v in talker_kwargs.items()},
+ )
+ talker_generate_codes = talker_result[:, talker_input_ids.shape[1] : -1]
+
+ # 3. Generate wavs from code
+ if self.token2wav.dtype != torch.float:
+ self.token2wav.float()
+
+ wav = self.token2wav(
+ talker_generate_codes.to(input_ids.device),
+ conditioning=speaker_params["cond"].to(input_ids.device).float(),
+ reference_mel=speaker_params["ref_mel"].to(input_ids.device).float(),
+ **token2wav_kwargs,
+ )
+
+ return thinker_result.sequences, wav.float()
+
+
+__all__ = [
+ "Qwen2_5OmniConfig",
+ "Qwen2_5OmniThinkerConfig",
+ "Qwen2_5OmniTalkerConfig",
+ "Qwen2_5OmniToken2WavConfig",
+ "Qwen2_5OmniForConditionalGeneration",
+ "Qwen2_5OmniThinkerTextModel",
+ "Qwen2_5OmniThinkerForConditionalGeneration",
+ "Qwen2_5OmniTalkerModel",
+ "Qwen2_5OmniTalkerForConditionalGeneration",
+ "Qwen2_5OmniToken2WavDiTModel",
+ "Qwen2_5OmniToken2WavBigVGANModel",
+ "Qwen2_5OmniToken2WavModel",
+ "Qwen2_5OmniPreTrainedModel",
+ "Qwen2_5OmniPreTrainedModelForConditionalGeneration",
+]
diff --git a/mllm/models/qwen2_5omni/python_src_code/name.py b/mllm/models/qwen2_5omni/python_src_code/name.py
new file mode 100644
index 000000000..87e194c17
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/name.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+"""
+Print all key names in a safetensors model file.
+Usage: python print_safetensors_keys.py <file path or directory>
+"""
+
+import sys
+from pathlib import Path
+
+try:
+ from safetensors import safe_open
+except ImportError:
+ print("请先安装 safetensors 库:")
+ print(" pip install safetensors")
+ sys.exit(1)
+
+
+def print_keys_from_file(filepath: Path):
+ """打印单个 safetensors 文件的所有键名"""
+ print(f"\n{'='*60}")
+ print(f"文件: {filepath.name}")
+ print(f"{'='*60}")
+
+ with safe_open(filepath, framework="pt") as f:
+ keys = f.keys()
+ print(f"共 {len(keys)} 个键:\n")
+ for i, key in enumerate(keys, 1):
+ # 获取张量的形状信息
+ tensor = f.get_tensor(key)
+ shape = tuple(tensor.shape)
+ dtype = tensor.dtype
+ print(f"{key}")
+
+
+def main():
+ if len(sys.argv) < 2:
+ print("用法: python print_safetensors_keys.py <文件路径或目录>")
+ print("示例:")
+ print(" python print_safetensors_keys.py model.safetensors")
+ print(" python print_safetensors_keys.py ./models/")
+ sys.exit(1)
+
+ path = Path(sys.argv[1])
+
+ if path.is_file():
+        # single file
+        if path.suffix == ".safetensors":
+            print_keys_from_file(path)
+        else:
+            print(f"Error: {path} is not a .safetensors file")
+            sys.exit(1)
+    elif path.is_dir():
+        # directory: look for all safetensors files in it
+        files = sorted(path.glob("*.safetensors"))
+        if not files:
+            print(f"Error: no .safetensors files found in {path}")
+            sys.exit(1)
+
+        print(f"Found {len(files)} safetensors file(s)")
+
+ total_keys = 0
+ for filepath in files:
+ print_keys_from_file(filepath)
+ with safe_open(filepath, framework="pt") as f:
+ total_keys += len(f.keys())
+
+ print(f"\n{'='*60}")
+ print(f"总计: {len(files)} 个文件, {total_keys} 个键")
+ else:
+ print(f"错误: {path} 不存在")
+ sys.exit(1)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/mllm/models/qwen2_5omni/python_src_code/preprocessor_config.json b/mllm/models/qwen2_5omni/python_src_code/preprocessor_config.json
new file mode 100644
index 000000000..63c79f1c4
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/preprocessor_config.json
@@ -0,0 +1,31 @@
+{
+ "chunk_length": 300,
+ "dither": 0.0,
+ "feature_extractor_type": "WhisperFeatureExtractor",
+ "feature_size": 128,
+ "hop_length": 160,
+ "image_mean": [
+ 0.48145466,
+ 0.4578275,
+ 0.40821073
+ ],
+ "image_processor_type": "Qwen2VLImageProcessor",
+ "image_std": [
+ 0.26862954,
+ 0.26130258,
+ 0.27577711
+ ],
+ "max_pixels": 12845056,
+ "merge_size": 2,
+ "min_pixels": 3136,
+ "n_fft": 400,
+ "n_samples": 4800000,
+ "nb_max_frames": 30000,
+ "padding_side": "right",
+ "padding_value": 0.0,
+ "patch_size": 14,
+ "processor_class": "Qwen2_5OmniProcessor",
+ "return_attention_mask": true,
+ "sampling_rate": 16000,
+ "temporal_patch_size": 2
+}
diff --git a/mllm/models/qwen2_5omni/python_src_code/processing_qwen2_5_omni.py b/mllm/models/qwen2_5omni/python_src_code/processing_qwen2_5_omni.py
new file mode 100644
index 000000000..55906c8f8
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/processing_qwen2_5_omni.py
@@ -0,0 +1,405 @@
+# coding=utf-8
+# Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+#
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for Qwen2.5Omni.
+"""
+
+import logging
+import re
+from typing import Optional, Union
+
+import numpy as np
+
+from ...feature_extraction_utils import BatchFeature
+from ...image_utils import ImageInput
+from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, VideosKwargs
+from ...tokenization_utils_base import AudioInput, PreTokenizedInput, TextInput
+from ...video_utils import VideoInput
+
+
+# Redefine kwargs for videos because Qwen-Omni uses some kwargs for omni processing
+# that are not used by the video processor class
+class Qwen2_5_OmniVideosKwargs(VideosKwargs, total=False):
+ min_pixels: int
+ max_pixels: int
+ patch_size: int
+ temporal_patch_size: int
+ merge_size: int
+ min_frames: int
+ max_frames: int
+ use_audio_in_video: bool
+ seconds_per_chunk: float
+ position_id_per_seconds: Union[int, float]
+
+
+class Qwen2_5OmniProcessorKwargs(ProcessingKwargs, total=False):
+ videos_kwargs: Qwen2_5_OmniVideosKwargs
+
+ _defaults = {
+ "text_kwargs": {
+ "padding": False,
+ "padding_side": "left",
+ },
+ "videos_kwargs": {
+ "seconds_per_chunk": 2.0,
+ "position_id_per_seconds": 25,
+ "use_audio_in_video": False,
+ "size": {
+ "shortest_edge": 128 * 28 * 28,
+ "longest_edge": 768 * 28 * 28,
+ },
+ },
+ "audio_kwargs": {
+ "sampling_rate": 16000,
+ "padding": "max_length",
+ "return_attention_mask": True,
+ },
+ }
+
+
+class Qwen2_5OmniProcessor(ProcessorMixin):
+ r"""
+ Constructs a Qwen2.5Omni processor.
+ [`Qwen2_5OmniProcessor`] offers all the functionalities of [`Qwen2VLImageProcessor`], [`WhisperFeatureExtractor`], and [`Qwen2TokenizerFast`]. See the
+ [`~Qwen2_5OmniProcessor.__call__`] and [`~Qwen2_5OmniProcessor.decode`] for more information.
+
+ Args:
+ image_processor ([`Qwen2VLImageProcessor`], *optional*):
+ The image processor.
+ video_processor ([`Qwen2VLVideoProcessor`], *optional*):
+ The video processor.
+ feature_extractor ([`WhisperFeatureExtractor`], *optional*):
+ The audio feature extractor.
+ tokenizer ([`Qwen2TokenizerFast`], *optional*):
+ The text tokenizer.
+ chat_template (`Optional[str]`, *optional*):
+ The Jinja template to use for formatting the conversation. If not provided, the default chat template is used.
+ """
+
+ def __init__(
+ self, image_processor=None, video_processor=None, feature_extractor=None, tokenizer=None, chat_template=None
+ ):
+ super().__init__(image_processor, video_processor, feature_extractor, tokenizer, chat_template=chat_template)
+ self.image_token = self.tokenizer.image_token
+ self.audio_token = self.tokenizer.audio_token
+ self.video_token = self.tokenizer.video_token
+ self.vision_bos_token = self.tokenizer.vision_bos_token
+ self.vision_eos_token = self.tokenizer.vision_eos_token
+ self.audio_bos_token = self.tokenizer.audio_bos_token
+ self.audio_eos_token = self.tokenizer.audio_eos_token
+
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
+ images: Optional[ImageInput] = None,
+ videos: Optional[VideoInput] = None,
+ audio: Optional[AudioInput] = None,
+ **kwargs: Unpack[Qwen2_5OmniProcessorKwargs],
+ ) -> BatchFeature:
+ """
+        Main method to prepare one or several sequence(s) and audio(s) for the model. This method forwards the `text`
+        and `kwargs` arguments to Qwen2TokenizerFast's [`~Qwen2TokenizerFast.__call__`] if `text` is not `None` to encode
+        the text. To prepare the audio(s), this method forwards the `audio` and `kwargs` arguments to
+        WhisperFeatureExtractor's [`~WhisperFeatureExtractor.__call__`] if `audio` is not `None`. To prepare the vision inputs,
+        this method forwards the `vision_infos` and `kwargs` arguments to Qwen2VLImageProcessor's [`~Qwen2VLImageProcessor.__call__`]
+        if `vision_infos` is not `None`. Please refer to the docstring
+        of the above methods for more information.
+
+ Args:
+ text (`str`, `list[str]`, `list[list[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
+ tensor. Both channels-first and channels-last formats are supported.
+ videos (`np.ndarray`, `torch.Tensor`, `list[np.ndarray]`, `list[torch.Tensor]`):
+                The video or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
+ tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
+ audio (`np.ndarray`, `list[np.ndarray]`):
+ The audio or batch of audio to be prepared. Each audio can be a NumPy array.
+ """
+
+ if text is None:
+ raise ValueError("You need to specify either a `text` input to process.")
+
+ output_kwargs = self._merge_kwargs(
+ Qwen2_5OmniProcessorKwargs,
+ tokenizer_init_kwargs=self.tokenizer.init_kwargs,
+ **kwargs,
+ )
+
+ seconds_per_chunk = output_kwargs["videos_kwargs"].pop("seconds_per_chunk")
+ position_id_per_seconds = output_kwargs["videos_kwargs"].pop("position_id_per_seconds")
+ use_audio_in_video = output_kwargs["videos_kwargs"].pop("use_audio_in_video")
+
+ if audio is not None:
+ output_kwargs["audio_kwargs"]["padding"] = "max_length" # Support "max_length" padding only here
+ audio_inputs = self.feature_extractor(audio, **output_kwargs["audio_kwargs"])
+ audio_inputs["feature_attention_mask"] = audio_inputs.pop(
+ "attention_mask"
+            )  # rename attention_mask to feature_attention_mask to prevent conflicts later on
+ audio_inputs["input_features"] = audio_inputs.pop(
+ "input_features"
+            )  # re-insert input_features under the same key, mirroring the rename above
+ input_lengths = (audio_inputs["feature_attention_mask"].sum(-1) - 1) // 2 + 1
+ audio_lengths = iter((input_lengths - 2) // 2 + 1)
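+            # Worked example (illustration): a 30 s clip at 16 kHz yields 3000 mel
+            # frames with the 10 ms hop; the stride-2 feature conv leaves
+            # (3000 - 1) // 2 + 1 = 1500 frames, and the encoder's pooling leaves
+            # (1500 - 2) // 2 + 1 = 750 audio tokens for placeholder expansion.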
+ else:
+ audio_inputs = {}
+ audio_lengths = iter([])
+
+ if images is not None:
+ images_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
+ image_grid_thw = iter(images_inputs["image_grid_thw"])
+ else:
+ images_inputs = {}
+ image_grid_thw = iter([])
+
+ if videos is not None:
+ videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
+
+ fps = output_kwargs["videos_kwargs"].get("fps", 2.0)
+ video_grid_thw = videos_inputs["video_grid_thw"]
+ second_per_grid_ts = [self.video_processor.temporal_patch_size / fps] * len(video_grid_thw)
+ videos_inputs["video_second_per_grid"] = second_per_grid_ts
+
+ video_grid_thw = iter(video_grid_thw)
+ video_second_per_grid = iter(second_per_grid_ts)
+ else:
+ videos_inputs = {}
+ video_grid_thw = iter([])
+ video_second_per_grid = iter([])
+
+ if not isinstance(text, list):
+ text = [text]
+
+ if images is not None or videos is not None or audio is not None:
+ text = self.replace_multimodal_special_tokens(
+ text,
+ audio_lengths,
+ image_grid_thw,
+ video_grid_thw,
+ video_second_per_grid=video_second_per_grid,
+ use_audio_in_video=use_audio_in_video,
+ position_id_per_seconds=position_id_per_seconds,
+ seconds_per_chunk=seconds_per_chunk,
+ )
+
+ texts_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
+
+ return BatchFeature(
+ data={**texts_inputs, **images_inputs, **videos_inputs, **audio_inputs},
+ tensor_type=kwargs.get("return_tensors"),
+ )
+
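+ # A minimal usage sketch (illustrative only: the checkpoint id and the
+ # `waveform` variable are assumptions, not part of this module):
+ #
+ # processor = Qwen2_5OmniProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B")
+ # inputs = processor(
+ # text=["<|audio_bos|><|AUDIO|><|audio_eos|> What is said here?"],
+ # audio=[waveform], # mono np.ndarray, typically sampled at 16 kHz
+ # return_tensors="pt",
+ # )
+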
+ def replace_multimodal_special_tokens(
+ self,
+ text,
+ audio_lengths,
+ image_grid_thw,
+ video_grid_thw,
+ video_second_per_grid,
+ use_audio_in_video,
+ position_id_per_seconds,
+ seconds_per_chunk,
+ ):
+ # Each placeholder expands to grid_t * grid_h * grid_w // merge_size**2 tokens
+ merge_length_image = self.image_processor.merge_size**2
+ merge_length_video = self.video_processor.merge_size**2
+
+ processed_text = []
+ for sample in text:
+ special_tokens = [re.escape(tok) for tok in [self.audio_token, self.image_token, self.video_token]]
+ pattern = "|".join(special_tokens)
+ # re.finditer already scans left to right, so one sort by start offset suffices
+ positions = sorted((match.start(), match.group()) for match in re.finditer(pattern, sample))
+
+ for _, special_token in positions:
+ if special_token == self.audio_token:
+ sample = sample.replace(self.audio_token, "<|audio_placeholder|>" * next(audio_lengths), 1)
+ elif special_token == self.image_token:
+ image_seq_length = next(image_grid_thw).prod() // merge_length_image
+ sample = sample.replace(self.image_token, "<|image_placeholder|>" * image_seq_length, 1)
+ elif special_token == self.video_token:
+ if not use_audio_in_video:
+ video_seq_length = next(video_grid_thw).prod() // merge_length_video
+ sample = sample.replace(self.video_token, "<|video_placeholder|>" * video_seq_length, 1)
+ else:
+ audio_token_indices = np.arange(next(audio_lengths))
+ curr_video_grid_thw = next(video_grid_thw)
+ height = curr_video_grid_thw[1] // self.video_processor.merge_size
+ width = curr_video_grid_thw[2] // self.video_processor.merge_size
+ video_token_indices = np.arange(curr_video_grid_thw[0]).reshape(-1, 1, 1)
+ video_token_indices = np.broadcast_to(
+ video_token_indices, (video_token_indices.shape[0], height, width)
+ ).reshape(-1)
+ video_token_indices = (
+ video_token_indices * next(video_second_per_grid) * position_id_per_seconds
+ )
+
+ tokens_per_chunk = int(position_id_per_seconds * seconds_per_chunk)
+ video_chunk_indexes = self.get_chunked_index(video_token_indices, tokens_per_chunk)
+ audio_chunk_indexes = self.get_chunked_index(audio_token_indices, tokens_per_chunk)
+
+ placeholder_string = self.vision_bos_token + self.audio_bos_token
+ for j in range(max(len(video_chunk_indexes), len(audio_chunk_indexes))):
+ if j < len(video_chunk_indexes):
+ video_seq_length = video_chunk_indexes[j][1] - video_chunk_indexes[j][0]
+ placeholder_string += "<|video_placeholder|>" * video_seq_length
+ if j < len(audio_chunk_indexes):
+ audio_seq_length = audio_chunk_indexes[j][1] - audio_chunk_indexes[j][0]
+ placeholder_string += "<|audio_placeholder|>" * audio_seq_length
+ placeholder_string += self.audio_eos_token + self.vision_eos_token
+ sample = sample.replace(
+ self.vision_bos_token + self.video_token + self.vision_eos_token,
+ placeholder_string,
+ 1,
+ )
+
+ sample = sample.replace("<|audio_placeholder|>", self.audio_token)
+ sample = sample.replace("<|image_placeholder|>", self.image_token)
+ sample = sample.replace("<|video_placeholder|>", self.video_token)
+ processed_text.append(sample)
+ return processed_text
+
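+ # Illustrative expansion (numbers made up): one <|IMAGE|> token whose grid is
+ # (t=1, h=16, w=16) with merge_size=2 becomes 1 * 16 * 16 // 2**2 = 64
+ # consecutive <|IMAGE|> tokens, matching the merged vision patches the model sees.
+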
+ def get_chunked_index(self, token_indices: np.ndarray, tokens_per_chunk: int) -> list[tuple[int, int]]:
+ """
+ Splits token index list into chunks based on token value ranges.
+
+ Given a list of token indices, returns a list of (start, end) index tuples representing
+ slices of the list where the token values fall within successive ranges of `tokens_per_chunk`.
+
+ For example, if `tokens_per_chunk` is 1000, the function will create chunks such that:
+ - the first chunk contains token values < 1000,
+ - the second chunk contains values >= 1000 and < 2000, and so on.
+
+ Parameters:
+ token_indices (`np.ndarray`): A monotonically increasing array of token index values.
+ tokens_per_chunk (`int`): Number of tokens per chunk (used as the chunk size threshold).
+
+ Returns:
+ `list[tuple[int, int]]`: A list of tuples, each representing the start (inclusive)
+ and end (exclusive) indices of a chunk in `token_indices`.
+ """
+
+ def _iter():
+ i, start_idx = 0, 0
+ current_chunk = 1
+ while i < len(token_indices):
+ # close the current chunk once a token value crosses the next chunk boundary
+ if token_indices[i] >= current_chunk * tokens_per_chunk:
+ yield (start_idx, i)
+ start_idx = i
+ current_chunk += 1
+ i += 1
+ yield (start_idx, len(token_indices))
+
+ return list(_iter())
+
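+ # Worked example (illustrative): with tokens_per_chunk=1000 and
+ # token_indices=[0, 500, 999, 1000, 1999, 2000], the returned chunks are
+ # [(0, 3), (3, 5), (5, 6)]: values < 1000, then < 2000, then the rest.
+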
+ def apply_chat_template(self, conversations, chat_template=None, **kwargs):
+ # A single conversation (a list of message dicts) is temporarily wrapped so the
+ # system-prompt check below can iterate uniformly; it is unwrapped again afterwards.
+ wrapped_single = False
+ if isinstance(conversations[0], dict):
+ conversations = [conversations]
+ wrapped_single = True
+
+ for conversation in conversations:
+ if (
+ conversation[0]["role"] != "system"
+ or conversation[0]["content"][0]["text"]
+ != "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech."
+ ):
+ logging.warning(
+ "System prompt modified; audio output may not work as expected. "
+ + "Audio output mode only works with the default system prompt 'You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.'"
+ )
+ if wrapped_single:
+ conversations = conversations[0]
+
+ return super().apply_chat_template(conversations, chat_template, **kwargs)
+
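+ # Expected conversation layout, as a sketch (roles follow the standard chat
+ # schema; the user text is illustrative):
+ #
+ # conversation = [
+ # {"role": "system", "content": [{"type": "text", "text": "You are Qwen, ..."}]},
+ # {"role": "user", "content": [{"type": "text", "text": "Hello!"}]},
+ # ]
+ # prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
+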
+ def post_process_image_text_to_text(self, generated_outputs, skip_special_tokens=True, **kwargs):
+ """
+ Post-process the output of a VLM to decode the text.
+
+ Args:
+ generated_outputs (`torch.Tensor` or `np.ndarray`):
+ The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
+ or `(sequence_length,)`.
+ skip_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
+ **kwargs:
+ Additional arguments to be passed to the tokenizer's `batch_decode` method.
+
+ Returns:
+ `list[str]`: The decoded text.
+ """
+ return self.tokenizer.batch_decode(generated_outputs[0], skip_special_tokens=skip_special_tokens, **kwargs)
+
+ def post_process_multimodal_output(
+ self, generated_outputs, skip_special_tokens=True, generation_mode=None, **kwargs
+ ):
+ """
+ Post-process the output of a multimodal model to return the requested modality output.
+ If the model cannot generate the requested modality, an error is raised.
+
+ Args:
+ generated_outputs (`torch.Tensor` or `np.ndarray`):
+ The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
+ or `(sequence_length,)`.
+ skip_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
+ generation_mode (`str`, *optional*):
+ Generation mode indicating which modality to output; can be one of `["text", "audio"]`.
+ **kwargs:
+ Additional arguments to be passed to the tokenizer's `batch_decode` method.
+
+ Returns:
+ `list[Union[str, np.ndarray]]`: The decoded text or the generated audio.
+ """
+ if generation_mode is None or generation_mode == "text":
+ return self.post_process_image_text_to_text(
+ generated_outputs, skip_special_tokens=skip_special_tokens, **kwargs
+ )
+
+ elif generation_mode == "audio":
+ # model supports only bs=1, so we will never get several audio outputs
+ audio = generated_outputs[1].reshape(-1).detach().cpu().numpy()
+ return [audio]
+
+ else:
+ raise ValueError(
+ f"{self.__class__.__name__} got an unexpected generation_mode={generation_mode}. Supported options are only `text` and `audio`."
+ )
+
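+ # Sketch of the two supported modes (assumes `outputs` is the tuple returned by
+ # the omni model's `generate`, token ids first, audio tensor second):
+ #
+ # text = processor.post_process_multimodal_output(outputs, generation_mode="text")
+ # wav = processor.post_process_multimodal_output(outputs, generation_mode="audio")[0]
+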
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ feature_extractor_input_names = self.feature_extractor.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ video_processor_input_names = self.video_processor.model_input_names
+ return list(
+ dict.fromkeys(
+ tokenizer_input_names
+ + feature_extractor_input_names
+ + image_processor_input_names
+ + video_processor_input_names
+ + ["feature_attention_mask"]
+ + ["video_second_per_grid"]
+ )
+ )
+
+
+__all__ = ["Qwen2_5OmniProcessor"]
diff --git a/mllm/models/qwen2_5omni/python_src_code/special_tokens_map.json b/mllm/models/qwen2_5omni/python_src_code/special_tokens_map.json
new file mode 100644
index 000000000..8c2a71eaa
--- /dev/null
+++ b/mllm/models/qwen2_5omni/python_src_code/special_tokens_map.json
@@ -0,0 +1,38 @@
+{
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|AUDIO|>",
+ "<|audio_bos|>",
+ "<|audio_eos|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_bos|>",
+ "<|vision_eos|>",
+ "<|vision_pad|>",
+ "<|IMAGE|>",
+ "<|VIDEO|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "image_token": "<|IMAGE|>",
+ "audio_token": "<|AUDIO|>",
+ "video_token": "<|VIDEO|>",
+ "vision_bos_token": "<|vision_bos|>",
+ "vision_eos_token": "<|vision_eos|>",
+ "audio_bos_token": "<|audio_bos|>",
+ "audio_eos_token": "<|audio_eos|>"
+}
\ No newline at end of file
diff --git a/mllm/models/qwen2_5omni/python_src_code/spk_dict.pt b/mllm/models/qwen2_5omni/python_src_code/spk_dict.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cd330273a5362ffbcf4e7cf25db40c888f704679
GIT binary patch
literal 259544
zcmbTdc{r8P`!_0c6p{!ji86&UMA=9wMJWx4N=eaVE~#Xm$B=oRr_f;D`~EylqBLku
zv(ltN8kC0fe1E@lz2|+;Iq&)7xUPL&hI_BQJZs(ex*>RmPxMRJ3s;9y(NJ^?%6wep=yf8|;T*oaj>EosRro6kLBmZ1N~~A*`vcam
z#&Ug+qMp-sR>xks`8pYou7ZX}7@T^2p?#8rdb!mQG24Xm*6YBMD5%llL#Ln|(#OkU
z@0;QjE&VyssiBf2(E$ZGNTWb{5K3U^9_N6Q#H
zsP>i|kcEGu1apNautDw>PA~0)t9CECy#7OYS^(0Q2B1V@4|HPG01KMEI;D)GWrA>e
z{E_Ueogw^V$}qO`qI+&6xMR6+*f@!dS??nYx)X@>vQ(n={V35Z%O(kbI*HB346^uq
zERp-HMOOLdlSi5q9Qt4}oc`+~VI!HOAtr%ro5~>xTvbF;q=pE4l#^MS-?5(DM?CF<
zSw}RJbaD#vS^7;) w2KZM}5CBqiE7&XEHByF(0LtGd%NznD&?zOrxR
}0L)qJVDj_Z3jCa49QM8~L%6z>DGp5-d&g)Q^Q;F1Hv==u>bMC`x*!^%0m=(R`
zgJw4|z0D4-?maNrb{hSQS3vB}T~bR~VDJ8XCUJIm$!47%!t^~Ro7w&mBV$2?KV3`L
zw4FHc&K_JG_K?_Liwc8Uq{(++Me+-pL;ip&qmEiGaAOr;Z<1(jouna2o+%WXbxTt
zRKrTW3&M|EaqMI*wApKMCa)O|Pp^Z`fQ9|m^lxU};K+JHkC*RI6!#S}I|V^|eIDcG&%c(!<|*7mzSBNJ?`0EdewE7B
zf917W>c2vLX>r$4+tt@n-EOv3AN~CgpV}21u3JSnhika+J&C5Tbhqeb;`l93?*GR*
z+xQT(|2xPh$1DbfTJh~i6LQu!#KcVp17Gnk++WA7&jGYfNGBkwoELwpsVf}6(SLjy
zzrrP_o1Tx=f_rE#J;B)a(qr6t1806ZvG(Rv3|Gx0XPYAf{p{GV--p