From 7f32aac9b949ed6f4fdc2fbba9e2027ef416f42c Mon Sep 17 00:00:00 2001
From: hyeminan
Date: Sun, 16 Jul 2023 16:05:20 +0900
Subject: [PATCH 1/4] stable_diffusion_jax
---
.../stable_diffusion_jax_how_to.mdx | 252 ++++++++++++++++++
1 file changed, 252 insertions(+)
create mode 100644 docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.mdx
diff --git a/docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.mdx b/docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.mdx
new file mode 100644
index 000000000000..808c0505ab67
--- /dev/null
+++ b/docs/source/ko/using-diffusers/stable_diffusion_jax_how_to.mdx
@@ -0,0 +1,252 @@
+# 🧨 Stable Diffusion in JAX / Flax!
+
+[[open-in-colab]]
+
+🤗 Hugging Face [Diffusers](https://github.com/huggingface/diffusers) supports Flax since version 0.5.1! This allows for super fast inference on Google TPUs, such as those available in Colab, Kaggle or Google Cloud Platform.
+
+This notebook shows how to run inference using JAX / Flax. If you want more details about how Stable Diffusion works, or want to run it on a GPU, please refer to this [notebook](https://huggingface.co/docs/diffusers/stable_diffusion).
+
+First, make sure you are using a TPU backend. If you are running this notebook in Colab, select Runtime in the menu above, then choose the "Change runtime type" option, and select TPU under the Hardware accelerator setting.
+
+Note that JAX is not exclusive to TPUs, but it shines on that hardware because each TPU server has 8 TPU accelerators working in parallel.
+
+
+## Setup
+
+First, make sure `diffusers` is installed.
+
+```bash
+!pip install jax==0.3.25 jaxlib==0.3.25 flax transformers ftfy
+!pip install diffusers
+```
+
+```python
+import jax.tools.colab_tpu
+
+jax.tools.colab_tpu.setup_tpu()
+import jax
+```
+
+```python
+num_devices = jax.device_count()
+device_type = jax.devices()[0].device_kind
+
+print(f"Found {num_devices} JAX devices of type {device_type}.")
+assert (
+ "TPU" in device_type
+), "Available device is not a TPU, please select TPU from Edit > Notebook settings > Hardware accelerator"
+```
+
+```python out
+Found 8 JAX devices of type Cloud TPU.
+```
+
+Then we import all the dependencies.
+
+```python
+import numpy as np
+import jax
+import jax.numpy as jnp
+
+from pathlib import Path
+from jax import pmap
+from flax.jax_utils import replicate
+from flax.training.common_utils import shard
+from PIL import Image
+
+from huggingface_hub import notebook_login
+from diffusers import FlaxStableDiffusionPipeline
+```
+
+## Model Loading
+
+TPU devices support bfloat16, an efficient half-float type. We'll use it for our tests, but you can also use float32 for full precision instead.
+
+```python
+dtype = jnp.bfloat16
+```
+
+Flax is a functional framework, so models are stateless and parameters are stored outside of them. Loading the pre-trained Flax pipeline returns both the pipeline itself and the model weights (or parameters). We are using a bf16 version of the weights, which triggers type warnings that you can safely ignore.
+
+```python
+pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
+ "CompVis/stable-diffusion-v1-4",
+ revision="bf16",
+ dtype=dtype,
+)
+```
+
+## Inference
+
+Since TPUs usually have 8 devices working in parallel, we replicate our prompt as many times as there are devices. Then we perform inference on the 8 devices at once, each responsible for generating one image. That way, we get 8 images in the same amount of time it takes one chip to generate a single one.
+
+After replicating the prompt, we obtain the tokenized text IDs by invoking the pipeline's `prepare_inputs` function. The length of the tokenized text is set to 77 tokens, as required by the configuration of the underlying CLIP text model.
+
+```python
+prompt = "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of field, close up, split lighting, cinematic"
+prompt = [prompt] * jax.device_count()
+prompt_ids = pipeline.prepare_inputs(prompt)
+prompt_ids.shape
+```
+
+```python out
+(8, 77)
+```
+
+### Replication and parallelization
+
+Model parameters and inputs have to be replicated across the 8 parallel devices we have. The parameters dictionary is replicated with `flax.jax_utils.replicate`, which traverses the dictionary and changes the shape of the weights so they are repeated 8 times. Arrays are replicated using `shard`.
+
+```python
+p_params = replicate(params)
+```
+
+```python
+prompt_ids = shard(prompt_ids)
+prompt_ids.shape
+```
+
+```python out
+(8, 1, 77)
+```
+
+This shape means that each of the 8 devices will receive as input a jnp array with shape `(1, 77)`, where `1` is the batch size per device. On TPUs with sufficient memory, it could be larger than 1 if you want to generate multiple images (per chip) at once.
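+
+For example, here is a minimal sketch of a per-device batch of 2 (assuming the TPU has enough memory; the prompt string and batch size are purely illustrative). It tokenizes 16 prompts and lets `shard` split them across the 8 devices:
+
+```python
+# Hypothetical sketch: a per-device batch of 2, i.e. 16 prompts across 8 devices.
+batch_per_device = 2
+batched_prompt = ["A watercolor painting of a lighthouse at dawn"] * jax.device_count() * batch_per_device
+batched_ids = shard(pipeline.prepare_inputs(batched_prompt))
+batched_ids.shape  # (8, 2, 77)
+```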
+
+We are almost ready to generate images! We just need to create a random number generator to pass to the generation function. This is the standard procedure in Flax, which is very serious and opinionated about random numbers: all functions that deal with random numbers are expected to receive a generator. This ensures reproducibility, even when training across multiple distributed devices.
+
+The helper function below uses a seed to initialize a random number generator. As long as we use the same seed, we'll get exactly the same results. Feel free to use different seeds when exploring results later in the notebook.
+
+```python
+def create_key(seed=0):
+ return jax.random.PRNGKey(seed)
+```
+
+We obtain the rng and then "split" it 8 times so each device receives a different generator. As a result, each device creates a different image, and the full process is reproducible.
+
+```python
+rng = create_key(0)
+rng = jax.random.split(rng, jax.device_count())
+```
+
+JAX code can be compiled to an efficient representation that runs very fast. However, we need to ensure that all inputs have the same shape in subsequent calls; otherwise, JAX has to recompile the code, and we can't take advantage of the optimized speed.
+
+The Flax pipeline can compile the code for us if we pass `jit = True` as an argument. It will also ensure that the model runs in parallel on the 8 available devices.
+
+The first time we run the following cell it will take a long time to compile, but subsequent calls (even with different inputs) will be much faster. For example, it took more than a minute to compile on a TPU v2-8 when we tested, but then subsequent inference runs take about 7 seconds.
+
+```
+%%time
+images = pipeline(prompt_ids, p_params, rng, jit=True)[0]
+```
+
+```python out
+CPU times: user 56.2 s, sys: 42.5 s, total: 1min 38s
+Wall time: 1min 29s
+```
+
+The returned array has shape `(8, 1, 512, 512, 3)`. We reshape it to get rid of the second dimension, obtaining 8 images of `512 × 512 × 3`, and then convert them to PIL.
+
+```python
+images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
+images = pipeline.numpy_to_pil(images)
+```
+
+### Visualization
+
+Let's create a helper function to display the images in a grid.
+
+```python
+def image_grid(imgs, rows, cols):
+ w, h = imgs[0].size
+ grid = Image.new("RGB", size=(cols * w, rows * h))
+ for i, img in enumerate(imgs):
+ grid.paste(img, box=(i % cols * w, i // cols * h))
+ return grid
+```
+
+```python
+image_grid(images, 2, 4)
+```
+
+
+
+
+## Using different prompts
+
+We don't have to replicate the same prompt on all the devices. We can do whatever we want: generate 2 prompts 4 times each, or even generate 8 different prompts at once (a sketch of the 2-prompt variant follows the example below). Let's do that!
+
+First, let's define a different prompt for each device:
+
+```python
+prompts = [
+ "Labrador in the style of Hokusai",
+ "Painting of a squirrel skating in New York",
+ "HAL-9000 in the style of Van Gogh",
+ "Times Square under water, with fish and a dolphin swimming around",
+ "Ancient Roman fresco showing a man working on his laptop",
+ "Close-up photograph of young black woman against urban background, high quality, bokeh",
+ "Armchair in the shape of an avocado",
+ "Clown astronaut in space, with Earth in the background",
+]
+```
+
+```python
+prompt_ids = pipeline.prepare_inputs(prompts)
+prompt_ids = shard(prompt_ids)
+
+images = pipeline(prompt_ids, p_params, rng, jit=True).images
+images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
+images = pipeline.numpy_to_pil(images)
+
+image_grid(images, 2, 4)
+```
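+
+As mentioned above, you could also repeat just a couple of prompts instead of using eight different ones. Here is a minimal sketch of that variant, reusing two of the prompt strings above and the helpers already defined; the input shapes are still `(8, 1, 77)`, so the previously compiled code is reused without recompilation:
+
+```python
+# Hypothetical variant: 2 prompts, generated 4 times each (one copy per device).
+two_prompts = [
+    "Labrador in the style of Hokusai",
+    "Clown astronaut in space, with Earth in the background",
+]
+repeated = two_prompts * (jax.device_count() // len(two_prompts))  # 8 prompts in total
+
+repeated_ids = shard(pipeline.prepare_inputs(repeated))
+images = pipeline(repeated_ids, p_params, rng, jit=True).images
+images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
+image_grid(pipeline.numpy_to_pil(images), 2, 4)
+```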
+
+
+
+
+## How does parallelization work?
+
+We previously mentioned that the `diffusers` Flax pipeline automatically compiles the model and runs it in parallel on all available devices. Let's now briefly look inside that process to show how it works.
+
+JAX parallelization can be done in multiple ways. The easiest one revolves around using the `jax.pmap` function to achieve single-program, multiple-data (SPMD) parallelization: we run several copies of the same code, each on a different data input. More sophisticated approaches are possible, so if you are interested we invite you to explore this topic in the [JAX documentation](https://jax.readthedocs.io/en/latest/index.html) and the [`pjit` pages](https://jax.readthedocs.io/en/latest/jax-101/08-pjit.html?highlight=pjit)!
+
+`jax.pmap` does two things for us (a toy sketch follows this list):
+
+- It compiles (or `jit`s) the code, similar to calling `jax.jit()`. This does not happen when we call `pmap`, but the first time the pmapped function is invoked.
+- It ensures the compiled code runs in parallel on all available devices.
+
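+To build an intuition for this, here is a toy sketch on a trivial function that is not part of the pipeline: `pmap` maps a per-example function over the leading axis of its input, running one copy per device.
+
+```python
+# Toy illustration: each of the 8 devices squares its own slice of the input.
+def square(x):
+    return x**2
+
+xs = jnp.arange(jax.device_count())  # shape: (8,)
+print(pmap(square)(xs))  # -> [ 0  1  4  9 16 25 36 49]
+```
+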
+To show how it works in the pipeline, we'll `pmap` the `_generate` method of the pipeline, which is the private method that runs image generation. Please note that this method may be renamed or removed in future releases of `diffusers`.
+
+```python
+p_generate = pmap(pipeline._generate)
+```
+
+After we use `pmap`, the prepared function `p_generate` will conceptually do the following:
+* Invoke a copy of the underlying function, `pipeline._generate`, on each device.
+* Send each device a different portion of the input arguments. That's what sharding is used for. In this case, `prompt_ids` has shape `(8, 1, 77)`: the array is split into 8 pieces, and each copy of `_generate` receives an input with shape `(1, 77)`.
+
+We can code `_generate` while completely ignoring the fact that it will be invoked in parallel. We just need to care about the batch size (`1` in this example) and the dimensions that make sense for our code, and don't have to change anything for it to work in parallel.
+
+The same way as when we used the pipeline call, the first time we run the following cell it will take a while, but subsequent runs will be much faster.
+
+```
+%%time
+images = p_generate(prompt_ids, p_params, rng)
+images = images.block_until_ready()
+images.shape
+```
+
+```python out
+CPU times: user 1min 15s, sys: 18.2 s, total: 1min 34s
+Wall time: 1min 15s
+```
+
+```python
+images.shape
+```
+
+```python out
+(8, 1, 512, 512, 3)
+```
+
+We use `block_until_ready()` to correctly measure inference time, because JAX uses asynchronous dispatch and returns control to the Python loop as soon as it can. You don't need to use it in your own code; blocking happens automatically when you use the result of a computation that has not yet been materialized.
\ No newline at end of file
From 7e56d6033f0495e22f84cd6b8667b597d2d88ad3 Mon Sep 17 00:00:00 2001
From: hyeminan
Date: Sun, 30 Jul 2023 15:45:14 +0900
Subject: [PATCH 2/4] index_update
---
docs/source/ko/index.mdx | 124 +++++++++++++++++++++++++--------------
1 file changed, 79 insertions(+), 45 deletions(-)
diff --git a/docs/source/ko/index.mdx b/docs/source/ko/index.mdx
index d01dff5c5e00..59f31131fa7f 100644
--- a/docs/source/ko/index.mdx
+++ b/docs/source/ko/index.mdx
@@ -16,48 +16,82 @@ specific language governing permissions and limitations under the License.
-# 🧨 Diffusers
-
-🤗 Diffusers provides pretrained vision and audio diffusion models, and serves as a modular toolbox for inference and training.
-
-More precisely, 🤗 Diffusers offers:
-
-- State-of-the-art diffusion pipelines that can be run in inference with just a couple of lines of code (check out [**Using Diffusers**](./using-diffusers/conditional_image_generation)). For an overview of all supported pipelines and their corresponding papers, take a look at [**Pipelines**](#pipelines).
-- Various noise schedulers that can be used interchangeably to trade off speed versus quality during inference. For more information, see [**Schedulers**](./api/schedulers/overview).
-- Multiple types of models, such as UNet, that can be used as building blocks in an end-to-end diffusion system. See [**Models**](./api/models) for more details.
-- Training examples that show how to train the most popular diffusion model tasks. For more information, see [**Training**](./training/overview).
-
-## 🧨 Diffusers Pipelines
-
-The table below summarizes all officially supported pipelines, their corresponding paper, and, if available, a Colab notebook to try them out directly.
-
-| Pipeline | Paper | Tasks | Colab
-|---|---|:---:|:---:|
-| [alt_diffusion](./api/pipelines/alt_diffusion) | [**AltDiffusion**](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
-| [audio_diffusion](./api/pipelines/audio_diffusion) | [**Audio Diffusion**](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation | [](https://colab.research.google.com/github/teticio/audio-diffusion/blob/master/notebooks/audio_diffusion_pipeline.ipynb)
-| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [**Cycle Diffusion**](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
-| [dance_diffusion](./api/pipelines/dance_diffusion) | [**Dance Diffusion**](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
-| [ddpm](./api/pipelines/ddpm) | [**Denoising Diffusion Probabilistic Models**](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
-| [ddim](./api/pipelines/ddim) | [**Denoising Diffusion Implicit Models**](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
-| [latent_diffusion](./api/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation |
-| [latent_diffusion](./api/pipelines/latent_diffusion) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image |
-| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [**High-Resolution Image Synthesis with Latent Diffusion Models**](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
-| [paint_by_example](./api/pipelines/paint_by_example) | [**Paint by Example: Exemplar-based Image Editing with Diffusion Models**](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
-| [pndm](./api/pipelines/pndm) | [**Pseudo Numerical Methods for Diffusion Models on Manifolds**](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
-| [score_sde_ve](./api/pipelines/score_sde_ve) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
-| [score_sde_vp](./api/pipelines/score_sde_vp) | [**Score-Based Generative Modeling through Stochastic Differential Equations**](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
-| [stable_diffusion](./api/pipelines/stable_diffusion/text2img) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb)
-| [stable_diffusion](./api/pipelines/stable_diffusion/img2img) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/image_2_image_using_diffusers.ipynb)
-| [stable_diffusion](./api/pipelines/stable_diffusion/inpaint) | [**Stable Diffusion**](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/in_painting_with_stable_diffusion_using_diffusers.ipynb)
-| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
-| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
-| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [**Stable Diffusion 2**](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
-| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [**Safe Stable Diffusion**](https://arxiv.org/abs/2211.05105) | Text-Guided Generation | [](https://colab.research.google.com/github/ml-research/safe-latent-diffusion/blob/main/examples/Safe%20Latent%20Diffusion.ipynb)
-| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [**Elucidating the Design Space of Diffusion-Based Generative Models**](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
-| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125) | Text-to-Image Generation |
-| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
-| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
-| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
-| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
-
-**Note**: Pipelines are simple examples of how to use the corresponding diffusion systems as described in their papers.
+
+# Diffusers
+
+🤗 Diffusers is a library of state-of-the-art pretrained diffusion models for generating images, audio, and even 3D structures of molecules. Whether you're looking for a simple inference solution or want to train your own diffusion model, 🤗 Diffusers is a modular toolbox that supports both. Our library is designed with a focus on [usability over performance](conceptual/philosophy#usability-over-performance), [simple over easy](conceptual/philosophy#simple-over-easy), and [customizability over abstractions](conceptual/philosophy#tweakable-contributorfriendly-over-abstraction).
+
+The library has three main components:
+
+- State-of-the-art [diffusion pipelines](api/pipelines/overview) for inference with just a few lines of code.
+- Interchangeable [noise schedulers](api/schedulers/overview) for balancing trade-offs between generation speed and quality.
+- Pretrained [models](api/models) that can be used as building blocks, and combined with schedulers, for creating your own end-to-end diffusion systems.
+
+
+
+## Supported pipelines
+
+| Pipeline | Paper/Repository | Tasks |
+|---|---|:---:|
+| [alt_diffusion](./api/pipelines/alt_diffusion) | [AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679) | Image-to-Image Text-Guided Generation |
+| [audio_diffusion](./api/pipelines/audio_diffusion) | [Audio Diffusion](https://github.com/teticio/audio-diffusion.git) | Unconditional Audio Generation |
+| [controlnet](./api/pipelines/stable_diffusion/controlnet) | [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) | Image-to-Image Text-Guided Generation |
+| [cycle_diffusion](./api/pipelines/cycle_diffusion) | [Unifying Diffusion Models' Latent Space, with Applications to CycleDiffusion and Guidance](https://arxiv.org/abs/2210.05559) | Image-to-Image Text-Guided Generation |
+| [dance_diffusion](./api/pipelines/dance_diffusion) | [Dance Diffusion](https://github.com/williamberman/diffusers.git) | Unconditional Audio Generation |
+| [ddpm](./api/pipelines/ddpm) | [Denoising Diffusion Probabilistic Models](https://arxiv.org/abs/2006.11239) | Unconditional Image Generation |
+| [ddim](./api/pipelines/ddim) | [Denoising Diffusion Implicit Models](https://arxiv.org/abs/2010.02502) | Unconditional Image Generation |
+| [if](./if) | [**IF**](./api/pipelines/if) | Image Generation |
+| [if_img2img](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
+| [if_inpainting](./if) | [**IF**](./api/pipelines/if) | Image-to-Image Generation |
+| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Text-to-Image Generation |
+| [latent_diffusion](./api/pipelines/latent_diffusion) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752)| Super Resolution Image-to-Image |
+| [latent_diffusion_uncond](./api/pipelines/latent_diffusion_uncond) | [High-Resolution Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) | Unconditional Image Generation |
+| [paint_by_example](./api/pipelines/paint_by_example) | [Paint by Example: Exemplar-based Image Editing with Diffusion Models](https://arxiv.org/abs/2211.13227) | Image-Guided Image Inpainting |
+| [pndm](./api/pipelines/pndm) | [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778) | Unconditional Image Generation |
+| [score_sde_ve](./api/pipelines/score_sde_ve) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
+| [score_sde_vp](./api/pipelines/score_sde_vp) | [Score-Based Generative Modeling through Stochastic Differential Equations](https://openreview.net/forum?id=PxTIG12RRHS) | Unconditional Image Generation |
+| [semantic_stable_diffusion](./api/pipelines/semantic_stable_diffusion) | [Semantic Guidance](https://arxiv.org/abs/2301.12247) | Text-Guided Generation |
+| [stable_diffusion_text2img](./api/pipelines/stable_diffusion/text2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-to-Image Generation |
+| [stable_diffusion_img2img](./api/pipelines/stable_diffusion/img2img) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Image-to-Image Text-Guided Generation |
+| [stable_diffusion_inpaint](./api/pipelines/stable_diffusion/inpaint) | [Stable Diffusion](https://stability.ai/blog/stable-diffusion-public-release) | Text-Guided Image Inpainting |
+| [stable_diffusion_panorama](./api/pipelines/stable_diffusion/panorama) | [MultiDiffusion](https://multidiffusion.github.io/) | Text-to-Panorama Generation |
+| [stable_diffusion_pix2pix](./api/pipelines/stable_diffusion/pix2pix) | [InstructPix2Pix: Learning to Follow Image Editing Instructions](https://arxiv.org/abs/2211.09800) | Text-Guided Image Editing|
+| [stable_diffusion_pix2pix_zero](./api/pipelines/stable_diffusion/pix2pix_zero) | [Zero-shot Image-to-Image Translation](https://pix2pixzero.github.io/) | Text-Guided Image Editing |
+| [stable_diffusion_attend_and_excite](./api/pipelines/stable_diffusion/attend_and_excite) | [Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models](https://arxiv.org/abs/2301.13826) | Text-to-Image Generation |
+| [stable_diffusion_self_attention_guidance](./api/pipelines/stable_diffusion/self_attention_guidance) | [Improving Sample Quality of Diffusion Models Using Self-Attention Guidance](https://arxiv.org/abs/2210.00939) | Text-to-Image Generation Unconditional Image Generation |
+| [stable_diffusion_image_variation](./stable_diffusion/image_variation) | [Stable Diffusion Image Variations](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) | Image-to-Image Generation |
+| [stable_diffusion_latent_upscale](./stable_diffusion/latent_upscale) | [Stable Diffusion Latent Upscaler](https://twitter.com/StabilityAI/status/1590531958815064065) | Text-Guided Super Resolution Image-to-Image |
+| [stable_diffusion_model_editing](./api/pipelines/stable_diffusion/model_editing) | [Editing Implicit Assumptions in Text-to-Image Diffusion Models](https://time-diffusion.github.io/) | Text-to-Image Model Editing |
+| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-to-Image Generation |
+| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Image Inpainting |
+| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Depth-Conditional Stable Diffusion](https://github.com/Stability-AI/stablediffusion#depth-conditional-stable-diffusion) | Depth-to-Image Generation |
+| [stable_diffusion_2](./api/pipelines/stable_diffusion_2) | [Stable Diffusion 2](https://stability.ai/blog/stable-diffusion-v2-release) | Text-Guided Super Resolution Image-to-Image |
+| [stable_diffusion_safe](./api/pipelines/stable_diffusion_safe) | [Safe Stable Diffusion](https://arxiv.org/abs/2211.05105) | Text-Guided Generation |
+| [stable_unclip](./stable_unclip) | Stable unCLIP | Text-to-Image Generation |
+| [stable_unclip](./stable_unclip) | Stable unCLIP | Image-to-Image Text-Guided Generation |
+| [stochastic_karras_ve](./api/pipelines/stochastic_karras_ve) | [Elucidating the Design Space of Diffusion-Based Generative Models](https://arxiv.org/abs/2206.00364) | Unconditional Image Generation |
+| [text_to_video_sd](./api/pipelines/text_to_video) | [Modelscope's Text-to-video-synthesis Model in Open Domain](https://modelscope.cn/models/damo/text-to-video-synthesis/summary) | Text-to-Video Generation |
+| [unclip](./api/pipelines/unclip) | [Hierarchical Text-Conditional Image Generation with CLIP Latents](https://arxiv.org/abs/2204.06125)(implementation by [kakaobrain](https://github.com/kakaobrain/karlo)) | Text-to-Image Generation |
+| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Text-to-Image Generation |
+| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Image Variations Generation |
+| [versatile_diffusion](./api/pipelines/versatile_diffusion) | [Versatile Diffusion: Text, Images and Variations All in One Diffusion Model](https://arxiv.org/abs/2211.08332) | Dual Image and Text Guided Generation |
+| [vq_diffusion](./api/pipelines/vq_diffusion) | [Vector Quantized Diffusion Model for Text-to-Image Synthesis](https://arxiv.org/abs/2111.14822) | Text-to-Image Generation |
From cb1873cbcda54f198db1f1b57060f1d02a5851be Mon Sep 17 00:00:00 2001
From: hyeminan
Date: Sun, 30 Jul 2023 15:53:18 +0900
Subject: [PATCH 3/4] index_update
---
docs/source/ko/index.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/docs/source/ko/index.mdx b/docs/source/ko/index.mdx
index 59f31131fa7f..a83dd0d0b29e 100644
--- a/docs/source/ko/index.mdx
+++ b/docs/source/ko/index.mdx
@@ -31,7 +31,7 @@ specific language governing permissions and limitations under the License.
Tutorials
- Learn the fundamental skills you need to start generating outputs, building your own diffusion system, and training a diffusion model. We recommend starting here if you're using 🤗 Diffusers for the first time!
+ Learn the fundamental skills you need to start generating results, building your own diffusion system, and training a diffusion model. We recommend starting here if you're using 🤗 Diffusers for the first time!
How-to guides
From b33e21acb7ffd1afa7c0684ef7202e7b38b40aca Mon Sep 17 00:00:00 2001
From: hyeminan
Date: Sun, 30 Jul 2023 16:46:24 +0900
Subject: [PATCH 4/4] condition_image_generation
---
.../conditional_image_generation.mdx | 60 +++++++++++++++++++
1 file changed, 60 insertions(+)
create mode 100644 docs/source/ko/using-diffusers/conditional_image_generation.mdx
diff --git a/docs/source/ko/using-diffusers/conditional_image_generation.mdx b/docs/source/ko/using-diffusers/conditional_image_generation.mdx
new file mode 100644
index 000000000000..5525ac990ca4
--- /dev/null
+++ b/docs/source/ko/using-diffusers/conditional_image_generation.mdx
@@ -0,0 +1,60 @@
+
+
+# Conditional image generation
+
+[[open-in-colab]]
+
+Conditional image generation allows you to generate images from a text prompt. The text is converted into embeddings, which are used to condition the model to generate an image from noise.
+
+The [`DiffusionPipeline`] is the easiest way to use a pretrained diffusion system for inference.
+
+Start by creating an instance of [`DiffusionPipeline`] and specify which pipeline [checkpoint](https://huggingface.co/models?library=diffusers&sort=downloads) you would like to download.
+
+In this guide, you'll use [`DiffusionPipeline`] for text-to-image generation with [Latent Diffusion](https://huggingface.co/CompVis/ldm-text2im-large-256):
+
+```python
+>>> from diffusers import DiffusionPipeline
+
+>>> generator = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
+```
+
+The [`DiffusionPipeline`] downloads and caches all modeling, tokenization, and scheduling components.
+Because the model consists of roughly 1.4 billion parameters, we strongly recommend running it on a GPU.
+You can move the generator object to a GPU, just like you would in PyTorch:
+
+```python
+>>> generator.to("cuda")
+```
+
+Now you can use the `generator` on your text prompt:
+
+```python
+>>> image = generator("An image of a squirrel in Picasso style").images[0]
+```
+
+The output is wrapped into a [`PIL.Image`](https://pillow.readthedocs.io/en/stable/reference/Image.html?highlight=image#the-image-class) object by default.
+
+You can save the image by calling:
+
+```python
+>>> image.save("image_of_squirrel_painting.png")
+```
+
+Try out the Spaces below, and feel free to play around with the guidance scale parameter to see how it affects image quality!
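+
+If you would rather experiment in code than in the Space, the call below is a minimal sketch: `guidance_scale` is an argument of the pipeline call (higher values follow the prompt more closely, at some cost to diversity), and `7.5` is only an illustrative choice.
+
+```python
+>>> # Illustrative guidance scale value; try a few and compare the results.
+>>> image = generator("An image of a squirrel in Picasso style", guidance_scale=7.5).images[0]
+>>> image.save("image_of_squirrel_painting_guided.png")
+```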
+
+
\ No newline at end of file