diff --git a/.gitignore b/.gitignore
index e43b0f9..27cd6cf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,9 @@
 .DS_Store
+tokenflow-results*/**
+venv/
+__pycache__/
+*.pyc
+latents/
+data/**
+!data/wolf.mp4
+!data/woman-running.mp4
\ No newline at end of file
diff --git a/README.md b/README.md
index 80ea028..fe19f78 100644
--- a/README.md
+++ b/README.md
@@ -32,11 +32,23 @@ For more see the [project webpage](https://diffusion-tokenflow.github.io).
 
 ## Environment
+
+Using conda
 ```
 conda create -n tokenflow python=3.9
 conda activate tokenflow
 pip install -r requirements.txt
 ```
+
+Using CPython on Windows (PowerShell)
+```powershell
+python -m pip install virtualenv
+python -m virtualenv venv
+venv/Scripts/activate.ps1
+pip install -r requirements.txt
+pip install -r requirements-torch.txt
+```
+
 ## Preprocess
 Preprocess you video by running using the following command:
diff --git a/requirements-torch.txt b/requirements-torch.txt
new file mode 100644
index 0000000..224e56c
--- /dev/null
+++ b/requirements-torch.txt
@@ -0,0 +1,3 @@
+-i https://download.pytorch.org/whl/cu117
+torch==2.0.1+cu117
+torchvision==0.15.2+cu117
\ No newline at end of file
diff --git a/run_tokenflow_pnp.py b/run_tokenflow_pnp.py
index 037d4dc..6c83010 100644
--- a/run_tokenflow_pnp.py
+++ b/run_tokenflow_pnp.py
@@ -115,7 +115,9 @@ def get_latents_path(self):
         latents_path = os.path.join(config["latents_path"], f'sd_{config["sd_version"]}', Path(config["data_path"]).stem, f'steps_{config["n_inversion_steps"]}')
         latents_path = [x for x in glob.glob(f'{latents_path}/*') if '.' not in Path(x).name]
-        n_frames = [int([x for x in latents_path[i].split('/') if 'nframes' in x][0].split('_')[1]) for i in range(len(latents_path))]
+
+        n_frames = [int(os.path.basename(x).split('_')[1]) for x in latents_path if 'nframes_' in x]
+
         latents_path = latents_path[np.argmax(n_frames)]
         self.config["n_frames"] = min(max(n_frames), config["n_frames"])
         if self.config["n_frames"] % self.config["batch_size"] != 0:
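
For context, the `run_tokenflow_pnp.py` hunk replaces a `'/'`-only string split with `os.path.basename`, so the `nframes_*` latent folder name is parsed correctly on Windows (where the joined paths use backslashes) as well as on Linux. Below is a minimal sketch of the new parsing logic; the directory names follow the scheme built in `get_latents_path` but the concrete values (`sd_2.1`, `wolf`, `steps_500`, `nframes_*`) are purely illustrative.

```python
import os

# Hypothetical latent directories, mirroring the repo's naming scheme
# (sd_<version>/<video stem>/steps_<n>/nframes_<k>); values are illustrative only.
latents_path = [
    os.path.join("latents", "sd_2.1", "wolf", "steps_500", "nframes_40"),
    os.path.join("latents", "sd_2.1", "wolf", "steps_500", "nframes_70"),
]

# os.path.basename splits on the platform's own separator, so it also handles
# Windows backslash paths, unlike the previous x.split('/').
n_frames = [int(os.path.basename(x).split('_')[1]) for x in latents_path if 'nframes_' in x]

print(n_frames)  # [40, 70]; the script then selects the folder with the most frames via np.argmax
```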