From da6456a13f2fa5a07009ec117cbc42606fe0afd1 Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Wed, 29 Jan 2025 20:16:42 +0530
Subject: [PATCH] update llamatokenizer in hunyuanvideo tests

---
 tests/pipelines/hunyuan_video/test_hunyuan_video.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/tests/pipelines/hunyuan_video/test_hunyuan_video.py b/tests/pipelines/hunyuan_video/test_hunyuan_video.py
index 1ecfee666fcd..ba7ec43ec977 100644
--- a/tests/pipelines/hunyuan_video/test_hunyuan_video.py
+++ b/tests/pipelines/hunyuan_video/test_hunyuan_video.py
@@ -132,7 +132,7 @@ def get_dummy_components(self, num_layers: int = 1, num_single_layers: int = 1):
 
         torch.manual_seed(0)
         text_encoder = LlamaModel(llama_text_encoder_config)
-        tokenizer = LlamaTokenizer.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM")
+        tokenizer = LlamaTokenizer.from_pretrained("finetrainers/dummy-hunyaunvideo", subfolder="tokenizer")
 
         torch.manual_seed(0)
         text_encoder_2 = CLIPTextModel(clip_text_encoder_config)
@@ -155,10 +155,8 @@ def get_dummy_inputs(self, device, seed=0):
         else:
             generator = torch.Generator(device=device).manual_seed(seed)
 
-        # Cannot test with dummy prompt because tokenizers are not configured correctly.
-        # TODO(aryan): create dummy tokenizers and using from hub
         inputs = {
-            "prompt": "",
+            "prompt": "dance monkey",
            "prompt_template": {
                "template": "{}",
                "crop_start": 0,
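---

Note: for context, below is a minimal sketch of what the patched fixture now makes possible. It is not part of the patch; the `max_length` and padding arguments are illustrative assumptions, not values taken from the test file, and it assumes the hub repo's `tokenizer` subfolder contains a fully configured tokenizer.

```python
from transformers import LlamaTokenizer

# Load the tiny tokenizer hosted on the Hub next to the dummy HunyuanVideo
# checkpoint (the repo id, including its spelling, matches the patch above).
tokenizer = LlamaTokenizer.from_pretrained(
    "finetrainers/dummy-hunyaunvideo", subfolder="tokenizer"
)

# With a properly configured tokenizer, the test can encode a real dummy
# prompt instead of the empty string it used before.
text_inputs = tokenizer(
    "dance monkey",
    padding="max_length",
    max_length=16,  # illustrative only; the pipeline picks its own length
    truncation=True,
    return_tensors="pt",
)
print(text_inputs.input_ids.shape)
```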