From f483d9abd9a71cb631bf4c121d267cc7983267d3 Mon Sep 17 00:00:00 2001 From: Jee Jee Li Date: Mon, 20 Jan 2025 03:54:26 +0000 Subject: [PATCH] Optimize unit test Signed-off-by: Jee Jee Li --- tests/lora/test_ultravox.py | 38 ++++++++----------------------------- 1 file changed, 8 insertions(+), 30 deletions(-) diff --git a/tests/lora/test_ultravox.py b/tests/lora/test_ultravox.py index 463c289fa3f8b..e0049180710c3 100644 --- a/tests/lora/test_ultravox.py +++ b/tests/lora/test_ultravox.py @@ -9,8 +9,6 @@ from vllm.lora.request import LoRARequest -from ..models.utils import check_outputs_equal - ULTRAVOX_MODEL_NAME = "fixie-ai/ultravox-v0_3" LLMA_MODEL_NAME = "meta-llama/Llama-3.1-8B-Instruct" @@ -61,6 +59,9 @@ def _get_prompt(audio_count, question, placeholder, model_name) -> str: def test_ultravox_lora(vllm_runner): + """ + TODO: Train an Ultravox LoRA instead of using a Llama LoRA. + """ llama3_1_8b_chess_lora = llama3_1_8b_chess_lora_path() with TemporaryDirectory() as temp_ultravox_lora_dir: llama3_1_8b_ultravox_chess_lora = mk_llama3_1_8b_ultravox_chess_lora( @@ -97,34 +98,11 @@ def test_ultravox_lora(vllm_runner): dtype="bfloat16", max_model_len=4096, ) as vllm_model: - llama_outputs_no_lora: List[Tuple[List[int], str]] = vllm_model.generate_greedy( - [ - _get_prompt( - 0, PROMPT, - VLLM_PLACEHOLDER, - LLMA_MODEL_NAME) - ], - 256, - ) - llama_outputs: List[Tuple[List[int], str]] = vllm_model.generate_greedy( - [ - _get_prompt(0, PROMPT, - VLLM_PLACEHOLDER, - LLMA_MODEL_NAME) - ], - 256, - lora_request=LoRARequest( - str(1), 1, llama3_1_8b_chess_lora), - ) - - check_outputs_equal( - outputs_0_lst=ultravox_outputs, - outputs_1_lst=llama_outputs, - name_0="ultravox", - name_1="llama", - ) + llama_outputs_no_lora: List[Tuple[List[int], str]] = ( + vllm_model.generate_greedy( + [_get_prompt(0, PROMPT, VLLM_PLACEHOLDER, LLMA_MODEL_NAME)], + 256, + )) _, llama_no_lora_str = llama_outputs_no_lora[0] _, ultravox_str = ultravox_outputs[0]