Skip to content

Commit

Permalink
Fix issue where input_ids were always placed on CPU
Browse files Browse the repository at this point in the history
Signed-off-by: Akshay Sonawane <[email protected]>
  • Loading branch information
apsonawane committed Jan 17, 2025
1 parent bc33e79 commit 8a56bfb
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 2 deletions.
4 changes: 3 additions & 1 deletion src/lemonade/tools/humaneval.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,7 @@ def run(
k_samples,
timeout,
model_results_dir,
state.device,
first_n_samples,
)

Expand Down Expand Up @@ -153,6 +154,7 @@ def _evaluate_model(
k_samples: int,
timeout: float,
results_dir: str,
device: str,
first_n_samples: Optional[int] = TOTAL_PROBLEMS,
) -> Dict[str, float]:
"""
Expand Down Expand Up @@ -198,7 +200,7 @@ def _evaluate_model(
expected = dataset[task_id]["canonical_solution"]

# Generate completion
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
completion = model.generate(
input_ids,
max_new_tokens=512,
Expand Down
2 changes: 1 addition & 1 deletion src/lemonade/tools/mmlu.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def run(
prompt = _gen_prompt(dev_df, subject, ntrain) + _format_example(
test_df, i, include_answer=False
)
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(state.device)

response_text = _generate_response(tokenizer, model, input_ids)
try:
Expand Down

0 comments on commit 8a56bfb

Please sign in to comment.