Commit 0ffded8

[Fix] Better error message for batched prompts (vllm-project#342)
1 parent 0bd2a57 commit 0ffded8

File tree

1 file changed: +7 −1 lines changed

vllm/entrypoints/openai/api_server.py (+7 −1)
@@ -358,7 +358,13 @@ async def create_completion(raw_request: Request):
     model_name = request.model
     request_id = f"cmpl-{random_uuid()}"
     if isinstance(request.prompt, list):
-        assert len(request.prompt) == 1
+        if len(request.prompt) == 0:
+            return create_error_response(HTTPStatus.BAD_REQUEST,
+                                         "please provide at least one prompt")
+        if len(request.prompt) > 1:
+            return create_error_response(HTTPStatus.BAD_REQUEST,
+                                         "multiple prompts in a batch is not "
+                                         "currently supported")
         prompt = request.prompt[0]
     else:
         prompt = request.prompt
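
A minimal client-side sketch (not part of the commit) of what this change means in practice: a request to the completions endpoint with a list containing more than one prompt now returns an HTTP 400 error response instead of tripping the old assert. The server URL and model name below are placeholders and assume an OpenAI-compatible vLLM server is already running.

# Hypothetical client call; http://localhost:8000 and "facebook/opt-125m"
# are assumed values, not taken from the commit.
import requests

resp = requests.post(
    "http://localhost:8000/v1/completions",
    json={
        "model": "facebook/opt-125m",
        # A batched prompt (list with more than one entry) is now rejected
        # with a descriptive error rather than an assertion failure.
        "prompt": ["first prompt", "second prompt"],
    },
)
print(resp.status_code)  # expected: 400 (HTTPStatus.BAD_REQUEST)
print(resp.json())       # error body should mention that batched prompts are not currently supported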
