We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 0bd2a57 commit 0ffded8Copy full SHA for 0ffded8
vllm/entrypoints/openai/api_server.py
@@ -358,7 +358,13 @@ async def create_completion(raw_request: Request):
358
model_name = request.model
359
request_id = f"cmpl-{random_uuid()}"
360
if isinstance(request.prompt, list):
361
- assert len(request.prompt) == 1
+ if len(request.prompt) == 0:
362
+ return create_error_response(HTTPStatus.BAD_REQUEST,
363
+ "please provide at least one prompt")
364
+ if len(request.prompt) > 1:
365
+ return create_error_response(HTTPStatus.BAD_REQUEST,
366
+ "multiple prompts in a batch is not "
367
+ "currently supported")
368
prompt = request.prompt[0]
369
else:
370
prompt = request.prompt
0 commit comments