Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[Frontend] Don't log duplicate error stacktrace for every request in the batch #9023

Merged
merged 4 commits into from
Oct 21, 2024
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
50 changes: 40 additions & 10 deletions tests/mq_llm_engine/test_error_handling.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,15 +59,7 @@ async def test_evil_forward(tmp_socket):
await asyncio.sleep(2.0)
await client.check_health()

# Throws an error in first forward pass.
with pytest.raises(RAISED_ERROR):
async for _ in client.generate(prompt="Hello my name is",
sampling_params=SamplingParams(),
request_id=uuid.uuid4()):
pass
assert client.errored

# Engine is errored, should get ENGINE_DEAD_ERROR.
# Throws an error that should get ENGINE_DEAD_ERROR.
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We could also check a "batch" of requests here, like

def do_generate(client):
    async for _ in client.generate(prompt="Hello my name is",
                                   sampling_params=SamplingParams(),
                                   request_id=uuid.uuid4()):
        pass

...
# (in this test)
tasks = [asyncio.create_task(do_generate(client)) for _ in range(10)]

# Check that every `task` in `tasks` failed with `MQEngineDeadError`

That should test that we don't get the big spew of stack traces, since every request will raise an error type that doesn't log the stack trace

with pytest.raises(MQEngineDeadError):
async for _ in client.generate(prompt="Hello my name is",
sampling_params=SamplingParams(),
Expand Down Expand Up @@ -149,7 +141,7 @@ async def test_failed_abort(tmp_socket):
client = await engine.make_client()
assert client.is_running

# Firsh check health should work.
# First check health should work.
await client.check_health()

# Trigger an abort on the client side.
Expand All @@ -174,6 +166,44 @@ async def test_failed_abort(tmp_socket):
client.close()


@pytest.mark.asyncio
async def test_batch_error(tmp_socket):
with RemoteMQLLMEngine(engine_args=ENGINE_ARGS,
ipc_path=tmp_socket,
run_fn=run_with_evil_abort) as engine:

client = await engine.make_client()
assert client.is_running

# First check health should work.
await client.check_health()

# Batch of requests
async def do_generate(client):
# min_tokens=2048 to keep the engine busy
# long enough to process a request
# that will crash the engine
params = SamplingParams(min_tokens=2048, max_tokens=2048)
async for _ in client.generate(prompt="Hello my name is",
sampling_params=params,
request_id=uuid.uuid4()):
pass

tasks = [asyncio.create_task(do_generate(client)) for _ in range(10)]

# This request will force a processing batch to raise
# an exception, after which the engine becomes errored
await client.abort(request_id="foo")

# The batch of those requests failed, so each of them
# should receive the same exception, a MQEngineDeadError.
errors = await asyncio.gather(*tasks, return_exceptions=True)
for e in errors:
assert "KeyError" in repr(e)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@wallashss I think we need to assert that these errors are also MQEngineDeadErrors here, then we're good to go


client.close()


@pytest.mark.asyncio
async def test_bad_request(tmp_socket):
with RemoteMQLLMEngine(engine_args=ENGINE_ARGS,
Expand Down
12 changes: 12 additions & 0 deletions vllm/engine/multiprocessing/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,8 +204,20 @@ async def run_output_handler_loop(self):
# (and record only the first one)
if is_engine_errored and not self._errored_with:
self._errored_with = exception
# If the engine is errored, no matter the type of exception
# it will no longer be able to receive new requests,
# therefore we have to inform callers that the currently
# processed requests failed as well. Send back a dead
# engine error to give this feedback and also give a
# 'hint' to the server to shut down next.
exception = self.dead_error

if request_id is None:
# If request_id is None, then the engine raised an
# exception for a batch, and we may not know the
# request that caused it, nor whether it was actually
# caused by any of them (e.g. CUDA OOM). Therefore we
# broadcast the same exception to all requests.
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nice, love the explanation here!

for queue_i in tuple(self.output_queues.values()):
queue_i.put_nowait(exception)
else:
Expand Down