
Chunked Prefill VLM #3188


Merged: 31 commits, merged on May 6, 2025
Changes from 1 commit
add encoder cache free
mht-sharma committed Apr 18, 2025
commit 526a8785ed0c3870b76c5ef08161bc3235a55ab3
server/text_generation_server/models/flash_causal_lm.py (2 additions, 0 deletions)
@@ -1899,7 +1899,9 @@ def generate_token(
batch.prepare_for_prefill()

self.get_input_embeddings(batch)
Collaborator:

Looks wrong too (given the implementation of the function and its name).

Collaborator (Author):

Updated the name.

from pdb import set_trace

set_trace()  # NOTE: leftover debugging breakpoint in this commit
prefill_logprobs = batch.prefill_next_token_indices is not None

# Update adapter indices for speculative tokens (if present)
@@ -304,6 +304,7 @@ def __init__(
            device=device,
            rank=rank,
            world_size=world_size,
            support_chunking=True,
        )

        # Monkey patch of `self.model.forward` to match `FlashCausalLM`. It avoids duplicating a lot of code
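The support_chunking=True argument above is how the model opts into chunked prefill: the flag is forwarded to the FlashCausalLM-style base constructor, which is what allows the scheduler to split a long prompt's prefill across several forward passes and why the encoder-cache bookkeeping in the rest of this commit is needed. A minimal sketch of the pattern, assuming the base __init__ accepts that keyword; the subclass name and the use of *args/**kwargs are illustrative, not the exact signature in this file:

# Sketch only: `SomeVlmModel` is hypothetical; the point is the opt-in flag.
from text_generation_server.models.vlm_causal_lm import VlmCausalLM


class SomeVlmModel(VlmCausalLM):
    def __init__(self, *args, **kwargs):
        # Advertise chunked-prefill support to the FlashCausalLM base machinery.
        super().__init__(*args, support_chunking=True, **kwargs)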
server/text_generation_server/models/vlm_causal_lm.py (34 additions, 0 deletions)
@@ -499,6 +499,7 @@ def prepare_for_prefill(self):
                if image_id not in self.encoder_cache[i]:
                    self.scheduled_image_input.append((i, image_position))
                    scheduled_image_pixel_values.append(pixel_values)
                    self.image_inputs[i][j] = None  # raw image input is no longer needed once scheduled

        if self.has_image and len(scheduled_image_pixel_values):
            self.pixel_values = torch.cat(
@@ -590,6 +591,37 @@ def get_mm_embeddings(self):

        return torch.cat(mm_embeds, dim=0).to(device)

    def free_encoder_cache(self):
        for i, (
            r,
            cache_length,
            input_length,
            request_prefilling,
        ) in enumerate(
            zip(
                self.requests,
                self.cache_lengths,
                self.input_lengths,
                self.prefilling_mask,
            )
        ):
            if not request_prefilling or self.image_positions[i] is None:
                continue

            # Number of prompt positions covered once this prefill chunk is done
            cache_length = cache_length + input_length

            for j, image_position in enumerate(self.image_positions[i]):
                image_id = image_position.id

                start_pos = image_position.offset
                length = image_position.length

                if start_pos >= cache_length:
                    # No encoder input required at this step
                    break

                if start_pos + length <= cache_length:
                    # Image embedding fully consumed by prefill: release its cache entry
                    self.encoder_cache[i][image_id] = None


class VlmCausalLM(FlashCausalLM):
    def __init__(
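As a worked example of the freeing condition in free_encoder_cache (the numbers are made up, not taken from the PR): suppose an image's embedding occupies prompt positions 600 through 855 (offset 600, length 256) and prefill runs in 512-token chunks.

# Standalone sketch of the condition applied by free_encoder_cache; values are illustrative.
offset, length = 600, 256


def fully_consumed(cache_length: int, input_length: int) -> bool:
    # Prompt positions covered once the current chunk has been prefilled
    reached = cache_length + input_length
    return offset + length <= reached


print(fully_consumed(0, 512))    # False: only [0, 512) is covered, the image span is not finished
print(fully_consumed(512, 512))  # True: [0, 1024) is covered, positions 600-855 are consumed, cache can be freed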
@@ -831,4 +863,6 @@ def forward(
                else None
            )
            logits = cuda_graph["logits"][:bs]

        batch.free_encoder_cache()  # release encoder cache entries whose image spans are fully prefilled
        return logits, speculative_logits
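Taken together, the pieces added in this commit give each step roughly the following lifecycle: prepare_for_prefill schedules and caches the embeddings of any image that is needed but not yet in the per-request encoder cache, forward consumes those embeddings for the current chunk, and free_encoder_cache then drops entries whose image span has been fully covered. Below is a toy, self-contained simulation of that lifecycle; all names and numbers are made up, and only the scheduling and freeing rules mirror the diff, so treat it as a sketch rather than TGI's actual scheduler.

# Toy simulation only; it does not call TGI code.
from dataclasses import dataclass


@dataclass
class ImagePosition:
    id: int
    offset: int   # first prompt position occupied by the image embedding
    length: int   # number of prompt positions it occupies


prompt_length, chunk_size = 1200, 512
images = [ImagePosition(id=0, offset=600, length=256)]
encoder_cache = {}

cache_length = 0
while cache_length < prompt_length:
    input_length = min(chunk_size, prompt_length - cache_length)
    reached = cache_length + input_length

    # "prepare_for_prefill": encode images that this chunk needs and that are not cached yet
    for pos in images:
        if pos.id not in encoder_cache and pos.offset < reached:
            encoder_cache[pos.id] = f"embedding for image {pos.id}"

    # "forward" would run the model over this chunk here, reading from encoder_cache

    # "free_encoder_cache": release embeddings that are now fully consumed
    for pos in images:
        if pos.offset >= reached:
            break
        if pos.offset + pos.length <= reached:
            encoder_cache[pos.id] = None

    cache_length = reached
    print(f"after chunk up to {cache_length}: {encoder_cache}")
# prints: after chunk up to 512: {}          (image not reached yet)
#         after chunk up to 1024: {0: None}  (encoded for this chunk, then freed)
#         after chunk up to 1200: {0: None}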