[BugFix] Enhance test_pos_encoding to support execution on multi-devices (vllm-project#13187)

Signed-off-by: wchen61 <[email protected]>
wchen61 authored and kerthcet committed Feb 21, 2025
1 parent 06d83cd commit c81caec
Showing 1 changed file with 3 additions and 3 deletions.
tests/kernels/test_pos_encoding.py: 3 additions & 3 deletions
@@ -70,7 +70,7 @@ def test_rotary_embedding(
     if rotary_dim is None:
         rotary_dim = head_size
     rope = get_rope(head_size, rotary_dim, max_position, base, is_neox_style)
-    rope = rope.to(dtype=dtype)
+    rope = rope.to(dtype=dtype, device=torch.get_default_device())
 
     positions = torch.randint(0, max_position, (batch_size, seq_len))
     query_shape = tensor_shape_fn(batch_size, seq_len, num_heads, head_size)
@@ -125,7 +125,7 @@ def test_batched_rotary_embedding(
         "rope_type": "linear",
         "factor": (1, )
     })
-    rope = rope.to(dtype=dtype)
+    rope = rope.to(dtype=dtype, device=torch.get_default_device())
 
     positions = torch.randint(0, max_position, (batch_size, seq_len))
     query_shape = tensor_shape_fn(batch_size, seq_len, num_heads, head_size)
@@ -184,7 +184,7 @@ def test_batched_rotary_embedding_multi_lora(
         "rope_type": "linear",
         "factor": tuple(scaling_factors)
     })
-    rope = rope.to(dtype=dtype)
+    rope = rope.to(dtype=dtype, device=torch.get_default_device())
 
     positions = torch.randint(0, max_position, (batch_size, seq_len))
     query = torch.randn(batch_size,
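
The one-line change, repeated in each of the three tests, addresses a device mismatch: the tensors the tests create (positions, query, key) are allocated on the current default device, while the module returned by get_rope may hold its cos/sin cache on a different device, and casting only the dtype leaves that mismatch in place when the tests target a GPU other than the usual default. Below is a minimal, self-contained sketch of the pattern, not the vLLM test itself; ToyRope and the device-selection line are hypothetical stand-ins, and torch.get_default_device() requires PyTorch 2.3 or newer.

    import torch

    class ToyRope(torch.nn.Module):
        """Stand-in for a rotary-embedding module with a device-resident cache."""

        def __init__(self, dim: int):
            super().__init__()
            # Simulates get_rope() building its cos/sin cache on some fixed
            # device (here CPU) that is not necessarily the device under test.
            self.register_buffer("cos_sin_cache",
                                 torch.randn(16, dim, device="cpu"))

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            # Raises a device-mismatch error if the cache and the input
            # live on different devices.
            return x + self.cos_sin_cache[: x.shape[-2]]

    # The test harness picks the device under test; tensors created without an
    # explicit device= (like the positions/query tensors in the diff above)
    # are allocated there.
    if torch.cuda.is_available():
        torch.set_default_device("cuda:%d" % (torch.cuda.device_count() - 1))

    rope = ToyRope(dim=8)
    # The pattern this commit adopts: cast the dtype AND move the module to
    # the current default device, instead of rope.to(dtype=dtype) alone.
    rope = rope.to(dtype=torch.float32, device=torch.get_default_device())

    x = torch.randn(2, 4, 8)   # allocated on the default device
    print(rope(x).device)      # matches the device under test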
