Skip to content

Commit 5305673

Browse files
authored
fix some typos: supported_head_sizes (vllm-project#14627)
1 parent 4cbf286 commit 5305673

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

vllm/attention/backends/blocksparse_attn.py

+3-3
Original file line numberDiff line numberDiff line change
@@ -335,11 +335,11 @@ def __init__(
335335
self.sparse_block_size = self.blocksparse_params.block_size
336336
self.head_sliding_step = self.blocksparse_params.head_sliding_step
337337

338-
suppored_head_sizes = PagedAttention.get_supported_head_sizes()
339-
if head_size not in suppored_head_sizes:
338+
supported_head_sizes = PagedAttention.get_supported_head_sizes()
339+
if head_size not in supported_head_sizes:
340340
raise ValueError(
341341
f"Head size {head_size} is not supported by PagedAttention. "
342-
f"Supported head sizes are: {suppored_head_sizes}.")
342+
f"Supported head sizes are: {supported_head_sizes}.")
343343

344344
self.tp_size = get_tensor_model_parallel_world_size()
345345
self.tp_rank = get_tensor_model_parallel_rank()

0 commit comments

Comments
 (0)