We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 4cbf286 commit 5305673 — Copy full SHA for 5305673
vllm/attention/backends/blocksparse_attn.py
@@ -335,11 +335,11 @@ def __init__(
335
self.sparse_block_size = self.blocksparse_params.block_size
336
self.head_sliding_step = self.blocksparse_params.head_sliding_step
337
338
- suppored_head_sizes = PagedAttention.get_supported_head_sizes()
339
- if head_size not in suppored_head_sizes:
+ supported_head_sizes = PagedAttention.get_supported_head_sizes()
+ if head_size not in supported_head_sizes:
340
raise ValueError(
341
f"Head size {head_size} is not supported by PagedAttention. "
342
- f"Supported head sizes are: {suppored_head_sizes}.")
+ f"Supported head sizes are: {supported_head_sizes}.")
343
344
self.tp_size = get_tensor_model_parallel_world_size()
345
self.tp_rank = get_tensor_model_parallel_rank()
0 commit comments