Re-enable build of Flash on Windows
ghstack-source-id: a668b3aaa4824bb5b4a670225788468f12493da3
Pull Request resolved: fairinternal/xformers#1167

__original_commit__ = fairinternal/xformers@e9c6199
lw authored and xFormers Bot committed Jul 26, 2024
1 parent 3610a54 commit 8d8463c
Showing 1 changed file with 12 additions and 1 deletion.
xformers/ops/fmha/torch_attention_compat.py (12 additions, 1 deletion)
@@ -3,10 +3,21 @@
 # This source code is licensed under the BSD license found in the
 # LICENSE file in the root directory of this source tree.
 
+import sys
+
 import torch
 from torch._C import parse_schema
 
+try:
+    # This function was added in https://github.com/pytorch/pytorch/pull/131894
+    # (which hadn't landed yet at the time of writing), thus will only arrive in
+    # PyTorch 2.5+. In the meantime we need a fallback.
+    from torch.backends.cuda import is_flash_attention_available
+except ImportError:
+
+    def is_flash_attention_available():
+        return sys.platform == "linux"
 
 
 def is_pt_cutlass_compatible(force: bool) -> bool:
     compatible = True
@@ -57,7 +68,7 @@ def is_pt_cutlass_compatible(force: bool) -> bool:
 
 
 def is_pt_flash_compatible(force: bool) -> bool:
-    if not torch.backends.cuda.flash_sdp_enabled():
+    if not is_flash_attention_available():
         if force:
             raise ImportError("Flash SDP backend is disabled")
         return False
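For context, here is a minimal usage sketch of the function this diff changes; the calling code below is illustrative and not part of the commit. With force=False the check acts as a boolean probe, while force=True turns unavailability into an ImportError, as in the second hunk above. On a pre-2.5 PyTorch the fallback shim reports availability only on Linux; on PyTorch 2.5+ the real torch.backends.cuda.is_flash_attention_available() decides, which is what lets Windows builds that ship the Flash kernels pass the check again.

from xformers.ops.fmha.torch_attention_compat import is_pt_flash_compatible

# Soft probe: returns a bool instead of raising. With a pre-2.5 PyTorch
# the fallback shim reports availability only on Linux, so this is False
# on Windows there.
if is_pt_flash_compatible(force=False):
    print("PyTorch Flash SDP backend is usable")
else:
    print("Flash unavailable; other fMHA backends remain usable")

# Hard requirement: raises ImportError when Flash is unavailable
# (additional compatibility checks below the shown lines may also apply).
try:
    is_pt_flash_compatible(force=True)
except ImportError as exc:
    print(f"Flash required but missing: {exc}")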
