1 parent b4bf9cc commit 23b77c8
flash_attn/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.3.4"
+__version__ = "2.3.5"
 
 from flash_attn.flash_attn_interface import (
     flash_attn_func,
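After upgrading, the bump can be confirmed at import time. A minimal sketch, not part of this commit; the asserted value simply mirrors the new version string in the hunk above:

import flash_attn

# The package exposes the version string set in flash_attn/__init__.py.
assert flash_attn.__version__ == "2.3.5"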
training/Dockerfile
@@ -85,7 +85,7 @@ RUN pip install transformers==4.25.1 datasets==2.8.0 pytorch-lightning==1.8.6 tr
 RUN pip install git+https://github.com/mlcommons/logging.git@2.1.0
 
 # Install FlashAttention
-RUN pip install flash-attn==2.3.4
+RUN pip install flash-attn==2.3.5
 
 # Install CUDA extensions for fused dense, layer norm
 RUN git clone https://github.com/HazyResearch/flash-attention \
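A rebuilt image can be smoke-tested by calling flash_attn_func, which the package's __init__.py re-exports (see the first hunk). A minimal sketch, assuming a CUDA device and the flash-attn 2.x convention of half-precision tensors in (batch, seqlen, nheads, headdim) layout; the shapes are illustrative only:

import torch
from flash_attn import flash_attn_func

# Random half-precision Q/K/V in (batch, seqlen, nheads, headdim) layout.
q = torch.randn(2, 128, 8, 64, dtype=torch.float16, device="cuda")
k = torch.randn(2, 128, 8, 64, dtype=torch.float16, device="cuda")
v = torch.randn(2, 128, 8, 64, dtype=torch.float16, device="cuda")

# Causal attention; the output shape matches q.
out = flash_attn_func(q, k, v, causal=True)
print(out.shape)  # torch.Size([2, 128, 8, 64])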