chore: update flash attention kernels (#1518)
* chore: update flash attention kernels
* fmt
* remove unused kernels
* force f32
* correct stride
1 parent 3a7304c · commit 8d1a57c
Showing 28 changed files with 1,086 additions and 465 deletions.
@@ -0,0 +1,62 @@
#include <cmath>

#include <cute/tensor.hpp>

#include <cutlass/cutlass.h>
#include <cutlass/array.h>

#include "utils.h"

namespace flash {

using namespace cute;

////////////////////////////////////////////////////////////////////////////////////////////////////

template <bool Is_causal, typename Engine, typename Layout>
inline __device__ void apply_alibi(Tensor<Engine, Layout> &tensor,
                                   const int col_idx_offset_,
                                   const int max_seqlen_k,
                                   const int row_idx_offset,
                                   const int max_seqlen_q,
                                   const int warp_row_stride,
                                   const float alibi_slope) {
    // tensor has shape (ncol=(2, MMA_M), nrow=(2, MMA_N))
    static_assert(Layout::rank == 2, "Only support 2D Tensor");
    const int lane_id = threadIdx.x % 32;
    const int col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2;
    if constexpr (Is_causal) {  // Simpler, we add the same bias vector to all rows
        #pragma unroll
        for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
            const int col_idx_base = col_idx_offset + nj * 8;
            #pragma unroll
            for (int j = 0; j < size<1, 0>(tensor); ++j) {
                const int col_idx = col_idx_base + j;
                #pragma unroll
                for (int mi = 0; mi < size<0>(tensor); ++mi) {
                    tensor(mi, make_coord(j, nj)) += alibi_slope * col_idx;
                }
            }
        }
    } else {  // Bias depends on both row_idx and col_idx
        #pragma unroll
        for (int mi = 0; mi < size<0, 1>(tensor); ++mi) {
            const int row_idx_base = row_idx_offset + mi * warp_row_stride;
            #pragma unroll
            for (int i = 0; i < size<0, 0>(tensor); ++i) {
                const int row_idx = row_idx_base + i * 8;
                #pragma unroll
                for (int nj = 0; nj < size<1, 1>(tensor); ++nj) {
                    const int col_idx_base = col_idx_offset + nj * 8;
                    #pragma unroll
                    for (int j = 0; j < size<1, 0>(tensor); ++j) {
                        const int col_idx = col_idx_base + j;
                        tensor(make_coord(i, mi), make_coord(j, nj)) -= alibi_slope * abs(row_idx + max_seqlen_k - max_seqlen_q - col_idx);
                    }
                }
            }
        }
    }
}

}  // namespace flash
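For intuition, the bias this kernel applies matches the standard ALiBi formulation: in the causal case every row receives the same `alibi_slope * col_idx` vector, while in the general case each score is shifted by `-alibi_slope * |row_idx + max_seqlen_k - max_seqlen_q - col_idx|`. Below is a minimal host-side reference in plain C++; the function name and dense output layout are illustrative only (the kernel above applies the same values directly to the score fragments held in registers).

#include <cstdlib>
#include <vector>

// Host-side reference for the ALiBi bias computed by apply_alibi above.
// Illustrative sketch, not part of the kernel sources.
std::vector<float> alibi_bias_reference(int seqlen_q, int seqlen_k,
                                        float slope, bool is_causal) {
    std::vector<float> bias(static_cast<size_t>(seqlen_q) * seqlen_k);
    for (int row = 0; row < seqlen_q; ++row) {
        for (int col = 0; col < seqlen_k; ++col) {
            bias[static_cast<size_t>(row) * seqlen_k + col] =
                is_causal
                    ? slope * col                                         // same bias vector for every row
                    : -slope * std::abs(row + seqlen_k - seqlen_q - col); // depends on both row and column
        }
    }
    return bias;
}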
@@ -1,19 +1,10 @@
// Copyright (c) 2023, Tri Dao.

// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

// template<>
// void run_mha_fwd_<cutlass::bfloat16_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
//     using elem_type = cutlass::bfloat16_t;
//     if (params.p_dropout == 1.f) {
//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, false, elem_type>, false>(params, stream);
//     } else {
//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, true>(params, stream);
//     }
// }
template<>
void run_mha_fwd_<cutlass::bfloat16_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim128<cutlass::bfloat16_t>(params, stream);
}
@@ -1,32 +1,10 @@
// Copyright (c) 2023, Tri Dao.

// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

// template<>
// void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
//     using elem_type = cutlass::half_t;
//     if (params.p_dropout == 1.f) {
//         // Using 8 warps (128 x 128 and 256 x 64) is 28% slower for seqlen=2k
//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, false, elem_type>, false>(params, stream);
//         // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, true, false, elem_type>, false>(params, stream);
//         // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, true, elem_type>, false>(params, stream);
//         // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, true, true, elem_type>, false>(params, stream);
//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, false>(params, stream);
//         run_flash_fwd<Flash_fwd_kernel_traits<128, 64, 64, 4, false, false, elem_type>, false>(params, stream);
//         run_flash_fwd<Flash_fwd_kernel_traits<128, 64, 128, 4, false, false, elem_type>, false>(params, stream);
//         // 1st ones are good for H100, A100
//         // 2nd one is good for A6000 bc we get slightly better occupancy
//     } else {
//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, true>(params, stream);
//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, true, false, elem_type>, true>(params, stream);
//         run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, true, true, elem_type>, true>(params, stream);
//         // 1st one is good for H100, A100, A6000
//     }
// }

template<>
void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim128<cutlass::half_t>(params, stream);
}
@@ -1,17 +1,10 @@
// Copyright (c) 2023, Tri Dao.

// Splitting the different head dimensions to different files to speed up compilation.
// This file is auto-generated. See "generate_kernels.py"

#include "flash_fwd_launch_template.h"

// template<>
// void run_mha_fwd_<cutlass::bfloat16_t, 160>(Flash_fwd_params &params, cudaStream_t stream) {
//     using elem_type = cutlass::bfloat16_t;
//     BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] {
//         run_flash_fwd<Flash_fwd_kernel_traits<160, 128, 32, 4, false, false, elem_type>, Is_dropout>(params, stream);
//     });
// }
template<>
void run_mha_fwd_<cutlass::bfloat16_t, 160>(Flash_fwd_params &params, cudaStream_t stream) {
    run_mha_fwd_hdim160<cutlass::bfloat16_t>(params, stream);
}
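The per-head-dimension files shown here follow the same pattern: the hand-tuned, commented-out `run_flash_fwd` variants are removed, and each explicit `run_mha_fwd_` specialization simply forwards to the shared `run_mha_fwd_hdim*` helper from `flash_fwd_launch_template.h`. A rough sketch of how such specializations might be selected at runtime is given below; the dispatcher name and the `params.d` field are assumptions for illustration, not the repository's actual dispatch code, and the sketch relies on the surrounding flash-attention headers for `Flash_fwd_params`, `cudaStream_t`, and the specializations defined above.

// Hypothetical dispatcher sketch (not the repository's actual code): pick the
// explicit run_mha_fwd_ specialization matching the requested head dimension.
template <typename elem_type>
void dispatch_mha_fwd_sketch(Flash_fwd_params &params, cudaStream_t stream) {
    if (params.d <= 128) {            // assumed head-dimension field, bucketed upward
        run_mha_fwd_<elem_type, 128>(params, stream);
    } else if (params.d <= 160) {
        run_mha_fwd_<elem_type, 160>(params, stream);
    }
    // ... larger head dimensions would be handled by their own translation units.
}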