"""Triton Implementation of the flex_attention Kernel"""

import copy
import logging
import math
from collections.abc import Sequence
from dataclasses import dataclass
from enum import auto, Enum
from typing import Any, Optional, Union

import sympy

import torch
from torch._inductor.virtualized import V
from torch.utils._ordered_set import OrderedSet
from torch.utils._pytree import tree_map
from torch.utils._sympy.numbers import int_oo
from torch.utils._sympy.value_ranges import ValueRanges

from .. import config
from ..ir import (
    Buffer,
    ComputedBuffer,
    ExternKernel,
    FixedLayout,
    FlexibleLayout,
    get_fill_order,
    InputBuffer,
    IRNode,
    MutationLayoutSHOULDREMOVE,
    Scatter,
    StorageBox,
    Subgraph,
    TensorBox,
)
from ..lowering import (
    _full,
    check_and_broadcast_indices,
    empty,
    empty_strided,
    expand,
    index_output_size_and_inner_fn,
    lowerings,
    register_lowering,
    to_dtype,
)
from ..select_algorithm import (
    autotune_select_algorithm,
    realize_inputs,
    SymbolicGridFn,
    TritonTemplate,
)


log = logging.getLogger(__name__)
aten = torch.ops.aten
prims = torch.ops.prims
def construct_strides(
    sizes: Sequence[int],
    fill_order: Sequence[int],
) -> Sequence[int]:
    """From a list of sizes and a fill order, construct the strides of the permuted tensor."""
    # Initialize strides
    assert len(sizes) == len(fill_order), (
        "Length of sizes must match the length of the fill order"
    )
    strides = [0] * len(sizes)

    # Start with stride 1 for the innermost dimension
    current_stride = 1

    # Iterate through the fill order, populating strides
    for dim in fill_order:
        strides[dim] = current_stride
        current_stride *= sizes[dim]

    return strides


def infer_dense_strides(size: Sequence[int], orig_strides: Sequence[int]):
    """This is a mirror of the same function in aten/src/ATen/ExpandUtils.cpp

    Args:
        size: The size of the output tensor
        orig_strides: The strides of the input tensor
    Returns:
        List[int]: Dense non-overlapping strides that preserve the input tensor's layout permutation.
        The returned strides follow the same stride propagation rules as TensorIterator. This matches
        the behavior of empty_like()
    """
    fill_order = get_fill_order(orig_strides, V.graph.sizevars.shape_env)
    return construct_strides(size, fill_order)


@SymbolicGridFn
def flex_attention_grid(batch_size, q_heads, num_queries, d_model, meta, *, cdiv):
    """How is this kernel parallelized?
    We create a grid of (ceil_div(n_queries, query_block_size), batch_size * num_heads, 1)
    Each block is responsible for iterating over blocks of keys and values calculating
    the final attention output.
    """
    return (cdiv(num_queries, meta["BLOCK_M"]), batch_size * q_heads, 1)


def create_placeholder(
    name: str,
    dtype: torch.dtype,
    device: torch.device,
    size: Optional[list[int]] = None,
) -> TensorBox:
    """Creates placeholder input buffers for producing subgraph_output."""
    input_buffer = InputBuffer(
        name=name,
        layout=FixedLayout(
            device,
            dtype,
            size if size else [],
            FlexibleLayout.contiguous_strides(size) if size else [],
        ),
    )
    return TensorBox.create(input_buffer)


def maybe_realize(args: list[Optional[IRNode]]):
    """Accepts a list of optional IRNodes and returns a list of realized IRNodes"""
    return tree_map(
        lambda x: (
            realize_inputs(x)
            if x is not None and not isinstance(x, sympy.Symbol)
            else x
        ),
        args,
    )


def get_float32_precision():
    if (
        torch.get_float32_matmul_precision() == "highest"
        or torch.version.hip
        or torch.mtia.is_available()
    ):
        return "'ieee'"
    else:
        return "'tf32'"
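

# A small illustrative sketch (hypothetical helper, not used by the lowering):
# it shows how a fill order maps to strides in `construct_strides`. The first
# dimension in the fill order gets stride 1, and each later dimension's stride
# is the running product of the sizes filled so far.
def _example_construct_strides() -> None:
    # Innermost-first fill order (3, 2, 1, 0) yields contiguous strides.
    assert construct_strides([2, 4, 8, 16], [3, 2, 1, 0]) == [512, 128, 16, 1]
    # A channels-last-style order makes dim 1 the fastest-moving dimension.
    assert construct_strides([2, 4, 8, 16], [1, 3, 2, 0]) == [512, 1, 64, 4]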
      8   t        d|j                         t        j                  |       }t	        |t
              sJ |j                          |j                         }t        ||j                               }|D cg c]  }||j                         nd  }}t        ||j                               \  }}t        ||d      j                               }t        t        |            D cg c]  }||   	 }	}t        ||||||	d d      \  }
}t!        ||
      }|j                         }|J t#        ||j                         |j                         |
|d      }t%        |j&                  j&                  j(                  t+        |      |      }|S c c}w c c}w )Nr   T)check
atomic_add)rK   rJ   inner_fnrangesoutput_indexerscatter_moderI   rM   data)r   
get_devicer_   float32rV   r   realizeget_sizer'   	get_dtypemake_loaderr    listranger1   r$   r#   r   r   ro   rI   r   )rf   indicesvaluesgradx_sizeiindices_loaderstensor_indicestensor_sizeindexed_sizeexpected_vals_sizerj   rK   scatterbuffers                  r5   zeros_and_scatter_loweringr      s   F%%'>DdI&&&LLN]]_Ffdnn./FKRSa!-q}}TASOS9'4??CTUG^w~a01::<=K',S\':;!F1I;L;#A	$  F./F__Fnn##%!!G YY^^  )$/F
 ME T <s   ;F*Fgraph_modulec                 b   ddl m}  ||t        j                  t	        t
        j                  j                  j                  j                  g      t
        j                  j                  j                  j                  t        i      }t        j                  |      5   |j                  |   ddd       t        |j                        dkD  r0|j                  D ]!  }t        j                  j                  |       # dt         t"           fd}t%        ||j&                        S # 1 sw Y   xxY w)af  This function's goal is to take in the required args and produce the subgraph buffer
    The subgraph buffer is a ComputedBuffer that will be inlined into the triton template

    Args:
        args: The args that are passed into the subgraph. Contains both fixed and lifted inputs.
        subgraph: The Subgraph ir for which to produce the output node
    """
    from ..subgraph_lowering import PointwiseSubgraphLowering

    pw_subgraph = PointwiseSubgraphLowering(
        graph_module,
        root_graph_lowering=V.graph,
        allowed_mutations=OrderedSet([torch.ops.flex_lib.zeros_and_scatter.default]),
        additional_lowerings={
            torch.ops.flex_lib.zeros_and_scatter.default: zeros_and_scatter_lowering
        },
    )
    with V.set_graph_handler(pw_subgraph):  # type: ignore[arg-type]
        pw_subgraph.run(*args)

    # Since we are allowing mutations/buffer creation, we need to register any
    # fresh buffers created during the pointwise subgraph lowering.
    if len(pw_subgraph.buffers) > 0:
        for buffer in pw_subgraph.buffers:
            V.graph.register_buffer(buffer)

    def convert_output_node_to_buffer(output_buffer) -> Optional[ComputedBuffer]:
        if output_buffer is None:
            return None
        if isinstance(output_buffer, ComputedBuffer):
            # These nodes are coming from the output of zeros_and_scatter
            return output_buffer
        assert isinstance(output_buffer, TensorBox), (
            "The output node for flex attention's subgraph must be a TensorBox, but got: ",
            type(output_buffer),
        )
        assert isinstance(output_buffer.data, StorageBox), (
            "The output node for the flex attention subgraph must be a StorageBox, but got: ",
            type(output_buffer),
        )
        subgraph_buffer = ComputedBuffer(
            name=None,
            layout=FlexibleLayout(
                device=output_buffer.data.get_device(),
                dtype=output_buffer.data.get_dtype(),
                size=output_buffer.data.get_size(),
            ),
            data=output_buffer.data.data,  # type: ignore[arg-type]
        )
        return subgraph_buffer

    return tree_map(convert_output_node_to_buffer, pw_subgraph.graph_outputs)


def build_subgraph_buffer(
    args: list[TensorBox], subgraph: Subgraph
) -> SubgraphResults:
    return build_subgraph_module_buffer(args, subgraph.graph_module)


compute_next_offset_func = r"""
@triton.jit
def get_offset_for_next_block(
    loop_iter, col_indices, total_blocks,
    SPARSE_BLOCK, SPARSE_BLOCK_MULTIPLE, BLOCK,
    BLOCKS_ARE_CONTIGUOUS: tl.constexpr
):
    if BLOCKS_ARE_CONTIGUOUS:
        return BLOCK
    cur_block_idx = loop_iter // SPARSE_BLOCK_MULTIPLE
    cur_block = tl.load(col_indices + cur_block_idx, eviction_policy="evict_last")
    next_block = tl.load(col_indices + cur_block_idx + 1, eviction_policy="evict_last", mask=cur_block_idx + 1 < total_blocks)
    needs_jump = (loop_iter + 1) % SPARSE_BLOCK_MULTIPLE == 0
    jump_to_block = (next_block - cur_block ) * SPARSE_BLOCK - (SPARSE_BLOCK_MULTIPLE - 1) * BLOCK
    offset = jump_to_block * needs_jump + (1 - needs_jump) * BLOCK
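    # Worked example (illustrative): with SPARSE_BLOCK=128 and BLOCK=32,
    # SPARSE_BLOCK_MULTIPLE is 4, so three of every four iterations advance by
    # BLOCK (32) and every fourth jumps (next_block - cur_block) * 128 - 96
    # elements, landing at the start of the next sparse block in col_indices.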
    return offset
"""

get_bounded_indices_func = r"""
@triton.jit
def get_bounded_indices(indices, max_len=None):
    return indices % max_len if max_len is not None else indices
"""

load_checked_block = r"""
@triton.jit
def load_checked_block(block_ptr, IS_DIVISIBLE: tl.constexpr, SAFE_HEAD_DIM: tl.constexpr):
  if IS_DIVISIBLE and SAFE_HEAD_DIM:
    return tl.load(block_ptr)
  elif IS_DIVISIBLE and not SAFE_HEAD_DIM:
    return tl.load(block_ptr, boundary_check=(1,), padding_option="zero")
  elif not IS_DIVISIBLE and SAFE_HEAD_DIM:
      return tl.load(block_ptr, boundary_check=(0,), padding_option="zero")
  else:
      return tl.load(block_ptr, boundary_check=(0, 1), padding_option="zero")
"""

load_checked_2d = r"""
@triton.jit
def load_checked_2d(
    ptr,
    offs_m,
    offs_n,
    stride_m,
    stride_n,
    IS_DIVISIBLE_M: tl.constexpr,
    IS_DIVISIBLE_N: tl.constexpr,
    M_LEN: tl.constexpr,
    N_DIM: tl.constexpr,
):
    # Calculate final pointer if strides are provided
    if stride_m is not None and stride_n is not None:
        ptr = ptr + offs_m[:, None] * stride_m + offs_n[None, :] * stride_n

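    # Example (illustrative): the backward kernel loads a (BLOCK_M2, head-dim)
    # tile of Q via load_checked_2d(Q2, offs_m2, offs_k, stride_qm, stride_qd, ...),
    # masking away rows past Q_LEN and columns past the true head dim.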
    # Handle all masking cases
    if not IS_DIVISIBLE_M and not IS_DIVISIBLE_N:
        return tl.load(ptr, mask=(offs_m[:, None] < M_LEN) & (offs_n[None, :] < N_DIM), other=0.0)
    elif IS_DIVISIBLE_M and not IS_DIVISIBLE_N:
        return tl.load(ptr, mask=(offs_n[None, :] < N_DIM), other=0.0)
    elif not IS_DIVISIBLE_M and IS_DIVISIBLE_N:
        return tl.load(ptr, mask=(offs_m[:, None] < M_LEN), other=0.0)
    else:  # Both divisible
        return tl.load(ptr)
"""

compute_flex_attention = r"""
{{def_kernel("Q", "K", "V", "LSE", "KV_NUM_BLKS", "KV_IDX", "FULL_KV_NUM_BLKS", "FULL_KV_IDX")}}
    # Sub notation for this kernel:
    #
    # Q: Query, K: Key, V: Value
    # M: Number of queries, N: Number of keys/values, D: Model dimension
    # QK_HEAD_DIM: The dimension of the query and key embeddings
    # V_HEAD_DIM: The dimension of the value embeddings
    # z: Batch size, h: Number of heads, m: Number of queries per head, k: Number of keys per head
    # GQA_SHARED_HEADS: number of query heads sharing one kv head in GQA setups.
    #
    # The following FULL_* and PARTIAL_* is defined in the block sparse mask grid, rather than the thread block grid.
    # KV_NUM_BLKS: The number of KV blocks (that may or may not require masking) for each query.
    # KV_IDX: The indices of KV blocks (that may or may not require masking) for each query.
    # FULL_KV_NUM_BLKS: The number of fully unmasked KV blocks (so we don't need masking) for each query.
    # FULL_KV_IDX: The indices of fully unmasked KV blocks (so we don't need masking) for each query.
    #
    # OUTPUT_LOGSUMEXP: We only need to store the logsumexp if we require grad
    #
    # (Modifiable) Performance tuning options
    # BLOCK_M: The thread block size across the seqlen dim of Q.
    # BLOCK_N: Iterate over BLOCK_N across the seqlen dim of K/V in each thread block.

    # The below are kernel options that can be applied for certain score_mods,
    # or involve a numerics vs. perf tradeoff
    # PRESCALE_QK: Whether to pre-scale QK by 1/sqrt(d) and change of base. Has
    # about 20% more numerical error, but slightly faster.
    # ROWS_GUARANTEED_SAFE: Is it guaranteed that at least one value in each row
    # is not masked out? If so, we can skip an extra safety check
    # BLOCKS_ARE_CONTIGUOUS: Is it guaranteed that all blocks in the mask are
    # contiguous? If so, we don't need to do an indirect jump for every block

    tl.static_assert(SPARSE_Q_BLOCK_SIZE >= BLOCK_M and SPARSE_Q_BLOCK_SIZE % BLOCK_M == 0)
    tl.static_assert(SPARSE_KV_BLOCK_SIZE >= BLOCK_N and SPARSE_KV_BLOCK_SIZE % BLOCK_N == 0)

    # Define strides of inputs
    stride_qz, stride_qh, stride_qm, stride_qk = {{stride("Q")}}
    stride_kz, stride_kh, stride_kn, stride_kk = {{stride("K")}}
    stride_vz, stride_vh, stride_vn, stride_vk = {{stride("V")}}

    ZQ = {{size("Q", 0)}}
    HQ = {{size("Q", 1)}}
    Q_LEN = {{size("Q", 2)}}
    ZKV = {{size("K", 0)}}
    KV_LEN = {{size("K", 2)}}

    MATMUL_PRECISION = Q.dtype.element_ty

    q_start = tl.program_id(0)
    off_zq = tl.program_id(1) // HQ
    off_hq = tl.program_id(1) % HQ

    # We support two cases for batch dimension. a) (ZKV == ZQ) where off_zkv = off_zq.
    # b) (ZKV == 1 and ZQ > 1) where KV is broadcasted along the batch dimension and off_zkv=0.
    off_zkv = off_zq % ZKV
    off_hkv = off_hq // GQA_SHARED_HEADS
    off_g = off_hq % GQA_SHARED_HEADS

    q_offset = off_zq * stride_qz + off_hq * stride_qh
    k_offset = off_zkv * stride_kz + off_hkv * stride_kh
    v_offset = off_zkv * stride_vz + off_hkv * stride_vh

    Q = Q + q_offset
    K = K + k_offset
    V = V + v_offset

    SPARSE_Z = {{size("KV_NUM_BLKS", 0)}}
    SPARSE_HQ = {{size("KV_NUM_BLKS", 1)}}

    sparse_idx_z = off_zq % SPARSE_Z
    sparse_idx_hq = off_hq % SPARSE_HQ

    SPARSE_Q_MULTIPLE: tl.constexpr = (SPARSE_Q_BLOCK_SIZE // BLOCK_M)
    SPARSE_KV_MULTIPLE: tl.constexpr = (SPARSE_KV_BLOCK_SIZE // BLOCK_N)

    stride_kv_num_blks_h = {{stride("KV_NUM_BLKS", 1)}}
    stride_kv_idx_h = {{stride("KV_IDX", 1)}}
    stride_kv_idx_m = {{stride("KV_IDX", 2)}}

    # initialize pointer to m and l
    m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf")
    l_i = tl.zeros([BLOCK_M], dtype=tl.float32)
    acc = tl.zeros([BLOCK_M, V_HEAD_DIM_ROUNDED], dtype=tl.float32)

    offs_m = q_start * BLOCK_M + tl.arange(0, BLOCK_M)

    # KV_IDX and KV_NUM_BLKS are always contiguous.
    sparse_hz_offset = sparse_idx_z * SPARSE_HQ + sparse_idx_hq
    sparse_kv_num_blks_offset = sparse_hz_offset * stride_kv_num_blks_h + q_start // SPARSE_Q_MULTIPLE
    sparse_kv_idx_offset = sparse_hz_offset * stride_kv_idx_h + (q_start // SPARSE_Q_MULTIPLE) * stride_kv_idx_m  # noqa: B950
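
    # Example (illustrative): with SPARSE_Q_BLOCK_SIZE=128 and BLOCK_M=64,
    # SPARSE_Q_MULTIPLE is 2, so thread blocks q_start=2k and 2k+1 share the
    # same row of the block-sparse metadata.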

    Q_block_ptr = tl.make_block_ptr(
        base=Q,
        shape=(Q_LEN, QK_HEAD_DIM),
        strides=(stride_qm, stride_qk),
        offsets=(q_start * BLOCK_M, 0),
        block_shape=(BLOCK_M, QK_HEAD_DIM_ROUNDED),
        order=(1, 0)
    )
    q = load_checked_block(Q_block_ptr, IS_DIVISIBLE, SAFE_HEAD_DIM)
    # ~~~~~~~~~~~~~~ normal blocks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # We don't know anything "special" about these blocks, so we need to apply
    # both score_mod and mask_mod to it
    kv_indices = KV_IDX + sparse_kv_idx_offset
    kv_start = tl.load(kv_indices) * SPARSE_KV_BLOCK_SIZE # first kv block we're loading
    kv_num_blocks = tl.load(KV_NUM_BLKS + sparse_kv_num_blks_offset)
    block_n_end = tl.minimum(kv_num_blocks * SPARSE_KV_MULTIPLE, tl.maximum(tl.cdiv(KV_LEN, BLOCK_N), 1))

    K_block_ptr = tl.make_block_ptr(
        base=K,
        shape=(QK_HEAD_DIM, KV_LEN),
        strides=(stride_kk, stride_kn),
        offsets=(0, kv_start),
        block_shape=(QK_HEAD_DIM_ROUNDED, BLOCK_N),
        order=(0, 1)
    )
    V_block_ptr = tl.make_block_ptr(
        base=V,
        shape=(KV_LEN, V_HEAD_DIM),
        strides=(stride_vn, stride_vk),
        offsets=(kv_start, 0),
        block_shape=(BLOCK_N, V_HEAD_DIM_ROUNDED),
        order=(1, 0)
    )
    offs_n = kv_start + tl.arange(0, BLOCK_N)

    acc, l_i, m_i = forward_inner(
        {{gen_argdefs()}},
        q, K_block_ptr, V_block_ptr, Q_LEN, KV_LEN,
        acc, l_i, m_i,
        off_zq, off_hq, offs_m[:, None], offs_n[None, :],
        kv_indices, kv_num_blocks,
        0, block_n_end,
        MATMUL_PRECISION,
        IS_FULL_BLOCKS=False,
    )

    # ~~~~~~~~~~~~~~ "full" blocks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # We know these blocks are guaranteed to be "full", so we don't need to
    # apply mask_mod to them - only score_mod
    if HAS_FULL_BLOCKS:
        # FULL_KV_IDX and FULL_KV_NUM_BLKS are always contiguous.
        kv_indices = FULL_KV_IDX + sparse_kv_idx_offset
        kv_start = tl.load(kv_indices) * SPARSE_KV_BLOCK_SIZE # first kv block we're loading
        kv_num_blocks = tl.load(FULL_KV_NUM_BLKS + sparse_kv_num_blks_offset)
        block_n_end = tl.minimum(kv_num_blocks * SPARSE_KV_MULTIPLE, tl.maximum(tl.cdiv(KV_LEN, BLOCK_N), 1))

        K_block_ptr = tl.make_block_ptr(
            base=K,
            shape=(QK_HEAD_DIM, KV_LEN),
            strides=(stride_kk, stride_kn),
            offsets=(0, kv_start),
            block_shape=(QK_HEAD_DIM_ROUNDED, BLOCK_N),
            order=(0, 1)
        )
        V_block_ptr = tl.make_block_ptr(
            base=V,
            shape=(KV_LEN, V_HEAD_DIM),
            strides=(stride_vn, stride_vk),
            offsets=(kv_start, 0),
            block_shape=(BLOCK_N, V_HEAD_DIM_ROUNDED),
            order=(1, 0)
        )
        offs_n = kv_start + tl.arange(0, BLOCK_N)

        acc, l_i, m_i = forward_inner(
            {{gen_argdefs()}},
            q, K_block_ptr, V_block_ptr, Q_LEN, KV_LEN,
            acc, l_i, m_i,
            off_zq, off_hq, offs_m[:, None], offs_n[None, :],
            kv_indices, kv_num_blocks,
            0, block_n_end,
            MATMUL_PRECISION,
            IS_FULL_BLOCKS=True,
        )


    # [Note] Handle fully masked out rows:
    # Li will be the sum(e^(-inf)) == 0.0 for masked out rows, mi will be -inf.
    # We set Li to 1.0 which will result in lse/out = 0.0 | after the log(li) + mi(0.0) step
    l_i = tl.where(l_i == 0.0, 1, l_i)
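    # Example (illustrative): if every key in a row is masked, p is all zeros,
    # so acc stays 0 and the substitution l_i = 1 makes acc / l_i produce 0.0
    # instead of NaN.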

    acc = acc / l_i[:, None]
    idx_zq = tl.program_id(1) // HQ
    idx_hq = tl.program_id(1) % HQ
    idx_m = offs_m[:, None]
    idx_d = tl.arange(0, V_HEAD_DIM_ROUNDED)[None, :]

    mask = (idx_m < Q_LEN) & (idx_d < V_HEAD_DIM)

    {{store_output(("idx_zq", "idx_hq", "idx_m", "idx_d"), "acc", "mask")}}

    if OUTPUT_LOGSUMEXP:
        off_hz = tl.program_id(1)
        l_ptrs = LSE + off_hz * Q_LEN + offs_m
        lse = m_i + tl.math.log2(l_i)
        if IS_DIVISIBLE:
            tl.store(l_ptrs, lse)
        else:
            tl.store(l_ptrs, lse, mask=offs_m < Q_LEN)
"""

compute_forward_inner = r"""
@triton.jit
def forward_inner(
    {{gen_argdefs()}},
    q, K_block_ptr, V_block_ptr, Q_LEN, KV_LEN,
    # accumulated values
    acc, l_i, m_i,
    # Offsets used as inputs to score_mod & mask_mod
    # of size [BLOCK_M, BLOCK_N] or scalar.
    off_z, off_h, offs_m, offs_n,
    # blocksparse data
    kv_indices, kv_num_blocks,
    # start kv and end kv block
    block_n_start, block_n_end,
    MATMUL_PRECISION,
    IS_FULL_BLOCKS,
):
    # Redefines all kernel parameters (BLOCK_M, etc.) so we don't need to plumb them all through
    {{gen_defines() | indent_except_first(1)}}

    SPARSE_KV_MULTIPLE: tl.constexpr = (SPARSE_KV_BLOCK_SIZE // BLOCK_N)
    RCP_LN2: tl.constexpr = 1.44269504

    if PRESCALE_QK:
        q = (q * SM_SCALE * RCP_LN2).to(MATMUL_PRECISION)

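    # Note (illustrative): PRESCALE_QK folds the softmax scale and the base-2
    # change of base into q once, instead of rescaling every (BLOCK_M, BLOCK_N)
    # tile of scores; see the numerics caveat in the kernel header.
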
    # loop over k, v and update accumulator until block_n_end
    for start_n in range(block_n_start, block_n_end):
        if IS_DIVISIBLE:
            acc, l_i, m_i = forward_block_mn(
                {{gen_argdefs()}},
                q, K_block_ptr, V_block_ptr, Q_LEN, KV_LEN,
                # accumulated values
                acc, l_i, m_i,
                # Offsets
                off_z, off_h, offs_m, offs_n,
                MATMUL_PRECISION, RCP_LN2,
                IS_FULL_BLOCKS,
            )
        else:
            # Benchmarks show that even when we apply mod & mask to every block
            # for a non-divisible seqlen, it's on par with or slightly faster than
            # applying them only to the last block in fwd. However, we choose a
            # different strategy for bwd, where we apply mod & mask only to the
            # last block, because there it's a lot faster.
            acc, l_i, m_i = forward_block_mn(
                {{gen_argdefs()}},
                q, K_block_ptr, V_block_ptr, Q_LEN, KV_LEN,
                # accumulated values
                acc, l_i, m_i,
                # Offsets
                off_z, off_h, offs_m, offs_n,
                MATMUL_PRECISION, RCP_LN2,
                IS_FULL_BLOCKS, CHECK_BLOCK_BOUNDARY=True,
            )

        # update pointers
        offset = get_offset_for_next_block(
            start_n, kv_indices, kv_num_blocks,
            SPARSE_KV_BLOCK_SIZE, SPARSE_KV_MULTIPLE, BLOCK_N, BLOCKS_ARE_CONTIGUOUS
        )

        V_block_ptr = tl.advance(V_block_ptr, (offset, 0))
        K_block_ptr = tl.advance(K_block_ptr, (0, offset))

        offs_n = offs_n + offset

    return acc, l_i, m_i

"""

compute_forward_block_mn = r"""
@triton.jit
def forward_block_mn(
    {{gen_argdefs()}},
    q, K_block_ptr, V_block_ptr, Q_LEN, KV_LEN,
    # accumulated values
    acc, l_i, m_i,
    # Offsets
    off_z, off_h, offs_m, offs_n,
    MATMUL_PRECISION, RCP_LN2,
    IS_FULL_BLOCKS, CHECK_BLOCK_BOUNDARY=False,

):
    # Redefines all kernel parameters (BLOCK_M, etc.) so we don't need to plumb them all through
    {{gen_defines() | indent_except_first(1)}}

    # -- load k --
    # NB reversed order since K is transposed
    k = load_checked_block(K_block_ptr, SAFE_HEAD_DIM, IS_DIVISIBLE)
    # -- compute qk ---
    qk = tl.dot(q, k, input_precision=FLOAT32_PRECISION) # TODO: use cuda matmul when q_len <= 2.
    if not PRESCALE_QK:
        qk *= SM_SCALE
    # ~~~~~~~~~~~~~~~~~~~ Apply score modification  ~~~~~~~~~~~~~~~~~~~
    # If this is the last block of a non divisible seqlen, we still need to load [BLOCK_M, BLOCK_N] elements,
    # which is larger than the actual number of elements. To avoid access memory out of bound,
    # we need to mask out the elements that are out of Q_LEN & KV_LEN.
    m = get_bounded_indices(offs_m, Q_LEN if CHECK_BLOCK_BOUNDARY else None)
    n = get_bounded_indices(offs_n, KV_LEN if CHECK_BLOCK_BOUNDARY else None)

    {{ modification(
        subgraph_number=0,
        output_name="post_mod_scores",
        score="qk",
        b="off_z",
        h="off_h",
        m="m",
        n="n",
        out="qk"
    ) | indent_except_first(1) }}

    if CHECK_BLOCK_BOUNDARY:
        # Mask out the elements that are out of the KV_LEN for non divisible seqlen.
        post_mod_scores = tl.where(offs_n < KV_LEN, post_mod_scores, float("-inf"))

    if not IS_FULL_BLOCKS:
        {{ modification(
            subgraph_number=1,
            output_name="mask_mod_output",
            score="qk",
            b="off_z",
            h="off_h",
            m="m",
            n="n",
        ) | indent_except_first(2) }}

        if CHECK_BLOCK_BOUNDARY:
            mask_mod_output = tl.where(offs_n < KV_LEN, mask_mod_output, False)
        # apply mask for partially unmasked blocks
        post_mod_scores = tl.where(mask_mod_output, post_mod_scores, float("-inf"))

    if not PRESCALE_QK:
        post_mod_scores *= RCP_LN2
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    # -- compute scaling constant ---
    m_ij = tl.maximum(m_i, tl.max(post_mod_scores, 1))
    if not ROWS_GUARANTEED_SAFE:
        masked_out_rows = (m_ij == float("-inf"))
        m_ij_masked = tl.where(masked_out_rows, 0, m_ij)
    else:
        m_ij_masked = m_ij

    alpha = tl.math.exp2(m_i - m_ij_masked)
    p = tl.math.exp2(post_mod_scores - m_ij_masked[:, None])
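
    # Note (illustrative): scores are kept in base-2 space (hence RCP_LN2 and
    # log2 elsewhere), so exp2 replaces exp; exp2 lowers to a fast hardware
    # instruction and exp(x) == exp2(x * log2(e)).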

    # NB: l_i update is pulled up here since it's a bit faster
    # NB: For headdim=256, it's faster to move it back down to after m_i =
    # m_ij
    l_i = l_i * alpha + tl.sum(p, 1)
    # # -- scale and update acc --
    acc = acc * alpha[:, None]
    v = load_checked_block(V_block_ptr, IS_DIVISIBLE, SAFE_HEAD_DIM)
    acc = tl.dot(p.to(MATMUL_PRECISION), v, acc, input_precision=FLOAT32_PRECISION)

    # -- update m_i
    m_i = m_ij

    return acc, l_i, m_i

"""

flex_attention_template = TritonTemplate(
    name="flex_attention",
    grid=flex_attention_grid,
    source=compute_flex_attention
    + compute_forward_inner
    + compute_next_offset_func
    + compute_forward_block_mn
    + load_checked_block
    + get_bounded_indices_func,
)
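

# A minimal self-check sketch (hypothetical, illustrative only): the assembled
# forward source should contain the main kernel plus each @triton.jit helper
# exactly once, mirroring the concatenation order in `flex_attention_template`.
def _example_forward_template_source() -> None:
    src = (
        compute_flex_attention
        + compute_forward_inner
        + compute_next_offset_func
        + compute_forward_block_mn
        + load_checked_block
        + get_bounded_indices_func
    )
    for helper in (
        "def forward_inner(",
        "def forward_block_mn(",
        "def get_offset_for_next_block(",
        "def load_checked_block(",
        "def get_bounded_indices(",
    ):
        assert src.count(helper) == 1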
      "   |j                  dd      }t        j                  j                  j	                  t        j                  | j                         d   d            }t        j                  j                  j	                  t        j                  | j                         d   d            }t        | j                         d   t        t
        j                  f      }t        | j                         d   t        t
        j                  f      }|rOt        j                  j                  j	                  t        j                  |j                         d   d            }	nt        j                  j                  j	                  t        j                  t        j                  |j                         d   d      t        j                  |j                         d   | j                         d                     }	| xr |xr |xr
 |xr |xr |	S )a;  Decide which kernel to use, return true if use flex decoding kernel.
    Note:
       Since the number of splits is calculated based of the the number of batch and head dims
       we need to ensure that the batch and head dims are statically known. Otherwise we just
       use the main flex_attention kernel.
    FORCE_USE_FLEX_ATTENTIONF   r   r0   )getr
   r;   r<   evaluate_exprrW   Ltrs   GtrV   intIntegerEqOr)
query
kv_indiceskernel_options
enable_gqa
force_flexshort_query_lengthnon_zero_lengthstatic_batchstatic_num_headsvalid_block_mask_num_headss
             r5   _use_flex_decodingr     s     ##$>FJ))77!"%s+ gg&&44UXXenn>Nr>RTU5VWOenn.q1C3GHL!%.."21"5U]]7KL
 &'WW%5%5%C%CHHZ((*1-q1&
" &'WW%5%5%C%CHH,,.q115,,.q15>>3CA3FG&
"  	'	'	' 	' 		'
 'r7   @   )r             r   )r   r   r   r      )r   r   r   r   r   r   r   r   )r   r      r   r   r   r   r   )r   r   r   r   )r      r   r   r   r   r   r   )r   r   r   r0   )r   r   r   r0   r   r   r   r0   )r   r   r   r0   )r   r   r   r0   c                   (    e Zd Z e       Z e       Zy)ModeN)__name__
__module____qualname__r   fwdbwdrA   r7   r5   r   r     s    
&C
&Cr7   r   modec                    | j                         }t        j                  j                  j	                  | j                         d         }d }|t        j                  k(  rR|dk  r2|t        j                  k(  rd}nd}t        j                  ||f|      }|S |t        j                  k(  rd}|S d}|S |t        j                  k(  sJ |t        j                  k(  ry|dk  r|dk(  ry|d	k(  ry
yy)Nr   )r   r   r   r0   r   )r   r   r   r0   )r   r   r   r0   r   r   r   r0   r   r   )r   r   r   r0   )rt   r
   r;   r<   evaluate_static_shapers   r   r   r_   rq   _rocm_default_configr   r   )r   r   rJ   head_dim
fwd_configs        r5   _get_rocm_configr     s    OOEww55enn6Fr6JKHJtxxs?%+
,
-115(2CZPJ 	 %+
  ,
txxEMM!!_2~%S&%!r7   c                    | j                         }t        j                  j                  j	                  | j                         d         }d }d }t        j                  j                         }|t        j                  k(  rv|dk  rV|t        j                  k(  rd}nd}|dk\  rt        j                  ||f|      }|S |dk\  rt        j                  ||f|      }|S |t        j                  k(  rd}|S d}|S |t        j                  k(  sJ |t        j                  k(  rd	}|S |dk  r|dk\  r|d
k(  rd}|S |dk(  rd}|S d}|S |dk\  r&|d
k\  rd}|S |dk(  r|d   dk(  rdnd}d
d
d|f}|S d}|S d	}|S )Nr   r   r   r   r   r   r   )	   r   )r   r   )r   r   r   r   r   r   r   r   )r   r   r   r   )r   r   r   r   )r   r   r   r   r   r   r   r   )rt   r
   r;   r<   r   rs   r_   cudaget_device_capabilityr   r   rq   _h100_default_configr   _a100_default_configr   )r   r   rJ   r   r   
bwd_config
capability
num_stagess           r5   _get_nv_configr   2  s   OOEww55enn6Fr6JKHJJ113Jtxxs?%+
,
V#155uh6GT
  v%155uh6GT
 	 %+
  ,
 txxEMM!'J& % _v!52~+
   S,
  ,
  6!2~,
  S",R.A"5Q1
 "a4

  ,
  (Jr7   c                     t         j                  j                  t        | t        j
                        S t        | t        j
                        S N)r   )r_   ra   rb   r   r   r   r   r   s    r5   _get_default_config_fwdr   c  4    }} e$((33DHH55r7   c                     t         j                  j                  t        | t        j
                        S t        | t        j
                        S r   )r_   ra   rb   r   r   r   r   r   s    r5   _get_default_config_bwdr   j  r   r7   c                 4     dt         j                  f fd}|S )Nr.   c                 l   t         j                  j                  j                  j                  d         }| j                         D cg c]+  }t         j                  j                  j                  |      - }}t        j                  ||| j                         | j                               S c c}w )Nr   rJ   rK   )
r
   r;   r<   	size_hintrf   rs   r_   fullrt   rp   )rY   num_blocks_for_autotuningr|   r8   sparse_indicess       r5   create_num_blocks_fakez@create_num_blocks_fake_generator.<locals>.create_num_blocks_fake|  s    $%GG$4$4$>$>~?S?STV?W$X!78zz|D!  **1-DDzz%++-<<>	
 	
 Es   
0B1)r_   Tensor)r   r   s   ` r5    create_num_blocks_fake_generatorr   q  s    
U\\ 
 "!r7   c                 F   | j                         D cg c]+  }t        j                  j                  j	                  |      - }}t        j                  d|d   | j                         | j                               }|j                  |      j                         }|S c c}w )Nr   r   r   )rs   r
   r;   r<   r   r_   arangert   rp   r#   
contiguous)rY   r|   r8   rx   s       r5   create_indices_faker     sv    34::<@aAGG&&q)@D@ll1d2hakkmALLNSGnnT"--/GN As   0B)create_flex_decoding_kernel)CppFlexAttentionTemplatec                      dd l } dd l}t        j                  j	                         xr | j                  d      dk7  }|xr1 t        j                  j                          xr |j                  dk(   }|S )Nr   ATEN_CPU_CAPABILITYr   darwin)	ossysr_   cpu_is_avx2_supportedgetenvxpurd   platform)r   r   requires_avx2_on_cpu	supporteds       r5   check_cpu_supportedr    sq     			$$&X2995J+Ky+X  	 	)		&&((	)(( 
 r7   c           
          | j                         }|rR|d   dk7  rJt        t        t        t	        | j                                                 }t        j                  | |      S | S )zJEnsure that realized IR node has a contigous stride in the last dimension.r   r0   )maybe_get_striderv   reversedrw   r1   rs   r   require_stride_order)rY   r2   contiguous_stride_orders      r5   contiguous_last_dimr    sW      "G72;!#"&xc!**,6G0H'I"J004KLLHr7   c	                   >?@ |\  }	}	}
}}}}}}}}}}|d   rt        d      t               st        d      g }t        j                  j                  j
                  j                         j                  j                  ?t        j                  j                  j
                  j                         j                  j                  >t        j                  j                  j
                  }t        dt              |j                  ?<   t        dt              |j                  ><   t        j                  @d@?>gfdt        j                  g fdt        j                  g fdt        j                  ?d	gfd
t        j                  d	>gffD cg c]"  \  }}}t        ||| j!                         |      $ }}}}t#        |t%        |      z   |      }|;t'        |t$              r|D ]  }||j)                           n|j)                          d@?>gfdt        j                  g fdt        j                  g fdt        j                  ?d	gfd
t        j                  d	>gffD cg c]"  \  }}}t        ||| j!                         |      $ }}}}>?@fd} ||      }t+        |t%        |      z   |      } t        j                  j                  j
                  j,                  }!|!D "cg c]  }"|"?>fvs
|" c}"t        j                  j                  j
                  _        |t%        |      z   |z   t%        |      z   }#|#D ]8  }$t'        |$t.              s|j1                  |$j2                  j2                         : t5        t6        | ||g      \  } }}t9        | |||
|||||||g      \  } }}}
}}}}}}}t;        t=        | j?                         |j?                         |j?                         g            dk7  rt        d      | jA                         t        j                  t        jB                  t        jD                  fvrt        d| jA                          d      t9        |      }t9        |      }| jG                         \  }%}&}'}(|jG                         \  })}*}+},|%}-|-|&|'|,g}.tI        |.| jK                               }/tM        | j!                         | jA                         |-|&|'|,g|/D 0cg c]  }0tO        jP                  |0       c}0      }1g }2| |||
|g}3|sd}4nd}4|3|gz  }3d}5i }6|s|rd}5d|fd|ffD ]8  \  }7}8|6jS                  tU        |8      D 9:ci c]  \  }9}:|7 d|9 |: c}:}9       : |3|6jW                         D cg c]  }t'        |tN        jX                        s|  c}z  }3|j[                  dd      };t        j                  j                  j]                  |      }t        j                  j                  j]                  |      }t        j                  j                  j_                  tO        j`                  |'tO        jb                  |jG                         d   |                  sJ d       t        j                  j                  j_                  tO        j`                  |+tO        jb                  |jG                         d   |                  sJ d       te        jf                  |2|3|1||;rd n||;rd n| ||5|4|t;        |      t;        |      |6?>f       | |g}<ti        d|2|<|1      }=|=fS c c}}}w c c}}}w c c}"w c c}0w c c}:}9w c c}w )NOUTPUT_LOGSUMEXPzStorch.compile on CPU only supports inference and `return_lse` is not supported yet.z;torch.compile on current platform is not supported for CPU.r   scorebhq_idxr0   kv_idxc                 X   t        j                  | j                        }|j                  }|j	                  t        t        |j                                    5  |j                  d      }d d d        d }|j                  D ]  }|j                  dk(  s|} n |J |j                  d   }g}|j                  |      5  |j                  t        j                  |t        d       fdi      }d d d        |j                        5  |j                  t        j                   j"                  j$                  ||f      }	d d d        	f|_
        |j'                          t        j(                  j+                  ||      }
|
S # 1 sw Y   xY w# 1 sw Y   xY w# 1 sw Y   \xY w)Nqk_dataoutputr   infrJ   )rR   kwargsr[   )copydeepcopyr   r;   inserting_beforenextiternodesplaceholderoprR   inserting_aftercall_functionr_   r   floatr   atenwherelintfxGraphModule)
mask_graphgmr;   qk_data_nodeoutput_nodenode	mask_node	size_node	full_node
where_node	convertedcur_kvSplitSizecur_qSplitSizescore_dtypes              r5   convert_mask_graph_modulez,lower_cpu.<locals>.convert_mask_graph_module  s   ]]:223##Dekk):$;< 	8 ,,Y7L	8 KK 	Dww(""	 &&&$$Q'	#_5	""9- 	++

%,/- , I	 ""9- 	,,		$$I|Y+O - J	 '=

HH((U3	E	8 	8 	 		 	s$   F1F9F FF F)r   z=Unsupported for now if query, key, value are the same buffer.z}`torch.float` , `torch.float16` and `torch.bfloat16` are supported in FlexAttention for CPU device. Found input tensors are `z`.strideTFscore_othersmask_others_SKIP_MASK_SCOREr   zkQ seqlen must be smaller than the block_mask size in the Q dimension, considering pass a larger block_mask.r   zmKV seqlen must be smaller than the block_mask size in the KV dimension, considering pass a larger block_mask.)choicesinput_nodesrM   scale	score_modmask_modkv_block_sizehas_other_bufferno_full_kv_blockfake_bufferslen_score_otherlen_mask_otherkernel_input_name_to_buffer
block_varsr   )5NotImplementedErrorr  r
   r;   r<   r=   create_unbacked_symintr)  exprr   r   var_to_ranger_   r  int64rQ   rp   r   rv   rV   freeze_layoutr   pending_fresh_unbacked_symbolsr   appendro   mapr  r\   r1   r   get_namert   bfloat16float16rs   r>   
get_strider   rW   sympifyupdate	enumeratery   rX   r   r   r   LeMulr   add_choicesr(   )Ar   keyvaluer   
block_maskr;  r   score_mod_other_buffersmask_mod_other_buffersr7  kv_num_blocksr   full_kv_num_blocksfull_kv_indicesq_num_blocks	q_indicesfull_q_num_blocksfull_q_indicesSPARSE_Q_BLOCK_SIZESPARSE_KV_BLOCK_SIZEr%  rA  r=   rI   rJ   r8   placeholder_inpsr   _bufmask_graph_placeholder_inpsr2  converted_mask_graph_modulemask_graph_bufferpendingrY   buffer_listitemBqHq	seq_len_qqk_head_dimBkvHkv
seq_len_kv
v_head_dimBout_sizeout_stridessrM   _choicesr:  r@  r?  rD  prefixr   r|   bufskip_mask_scoreinputs_for_autotuningresr/  r0  r1  sA                                                                 @@@r5   	lower_cpur    s   2 			 ()!a
 	
  !I
 	
 "$L WW%%//FFHMMRRNgg&&00GGINNSSO  **I
 .9F-CI>*.9!V.DI?+++K kNO#DE%++r"%++r"ekkNA#67u{{Q$89"
	 	D% 	4(8(8(:DA	 	 ,4 788(O "ot,' )#&&() ))+ kNO#DE%++r"%++r"ekkNA#67u{{Q$89"
	# 	#D% 	4(8(8(:DA	# 	#2&P #<J"G4#d+A&BB# gg((GGGAa'HHAAGG=
 	
&
'	(
%	& %
&	'   0dI&		/0
 /%e1DEE3 		
	" :u~~'9IJKLPQQ!K
 	
 ennemm LL!((-(9':">
 	
 ,,CD*+AB%*^^%5"BI{',~~'7$Cj*
A 2y*-H%h0@0@0BCK	
B	:&*56Qa 6	F H#umZ@K *++"$"8 4523 
 	OFG (..4=g4FG&!SF81QC#%G		 	4;;=
eU\\2 
 	
 %(():EBO77++AABVW''**@@ATU77))EIIj&9&9&;B&?ATUV  	v 
 77))UYYz':':'<R'@BVWX  	x 
 (()$(.?*))!3412$?"O4" 	
 $	C 6MM	(	#TAH 7& H
s*   '\<%'])]
5]
/]]	#]c                 &    | dk7  xr | | dz
  z  dk(  S )Nr   r0   rA   ns    r5   is_power_of_2r    s    6*QU)*r7   c                 d    | dk  rydt        j                  t        j                  |             z  S )Nr   r0   r   )mathceillog2r  s    r5   next_power_of_twor    s(    Av		$))A,'''r7   r   c                 L   |j                  |      }| j                  d|       | j                  dt        |             |j                  |      }| j                  d|       | j                  dt        |             | j                  dt        |      xr t        |             y)a:  
    Mutates kernel options, adding head dimension calculations.

    Args:
        kernel_options: Dictionary to populate with options
        qk_head_dim: Query/Key head dimension
        v_head_dim: Value head dimension
        graph_sizevars: Graph size variables object with evaluate_static_shape method

    """
    # Head dims are rounded up to the next power of two for the kernel's block
    # shapes; SAFE_HEAD_DIM records whether no padding was required.
    qk_head_dim_static = graph_sizevars.evaluate_static_shape(qk_head_dim)
    kernel_options.setdefault("QK_HEAD_DIM", qk_head_dim_static)
    kernel_options.setdefault(
        "QK_HEAD_DIM_ROUNDED", next_power_of_two(qk_head_dim_static)
    )

    v_head_dim_static = graph_sizevars.evaluate_static_shape(v_head_dim)
    kernel_options.setdefault("V_HEAD_DIM", v_head_dim_static)
    kernel_options.setdefault(
        "V_HEAD_DIM_ROUNDED", next_power_of_two(v_head_dim_static)
    )

    kernel_options.setdefault(
        "SAFE_HEAD_DIM",
        is_power_of_2(qk_head_dim_static) and is_power_of_2(v_head_dim_static),
    )


@register_lowering(torch.ops.higher_order.flex_attention, type_promotion_kind=None)
def flex_attention(
    query,
    key,
    value,
    subgraph,
    block_mask,
    scale,
    kernel_options,
    score_mod_other_buffers=(),
    mask_mod_other_buffers=(),
):
    if query.get_device().type == "cpu":
        return lower_cpu(
            query,
            key,
            value,
            subgraph,
            block_mask,
            scale,
            kernel_options,
            score_mod_other_buffers,
            mask_mod_other_buffers,
        )

    small_dqk = V.graph.sizevars.evaluate_expr(sympy.Lt(query.get_size()[-1], 16))
    small_dv = V.graph.sizevars.evaluate_expr(sympy.Lt(value.get_size()[-1], 16))
    if small_dqk or small_dv:
        raise NotImplementedError(
            f"NYI: embedding dimension of the query, key, and value must be "
            f"at least 16 but got E={query.get_size()[-1]} and Ev={value.get_size()[-1]}"
        )

    (
        _,  # q_length
        _,  # kv_length
        kv_num_blocks,
        kv_indices,
        full_kv_num_blocks,
        full_kv_indices,
        q_num_blocks,
        q_indices,
        full_q_num_blocks,
        full_q_indices,
        SPARSE_Q_BLOCK_SIZE,
        SPARSE_KV_BLOCK_SIZE,
        mask_graph,
    ) = block_mask

    placeholder_inps = [
        create_placeholder(name, dtype, query.get_device())
        for name, dtype in [
            ("score", query.get_dtype()),
            ("b", torch.int32),
            ("h", torch.int32),
            ("m", torch.int32),
            ("n", torch.int32),
        ]
    ]
    subgraph_buffer = build_subgraph_buffer(
        placeholder_inps + list(score_mod_other_buffers), subgraph
    )

    mask_graph_placeholder_inps = [
        create_placeholder(name, dtype, query.get_device())
        for name, dtype in [
            ("b", torch.int32),
            ("h", torch.int32),
            ("m", torch.int32),
            ("n", torch.int32),
        ]
    ]
    mask_graph_buffer = build_subgraph_buffer(
        mask_graph_placeholder_inps + list(mask_mod_other_buffers), mask_graph
    )

    # Mark symbols in custom kernel options as static shapes and add guards.
    kernel_options = {
        k: (
            V.graph.sizevars.evaluate_static_shape(v)
            if isinstance(v, sympy.Symbol)
            else v
        )
        for k, v in kernel_options.items()
    }
    kernel_options.setdefault("FLOAT32_PRECISION", get_float32_precision())

    enable_gqa = V.graph.sizevars.evaluate_expr(
        sympy.Ne(query.get_size()[1], key.get_size()[1])
    )
    if _use_flex_decoding(query, kv_indices, kernel_options, enable_gqa):
        return create_flex_decoding_kernel(
            query,
            key,
            value,
            block_mask,
            scale,
            kernel_options,
            subgraph_buffer,
            mask_graph_buffer,
            score_mod_other_buffers,
            mask_mod_other_buffers,
        )

    (
        query,
        key,
        value,
        kv_num_blocks,
        kv_indices,
        full_kv_num_blocks,
        full_kv_indices,
        q_num_blocks,
        q_indices,
        full_q_num_blocks,
        full_q_indices,
    ) = maybe_realize(
        [
            query,
            key,
            value,
            kv_num_blocks,
            kv_indices,
            full_kv_num_blocks,
            full_kv_indices,
            q_num_blocks,
            q_indices,
            full_q_num_blocks,
            full_q_indices,
        ]
    )
    score_mod_other_buffers = maybe_realize(score_mod_other_buffers)
    mask_mod_other_buffers = maybe_realize(mask_mod_other_buffers)

    Bq, Hq, seq_len_q, qk_head_dim = query.get_size()
    Bkv, Hkv, seq_len_kv, v_head_dim = value.get_size()

    assert V.graph.sizevars.evaluate_expr(
        sympy.Or(sympy.Eq(Bq, Bkv), sympy.Eq(Bkv, 1))
    ), f"Bq and Bkv must broadcastable. Got Bq={Bq} and Bkv={Bkv}"
    assert V.graph.sizevars.evaluate_expr(sympy.Gt(seq_len_q, 0)), (
        "Query length must be greater than 0"
    )
    assert V.graph.sizevars.evaluate_expr(sympy.Gt(seq_len_kv, 0)), (
        "Key length must be greater than 0"
    )

    B = Bq

    if seq_len_q % 128 != 0 or seq_len_kv % 128 != 0:
        kernel_options.setdefault("IS_DIVISIBLE", False)
    else:
        kernel_options.setdefault("IS_DIVISIBLE", True)

    # Construct output layout with strides matching the query.
    out_size = [B, Hq, seq_len_q, v_head_dim]
    out_strides = infer_dense_strides(out_size, query.get_stride())

    layout = FixedLayout(
        query.get_device(),
        query.get_dtype(),
        [B, Hq, seq_len_q, v_head_dim],
        stride=[sympy.sympify(s) for s in out_strides],
    )

    # The logsumexp is always stored in fp32 regardless of the input dtype.
    logsumexp_shape = [B, Hq, seq_len_q]
    logsumexp = empty_strided(
        logsumexp_shape,
        None,
        dtype=torch.float32,
        device=query.get_device(),
    )
    kernel_options.setdefault("SM_SCALE", scale)

    gqa_shared_heads = Hq // Hkv
    kernel_options.setdefault("GQA_SHARED_HEADS", gqa_shared_heads)

    # Inside the Triton kernel we only apply partial masking when partial
    # blocks were computed; full_kv_num_blocks is None when they were not.
    has_full_blocks = full_kv_num_blocks is not None
    kernel_options.setdefault("HAS_FULL_BLOCKS", has_full_blocks)
    if not has_full_blocks:
        full_kv_num_blocks, full_kv_indices = (
            empty(0, device=query.get_device()) for _ in range(2)
        )

    set_head_dim_values(kernel_options, qk_head_dim, v_head_dim, V.graph.sizevars)

    configs: list[Any] = []
    configs.append(_get_default_config_fwd(query))
    if config.max_autotune:
        configs += [
            (128, 64, 4, 3),
            (128, 128, 4, 3),
            (128, 128, 8, 2),
            (64, 128, 4, 3),
            (64, 64, 4, 3),
        ]
        if torch.version.hip:
            # On ROCm convert num_stages to 1 to avoid shared-memory issues.
            configs = [(c[0], c[1], c[2], 1) for c in configs]

    # Mark SPARSE_KV_BLOCK_SIZE & SPARSE_Q_BLOCK_SIZE as static shapes and add guards.
    SPARSE_KV_BLOCK_SIZE = V.graph.sizevars.evaluate_static_shape(SPARSE_KV_BLOCK_SIZE)
    SPARSE_Q_BLOCK_SIZE = V.graph.sizevars.evaluate_static_shape(SPARSE_Q_BLOCK_SIZE)
    assert V.graph.sizevars.evaluate_expr(
        sympy.Le(seq_len_q, sympy.Mul(kv_indices.get_size()[-2], SPARSE_Q_BLOCK_SIZE))
    ), "Q seqlen must be smaller than the block_mask size in the Q dimension, considering pass a larger block_mask."
    assert V.graph.sizevars.evaluate_expr(
        sympy.Le(seq_len_kv, sympy.Mul(kv_indices.get_size()[-1], SPARSE_KV_BLOCK_SIZE))
    ), "KV seqlen must be smaller than the block_mask size in the KV dimension, considering pass a larger block_mask."

    # ROCm specific considerations
    if torch.version.hip:
        kernel_options.setdefault("kpack", 2)

    original_kernel_options = kernel_options.copy()
    choices: list[Any] = []
    for BLOCK_M, BLOCK_N, num_warps, num_stages in configs:
        if SPARSE_KV_BLOCK_SIZE % BLOCK_N != 0 or SPARSE_Q_BLOCK_SIZE % BLOCK_M != 0:
            if len(configs) == 1:
                raise ValueError(
                    f"Q and KV block size must be divisible by BLOCK_M and BLOCK_N. We got Q_BLOCK_SIZE={SPARSE_Q_BLOCK_SIZE} and KV_BLOCK_SIZE={SPARSE_KV_BLOCK_SIZE}."
                )
            continue

        cur_kernel_options = original_kernel_options.copy()
        # Remove the fwd_ prefix from forward kernel options and drop any
        # backward kernel options.
        for k in list(cur_kernel_options.keys()):
            if k.startswith("fwd_"):
                v = cur_kernel_options.pop(k)
                cur_kernel_options[k[4:]] = v
            if k.startswith("bwd_"):
                cur_kernel_options.pop(k)
        cur_kernel_options.setdefault("num_warps", num_warps)
        cur_kernel_options.setdefault("num_stages", num_stages)
        cur_kernel_options.setdefault("BLOCK_M", BLOCK_M)
        cur_kernel_options.setdefault("BLOCK_N", BLOCK_N)
        # Blocksparse options
        cur_kernel_options.setdefault("SPARSE_Q_BLOCK_SIZE", SPARSE_Q_BLOCK_SIZE)
        cur_kernel_options.setdefault("SPARSE_KV_BLOCK_SIZE", SPARSE_KV_BLOCK_SIZE)

        error = flex_attention_template.maybe_append_choice(
            choices=choices,
            input_nodes=[
                query,
                key,
                value,
                logsumexp,
                kv_num_blocks,
                kv_indices,
                full_kv_num_blocks,
                full_kv_indices,
            ],
            layout=layout,
            subgraphs=[subgraph_buffer, mask_graph_buffer],
            mutated_inputs=[logsumexp],
            call_sizes=query.get_size(),
            **cur_kernel_options,
        )
        if error is not None and len(configs) == 1:
            raise error

    inputs_for_autotuning = (
        [
            query,
            key,
            value,
            logsumexp,
            kv_num_blocks,
            kv_indices,
            full_kv_num_blocks,
            full_kv_indices,
        ]
        + list(score_mod_other_buffers)
        + list(mask_mod_other_buffers)
    )
    input_gen_fns = {
        4: create_num_blocks_fake_generator(kv_indices),
        5: create_indices_fake,
        6: create_num_blocks_fake_generator(full_kv_indices),
        7: create_indices_fake,
    }
    return (
        autotune_select_algorithm(
            "flex_attention",
            choices,
            inputs_for_autotuning,
            layout,
            input_gen_fns=input_gen_fns,
        ),
        logsumexp,
    )


def flex_attention_backward_grid(
    batch_size, q_heads, num_queries, d_model, kv_heads, num_key_value, meta
):
    """How is this kernel parallelized?
    Currently this is only parallelizing over batch* kv_heads, but we can, and want to
    parallelize over ceil_div(q_heads//kv_heads * num_key_value, key_value_block_size).
    To do this will either require atomic updates to some grad values or to have a two pass kernel design.
    """
    import triton

    return (
        triton.cdiv(num_queries, meta["BLOCK_M2"]) * (q_heads // kv_heads)
        + triton.cdiv(num_key_value, meta["BLOCK_N1"]),
        1,
        batch_size * kv_heads,
    )


flex_attention_backward_template = TritonTemplate(
    name="flex_attention_backward",
    grid=flex_attention_backward_grid,
    source=r"""
{{def_kernel("Q", "K", "V", "LSE", "DELTA", "DO", "DQ", "DV", "KV_NUM_BLKS", "KV_IDX", "Q_NUM_BLKS", "Q_IDX", "FULL_KV_NUM_BLKS", "FULL_KV_IDX", "FULL_Q_NUM_BLKS", "FULL_Q_IDX")}}
    # Sub notation for this kernel:
    #
    # Q: Query, K: Key, V: Value
    # LSE: logsumexp (logsumexp is always stored in fp32 regardless of the input dtype)
    # DELTA: Precomputed sum(OUT*DO, axis=-1)
    # DO: Derivative of Output, DQ: Derivative of Query, DV: Derivative of Value
    # DK: Derivative of Key, is the written to via the store_output call due to some limitations with
    # inductor codegen
    # M: Number of queries, N: Number of keys/values
    # QK_HEAD_DIM: The dimension of the query and key embeddings
    # V_HEAD_DIM: The dimension of the value embeddings
    # z: Batch size, h: Number of heads, m: Number of queries or keys/values, d: Head dim
    # GQA_SHARED_HEADS: number of query heads sharing one kv head in GQA setups.
    # (Modifiable) Performance tuning options
    # BLOCK_M1: when calculating DK & DV, iterate over BLOCK_M1 across the seqlen dim of Q in each thread block.
    # BLOCK_N1: when calculating DK & DV, the thread block size across the seqlen dim of K/V.
    # BLOCK_M2: when calculating DQ, the thread block size across the seqlen dim of Q.
    # BLOCK_N2: when calculating DQ, iterate over BLOCK_N2 across the seqlen dim of K/V in each thread block.
    #
    # The following FULL_* and PARTIAL_* is defined in the block sparse mask grid, rather than the thread block grid.
    # KV_NUM_BLKS: The number of KV blocks (that may or may not require masking) for each query.
    # KV_IDX: The indices of KV blocks (that may or may not require masking) for each query.
    # Q_NUM_BLKS: The number of Q blocks (that may or may not require masking) for each query.
    # Q_IDX: The indices of Q blocks (that may or may not require masking) for each query.
    # FULL_KV_NUM_BLKS: The number of fully unmasked KV blocks (so we don't need masking) for each query.
    # FULL_KV_IDX: The indices of fully unmasked KV blocks (so we don't need masking) for each query.
    # FULL_Q_NUM_BLKS: The number of fully unmasked Q blocks (so we don't need masking) for each query.
    # FULL_Q_IDX: The indices of fully unmasked Q blocks (so we don't need masking) for each query.

    # The below are kernel options that can be applied for certain score_mods,
    # or involve a numerics vs. perf tradeoff
    # PRESCALE_QK: Whether to pre-scale QK by 1/sqrt(d) and change of base. Has
    # about 20% more numerical error, but slightly faster.

    # Define strides of inputs
    stride_qz, stride_qh, stride_qm, stride_qd = {{stride("Q")}}
    stride_kz, stride_kh, stride_kn, stride_kd = {{stride("K")}}
    stride_vz, stride_vh, stride_vn, stride_vd = {{stride("V")}}
    stride_doz, stride_doh, stride_dom, stride_dod = {{stride("DO")}}

    stride_dqz, stride_dqh, stride_dqm, stride_dqd = {{stride("DQ")}}
    stride_dvz, stride_dvh, stride_dvm, stride_dvd = {{stride("DV")}}

    ZQ = {{size("Q", 0)}}
    HQ = {{size("Q", 1)}}
    HKV = {{size("K", 1)}}
    Q_LEN = {{size("Q", 2)}}
    ZKV = {{size("K", 0)}}
    KV_LEN = {{size("K", 2)}}

    MATMUL_PRECISION = Q.dtype.element_ty

    pid = tl.program_id(0)
    NUM_KV_BLOCKS = tl.cdiv(KV_LEN, BLOCK_N1)
    NUM_Q_BLOCKS = tl.cdiv(Q_LEN, BLOCK_M2)

    off_hz = tl.program_id(2)
    off_zq = off_hz // HKV # q batch idx
    off_hkv = off_hz % HKV # kv head idx
    off_zkv = off_zq % ZKV # kv batch idx

    SPARSE_Z = {{size("KV_NUM_BLKS", 0)}}
    SPARSE_HQ = {{size("KV_NUM_BLKS", 1)}}

    sparse_idx_z = off_zq % SPARSE_Z

    k_adj = (stride_kh * off_hkv + stride_kz * off_zkv).to(tl.int64)
    v_adj = (stride_vh * off_hkv + stride_vz * off_zkv).to(tl.int64)
    # first compute broadcasted dv of shape [Bq, Hkv, KV_LEN, V_HEAD_DIM]
    # then reduce to dv of shape [Bkv, Hkv, KV_LEN, V_HEAD_DIM]
    dv_adj = (stride_dvh * off_hkv + stride_dvz * off_zq).to(tl.int64)

    # offset K, V, DV pointers for batch/kv-head
    K += k_adj
    V += v_adj
    DV += dv_adj

    RCP_LN2 = 1.44269504
    offs_k = tl.arange(0, QK_HEAD_DIM_ROUNDED)
    offs_v = tl.arange(0, V_HEAD_DIM_ROUNDED)

    if pid >= NUM_KV_BLOCKS:
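    # Program layout (illustrative): the first NUM_KV_BLOCKS programs each
    # compute dK/dV for one KV tile; the remaining programs compute dQ, one
    # (query head, Q block) pair each.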
        off_pid = pid - NUM_KV_BLOCKS
        # THIS BLOCK DOES DQ
        SPARSE_Q_MULTIPLE = (SPARSE_Q_BLOCK_SIZE // BLOCK_M2)
        SPARSE_KV_MULTIPLE = (SPARSE_KV_BLOCK_SIZE // BLOCK_N2)
        off_hq2 = off_pid // NUM_Q_BLOCKS + off_hkv * GQA_SHARED_HEADS
        start_m2_block = off_pid % NUM_Q_BLOCKS
        off_pid_mask = start_m2_block // SPARSE_Q_MULTIPLE
        stride_kv_num_blks_h = {{stride("KV_NUM_BLKS", 1)}}
        stride_kv_idx_h = {{stride("KV_IDX", 1)}}
        stride_kv_idx_m = {{stride("KV_IDX", 2)}}

        sparse_idx_hq2 = off_hq2 % SPARSE_HQ
        sparse_hz_offset = sparse_idx_z * SPARSE_HQ + sparse_idx_hq2

        sparse_kv_num_blks_offset = sparse_hz_offset * stride_kv_num_blks_h + off_pid_mask
        sparse_kv_idx_offset = sparse_hz_offset * stride_kv_idx_h + off_pid_mask * stride_kv_idx_m  # noqa: B950

        # Offset Q, DQ, DO, DELTA & LSE. These inputs are offset by query head.
        q_adj2 = (stride_qh * off_hq2 + stride_qz * off_zq).to(tl.int64)
        do_adj2 = (stride_doh * off_hq2 + stride_doz * off_zq).to(tl.int64)
        dq_adj2 = (stride_dqh * off_hq2 + stride_dqz * off_zq).to(tl.int64)
        off_chz2 = ((off_zq * HQ + off_hq2) * Q_LEN).to(tl.int64)

        Q2 = Q + q_adj2
        DO2 = DO + do_adj2
        # TODO: This does not work if DQ is not the same layout as Q (for example,
        # if Q is broadcasted)
        DQ2 = DQ + dq_adj2
        LSE2 = LSE + off_chz2
        DELTA2 = DELTA + off_chz2

        # dq = tl.zeros([BLOCK_M2, QK_HEAD_DIM], dtype=tl.float32)
        dq = tl.zeros([BLOCK_M2, QK_HEAD_DIM_ROUNDED], dtype=tl.float32)

        start_m2 = start_m2_block * BLOCK_M2
        offs_m2 = start_m2 + tl.arange(0, BLOCK_M2)

        # load Q and do: they stay in SRAM throughout the inner loop.
        q = load_checked_2d(Q2, offs_m2, offs_k, stride_qm, stride_qd, IS_DIVISIBLE, SAFE_HEAD_DIM, Q_LEN, QK_HEAD_DIM)
        do = load_checked_2d(DO2, offs_m2, offs_v, stride_dom, stride_dod, IS_DIVISIBLE, SAFE_HEAD_DIM, Q_LEN, V_HEAD_DIM)

        if PRESCALE_QK:
            q = (q * SM_SCALE * RCP_LN2).to(MATMUL_PRECISION)

        if IS_DIVISIBLE:
            Di = tl.load(DELTA2 + offs_m2)
            lse = tl.load(LSE2 + offs_m2)
        else:
            Di = tl.load(DELTA2 + offs_m2, mask=offs_m2 < Q_LEN)
            lse = tl.load(LSE2 + offs_m2, mask=offs_m2 < Q_LEN)
        lse = tl.where(lse == -float("inf"), 0.0, lse)
        lse = lse[:, None]

        # ~~~~~~~~~~~ partially masked blocks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # KV_IDX and KV_NUM_BLKS are always contiguous.
        kv_indices = KV_IDX + sparse_kv_idx_offset
        kv_start = tl.load(kv_indices) * SPARSE_KV_BLOCK_SIZE # first kv block we're loading
        sparse_kv_num_blocks = tl.load(KV_NUM_BLKS + sparse_kv_num_blks_offset)

        offs_n2 = kv_start + tl.arange(0, BLOCK_N2)
        dq = bwd_dq_inner(
            {{gen_argdefs()}},
            K, V,
            dq, q, do, Di, lse,
            off_zq, off_hq2, offs_m2, offs_n2,
            stride_kn, stride_kd, stride_vn, stride_vd,
            kv_indices, sparse_kv_num_blocks,
            MATMUL_PRECISION,
            IS_FULL_BLOCKS=False,
        )

        if HAS_FULL_BLOCKS:
            # ~~~~~~~~~~~ fully unmasked blocks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # FULL_KV_IDX and FULL_KV_NUM_BLKS are always contiguous.
            kv_indices = FULL_KV_IDX + sparse_kv_idx_offset
            kv_start = tl.load(kv_indices) * SPARSE_KV_BLOCK_SIZE # first kv block we're loading
            sparse_kv_num_blocks = tl.load(FULL_KV_NUM_BLKS + sparse_kv_num_blks_offset)

            offs_n2 = kv_start + tl.arange(0, BLOCK_N2)
            dq = bwd_dq_inner(
                {{gen_argdefs()}},
                K, V,
                dq, q, do, Di, lse,
                off_zq, off_hq2, offs_m2, offs_n2,
                stride_kn, stride_kd, stride_vn, stride_vd,
                kv_indices, sparse_kv_num_blocks,
                MATMUL_PRECISION,
                IS_FULL_BLOCKS=True,
            )

        # Write back dQ.
        dq_ptrs = DQ2 + offs_m2[:, None] * stride_dqm + offs_k[None, :] * stride_dqd
        dq *= SM_SCALE
        if IS_DIVISIBLE and SAFE_HEAD_DIM:
            tl.store(dq_ptrs, dq)
        else:
            tl.store(dq_ptrs, dq, mask=(offs_m2[:, None] < Q_LEN) & (offs_k[None, :] < QK_HEAD_DIM))
    else:
        # THIS BLOCK DOES DK & DV
        SPARSE_Q_MULTIPLE = (SPARSE_Q_BLOCK_SIZE // BLOCK_M1)
        SPARSE_KV_MULTIPLE = (SPARSE_KV_BLOCK_SIZE // BLOCK_N1)

        pid_mask = pid // SPARSE_KV_MULTIPLE

        stride_q_num_blks_h = {{stride("Q_NUM_BLKS", 1)}}
        stride_q_idx_h = {{stride("Q_IDX", 1)}}
        stride_q_idx_n = {{stride("Q_IDX", 2)}}


        dv = tl.zeros([BLOCK_N1, V_HEAD_DIM_ROUNDED], dtype=tl.float32)
        dk = tl.zeros([BLOCK_N1, QK_HEAD_DIM_ROUNDED], dtype=tl.float32)

        start_n1 = pid * BLOCK_N1
        offs_n1 = start_n1 + tl.arange(0, BLOCK_N1)

        # load K and V: they stay in SRAM throughout the inner loop.
        k = load_checked_2d(K, offs_n1, offs_k, stride_kn, stride_kd, IS_DIVISIBLE, SAFE_HEAD_DIM, KV_LEN, QK_HEAD_DIM)
        v = load_checked_2d(V, offs_n1, offs_v, stride_vn, stride_vd, IS_DIVISIBLE, SAFE_HEAD_DIM, KV_LEN, V_HEAD_DIM)

        if PRESCALE_QK:
            k = (k * SM_SCALE * RCP_LN2).to(MATMUL_PRECISION)

        for off_g in range(0, GQA_SHARED_HEADS):
            off_hq1 = off_hkv * GQA_SHARED_HEADS + off_g

            # Offset Q, DQ, DO, DELTA & LSE. These inputs are offset by query head.
            q_adj1 = (stride_qh * off_hq1 + stride_qz * off_zq).to(tl.int64)
            do_adj1 = (stride_doh * off_hq1 + stride_doz * off_zq).to(tl.int64)
            dq_adj1 = (stride_dqh * off_hq1 + stride_dqz * off_zq).to(tl.int64)
            off_chz1 = ((off_zq * HQ + off_hq1) * Q_LEN).to(tl.int64)

            Q1 = Q + q_adj1
            DO1 = DO + do_adj1
            # TODO: This does not work if DQ is not the same layout as Q (for example,
            # if Q is broadcasted)
            LSE1 = LSE + off_chz1
            DELTA1 = DELTA + off_chz1

            sparse_idx_hq1 = off_hq1 % SPARSE_HQ
            sparse_hz_offset = sparse_idx_z * SPARSE_HQ + sparse_idx_hq1

            sparse_q_num_blks_offset = sparse_hz_offset * stride_q_num_blks_h + pid_mask
            sparse_q_idx_offset = sparse_hz_offset * stride_q_idx_h + pid_mask * stride_q_idx_n  # noqa: B950

            # ~~~~~~~~~~~~~~~ partially masked blocks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            # Q_IDX and Q_NUM_BLKS are always contiguous.
            q_indices = Q_IDX + sparse_q_idx_offset
            q_start = tl.load(q_indices) * SPARSE_Q_BLOCK_SIZE # first q block we're loading
            sparse_q_num_blocks = tl.load(Q_NUM_BLKS + sparse_q_num_blks_offset)

            offs_m1 = q_start + tl.arange(0, BLOCK_M1)
            dk, dv = bwd_dkdv_inner(
                {{gen_argdefs()}},
                Q1, DO1, DELTA1, LSE1,
                dk, dv, k, v,
                off_zq, off_hq1, offs_n1, offs_m1,
                stride_qm, stride_qd, stride_dom, stride_dod,
                q_indices, sparse_q_num_blocks,
                MATMUL_PRECISION,
                IS_FULL_BLOCKS=False,
            )


            if HAS_FULL_BLOCKS:
                # ~~~~~~~~~~~~~~~ fully unmasked blocks ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
                # FULL_Q_IDX and FULL_Q_NUM_BLKS are always contiguous.
                q_indices = FULL_Q_IDX + sparse_q_idx_offset
                q_start = tl.load(q_indices) * SPARSE_Q_BLOCK_SIZE # first q block we're loading
                sparse_q_num_blocks = tl.load(FULL_Q_NUM_BLKS + sparse_q_num_blks_offset)

                offs_m1 = q_start + tl.arange(0, BLOCK_M1)
                dk, dv = bwd_dkdv_inner(
                    {{gen_argdefs()}},
                    Q1, DO1, DELTA1, LSE1,
                    dk, dv, k, v,
                    off_zq, off_hq1, offs_n1, offs_m1,
                    stride_qm, stride_qd, stride_dom, stride_dod,
                    q_indices, sparse_q_num_blocks,
                    MATMUL_PRECISION,
                    IS_FULL_BLOCKS=True,
                )

        # Write back dV and dK.
        dv_ptrs = DV + offs_n1[:, None] * stride_dvm + offs_v[None, :] * stride_dvd

        index_n = offs_n1[:, None]
        index_k = offs_k[None, :]
        index_v = offs_v[None, :]

        if IS_DIVISIBLE and SAFE_HEAD_DIM:
            tl.store(dv_ptrs, dv)
        else:
            tl.store(dv_ptrs, dv, mask=(index_n < KV_LEN) & (index_v < V_HEAD_DIM))

        dk *= SM_SCALE

        if SAFE_HEAD_DIM:
            mask = index_n < KV_LEN
        else:
            mask = (index_n < KV_LEN) & (index_k < QK_HEAD_DIM)

        # first compute broadcasted dk of shape [Bq, Hkv, KV_LEN, QK_HEAD_DIM]
        # then reduce to dk of shape [Bkv, Hkv, KV_LEN, QK_HEAD_DIM]
        {{store_output(("off_zq", "off_hkv", "index_n", "index_k"), "dk", "mask", indent_width=8)}}

@triton.jit
def bwd_dq_inner(
    {{gen_argdefs()}},
    K, V,  # pointers
    dq, q, do, Di, lse,
    off_z, off_hq, offs_m2, offs_n2,
    stride_kn, stride_kd, stride_vn, stride_vd,
    kv_indices, sparse_kv_num_blocks,
    MATMUL_PRECISION,
    IS_FULL_BLOCKS,
):
    {{gen_defines() | indent_except_first(1) }}
    SPARSE_KV_MULTIPLE: tl.constexpr = (SPARSE_KV_BLOCK_SIZE // BLOCK_N2)
    RCP_LN2: tl.constexpr = 1.44269504
    Q_LEN = {{size("Q", 2)}}
    KV_LEN = {{size("K", 2)}}

    offs_k = tl.arange(0, QK_HEAD_DIM_ROUNDED)
    offs_v = tl.arange(0, V_HEAD_DIM_ROUNDED)

    kT_ptrs = K + offs_n2[None, :] * stride_kn + offs_k[:, None] * stride_kd
    vT_ptrs = V + offs_n2[None, :] * stride_vn + offs_v[:, None] * stride_vd
    # BLOCK_M2 must be a multiple of BLOCK_N2, otherwise the code wouldn't work.
    tl.static_assert(BLOCK_M2 % BLOCK_N2 == 0)

    hi = tl.minimum(sparse_kv_num_blocks * SPARSE_KV_MULTIPLE, tl.maximum(tl.cdiv(KV_LEN, BLOCK_N2), 1))
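    # hi is the number of BLOCK_N2 steps for this query row: sparse_kv_num_blocks
    # sparse blocks, each spanning SPARSE_KV_MULTIPLE thread-block tiles, capped at
    # max(ceil(KV_LEN / BLOCK_N2), 1) so a degenerate mask still bounds the loop.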
    if not IS_DIVISIBLE:
        if hi >= 1:
            for start_n in range(0, hi - 1):
                dq = bwd_dq_block_mn(
                    {{gen_argdefs()}},
                    dq, q, kT_ptrs, vT_ptrs, do, Di, lse, Q_LEN, KV_LEN,
                    off_z, off_hq, offs_m2, offs_n2, offs_k, offs_v,
                    stride_kn, stride_kd, stride_vn, stride_vd,
                    kv_indices, sparse_kv_num_blocks,
                    MATMUL_PRECISION, RCP_LN2,
                    IS_FULL_BLOCKS,
                )

                # Increment pointers.
                offset = get_offset_for_next_block(
                    start_n, kv_indices, sparse_kv_num_blocks,
                    SPARSE_KV_BLOCK_SIZE, SPARSE_KV_MULTIPLE, BLOCK_N2, BLOCKS_ARE_CONTIGUOUS
                )
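                # The next tile is not necessarily adjacent: once the current sparse
                # block is exhausted, the offset jumps to the block named by the next
                # KV_IDX entry; with BLOCKS_ARE_CONTIGUOUS the indirection is skipped
                # and we advance by BLOCK_N2 directly.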

                kT_ptrs += offset * stride_kn
                vT_ptrs += offset * stride_vn

                offs_n2 += offset

            dq = bwd_dq_block_mn(
                {{gen_argdefs()}},
                dq, q, kT_ptrs, vT_ptrs, do, Di, lse, Q_LEN, KV_LEN,
                off_z, off_hq, offs_m2, offs_n2, offs_k, offs_v,
                stride_kn, stride_kd, stride_vn, stride_vd,
                kv_indices, sparse_kv_num_blocks,
                MATMUL_PRECISION, RCP_LN2,
                IS_FULL_BLOCKS, CHECK_BLOCK_BOUNDARY=True,
            )
    else:
        for start_n in range(0, hi):
            dq = bwd_dq_block_mn(
                {{gen_argdefs()}},
                dq, q, kT_ptrs, vT_ptrs, do, Di, lse, Q_LEN, KV_LEN,
                off_z, off_hq, offs_m2, offs_n2, offs_k, offs_v,
                stride_kn, stride_kd, stride_vn, stride_vd,
                kv_indices, sparse_kv_num_blocks,
                MATMUL_PRECISION, RCP_LN2,
                IS_FULL_BLOCKS,
            )

            # Increment pointers.
            offset = get_offset_for_next_block(
                start_n, kv_indices, sparse_kv_num_blocks,
                SPARSE_KV_BLOCK_SIZE, SPARSE_KV_MULTIPLE, BLOCK_N2, BLOCKS_ARE_CONTIGUOUS
            )

            kT_ptrs += offset * stride_kn
            vT_ptrs += offset * stride_vn

            offs_n2 += offset

    return dq


@triton.jit
def bwd_dq_block_mn(
    {{gen_argdefs()}},
    dq, q, kT_ptrs, vT_ptrs, do, Di, lse, Q_LEN, KV_LEN,
    off_z, off_hq, offs_m2, offs_n2, offs_k, offs_v,
    stride_kn, stride_kd, stride_vn, stride_vd,
    kv_indices, sparse_kv_num_blocks,
    MATMUL_PRECISION, RCP_LN2,
    IS_FULL_BLOCKS, CHECK_BLOCK_BOUNDARY=False,
):
    {{gen_defines() | indent_except_first(1)}}

    # NB: reversed index order since K is transposed
    kT = load_checked_2d(kT_ptrs, offs_k, offs_n2, None, None, SAFE_HEAD_DIM, IS_DIVISIBLE, QK_HEAD_DIM, KV_LEN)
    qk = tl.dot(q, kT, input_precision=FLOAT32_PRECISION)
    if not PRESCALE_QK:
        qk *= SM_SCALE
    # ~~~~~~~~~~~~~~~~~~~ Apply score modification  ~~~~~~~~~~~~~~~~~~~
    pre_mod_scores = qk
    n = get_bounded_indices(offs_n2[None, :], KV_LEN if CHECK_BLOCK_BOUNDARY else None)
    # The boundary check on N is only needed on the last iteration of the outer loop,
    # but since we iterate across the N dim here, the M reads can be out of bounds on
    # any iteration, so clamp them whenever Q_LEN is not divisible.
    m = get_bounded_indices(offs_m2[:, None], Q_LEN if (not IS_DIVISIBLE or CHECK_BLOCK_BOUNDARY) else None)

    {{ modification(
        subgraph_number=0,
        output_name="post_mod_scores",
        score="qk",
        b="off_z",
        h="off_hq",
        m="m",
        n="n",
        out="qk"
    ) | indent_except_first(1) }}

    if CHECK_BLOCK_BOUNDARY:
        # Mask out the elements that are out of the KV_LEN for non divisible seqlen.
        post_mod_scores = tl.where(offs_n2[None, :] < KV_LEN, post_mod_scores, float("-inf"))

    if not IS_FULL_BLOCKS:
        {{ modification(
            subgraph_number=2,
            output_name="mask_mod_output",
            score="qk",
            b="off_z",
            h="off_hq",
            m="m",
            n="n",
        ) | indent_except_first(2) }}

        if CHECK_BLOCK_BOUNDARY:
            mask_mod_output = tl.where(offs_n2[None, :] < KV_LEN, mask_mod_output, False)
        # apply mask for partial masked block
        post_mod_scores = tl.where(mask_mod_output, post_mod_scores, float("-inf"))
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if not PRESCALE_QK:
        post_mod_scores *= RCP_LN2
    p = tl.math.exp2(post_mod_scores - lse)
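    # Recompute P from the saved logsumexp instead of materializing it in the
    # forward pass: scores and lse agree in base-2 units at this point, so
    # exp2(post_mod_scores - lse) reproduces softmax(score) row-wise.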
    # Compute dP and dS.
    # NB: reversed index order since V is transposed
    vT = load_checked_2d(vT_ptrs, offs_v, offs_n2, None, None, SAFE_HEAD_DIM, IS_DIVISIBLE, V_HEAD_DIM, KV_LEN)

    dp = tl.dot(do, vT, input_precision=FLOAT32_PRECISION)
    ds = p * (dp - Di[:, None])
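    # Softmax backward: with P = softmax(S) and dP = dO @ V^T, the score gradient is
    # dS_ij = P_ij * (dP_ij - Delta_i), where Delta_i = sum_j P_ij * dP_ij equals
    # rowsum(dO * O) and is precomputed once per query row into DELTA.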
    # ~~~~~~~~~~~~~~~~~~~ Apply joint modification  ~~~~~~~~~~~~~~~~~~~
    {{ modification(
        subgraph_number=1,
        output_name = "grad_scores",
        score="pre_mod_scores",
        b="off_z",
        h="off_hq",
        m="m",
        n="n",
        grad_score_mod="ds"
    ) | indent_except_first(1) }}
    if CHECK_BLOCK_BOUNDARY:
        grad_scores = tl.where(offs_n2[None, :] < KV_LEN, grad_scores, 0.0)

    # ~~~~~~~~~~~~~~~~~~~ Apply other buffer grad writes ~~~~~~~~~~~~~
    if WRITE_DQ:
        scatter_mask = (offs_m2[:, None] < Q_LEN) & (offs_n2[None, :] < KV_LEN)
        {{ modification(
            subgraph_number=3,
            output_name=None,
            mask="scatter_mask",
            score="pre_mod_scores",
            b="off_z",
            h="off_hq",
            m="m",
            n="n",
            grad_score_mod="ds"
        ) | indent_except_first(2) }}
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ds = grad_scores

    if not IS_FULL_BLOCKS:
        if CHECK_BLOCK_BOUNDARY:
            mask_mod_output = tl.where(offs_n2[None, :] < KV_LEN, mask_mod_output, False)
        # (grads) apply mask for partially unmasked block
        ds = tl.where(mask_mod_output, ds, 0.0)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    ds = ds.to(MATMUL_PRECISION)
    # Compute dQ.
    dq += tl.dot(ds, tl.trans(kT), input_precision=FLOAT32_PRECISION)

    return dq


@triton.jit
def bwd_dkdv_inner(
    {{gen_argdefs()}},
    Q, DO, DELTA, LSE, # pointers
    dk, dv, k, v,
    off_z, off_hq, offs_n1, offs_m1,
    stride_qm, stride_qd, stride_dom, stride_dod,
    q_indices, sparse_q_num_blocks,
    MATMUL_PRECISION,
    IS_FULL_BLOCKS,
):
    {{gen_defines() | indent_except_first(1) }}
    SPARSE_Q_MULTIPLE: tl.constexpr = (SPARSE_Q_BLOCK_SIZE // BLOCK_M1)
    RCP_LN2: tl.constexpr = 1.44269504
    Q_LEN = {{size("Q", 2)}}
    KV_LEN = {{size("K", 2)}}

    offs_k = tl.arange(0, QK_HEAD_DIM_ROUNDED)
    offs_v = tl.arange(0, V_HEAD_DIM_ROUNDED)

    qT_ptrs = Q + offs_m1[None, :] * stride_qm + offs_k[:, None] * stride_qd
    do_ptrs = DO + offs_m1[:, None] * stride_dom + offs_v[None, :] * stride_dod
    # BLOCK_N1 must be a multiple of BLOCK_M1, otherwise the code wouldn't work.
    tl.static_assert(BLOCK_N1 % BLOCK_M1 == 0)
    hi = tl.minimum(sparse_q_num_blocks * SPARSE_Q_MULTIPLE, tl.maximum(tl.cdiv(Q_LEN, BLOCK_M1), 1))

    if not IS_DIVISIBLE:
        if hi >= 1:
            for start_m in range(0, hi - 1):
                dk, dv = bwd_dkdv_block_mn(
                    {{gen_argdefs()}},
                    dk, dv, qT_ptrs, k, v, do_ptrs, DELTA, LSE, Q_LEN, KV_LEN,
                    off_z, off_hq, offs_n1, offs_m1, offs_k, offs_v,
                    stride_qm, stride_qd, stride_dom, stride_dod,
                    q_indices, sparse_q_num_blocks,
                    MATMUL_PRECISION, RCP_LN2,
                    IS_FULL_BLOCKS,
                )
                # Increment pointers.
                offset = get_offset_for_next_block(
                    start_m, q_indices, sparse_q_num_blocks,
                    SPARSE_Q_BLOCK_SIZE, SPARSE_Q_MULTIPLE, BLOCK_M1, BLOCKS_ARE_CONTIGUOUS
                )

                qT_ptrs += offset * stride_qm
                do_ptrs += offset * stride_dom

                offs_m1 += offset

            dk, dv = bwd_dkdv_block_mn(
                {{gen_argdefs()}},
                dk, dv, qT_ptrs, k, v, do_ptrs, DELTA, LSE, Q_LEN, KV_LEN,
                off_z, off_hq, offs_n1, offs_m1, offs_k, offs_v,
                stride_qm, stride_qd, stride_dom, stride_dod,
                q_indices, sparse_q_num_blocks,
                MATMUL_PRECISION, RCP_LN2,
                IS_FULL_BLOCKS, CHECK_BLOCK_BOUNDARY=True,
            )
    else:
        for start_m in range(0, hi):
            dk, dv = bwd_dkdv_block_mn(
                {{gen_argdefs()}},
                dk, dv, qT_ptrs, k, v, do_ptrs, DELTA, LSE, Q_LEN, KV_LEN,
                off_z, off_hq, offs_n1, offs_m1, offs_k, offs_v,
                stride_qm, stride_qd, stride_dom, stride_dod,
                q_indices, sparse_q_num_blocks,
                MATMUL_PRECISION, RCP_LN2,
                IS_FULL_BLOCKS,
            )
            # Increment pointers.
            offset = get_offset_for_next_block(
                start_m, q_indices, sparse_q_num_blocks,
                SPARSE_Q_BLOCK_SIZE, SPARSE_Q_MULTIPLE, BLOCK_M1, BLOCKS_ARE_CONTIGUOUS
            )

            qT_ptrs += offset * stride_qm
            do_ptrs += offset * stride_dom

            offs_m1 += offset

    return dk, dv


@triton.jit
def bwd_dkdv_block_mn(
    {{gen_argdefs()}},
    dk, dv, qT_ptrs, k, v, do_ptrs, DELTA, LSE, Q_LEN, KV_LEN,
    off_z, off_hq, offs_n1, offs_m1, offs_k, offs_v,
    stride_qm, stride_qd, stride_dom, stride_dod,
    q_indices, sparse_q_num_blocks,
    MATMUL_PRECISION, RCP_LN2,
    IS_FULL_BLOCKS, CHECK_BLOCK_BOUNDARY=False,
):
    {{gen_defines() | indent_except_first(1) }}

    # NB reversed order since Q is transposed
    qT = load_checked_2d(qT_ptrs, offs_k, offs_m1, None, None, SAFE_HEAD_DIM, IS_DIVISIBLE, QK_HEAD_DIM, Q_LEN)
    # Load LSE before computing qk to reduce pipeline stall.
    if IS_DIVISIBLE:
        lse = tl.load(LSE + offs_m1)
    else:
        lse = tl.load(LSE + offs_m1, mask=offs_m1 < Q_LEN)
    lse = tl.where(lse == -float("inf"), 0.0, lse)
    qkT = tl.dot(k, qT, input_precision=FLOAT32_PRECISION)
    if not PRESCALE_QK:
        qkT *= SM_SCALE
    # ~~~~~~~~~~~~~~~~~~~ Apply score modification  ~~~~~~~~~~~~~~~~~~~
    m = get_bounded_indices(offs_m1[None, :], Q_LEN if CHECK_BLOCK_BOUNDARY else None)
    # The boundary check on M is only needed on the last iteration of the outer loop,
    # but since we iterate across the M dim here, the N reads can be out of bounds on
    # any iteration, so clamp them whenever KV_LEN is not divisible.
    n = get_bounded_indices(offs_n1[:, None], KV_LEN if (not IS_DIVISIBLE or CHECK_BLOCK_BOUNDARY) else None)

    pre_mod_scores = qkT
    {{ modification(
        subgraph_number=0,
        output_name="post_mod_scores",
        score="qkT",
        b="off_z",
        h="off_hq",
        m="m",
        n="n",
        out="qkT"
    ) | indent_except_first(1) }}

    if CHECK_BLOCK_BOUNDARY:
        # Mask out the elements that are out of the KV_LEN for non divisible seqlen.
        post_mod_scores = tl.where(offs_n1[:, None] < KV_LEN, post_mod_scores, float("-inf"))

    if not IS_FULL_BLOCKS:
        {{ modification(
            subgraph_number=2,
            output_name="mask_mod_output",
            score="qkT",
            b="off_z",
            h="off_hq",
            m="m",
            n="n",
        ) | indent_except_first(2) }}
        if CHECK_BLOCK_BOUNDARY:
            mask_mod_output = tl.where(offs_n1[:, None] < KV_LEN, mask_mod_output, False)
        # (grads) apply mask for partially masked block
        post_mod_scores = tl.where(mask_mod_output, post_mod_scores, float("-inf"))
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    if not PRESCALE_QK:
        post_mod_scores *= RCP_LN2
    pT = tl.math.exp2(post_mod_scores - lse[None, :])
    do = load_checked_2d(do_ptrs, offs_m1, offs_v, None, None, IS_DIVISIBLE, SAFE_HEAD_DIM, Q_LEN, V_HEAD_DIM)
    # Compute dV.
    ppT = pT
    dv += tl.dot(ppT.to(MATMUL_PRECISION), do, input_precision=FLOAT32_PRECISION)
    if IS_DIVISIBLE:
        Di = tl.load(DELTA + offs_m1)
    else:
        Di = tl.load(DELTA + offs_m1, mask=offs_m1 < Q_LEN)
    # Compute dP and dS.
    dpT = tl.dot(v, tl.trans(do), input_precision=FLOAT32_PRECISION)
    dsT = pT * (dpT - Di[None, :])
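    # Same softmax-backward identity as in bwd_dq_block_mn, just transposed:
    # dS^T = P^T * (dP^T - Delta broadcast along the Q dim).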
    # ~~~~~~~~~~~~~~~~~~~ Apply joint modification  ~~~~~~~~~~~~~~~~~~~
    {{ modification(
        subgraph_number=1,
        output_name = "grad_scores",
        score="pre_mod_scores",
        b="off_z",
        h="off_hq",
        m="m",
        n="n",
        grad_score_mod="dsT"
    ) | indent_except_first(1) }}

    # ~~~~~~~~~~~~~~~~~~~ Apply other buffer grad writes ~~~~~~~~~~~~~
    if not WRITE_DQ:
        idx_b = off_z
        idx_h = off_hq
        idx_m = m
        idx_n = n
        scatter_mask = (offs_m1[None, :] < Q_LEN) & (offs_n1[:, None] < KV_LEN)
        {{ modification(
            subgraph_number=3,
            output_name=None,
            mask="scatter_mask",
            score="pre_mod_scores",
            b="idx_b",
            h="idx_h",
            m="idx_m",
            n="idx_n",
            grad_score_mod="dsT"
        ) | indent_except_first(2) }}
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    if CHECK_BLOCK_BOUNDARY:
        grad_scores = tl.where(offs_n1[:, None] < KV_LEN, grad_scores, 0.0)

    dsT = grad_scores
    if not IS_FULL_BLOCKS:
        if CHECK_BLOCK_BOUNDARY:
            mask_mod_output = tl.where(offs_n1[:, None] < KV_LEN, mask_mod_output, False)
        # (grads) apply mask for partially unmasked block
        dsT = tl.where(mask_mod_output, dsT, 0.0)
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    dk += tl.dot(dsT.to(MATMUL_PRECISION), tl.trans(qT), input_precision=FLOAT32_PRECISION)

    return dk, dv
"""
    + compute_next_offset_func
    + get_bounded_indices_func
    + load_checked_2d,
)


def validate_joint_graph(joint_graph: torch.fx.Graph):
    """We do some pre lowering graph checks in order to raise nicer error messages"""
    for node in joint_graph.nodes:
        if (
            node.op == "call_function"
            and node.target == torch.ops.flex_lib.zeros_and_scatter.default
        ):
            for user in node.users:
                if user.op != "output":
                    raise NotImplementedError(
                        """Using multiple indexing operations on the same tensor that requires gradients in a score_mod function is not currently supported. This typically happens when indexing the same tensor multiple times, like:

    def score_mod(score, b, h, q_idx, kv_idx):
        return score + bias[q_idx] + bias[kv_idx]  # bias used twice!

A valid workaround is to clone() the tensors that will be indexed multiple times. For example:

    bias1 = bias.clone()
    def score_mod(score, b, h, q_idx, kv_idx):
        return score + bias[q_idx] + bias1[kv_idx]

Note that this solution will use additional memory."""
                    )


@dataclass(frozen=True)
class JointOutputResult:
    """Results from processing joint outputs."""

    grad_input: ComputedBuffer
    captured_grads_compute: list[ComputedBuffer]
    captured_grads: list[Optional[TensorBox]]
    mutated_grads: list[TensorBox]


def process_joint_outputs(
    all_joint_outputs: SubgraphResults, num_placeholders: int
) -> JointOutputResult:
    """Process joint outputs and extract various buffers needed for lowering

    Args:
        all_joint_outputs: List of all the outputs from build_subgraphs
        num_placeholders: The number of placeholder inputs, used to skip over unused backward compute buffers

    Returns:
        JointOutputResult containing processed buffers and gradients
    """
    assert isinstance(all_joint_outputs, list)
    assert all_joint_outputs[0] is not None, (
        "joint_subgraph_buffer is None - this is a bug!"
    )
    joint_buffer = all_joint_outputs[0]
    # Skip the grads for the (score, b, h, m, n) placeholders; only the grads of
    # captured buffers remain.
    other_grads = all_joint_outputs[num_placeholders - 1 :]
    grads_compute = [buf for buf in other_grads if buf is not None]

    def get_out(buf: Optional[ComputedBuffer]) -> Optional[TensorBox]:
        if buf is None:
            return None
        assert isinstance(buf, ComputedBuffer)
        assert buf.name is not None
        return TensorBox(StorageBox(V.graph.get_buffer(buf.name)))

    grads_out = [get_out(buf) for buf in other_grads]
    mutated_grads = [buf for buf in grads_out if buf is not None]

    return JointOutputResult(
        grad_input=joint_buffer,
        captured_grads_compute=grads_compute,
        captured_grads=grads_out,
        mutated_grads=mutated_grads,
    )
}}}}|
\  }}}}}}}}}}}}}t        M|||||||||||g      \  M}}}}}}}}}}}Mj                         }Mj                         }Mj                         \  }}}} |j                         \  }!}"}#}$t        j
                  j                  j                  t        j                  ||!      t        j                  |!d      z        sJ d| d|!        t        |      }|j                         D %&ci c]K  \  }%}&|%t        |&t        j                        r)t        j
                  j                  j                  |&      n|&M }}%}&|j                  dt!                      |dz  dk7  s|#dz  dk7  r|j                  dd       n|j                  dd	       d
|fdt"        j$                  fdt"        j$                  fdt"        j$                  fdt"        j$                  ffD 'cg c]  \  }'}t'        |'||       }(}'}t)        |(t+        |      z   |      })|(t'        d|      gz   }*|	j,                  j
                  j/                          t1        |	j,                  j
                         t)        |*t+        |      z   |	      }+t3        |+t5        |*            },dt"        j$                  fdt"        j$                  fdt"        j$                  fdt"        j$                  ffD 'cg c]   \  }'}t'        |'|Mj                               " }-}'}t)        |-t+        |      z   |      }.|.}.||"|#| g}/t7        |/|j9                               }0t;        |j                         |j                         |/|0D 1cg c]  }1t        j<                  |1       c}1      }2t?        t@        jB                     |dtE        jF                  d      z        }3t?        t@        jB                     ||      }4t?        t@        jH                     |4d      }5t?        t@        jJ                     |5|3      }5tM        jN                  |5      }5t        |3|5g      \  }3}5|||| g}6t7        |6Mj9                               }7tQ        |6|7D 1cg c]  }1t        j<                  |1       c}1Mj                         Mj                               }8||"|#|$g}9t7        |9|j9                               }:tQ        |9|:D 1cg c]  }1t        j<                  |1       c}1|j                         |j                               };|j                  d|       ||"z  }<|j                  d|<       |d u}=|j                  d|=       |=sMfdtS        d      D        \  }}}}tU        || |$t        j
                  j                         t        j
                  j                  j                  |      }t        j
                  j                  j                  |      }g }>g }?|?jW                  tY        M             tZ        j\                  rvt"        j^                  j`                  g dndg}@|?jc                  dD ABC1cg c]4  }AdD ]-  }BAdk\  sBdk\  rddgndgD ]  }C@D ]  }1BAz  dk(  rABC|1f  / 6 c}1}C}B}A       |je                         }D|?D ]  \  }A}B}E}F||Az  dk7  s|Az  dk7  s|Bz  dk7  s|Bz  dk7  r*Dje                         }Gt+        |Gjg                               D ]O  }%|%ji                  d      rGjk                  |%      }&|&|G|%dd  <   |%ji                  d      s?Gjk                  |%       Q Gj                  d E       |Gj                  d!F       |Gj                  d"A       |Gj                  d#B       |Gj                  d$|B       |Gj                  d%|A       |Gj                  d&|       |Gj                  d'|       tm        jn                  d.|>M||||5||8|;||||||||g|2|)|,jp                  |.|,jr                  g|8|;g|,jt                  Mj                         |j                         dd( z   d)|G  M||||5||8|;||||||||gt+        |      z   t+        |      z   |,jt                  z   }Htw        |      tx        tw        |      tx        tw        |      tx        tw        |      tx        d*}It{        d+|>|H|2|I,      }Jt        j
                  j                  j                  t        j                  ||!            rJ}K|;}Lnt        j
                  j                  j                  t        j|                  |d      t        j                  |!d      z        s[J dt        j
                  j                  j                  |       dt        j
                  j                  j                  |!              t?        t@        jH                     Jdd	-      }Kt?        t@        jH                     |;dd	-      }L|8KLt        |,j                        fS c c}&}%w c c}}'w c c}}'w c c}1w c c}1w c c}1w c c}1}C}B}Aw )/Nr0   r  r  r  r   r   r  FTr  r  r  r  r  grad_score_modr3  r   r   )axis)r4  rJ   rK   r  r  r  c              3   T   K   | ]  }t        d j                                ! ywr  r  r  s     r5   r  z*flex_attention_backward.<locals>.<genexpr>	  s+      R
45E!E,,.//R
r  r   )r0   r   r   r  )r   r   )r   r   r   r   r  r  r  r   BLOCK_M1r  r  BLOCK_N2re  rf  r   r  )r   r   
                  r  r  )r  keepdimsrA   )Ar\   rp   rt   rs   r
   r;   r<   r   rW   r   r  r  rV   rX   r   r  re   r_   r  rQ   r   rv   r   eliminate_dead_coder  r  r1   r>   rR  r   rS  r%   r   mulr  logsumsubr   require_contiguousr"   rw   r  rM  r   r   r  ra   rb   extendr  r  r  r   flex_attention_backward_templater  r  r  r  r   r   r(   r   tupler  )NrR   r  rY  rZ  outr  grad_outgrad_logsumexpfw_graphr  r[  r;  r   r\  r]  r7  r^  r   r_  r`  ra  rb  rc  rd  re  rf  r%  rK   rJ   ro  rp  rq  rr  rs  rt  ru  rv  r  r  rI   fwd_placeholder_inpsfw_subgraph_bufferjoint_placeholder_inpsr  joint_outputsri  rk  key_sizekey_stridesrz  layout_broadcasted_kgrad_lse_exp2	mul_deltadelta
query_sizegrad_query_strides
grad_query
value_sizevalue_stridesbroadcasted_grad_valuer  r  r9  r  num_stages_listBLOCK1BLOCK2wr  r  r   r  r  r  broadcasted_grad_keygrad_key
grad_valuer   sN                                                                                @r5   r  r  @	  s
   & 	  			  		
	$ FOOE%*^^%5"BI{',~~'7$Cj*77))%((2s*;ehhsA>N*NO 
0IcUCO .)N #((*	 Aq 	
a& 7711!4	N  13H3JK3!zC/14!!.%8!!.$7
 e%++%++%++%++
	D% 	4/	 	 /t$;<<h 2+UF;5  ""668 11778-&=!>>
 *356M %++%++%++%++	
#D% 	4(8(8(:;# # .#d+A&BBJ * C[1H%h0@AK&*56Qa 6	 dhh'DHHQKHM$((#C2Idhh	3Edhh}5E++E2E(-)?@M5 b)[1J,Z9I9I9KL*<=Qa =oo!	J c:z2J'
E4D4D4FGM**78Qa 8oo!	 j%0 Sy02BC )4O/AR
9>qR
NO-> ZAQAQR''**@@ATU77++AABVWG/1GNN*512*/--*;*;*C,! ' + $*cMVs]1a&	 (
 F?a' A&&&&		
 -11318 @
-	: 6)Q."V+q0#f,1"V+q0 599;(--/0 	*A||F#&**1-,-"1QR5)||F#"&&q)	* 	%%k9=%%lJ?%%j&9%%j&9%%j&9%%j&9%%&;=PQ%%&<>RS(<< "	
&"!!$ ("((!44	 & ,,
 ~~'#,,.1*==A"	
B !C"	
=@
F "!	
$ &
'%	(& %
&'	'( 
%
%)	& 0 ,J7,Y7,_=,^<	M 5!# 	ww%%ehhr3&78'+
ww--ehhr1oa@P.PQ 	
gg&&44R89 :ww''55c:;=	
Q
 TXX&';!dStxx()?aRVW
*eM4P4P.QRRA	B#. 7" > 9>s+   Ah2>h8:%h>ii	
.i
#9irU   )}r  r  loggingr  collections.abcr   dataclassesr   enumr   r   typingr   r   r	   rW   r_   torch._inductor.virtualizedr
   torch.utils._ordered_setr   torch.utils._pytreer   torch.utils._sympy.numbersr   torch.utils._sympy.value_rangesr    r   irr   r   r   r   r   r   r   r   r   r   r   r   r   loweringr   r    r!   r"   r#   r$   r%   r&   r'   select_algorithmr(   r)   r*   r+   	getLoggerr   r   r   r   Exprr   r6   r>   rH   strrJ   rK   rv   rQ   r\   re   r   SubgraphResultsr#  r$  r   r   compute_next_offset_funcget_bounded_indices_funcload_checked_blockload_checked_2dcompute_flex_attentioncompute_forward_innercompute_forward_block_mnr  r   rq   rP  rQ  r   r   r   r   r  r   r   r   r   r   r   r   $torch._inductor.kernel.flex_decodingr   #codegen.cpp_flex_attention_templater   r  r  r  r  r  r  r  higher_orderr   r  r  Graphr  r  r  r  rA   r7   r5   <module>r?     s   8    $ !  ' '   ) / ( - 7    
 
 
  g!yy~~zzC= c],/hsm /8C= / I I !%	*
*;;* LL* 49
	*
 *&	Xf-. 	)d3i )X Xn568PPQ6N
y/6N).)=)=6N6NrEY E8 E E
 $  8H VD NZ z )		!  	
 	 $P ]]B
]]C.
]]C.
^^R*
^^S?
^^S>
]]B)
]]C*
]]C.
  ]]B
]]C/
]]C.
^^R/
^^S?
^^S>
]]B
]]C/
]]C.
  ]]B
]]C/
]]C.
^^R/
^^S?
^^S>
]]B
]]C/
]]C.
 4 
"$ "5c31C+D "B. .sCc/A)B .b6eCc3,>&? 66eCc3,>&? 6"0ell  M J\~+(cND 599))88dSl Tld	$ $2	"	%l
Z [m
\ ]n
^ _o
s
$  lehhnn 0 $# # #'&':=''V 	II22JSJSr7   