
"""Library for controlling the Tensorflow/XLA JIT compiler."""

import contextlib

from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.util.tf_export import tf_export

_XLA_SCOPE_KEY = ("__xla_scope",)


class _XlaScope(object):
  """Keeps track of previous XLA scope calls, and depth of current call."""

  def __init__(self, count, depth):
    self.count = count
    self.depth = depth


@contextlib.contextmanager
@tf_export("xla.experimental.jit_scope")
def experimental_jit_scope(compile_ops=True,
                           separate_compiled_gradients=False):
  """Enable or disable JIT compilation of operators within the scope.

  NOTE: This is an experimental feature.

  The compilation is a hint and only supported on a best-effort basis.

  Example usage:

    ```python
    with tf.xla.experimental.jit_scope():
      c = tf.matmul(a, b)  # compiled
    with tf.xla.experimental.jit_scope(compile_ops=False):
      d = tf.matmul(a, c)  # not compiled
    with tf.xla.experimental.jit_scope(
        compile_ops=lambda node_def: 'matmul' in node_def.op.lower()):
      e = tf.matmul(a, b) + d  # matmul is compiled, the addition is not.
    ```

  Example of `separate_compiled_gradients`:

    ```python
    # In the example below, the computations for f, g and h will all be compiled
    # in separate scopes.
    with tf.xla.experimental.jit_scope(
        separate_compiled_gradients=True):
      f = tf.matmul(a, b)
    g = tf.gradients([f], [a, b], name='mygrads1')
    h = tf.gradients([f], [a, b], name='mygrads2')
    ```

  Ops that are not in the scope may be clustered and compiled with ops in
  the scope with `compile_ops=True`, while the ops in the scope with
  `compile_ops=False` will never be compiled.

  For example:

    ```python
    # In the example below, x and loss may be clustered and compiled together,
    # while y will not be compiled.
    with tf.xla.experimental.jit_scope():
      x = tf.matmul(a, b)
    with tf.xla.experimental.jit_scope(compile_ops=False):
      y = tf.matmul(c, d)
    loss = x + y
    ```

  If you want to only compile the ops in the scope with `compile_ops=True`,
  consider adding an outer `jit_scope(compile_ops=False)`:

    ```python
    # In the example below, only x will be compiled.
    with tf.xla.experimental.jit_scope(compile_ops=False):
      with tf.xla.experimental.jit_scope():
        x = tf.matmul(a, b)
      y = tf.matmul(c, d)
      loss = x + y
    ```
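
  Eager execution is not supported (see `Raises` below). As a minimal sketch,
  the scope can instead be entered inside a `tf.function`, which builds a
  graph (assuming `import tensorflow as tf` and matrices `a` and `b` as in
  the examples above):

    ```python
    @tf.function
    def compiled_matmul(a, b):
      with tf.xla.experimental.jit_scope():
        return tf.matmul(a, b)  # compiled
    ```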

  Args:
    compile_ops: Whether to enable or disable compilation in the scope.
      Either a Python bool, or a callable that accepts the parameter
      `node_def` and returns a Python bool.
    separate_compiled_gradients: If true, put each gradient subgraph into a
      separate compilation scope. This gives fine-grained control over which
      portions of the graph will be compiled as a single unit. Compiling
      gradients separately may yield better performance for some graphs.
      The scope is named based on the scope of the forward computation as well
      as the name of the gradients. As a result, the gradients will be compiled
      in a scope that is separate from both the forward computation, and from
      other gradients.

  Raises:
    RuntimeError: if called when eager execution is enabled.

  Yields:
    The current scope, enabling or disabling compilation.
  """
  if context.executing_eagerly():
    raise RuntimeError(
        "xla.experimental.jit_scope is not supported when eager execution "
        "is enabled. Try use it inside tf.function.")

  if callable(compile_ops):
    def xla_compile(node_def):
      return attr_value_pb2.AttrValue(b=compile_ops(node_def))
  else:
    xla_compile = attr_value_pb2.AttrValue(b=compile_ops)

  attrs = {
      "_XlaCompile": xla_compile,
      "_XlaSeparateCompiledGradients":
          attr_value_pb2.AttrValue(b=bool(separate_compiled_gradients)),
  }

  # Find the singleton scope counter for the current graph; create it if it
  # doesn't exist yet.
  xla_scope_counter = ops.get_collection(_XLA_SCOPE_KEY)
  if not xla_scope_counter:
    xla_scope_counter = _XlaScope(count=0, depth=0)
    ops.add_to_collection(_XLA_SCOPE_KEY, xla_scope_counter)
  else:
    xla_scope_counter = xla_scope_counter[0]

  if xla_scope_counter.depth == 0:
    # At the root jit_scope, pick a fresh scope name; nested calls fuse into
    # the scope chosen by the outermost jit_scope.
    attrs["_XlaScope"] = attr_value_pb2.AttrValue(
        s=("jit_scope_%d" % xla_scope_counter.count).encode())
    xla_scope_counter.count += 1
  xla_scope_counter.depth += 1

  # pylint: disable=protected-access
  with ops.get_default_graph()._attr_scope(attrs):
    yield
  # pylint: enable=protected-access
  xla_scope_counter.depth -= 1