
"""Utilities for V2 control flow."""

from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager.polymorphic_function import atomic_function
from tensorflow.python.eager.polymorphic_function import concrete_function
from tensorflow.python.eager.polymorphic_function import tracing_compilation
from tensorflow.python.eager.polymorphic_function import transform
from tensorflow.python.framework import function_def_to_graph
from tensorflow.python.framework import ops
from tensorflow.python.framework.func_graph import FuncGraph
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_v2_func_graphs
from tensorflow.python.ops import gradients_util
from tensorflow.python.util import keras_deps
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export

_EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = None
_DISABLE_LOWER_USING_SWITCH_MERGE = False

CondBranchFuncGraph = control_flow_v2_func_graphs.CondBranchFuncGraph
WhileBodyFuncGraph = control_flow_v2_func_graphs.WhileBodyFuncGraph
WhileCondFuncGraph = control_flow_v2_func_graphs.WhileCondFuncGraph


def in_defun():
  """Returns if the current graph is, or is nested in, a defun."""
  if context.executing_eagerly():
    return False

  graph = ops.get_default_graph()
  while (isinstance(graph, CondBranchFuncGraph) or
         isinstance(graph, WhileBodyFuncGraph) or
         isinstance(graph, WhileCondFuncGraph)):
    graph = graph.outer_graph
  return isinstance(graph, FuncGraph)


def in_while_loop_defun(graph):
  """Returns if the graph is a while loop FuncGraph."""
  if context.executing_eagerly():
    return False
  return (isinstance(graph, WhileCondFuncGraph) or
          isinstance(graph, WhileBodyFuncGraph))


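# The helper below is an illustrative sketch and is not part of the original
# module: it shows when `in_defun` reports True. Eagerly there is no function
# graph being built, while tracing a `tf.function` happens inside a FuncGraph.
# The helper name and the use of `tf.function` are assumptions made for the
# example; the function is defined for documentation only and is never called
# at import time.
def _example_in_defun():  # pragma: no cover
  import tensorflow as tf  # Local import keeps the sketch self-contained.

  results = {"eager": in_defun()}  # False: no function graph is being built.

  @tf.function
  def traced():
    # During tracing the default graph is a FuncGraph, so this reports True.
    results["traced"] = in_defun()
    return tf.constant(0)

  traced()
  return results

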
def create_new_tf_function(func_graph):
  """Converts func_graph to a TF_Function and adds it to the current graph.

  Args:
    func_graph: FuncGraph

  Returns:
    The name of the new TF_Function.
  """
  transform.apply_func_graph_transforms(func_graph)
  func = atomic_function.from_func_graph(func_graph.name, func_graph, {})
  func_graph.outer_graph._add_function_recursive(func)  # pylint: disable=protected-access
  return func_graph.name


def unique_fn_name(scope, name):
  """Returns a unique name to use for a control flow function.

  Args:
    scope: A name scope string.
    name: An identifier for this function (e.g. "true", "body").

  Returns:
    A string, the name to use for the function.
  """
  return ("%s%s_%s" % (scope, name, ops.uid())).replace("/", "_")


def unique_grad_fn_name(forward_name):
  return "%s_grad_%s" % (forward_name, ops.uid())


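# Illustrative sketch, not part of the original module: the shape of the names
# produced by the two helpers above. The scope and identifier are made-up
# values, and the numeric suffixes come from `ops.uid()`, so they differ from
# run to run; the function is defined for documentation only.
def _example_unique_fn_names():  # pragma: no cover
  cond_name = unique_fn_name("outer/cond/", "true")
  # e.g. "outer_cond_true_42": scope + name + "_" + uid, with "/" -> "_".
  grad_name = unique_grad_fn_name(cond_name)
  # e.g. "outer_cond_true_42_grad_43".
  return cond_name, grad_name

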
def maybe_set_lowering_attr(op, lower_using_switch_merge=None):
  """Sets the flag to enable lowering on `op` if necessary.

  Lowering allows cond_v2 and while_v2 to avoid some of the limitations of
  Functions, allowing users to specify devices & colocation inside of cond_v2
  and while_v2 input functions, and enabling non-strict evaluation & partial
  pruning. This brings v2 control flow closer to feature parity with v1 control
  flow.

  However, we do not lower in the following cases:
    - When the `If` or `While` ops are in the XLA context. Because it is easier
      for XLA to apply its own optimizations when dealing with un-lowered
      control flow operators than with low-level control flow primitives.
    - When the eager execution context specifies the executor of functions to
      be the single threaded executor (see context.function_executor_type()).
      Because the single threaded executor does not support v1 control flow ops.
    - When 'lower_using_switch_merge' is explicitly set to False.

  Args:
    op: An `If` or `While` Operation.
    lower_using_switch_merge: Explicit value to lower or not (optional).
  """
  if lower_using_switch_merge is not None:
    op._set_attr("_lower_using_switch_merge",  # pylint: disable=protected-access
                 attr_value_pb2.AttrValue(b=lower_using_switch_merge))
  elif (not _DISABLE_LOWER_USING_SWITCH_MERGE and
        not control_flow_util.GraphOrParentsInXlaContext(op.graph) and
        context.context().function_call_options.executor_type !=
        "SINGLE_THREADED_EXECUTOR"):
    op._set_attr("_lower_using_switch_merge",  # pylint: disable=protected-access
                 attr_value_pb2.AttrValue(b=True))


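# Illustrative sketch, not part of the original module: reading the lowering
# flag back from an `If`/`StatelessIf` op built by `tf.cond` inside a traced
# `tf.function`. That such an op exists in the traced graph and carries
# `_lower_using_switch_merge=True` (outside XLA, default executor) is an
# assumption about cond_v2's behaviour; the helper is never called at import
# time.
def _example_lowering_attr():  # pragma: no cover
  import tensorflow as tf

  @tf.function
  def branchy(x):
    return tf.cond(x > 0.0, lambda: x + 1.0, lambda: x - 1.0)

  graph = branchy.get_concrete_function(tf.TensorSpec([], tf.float32)).graph
  cond_ops = [op for op in graph.get_operations()
              if op.type in ("If", "StatelessIf")]
  return [op.get_attr("_lower_using_switch_merge") for op in cond_ops]

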
def maybe_propagate_compile_time_consts_in_xla(op):
  """Tells XLA whether to propagate compile-time consts in the loop body.

  This is needed to make compile time constants available to ops, for example
  `max_num_elements` in `EmptyTensorList`, inside the loop body. Ideally this
  would always be turned on, but that doesn't work with legacy functionalized
  while_loops.

  Args:
    op: A `While` Operation.
  """
  if control_flow_util.GraphOrParentsInXlaContext(op.graph):
    op._set_attr("_xla_propagate_compile_time_consts",  # pylint: disable=protected-access
                 attr_value_pb2.AttrValue(b=True))


def resource_input_index(tensor_name, input_names, node_defs, functions):
  """Returns the index of the input corresponding to `tensor_name`.

  This method is used to find the corresponding index of an arbitrary resource
  tensor in a function (the function could be a loop body). We assume that
  resource handles are never created in functions, so that every resource
  tensor can be traced back to a function input.

  The awkward signature of this method is to make it work with both FuncGraphs
  and FunctionDefs. This is so we can recurse on function call ops without
  building the corresponding FuncGraph (note that even if a FuncGraph for a
  FunctionDef already exists, the input/output/node names may have been
  changed when the FuncGraph was serialized to the FunctionDef, which makes it
  unusable with this algorithm).

  Args:
    tensor_name: the name of the resource tensor to be resolved to an input.
    input_names: a list of the names of all inputs to the function.
    node_defs: a dict mapping op name -> NodeDef for every op in the function.
    functions: a dict mapping function name -> AtomicFunction.

  Returns:
    The index into input_names corresponding to `tensor_name`.
  """
  while tensor_name not in input_names:
    # FunctionDefs and graphs use different tensor naming conventions.
    parts = tensor_name.split(":")
    if len(parts) == 2:
      op_name, output_idx = parts
    elif len(parts) == 3:
      op_name, _, output_idx = parts
    else:
      assert len(parts) == 1
      op_name = parts[0]
      output_idx = 0
      tensor_name = "%s:%d" % (tensor_name, output_idx)
      # Check again for cases where the tensor suffix (":0") is stripped out.
      if tensor_name in input_names:
        break
    output_idx = int(output_idx)
    node_def = node_defs[op_name]

    def _extract_input_index(function_attribute_name):
      func_name = node_def.attr[function_attribute_name].func.name
      fdef = functions[func_name].cached_definition
      output_arg_name = fdef.signature.output_arg[output_idx].name
      output_tensor_name = fdef.ret[output_arg_name]
      return resource_input_index(
          output_tensor_name, [arg.name for arg in fdef.signature.input_arg],
          {ndef.name: ndef for ndef in fdef.node_def}, functions)

    if node_def.op in ("Identity", "While"):
      # A While op outputs captured resources at the same index as its inputs,
      # and an Identity op simply forwards its input, so follow the input at
      # the same index.
      tensor_name = node_def.input[output_idx]
    elif node_def.op in ("PartitionedCall", "StatefulPartitionedCall"):
      # `tensor_name` is an output of a nested function call, so recursively
      # find the corresponding input in the nested FunctionDef.
      tensor_name = node_def.input[_extract_input_index("f")]
    elif node_def.op in ("If", "StatelessIf"):
      input_index = _extract_input_index("then_branch")
      if input_index != _extract_input_index("else_branch"):
        raise AssertionError(
            ("Expected cond branches ({} op) to each have the same "
             "input->output mapping of resources.").format(node_def.op))
      # Pick the corresponding input, skipping the boolean `cond` input of the
      # `If` op.
      tensor_name = node_def.input[input_index + 1]
    else:
      # We assume there are no other op types that "forward" resource handles
      # like this, so all other handles must have been created by the op.
      raise ValueError("Taking gradient of a while loop which creates "
                       "a resource in its body is not supported: %s (%s)"
                       % (op_name, node_def.op))

  return input_names.index(tensor_name)


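# Illustrative sketch, not part of the original module: a minimal worked
# example of the resolution above. A single hand-built `Identity` NodeDef named
# "read" forwards the function input "resource_arg", so the tensor "read:0"
# resolves to input index 0. The node and argument names are made up for the
# example; the helper is never called at import time.
def _example_resource_input_index():  # pragma: no cover
  from tensorflow.core.framework import node_def_pb2

  identity = node_def_pb2.NodeDef(
      name="read", op="Identity", input=["resource_arg"])
  return resource_input_index(
      tensor_name="read:0",
      input_names=["resource_arg"],
      node_defs={"read": identity},
      functions={})  # Returns 0.

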
@tf_contextlib.contextmanager
def clear_control_inputs():
  """Clears the control inputs but preserves the ControlFlowContext.

  This is needed to preserve the XLAControlFlowControl when clearing
  control inputs for the gradient accumulators in while_v2.
  `ops.control_dependencies` does not allow that.

  Yields:
    A context manager in which the ops created will not have any control inputs
    by default but the control flow context is the same.
  """
  # pylint: disable=protected-access
  control_flow_context = ops.get_default_graph()._get_control_flow_context()
  with ops.control_dependencies(None):
    ops.get_default_graph()._set_control_flow_context(control_flow_context)
    yield
  # pylint: enable=protected-access


def _is_tpu_strategy(strategy):
  return (strategy is not None and
          strategy.__class__.__name__.startswith("TPUStrategy"))


def _is_building_keras_layer():
  keras_call_context_function = keras_deps.get_call_context_function()
  if keras_call_context_function:
    return keras_call_context_function().layer is not None
  else:
    return False


def output_all_intermediates():
  """Whether to output all intermediates of a functional control flow op.

  The default behavior is to output intermediates only when building a Keras
  Layer in graph mode and that too when certain other conditions are met:
  1. We do not output intermediates if the functional control flow op
     is being built inside a FuncGraph which is not a If/While graph. This
     guards against outputting intermediates in eager mode since keras adds
     tensors to a FuncGraph named "keras_graph" in that case. Also because we
     do not output intermediates of tf.function (since this feature is only for
     backwards compatibility) outputting intermediates of functional control
     flow ops built inside tf.function is of no value.
  2. We do not output intermediates when the compilation is using XLA or for a
     TPU.
  3. We do not output intermediates when a single threaded executor is used
     since that does not perform inlining and pruning.

  Returns:
    A bool telling whether to output all intermediates.
  """
  if _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE is not None:
    return _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
  if in_defun():
    return False
  if control_flow_util.GraphOrParentsInXlaContext(ops.get_default_graph()):
    return False
  if (context.context().function_call_options.executor_type ==
      "SINGLE_THREADED_EXECUTOR"):
    return False
  return _is_building_keras_layer()


def get_func_graph(op, input_shapes, func_name):
  """Generates and returns a FuncGraph for the given op and input_shapes."""
  fdef = None
  graph = op.graph
  # Recursively search for the function definition in enclosing graphs.
  while graph is not None:
    func = graph._get_function(func_name)  # pylint: disable=protected-access
    if func is not None:
      fdef = func.cached_definition
      break
    if hasattr(graph, "outer_graph"):
      graph = graph.outer_graph
    else:
      break

  if fdef is None:
    raise KeyError("%s cannot be found in the graph" % func_name)

  # `op.graph` may not be the default graph, e.g. for nested control flow ops
  # or when the gradient is computed from inside a defun, so build the
  # FuncGraph with `op.graph` as its outer graph.
  with op.graph.as_default():
    func_graph = function_def_to_graph.function_def_to_graph(
        fdef, input_shapes=input_shapes)
  for operation in func_graph.get_operations():
    if operation.type in ("PartitionedCall", "StatefulPartitionedCall"):
      # The gradient of a nested call op is the gradient of the called
      # function, so attach a gradient function derived from the inner
      # concrete function.
      f = graph._get_function(operation.get_attr("f").name)  # pylint: disable=protected-access
      try:
        cf = concrete_function.ConcreteFunction.from_func_graph(
            f.graph, f.function_type, attrs=f.cached_definition.attr)
      except AttributeError:
        continue
      operation._gradient_function = cf._get_gradient_function()  # pylint: disable=protected-access
  return func_graph


def get_op_and_outputs(op_or_outputs):
  if isinstance(op_or_outputs, ops.Operation):
    return op_or_outputs, []
  elif not op_or_outputs:  # Empty list.
    return None, []
  else:
    return op_or_outputs[0].op, op_or_outputs


def graph_wrapped_for_higher_order_tape_gradients(graph):
  """Check if `graph` is wrapped by `run_as_function_for_tape_gradients`."""
  while graph is not None:
    if "cflow_gradient_wrapper" in getattr(graph, "name", ""):
      return True
    graph = getattr(graph, "outer_graph", None)
  return False


def run_as_function_for_tape_gradients(make_op, inputs):
  """Fix higher-order tape gradients by wrapping `make_op` in a function.

  Args:
    make_op: A function that takes a list of inputs and returns a list of
      output tensors. This function should set any handle data relevant to its
      outputs before returning.
    inputs: A list of tensors to check for tape gradients and pass to
      `make_op`. These should include all tensors used in `make_op`.

  Returns:
    Tensors corresponding to `make_op`'s output.
  """
  if (gradients_util.PossibleTapeGradientTypes(inputs)
      == gradients_util.POSSIBLE_GRADIENT_TYPES_HIGHER_ORDER
      # Only one wrapper function is needed between the tape and the op; if we
      # have already wrapped once, do not wrap again.
      and not (ops.get_default_graph().building_function
               and "cflow_gradient_wrapper" in ops.get_default_graph().name)):
    results = tracing_compilation.call_function(
        (inputs,),
        tracing_options=tracing_compilation.TracingOptions(
            make_op, "cflow_gradient_wrapper", autograph=False))
    return results
  else:
    return make_op(inputs)


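# Illustrative sketch, not part of the original module: how an op builder can
# be routed through the wrapper above. `_build_my_op` is a hypothetical
# stand-in for a control-flow op builder; with no GradientTape watching the
# inputs there is no higher-order tape gradient, so the wrapper simply calls
# the builder directly. The helper is never called at import time.
def _example_run_as_function_for_tape_gradients():  # pragma: no cover
  import tensorflow as tf

  def _build_my_op(op_inputs):
    # Hypothetical builder: returns a list of output tensors.
    return [tf.add_n(op_inputs)]

  inputs = [tf.constant(1.0), tf.constant(2.0)]
  return run_as_function_for_tape_gradients(_build_my_op, inputs)

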
@tf_export(v1=["experimental.output_all_intermediates"])
def set_output_all_intermediates(state):
  """Whether to output all intermediates from functional control flow ops.

  The "default" behavior is to output all intermediates when using v2 control
  flow inside Keras models in graph mode. This is needed to support taking
  gradients of v2 control flow. In graph mode, Keras can sometimes freeze the
  forward graph before the gradient computation which does not work for v2
  control flow since it requires updating the forward ops to output the needed
  intermediates. We work around this by proactively outputting the needed
  intermediates when building the forward pass itself. Ideally any such extra
  tensors should be pruned out at runtime. However, if for any reason this
  doesn't work for you or if you have an inference-only model you can turn this
  behavior off using
  `tf.compat.v1.experimental.output_all_intermediates(False)`.

  If with the default behavior you are still seeing errors of the form
  "Connecting to invalid output X of source node Y which has Z outputs" try
  setting `tf.compat.v1.experimental.output_all_intermediates(True)` and
  please file an issue at https://github.com/tensorflow/tensorflow/issues.

  Args:
    state: True, False or None. None restores the default behavior.
  """
  global _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
  _EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = state