
"""Utilities for managing state of v1 control flow for computing gradients."""

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util as util
from tensorflow.python.ops import control_flow_v2_func_graphs
from tensorflow.python.ops import default_gradient
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import resource_variable_ops

# pylint: disable=protected-access


def _GetMaxSizeFromNestedMaximumIterations(value, while_ctxt):
  """Calculate a max_size for use by stack ops inside an XLA while_loop.

  Args:
    value: The value inside the while_loop forward context.  Used for printing
      error messages.
    while_ctxt: The forward context inside which value resides.  This does not
      always match the value's immediate context, as `value` may be inside e.g.
      a cond context inside the while_loop.

  Returns:
    A tensor containing the `max_size` to feed to a Stack initializer.

  Raises:
    ValueError: If `value` is nested inside a `while_loop` that either
      lacks a `maximum_iterations` parameter, or the `maximum_iterations`
      parameter:

        - is inside a `while_loop` that is a parent of the calling context, and
        - cannot be evaluated at graph build time to a constant.
  """
  value_name = value.name
  # curr_ctxt is the context that tf.gradients was called in.
  curr_ctxt = ops.get_default_graph()._get_control_flow_context()

  curr_ctxt_name = curr_ctxt.name if curr_ctxt is not None else ""
  max_size = constant_op.constant(1)

  # Loop through all containing while contexts between value and the
  # current context, multiplying together each context's max_iterations
  # to get the maximum stack size.
  while while_ctxt not in (None, curr_ctxt):
    max_iter = while_ctxt.maximum_iterations
    if max_iter is None:
      raise ValueError(
          "Cannot create a gradient accumulator for tensor '%s' inside XLA "
          "while_loop because maximum_iterations was not passed to the "
          "tf.while_loop call ('%s')." % (value_name, while_ctxt.name))

    max_iter_ctxt = max_iter.op._get_control_flow_context()

    # If max_iter_ctxt (non-strictly) contains curr_ctxt, then it's OK to use.
    if util.IsContainingContext(curr_ctxt, max_iter_ctxt):
      max_size *= max_iter
    else:
      # We cannot use max_iter because it's defined in a nested while_loop
      # context, so its runtime value is not available here. Fall back to
      # the value known at graph construction time, if any.
      const_max_iter = tensor_util.constant_value(max_iter)
      if const_max_iter is None:
        raise ValueError(
            "Cannot create a gradient accumulator for tensor '%s' inside XLA "
            "while_loop. maximum_iterations tensor '%s' for while_loop "
            "context '%s' must be statically known (e.g. a constant value or "
            "known shape dimension), or be defined at or outside the while "
            "loop context '%s' (currently defined in '%s')." %
            (value_name, max_iter.name, while_ctxt.name, curr_ctxt_name,
             max_iter_ctxt.name))
      max_size *= const_max_iter

    # Find the next outer WhileContext (or stop if we reach the
    # tf.gradient's context).
    while_ctxt = util.GetContainingWhileContext(
        while_ctxt.outer_context, stop_ctxt=curr_ctxt)

  return max_size
ed        Zed	        Zed
        Zed        Zed        Zed        Zed        Zed        Zej&                  d        ZddZ	 ddZd Zy)_GradLoopStatea  The state used for constructing the gradient graph for a while loop.

  We create a _GradLoopState for each while loop in forward and its
  corresponding while loop in backprop. This gives us access to both
  the forward and the backprop WhileContexts.

  During the construction of gradient graph, any time when we detect
  a forward value that is needed for backprop, we create a history
  accumulator and add it to `history_map`. Any time when we backprop
  a loop switch op (in _SwitchGrad), we add the grad merge op in
  `switch_map`.
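
  For example (editor's sketch, not from the original docstring): if the
  forward loop computes `x_{i+1} = f(x_i)` and backprop needs every `x_i`,
  the forward loop is augmented with a stack push and the backprop loop
  with a matching pop, roughly:

    forward:   acc = stack(); while p(x): acc = stack_push(acc, x); x = f(x)
    backprop:  while i > 0:   x = stack_pop(acc); grad = f'(x) * grad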
  """

  def __init__(self, forward_ctxt, outer_grad_state):
    # The grad loop state for the outer while loop.
    self._outer_grad_state = None

    # The while loop context for forward.
    self._forward_context = None

    # The loop counter added by AddForwardLoopCounter. It is the value
    # of the loop counter for the next iteration.
    self._forward_index = None

    # A sync op for forward.
    self._forward_sync = None

    # The while loop context for backprop.
    self._grad_context = None

    # The loop counter added by AddBackpropLoopCounter. It is the value
    # of the loop counter for the current iteration.
    self._grad_index = None

    # A sync op for backprop.
    self._grad_sync = None

    # Information needed by backprop.
    self._history_map = {}
    self._switch_map = {}
    self._unused_exits = []
    self._deferred_exits = []
    self._forward_loop_exits = list(forward_ctxt.loop_exits)
    self._pending_exits_count = len(forward_ctxt.loop_exits)

    self._outer_grad_state = outer_grad_state
    if outer_grad_state:
      outer_forward_ctxt = outer_grad_state.forward_context
    else:
      if not hasattr(forward_ctxt, "outer_context"):
        raise ValueError("Failed to call gradients on a while loop without "
                         "properly serializing graph via MetaGraphDef")
      outer_forward_ctxt = forward_ctxt.outer_context

    # Add the forward loop counter.
    with forward_ctxt._graph.as_default():
      if outer_forward_ctxt:
        outer_forward_ctxt.Enter()
      cnt, forward_index = forward_ctxt.AddForwardLoopCounter(outer_grad_state)
      if outer_forward_ctxt:
        outer_forward_ctxt.Exit()
    self._forward_context = forward_ctxt
    self._forward_index = forward_index

    # Add the backprop WhileContext, and the backprop loop counter.
    if outer_grad_state:
      # This is a nested loop. Remember the iteration counts for each
      # execution of this inner loop.
      outer_forward_ctxt.AddName(cnt.name)
      history_cnt = outer_grad_state.AddForwardAccumulator(cnt)

      outer_grad_ctxt = outer_grad_state.grad_context
      outer_grad_ctxt.Enter()
      self._grad_context = control_flow_ops.WhileContext(
          maximum_iterations=forward_ctxt.maximum_iterations,
          parallel_iterations=forward_ctxt.parallel_iterations,
          back_prop=forward_ctxt.back_prop,
          swap_memory=forward_ctxt.swap_memory,
          name=forward_ctxt.name,
          grad_state=self)
      real_cnt = outer_grad_state.AddBackpropAccumulatedValue(history_cnt, cnt)
      self._grad_index = self._grad_context.AddBackpropLoopCounter(
          real_cnt, outer_grad_state)
      outer_grad_ctxt.Exit()
    else:
      if outer_forward_ctxt:
        outer_forward_ctxt.Enter()
      self._grad_context = control_flow_ops.WhileContext(
          maximum_iterations=forward_ctxt.maximum_iterations,
          parallel_iterations=forward_ctxt.parallel_iterations,
          back_prop=forward_ctxt.back_prop,
          swap_memory=forward_ctxt.swap_memory,
          name=forward_ctxt.name,
          grad_state=self)
      self._grad_index = self._grad_context.AddBackpropLoopCounter(
          cnt, outer_grad_state)
      if outer_forward_ctxt:
        outer_forward_ctxt.Exit()

  @property
  def outer_grad_state(self):
    """The grad loop state for outer loop."""
    return self._outer_grad_state

  @property
  def forward_context(self):
    """The while loop context for forward."""
    return self._forward_context

  @property
  def forward_index(self):
    """The loop index of forward loop."""
    return self._forward_index

  @property
  def forward_sync(self):
    """A control trigger node for synchronization in the forward loop.

    One main use is to keep the push ops of a stack executed in the
    iteration order.
    """
    if self._forward_sync is None:
      with ops.control_dependencies(None):
        self._forward_sync = control_flow_ops.control_trigger(name="f_sync")
      self._forward_sync._set_control_flow_context(self._forward_context)
      self._forward_index.op._add_control_input(self._forward_sync)
    return self._forward_sync

  @property
  def grad_context(self):
    """The corresponding WhileContext for gradient."""
    return self._grad_context

  @property
  def grad_index(self):
    """The loop index of backprop loop."""
    return self._grad_index

  @property
  def grad_sync(self):
    """A control trigger node for synchronization in the grad loop.

    One main use is to keep the pop ops of a stack executed in the
    iteration order.
    """
    if self._grad_sync is None:
      with ops.control_dependencies(None):
        self._grad_sync = control_flow_ops.control_trigger(name="b_sync")
      self._grad_sync._set_control_flow_context(self._grad_context)
      self._grad_index.op._add_control_input(self._grad_sync)
      if self._grad_context.outer_context:
        self._grad_context.outer_context.AddInnerOp(self._grad_sync)
    return self._grad_sync

  @property
  def history_map(self):
    """The map that records all the tensors needed for backprop."""
    return self._history_map

  @property
  def switch_map(self):
    """The map that records all the Switch ops for the while loop."""
    return self._switch_map

  @property
  def unused_exits(self):
    """The list of "unused" exits."""
    return self._unused_exits

  @property
  def deferred_exits(self):
    """The list of "deferred" exits."""
    return self._deferred_exits

  @property
  def forward_loop_exits(self):
    """The list of exits of the forward loop."""
    return self._forward_loop_exits

  @property
  def pending_exits_count(self):
    """The number of exits we expect to see but haven't."""
    return self._pending_exits_count

  @pending_exits_count.setter
  def pending_exits_count(self, cnt):
    """Set the pending count to cnt."""
    self._pending_exits_count = cnt

  def AddForwardAccumulator(self, value, dead_branch=False):
    """Add an accumulator for each forward tensor that is needed in backprop.

    This is added to the forward loop at the first time when a tensor
    in the forward loop is used by backprop gradient computation loop.
    We create an accumulator that accumulates the value of tensor at each
    iteration. Called in the control flow context where gradients() is called.

    The pseudocode is:
    ```
      acc = stack();
      while (_pivot) {
        acc = stack_push(acc, value);
      }
    ```
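
    The matching retrieval on the backprop side is added by
    `AddBackpropAccumulatedValue`; a sketch in the same style (editor's
    addition, not from the original docstring):

    ```
      while (_grad_pivot) {
        value = stack_pop(acc);
      }
    ```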

    We make sure that the stack push op in one iteration is executed before
    next iteration. This is achieved by adding a control edge from
    `forward_index.op.inputs[0].op` to the push op, and another control
    edge from the push op to either `forward_index.op` or `forward_sync`.

    Args:
      value: The source tensor in forward that is to be accumulated.
      dead_branch: True iff the tensor is on a dead branch of a cond.

    Returns:
      The stack that contains the accumulated history of the tensor.

    Raises:
      TypeError: For internal errors involving the value condition context.
      ValueError: If `value` is inside a XLA scope and a valid max size
        for the stack can't be found.
    Nf_acc)r$   	elem_typer   )r0   z#value_ctxt is not a CondContext: %sr   )%r4   graphrE   r   r   r   r`   rF   colocate_withr   IsInXLAContextr   r   r   r   int32r)   rB   r   stack_v2dtype
base_dtyperH   AddValuer0   GetOutputContextstack_push_v2rT   rc   
isinstancer   CondContext	TypeErrorr   rb   rd   inputs)rO   r   dead_branchr"   r$   acc	enter_accswap_enabled
value_ctxtpushadd_ops              r(   rJ   z$_GradLoopState.AddForwardAccumulator&  s   D 
			"	"	-	-	/ 6'')CCEi##D) 4
//
u% 		Q $$UXX."++B=H=t++-H!**5;;+A+AQ#		Q 
..
 ((11#6	 ++77**5884
---



$
$
&"00L:$



#
#
%





2
2477
; J(8(D(DEAJNOO $$**,$225l<D$$))+GG--j9$225l<DOO



.
.tww
7##&&--a033""6*i4 46 6
		Q 		Q4 4 46 6 6s=   8M/(MBMH-M9	M/MMM"	M//M8c                    |j                   j                         }d}|j                   j                         }|r6||k7  r1t        |t        j                        r|}n|j
                  }|r||k7  r1t        j                  d      5  | j                  j                          |r| }d}|B|r@|j                  j                  |j                  j                        }|j                  }||r@||j                  }|rd|j                  z
  n|j                  }	t        j                   ||      |	   }t#        j$                  ||j&                  j(                        }
|
j+                  |j-                                | j                  j/                          ddd       | j                  j0                  }|dkD  r%| j2                  j5                  
j                          
S # 1 sw Y   KxY w)a  Add the getter for an accumulated value in the grad context.

    This is added to the backprop loop. Called in the grad context to
    get the value of an accumulated value. The stack pop op must be guarded
    by the pred of the controlling cond.

    Args:
      history_value: The history (a stack) of a value.
      value: The value that is pushed onto the stack.
      dead_branch: True iff the tensor is on a dead branch of a cond.

    Returns:
      The current value (the top of the stack).
    """
    history_ctxt = history_value.op._get_control_flow_context()
    # Find the cond context that controls history_value, if any.
    cond_ctxt = None
    value_ctxt = value.op._get_control_flow_context()
    while value_ctxt and value_ctxt != history_ctxt:
      if isinstance(value_ctxt, control_flow_ops.CondContext):
        cond_ctxt = value_ctxt
        break
      value_ctxt = value_ctxt.outer_context
    with ops.control_dependencies(None):
      self.grad_context.Enter()
      if cond_ctxt:
        # Guard stack pop with a switch if it is controlled by a cond.
        grad_state = self
        pred = None
        while pred is None and grad_state:
          pred = grad_state.history_map.get(cond_ctxt.pred.name)
          grad_state = grad_state.outer_grad_state
        if pred is None:
          pred = cond_ctxt.pred
        branch = (1 - cond_ctxt.branch) if dead_branch else cond_ctxt.branch
        history_value = control_flow_ops._SwitchRefOrTensor(
            history_value, pred)[branch]
      pop = gen_data_flow_ops.stack_pop_v2(history_value,
                                           value.dtype.base_dtype)
      pop.set_shape(value.get_shape())
      self.grad_context.Exit()
    parallel_iterations = self.grad_context.parallel_iterations
    if parallel_iterations > 1:
      # All pops are ordered after pivot_for_body and before grad_sync.
      self.grad_sync._add_control_input(pop.op)
    return pop

  def GetRealValue(self, value):
    """Get the real value of `value`.

    If backprop "uses" a value produced by forward inference, an accumulator
    is added in the forward loop to accumulate its values.  We use the
    accumulated value. This method must be called in the grad loop context.
    `value` must be in forward and needed for backprop.

    Args:
      value: A tensor to be captured.

    Returns:
      The same tensor obtained from the saved history.
    """
    assert value.op.type not in ["Variable", "VariableV2"]
    real_value = self._history_map.get(value.name)
    if real_value is None:
      cur_value = value
      cur_grad_state = self
      while True:
        enter_op = util.GetLoopConstantEnter(cur_value)
        if enter_op:
          # Special case: cur_value comes from a constant Enter node.
          cur_value = enter_op.inputs[0]
          cur_grad_state = cur_grad_state.outer_grad_state
          if cur_grad_state is None:
            # We are now outside all nested loops for this gradient(),
            # so `value` is a loop invariant and there is no need to
            # save the history of value. Just make cur_value to enter
            # the right control flow context.
            real_value = self._grad_context.AddValue(cur_value)
            break
        elif constant_op.is_constant(cur_value):
          # If the value to be forwarded is a constant, clone the constant
          # in the gradient loop instead of using a stack.
          real_value = constant_op.constant(
              tensor_util.constant_value(cur_value), dtype=cur_value.dtype)
          break
        else:
          # Record the history of this value in forward_ctxt.
          self._grad_context.Exit()
          history_value = cur_grad_state.AddForwardAccumulator(cur_value)
          self._grad_context.Enter()
          break

      if real_value is None:
        # Add the stack pop op in the grad context.
        real_value = cur_grad_state.AddBackpropAccumulatedValue(
            history_value, cur_value)
        if cur_grad_state != self:
          real_value = self._grad_context.AddValue(real_value)
      self._history_map[value.name] = real_value
    return real_value


class _ControlFlowState(object):
  """Maintain the mapping from the loops to their grad states."""

  def __init__(self):
    self._map = {}  # maps forward loop context to _GradLoopState

  def GetGradState(self, op: ops.Operation, before):
    """Return the grad state for this op if it's in a forward loop context."""
    if before and util.IsLoopExit(op):
      forward_ctxt = op._get_control_flow_context()
      forward_ctxt = forward_ctxt.outer_context
      if forward_ctxt:
        forward_ctxt = forward_ctxt.GetWhileContext()
    else:
      forward_ctxt = util.GetWhileContext(op)
    if forward_ctxt:
      return self._map.get(forward_ctxt)
    return None

  def ProcessUnusedLoopExits(self, pending_count, to_ops_set):
    """Process all the "unused" loop exits.

    The "unused" exits of the loops are added to `unused_exits`. An exit is
    unused if its pending_count is 0. If there is an exit with real gradient,
    all these deferred exits will enter the backprop loop with zero gradient.
    Otherwise, they will enter the backprop loop with None. As an example,
    people often write:

    ```python
    v1, _ = tf.while_loop(p, b, [x1, x2])
    result = gradients(v1, x1)
    ```

    The exit node for x2 is not included by the betweenness analysis. But we
    need to backprop x2 if x2 is involved in computing v1.

    Args:
      pending_count: The number of backprop inputs for every op.
      to_ops_set: The set of ops for ys in gradients(ys, xs)

    Returns:
      The set of unused loop exits that we know at this point we need
      to backprop.
    """
    loop_exits = []
    for grad_state in self._map.values():
      for y in grad_state.forward_loop_exits:
        if pending_count[y.op] == 0:
          grad_state.pending_exits_count -= 1
          if y.op not in to_ops_set:
            grad_state.unused_exits.append(y)
          if grad_state.pending_exits_count == 0:
            loop_exits.extend(grad_state.unused_exits)
      # Need to include Enters in backprop for higher-order gradients.
      for y in grad_state.forward_context.loop_enters:
        if pending_count[y.op] == 0:
          pending_count[y.op] = 1
    return loop_exits

  def EnterGradWhileContext(self, op, before):
    """Enter the WhileContext for gradient computation."""
    grad_state = self.GetGradState(op, before)
    if grad_state:
      grad_state.grad_context.Enter()

  def ExitGradWhileContext(self, op, before):
    """Exit the WhileContext for gradient computation."""
    grad_state = self.GetGradState(op, before)
    if grad_state:
      grad_state.grad_context.Exit()

  def AddWhileContext(self, op, between_op_list, between_ops):
    """Add the grad state for the while loop that op belongs to.

    Note that op is an Exit, and this method must be called in
    the control flow context where gradients() is called.

    Note that this method modifies `between_op_list` and `between_ops`.
    """
    forward_ctxt = util.GetWhileContext(op)
    grad_state = self._map.get(forward_ctxt)
    if grad_state is None:
      # This is a new while loop so create a grad state for it.
      outer_forward_ctxt = forward_ctxt.outer_context
      if outer_forward_ctxt:
        outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
      outer_grad_state = None
      if outer_forward_ctxt:
        outer_grad_state = self._map.get(outer_forward_ctxt)
      grad_state = _GradLoopState(forward_ctxt, outer_grad_state)
      self._map[forward_ctxt] = grad_state

      # We need to include all exits of a loop for backprop.
      for loop_exit in grad_state.forward_loop_exits:
        if loop_exit.op not in between_ops:
          between_ops.add(loop_exit.op)
          between_op_list.append(loop_exit.op)

  def ZerosLikeForExit(self, val):
    """Create zeros_like gradient for a loop exit.

    If the result of a loop variable is not used but is involved in
    computing the result of some needed loop variable, we create a
    zero-valued tensor that is fed as gradient for the Exit node of that
    loop variable. Note that val.op is an Exit, and this method must be
    called in the control flow context where gradients() is called.

    Args:
      val: The output tensor of an Exit op.

    Returns:
      A zero tensor of the same shape of val.
    NFoptimize)r   r   r   r   r   r   r   is_fully_definedrK   rF   r   zerosdimsr   rH   shape_internalrJ   rM   
zeros_like)rO   val	val_shaperP   rR   rQ   resultshapehistory_shaperV   
real_shapes              r(   ZerosLikeForExitz"_ControlFlowState.ZerosLikeForExitL  s    I66335L%33-==?'9:		#	#	% 	%%++-;%%**,. M) 	""((*((u=""'')(>>uE*77%AA5"
SYY7 M 
	#	#	% ; M %%cE:Mr*   c                    t        j                  |      ry|j                  j                  r"t	        j
                  |j                  |         S t        j                  |      }t        j                  |      }| j                  j                  |      }|t        ||      S |j                         }t        j                  |j                  |   d      }|j                         }|j!                         r|j"                  t$        j&                  k(  r>t	        j(                  t+        j,                  |      t/        j0                  |            }	n,t3        j4                  d|j6                  |j"                        }	|rW|j8                  j                  |j:                  j<                        }
|j>                  }tA        jB                  |	|
      d|z
     }	|	S |r|j:                  }
|j>                  }|jD                  jG                          tA        jB                  |jH                  d   |
      d|z
     }t	        jJ                  |d	      }|jD                  jM                          |jN                  jQ                  |       |jN                  jQ                  |       n7|jG                          t	        jJ                  |d	      }|jM                          |jR                  jM                          |jU                  ||
      }|jR                  jG                          |jW                  |||      }t	        j(                  ||j"                        }	|	S )a\  Create zeros_like for the specified output of an op.

    If op is in a while loop that is part of gradients(), this method
    must be called in its grad loop context.

    Args:
      op: A tensorflow operation.
      index: the index for a specific output of the op.

    Returns:
      A zero tensor of the same shape of op.outputs[index].
    Ntensorr_   r   r   r   r   r   Fr   )r   ),r   IsLoopSwitchr   building_functionr   r   outputsIsSwitchr   r   r   	ZerosLiker   r   convert_to_tensorr   r   r   r   resourcer   r   variable_shaper   get_zeros_dtyper   r   r   ro   r   r   r   r   r   r   rF   r   r   rH   r   rb   rK   rJ   rM   )rO   r   indexr   rP   r1   op_ctxtr   r   r   r   r   zeros_shapehistory_zeros_shapes                 r(   ZerosLikeV1WhileLoopz&_ControlFlowState.ZerosLikeV1WhileLoop  s    	xx!!!!"**U"344--#K''+L|,Jr5!!**,G




5 1
ACMMOE 
foo	%!005"22379 %%auzzK	%%))',,*;*;<!44VTB1v:N: M5 
||##%11"))A,26889F
D..sUC""$((1009..sUC ""$&<<
; = 0##% 445H5@+Oeucii0fMr*   c                 n   | j                   j                         D ]  \  }}|j                  j                         D ]  \  }}|j                  j                  d   |j                  j                  d   k(  s;|j                  j                  d   j
                  }|j                  j                  d   j                         }|j                         rb|j                  j                          t        j                  d||      }t        j                  |      }|j                  j                          n|j                  j                  }|r|j                          |j                  j                  d   j                  }	|	j                  d   }
t!        j"                  |
d      }t!        j$                  |      }|r|j                          |j                  j                          t        j                  |      }|j                  j                          |j                  j'                  d|         y)ac  Perform postprocessing at the end of gradients().

    We have created the gradient graph at this point. So this function
    can be used to perform any postprocessing on the gradient graph.
    We currently perform the following postprocessing:
      1. Patch the gradient graph if the output of a loop variable
         doesn't depend on its input.
    """
    for _, grad_state in self._map.items():
      for _, b_merge in grad_state.switch_map.items():
        if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
          # The value of this loop variable at iteration i+1 doesn't
          # depend on its value at iteration i. So use zeros as the
          # gradients for all iterations > 0.
          dtype = b_merge.op.inputs[0].dtype
          shape = b_merge.op.inputs[0].get_shape()
          if shape.is_fully_defined():
            grad_state.grad_context.Enter()
            # Create a zeros and use it for iterations > 0.
            grad_val = constant_op.constant(0, dtype=dtype, shape=shape)
            next_grad_val = control_flow_ops._NextIteration(grad_val)
            grad_state.grad_context.Exit()
          else:
            # Create a zeros in the outer grad context.
            outer_grad_ctxt = grad_state.grad_context.outer_context
            if outer_grad_ctxt:
              outer_grad_ctxt.Enter()
            enter_grad_op = b_merge.op.inputs[0].op
            enter_grad = enter_grad_op.inputs[0]
            grad_shape = array_ops.shape_internal(enter_grad, optimize=False)
            grad_val = array_ops.zeros(grad_shape)
            if outer_grad_ctxt:
              outer_grad_ctxt.Exit()
            # Use the zeros for iterations > 0.
            grad_state.grad_context.Enter()
            next_grad_val = control_flow_ops._NextIteration(grad_val)
            grad_state.grad_context.Exit()
          b_merge.op._update_input(1, next_grad_val)


def MaybeCreateControlFlowState(between_op_list, between_ops,
                                colocate_gradients_with_ops):
  """Create the state for all the while loops involved in one gradients().

  We create a _ControlFlowState when there are while loops involved in
  gradients(). In gradients(), control flow logic is only invoked when
  the _ControlFlowState is not None.

  Note that this method modifies `between_op_list` and `between_ops`.
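
  A sketch of the call site (editor's addition; names follow gradients_util
  but are not verbatim):

  ```python
  loop_state = MaybeCreateControlFlowState(
      between_op_list, between_ops, colocate_gradients_with_ops)
  if loop_state:
    loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
  ```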
  """
  loop_state = None
  for op in between_op_list:
    if util.IsLoopExit(op):
      if loop_state is None:
        loop_state = _ControlFlowState()
      if colocate_gradients_with_ops:
        with ops.colocate_with(op):
          loop_state.AddWhileContext(op, between_op_list, between_ops)
      else:
        loop_state.AddWhileContext(op, between_op_list, between_ops)
  return loop_state


def _ZerosLikeV1(op, index):
  """Branch of ZerosLike for TF1."""
  val = op.outputs[index]
  op_ctxt = op._get_control_flow_context()
  if op_ctxt:
    # We are in a cond context. Use a switch to create zeros only when needed.
    pred = op_ctxt.pred
    branch = op_ctxt.branch
    switch_val = control_flow_ops.switch(op.inputs[0], pred)[1 - branch]
    # An op is created along the branch taken as control dependencies are on
    # the whole op and not on the tensor output.
    pivot = array_ops.identity(switch_val)
    if val.dtype == dtypes.resource:
      with ops.control_dependencies([pivot]):
        return array_ops.zeros(
            gen_resource_variable_ops.variable_shape(switch_val),
            dtype=default_gradient.get_zeros_dtype(val))
    zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
    # Ensure ops created within array_ops.zeros are dominated by switch in
    # cond context.
    with ops.control_dependencies([pivot]):
      return array_ops.zeros(zeros_shape, dtype=val.dtype)
  else:
    return array_ops.zeros_like(val, optimize=False)


@ops._tag_zeros_tensor
def _ConstantZeros(shape, dtype):
  """Create a constant zero tensor."""
  return constant_op.constant(0, shape=shape, dtype=dtype)


def _ZerosLikeV2(op, index):
  """Branch of ZerosLike for TF2."""
  val = op.outputs[index]
  if val.dtype == dtypes.resource:
    return array_ops.zeros(
        gen_resource_variable_ops.variable_shape(val),
        dtype=default_gradient.get_zeros_dtype(val))
  if (isinstance(val.op.graph, control_flow_v2_func_graphs.WhileBodyFuncGraph)
      and val.dtype != dtypes.variant):
    # In a while_v2 body, build the zeros from the (static or runtime) shape
    # rather than from `val` itself, so the zeros do not force `val` to be
    # accumulated by the while loop.
    if val.shape.is_fully_defined():
      return _ConstantZeros(val.shape.dims, val.dtype)
    else:
      zeros_shape = array_ops.shape_internal(val, optimize=False)
      return array_ops.zeros(zeros_shape, val.dtype)
  else:
    return array_ops.zeros_like(val, optimize=False)


def ZerosLike(op, index):
  """Create zeros_like for the specified output of an op."""
  if not util.IsSwitch(op):
    return _ZerosLikeV2(op, index)
  else:
    return _ZerosLikeV1(op, index)