"""A class to store named variables and a scope operator to manage sharing."""

import copy
import enum
import functools
import sys
import threading
import traceback

from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import resource_variables_toggle
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.types import core
from tensorflow.python.util import function_utils
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export

__all__ = [
    "AUTO_REUSE", "VariableScope", "get_variable_scope", "get_variable",
    "get_local_variable", "variable_scope", "variable_op_scope",
    "no_regularizer", "VariableSynchronization", "VariableAggregation"
]


class _PartitionInfo(object):
  """Holds partition info used by initializer functions."""

  __slots__ = ["_full_shape", "_var_offset"]

  def __init__(self, full_shape, var_offset):
    """Constructor.

    Args:
      full_shape: Tuple or list of `int` indicating the full combined shape of
        the partitioned variables.
      var_offset: Tuple or list of `int` specifying offset of this partition
        with respect to the full variable for each dimension.

    Raises:
      TypeError: If `full_shape` or `var_offset` is not a sequence.
      ValueError: If `full_shape` or `var_offset` differ in length. If
        `var_offset` exceeds `full_shape` in any dimension.
    """
    if not isinstance(full_shape, (list, tuple)):
      raise TypeError(
          "`full_shape` must be a sequence (like tuple or list) instead of " +
          type(full_shape).__name__)
    if not isinstance(var_offset, (list, tuple)):
      raise TypeError(
          "`var_offset` must be a sequence (like tuple or list) instead of " +
          type(var_offset).__name__)
    if len(full_shape) != len(var_offset):
      raise ValueError(
          "Expected equal length, but `var_offset` is of length {} while "
          "full_shape is of length {}.".format(
              len(var_offset), len(full_shape)))
    for offset, shape in zip(var_offset, full_shape):
      if offset < 0 or offset >= shape:
        raise ValueError(
            "Expected 0 <= offset < shape but found offset={}, shape={} for "
            "var_offset={}, full_shape={}".format(offset, shape, var_offset,
                                                  full_shape))

    self._full_shape = full_shape
    self._var_offset = var_offset

  @property
  def full_shape(self):
    return self._full_shape

  @property
  def var_offset(self):
    return self._var_offset

  def single_offset(self, shape):
    """Returns the offset when the variable is partitioned in at most one dim.

    Args:
      shape: Tuple or list of `int` indicating the shape of one specific
        variable partition.

    Returns:
      `int` representing the offset in the dimension along which the variable is
       partitioned. Returns 0 if the variable is not being partitioned.

    Raises:
      ValueError: Depending on self.single_slice_dim().
    """
    single_slice_dim = self.single_slice_dim(shape)
    # If this variable is not being partitioned at all, single_slice_dim() is
    # None, and so this function should return 0.
    if single_slice_dim is None:
      return 0

    return self.var_offset[single_slice_dim]

  def single_slice_dim(self, shape):
    """Returns the slice dim when the variable is partitioned only in one dim.

    Args:
      shape: Tuple or list of `int` indicating the shape of one specific
        variable partition.

    Returns:
      `int` representing the dimension that the variable is partitioned in, or
      `None` if the variable doesn't seem to be partitioned at all.

    Raises:
      TypeError: If `shape` is not a sequence.
      ValueError: If `shape` is not the same length as `self.full_shape`. If
        the variable is partitioned in more than one dimension.
    """
    if not isinstance(shape, (list, tuple)):
      raise TypeError(
          "`shape` must be a sequence (like tuple or list) instead of " +
          type(shape).__name__)
    if len(shape) != len(self.full_shape):
      raise ValueError(
          "Expected equal length, but received shape={} of length {} while "
          "self.full_shape={} is of length {}.".format(shape, len(shape),
                                                       self.full_shape,
                                                       len(self.full_shape)))
    for i in range(len(shape)):
      if self.var_offset[i] + shape[i] > self.full_shape[i]:
        raise ValueError(
            "With self.var_offset={}, a partition of shape={} would exceed "
            "self.full_shape={} in dimension {}.".format(
                self.var_offset, shape, self.full_shape, i))

    slice_dim = None
    for i in range(len(shape)):
      if shape[i] == self.full_shape[i]:
        continue
      if slice_dim is not None:
        raise ValueError(
            "Cannot use single_slice_dim() with shape={} and "
            "self.full_shape={} since slice dim could be either dimension {} "
            "or {}.".format(shape, self.full_shape, i, slice_dim))
      slice_dim = i

    return slice_dim
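

# Illustrative example (added commentary, not part of the original TF module):
# how an initializer sees partition information. The numbers follow from the
# definitions above: a [10, 4] variable split into two row shards gives the
# second shard offset 5 along dimension 0.
def _example_partition_info():
  info = _PartitionInfo(full_shape=[10, 4], var_offset=[5, 0])
  assert info.single_slice_dim([5, 4]) == 0  # Partitioned along dim 0 only.
  assert info.single_offset([5, 4]) == 5  # This shard starts at row 5.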
class _ReuseMode(enum.Enum):
  """Mode for variable access within a variable scope."""

  # Indicates that variables are to be fetched if they already exist or
  # otherwise created.
  AUTO_REUSE = 1


VariableSynchronization = variables.VariableSynchronization
VariableAggregation = variables.VariableAggregation

AUTO_REUSE = _ReuseMode.AUTO_REUSE
tf_export(v1=["AUTO_REUSE"]).export_constant(__name__, "AUTO_REUSE")
AUTO_REUSE.__doc__ = """
@compatibility(TF2)
`tf.compat.v1.AUTO_REUSE` is a legacy API that is a no-op when TF2 behaviors
are enabled.

If you rely on `get_variable` and auto-reuse, see the
[model mapping guide](https://www.tensorflow.org/guide/migrate/model_mapping)
for more info on how to migrate your code.

Note: when you use the `tf.compat.v1.keras.utils.track_tf1_style_variables`
API as described in the above guide, `get_variable` will always behave as if
`v1.AUTO_REUSE` is set. Without the decorator, reuse will be ignored and new
variables will always be created, regardless of if they have already been
created.
@end_compatibility

When passed in as the value for the `reuse` flag, `AUTO_REUSE` indicates that
get_variable() should create the requested variable if it doesn't exist or, if
it does exist, simply return it.
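
For example (a minimal sketch, assuming TF1-style graph mode):

```python
def foo():
  with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
    return tf.compat.v1.get_variable("v", [1])

v1 = foo()  # Creates v.
v2 = foo()  # Returns the same, existing v.
assert v1 is v2
```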
"""


def _needs_no_arguments(python_callable):
  """Returns true if the callable needs no arguments to call."""
  num_arguments = len(tf_inspect.getargspec(python_callable).args)
  if not tf_inspect.isfunction(python_callable) and not isinstance(
      python_callable, functools.partial):
    # getargspec counts `self` for bound methods and callable objects.
    num_arguments -= 1
  return num_arguments == len(
      tf_inspect.getargspec(python_callable).defaults or [])
class _VariableStore(object):
  """Variable store that carries a number of named Variables.

  New variable names and new variables can be created; all stored
  variables are initialized with the initializer passed to __init__.

  Attributes:
    vars: a dictionary with string names (same as passed in GetVar) as keys and
      the corresponding TensorFlow Variables as values.
  """

  def __init__(self):
    """Create a variable store."""
    self._vars = {}  # A dictionary of the stored TensorFlow variables.
    self._partitioned_vars = {}  # A dict of the stored PartitionedVariables.
    self._store_eager_variables = False

  def get_variable(self,
                   name,
                   shape=None,
                   dtype=dtypes.float32,
                   initializer=None,
                   regularizer=None,
                   reuse=None,
                   trainable=None,
                   collections=None,
                   caching_device=None,
                   partitioner=None,
                   validate_shape=True,
                   use_resource=None,
                   custom_getter=None,
                   constraint=None,
                   synchronization=VariableSynchronization.AUTO,
                   aggregation=VariableAggregation.NONE):
    """Gets an existing variable with these parameters or create a new one.

    If a variable with the given name is already stored, we return the stored
    variable. Otherwise, we create a new one.

    Set `reuse` to `True` when you only want to reuse existing Variables.
    Set `reuse` to `False` when you only want to create new Variables.
    Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
    variables to be created if they don't exist or returned if they do.

    If initializer is `None` (the default), the default initializer passed in
    the constructor is used. If that one is `None` too, we use a new
    `glorot_uniform_initializer`. If initializer is a Tensor, we use
    it as a value and derive the shape from the initializer.

    If a partitioner is provided, a `PartitionedVariable` is returned.
    Accessing this object as a `Tensor` returns the shards concatenated along
    the partition axis.

    Some useful partitioners are available.  See, e.g.,
    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

    Args:
      name: The name of the new or existing variable.
      shape: Shape of the new or existing variable.
      dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
      initializer: Initializer for the variable.
      regularizer: A (Tensor -> Tensor or None) function; the result of applying
        it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
      reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
        variables. When eager execution is enabled this argument is always
        forced to be False.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). `trainable`
        defaults to `True`, unless `synchronization` is set to `ON_READ`, in
        which case it defaults to `False`.
      collections: List of graph collections keys to add the `Variable` to.
        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the `Variable` reside, to
        deduplicate copying through `Switch` and other conditional statements.
      partitioner: Optional callable that accepts a fully defined `TensorShape`
        and dtype of the `Variable` to be created, and returns a list of
        partitions for each axis (currently only one axis can be partitioned).
      validate_shape: If False, allows the variable to be initialized with a
        value of unknown shape. If True, the default, the shape of initial_value
        must be known.
      use_resource: If False, creates a regular Variable. If True, creates
        instead an experimental ResourceVariable which has well-defined
        semantics. Defaults to False (will later change to True). When eager
        execution is enabled this argument is always forced to be true.
      custom_getter: Callable that takes as a first argument the true getter,
        and allows overwriting the internal get_variable method. The signature
        of `custom_getter` should match that of this method,
        but the most future-proof version will allow for changes: `def
          custom_getter(getter, *args, **kwargs)`.  Direct access to
        all `get_variable` parameters is also allowed: `def
          custom_getter(getter, name, *args, **kwargs)`.  A simple identity
        custom getter that simply creates variables with modified names is:
          ```python
        def custom_getter(getter, name, *args, **kwargs): return getter(name +
          '_suffix', *args, **kwargs) ```
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

    Returns:
      The created or existing `Variable` (or `PartitionedVariable`, if a
      partitioner was used).

    Raises:
      ValueError: when creating a new variable and shape is not declared,
        when reusing a variable and specifying a conflicting shape,
        or when violating reuse during variable creation.
      RuntimeError: when eager execution is enabled and not called from an
        EagerVariableStore.
    """
    if custom_getter is not None and not callable(custom_getter):
      raise ValueError("Passed a custom_getter which is not callable: %s" %
                       custom_getter)

    with ops.init_scope():
      if context.executing_eagerly():
        # Variable creation and initialization takes place in `init_scope`s;
        # as such, if an `init_scope` lifts us into the eager context, then we
        # need to use `ResourceVariable`s.
        use_resource = True

    # Note that it's fine to reuse eager variables whose initialization was
    # lifted into a function-building graph into that graph; the
    # initialization is not pinned to any particular function.
    if context.executing_eagerly():
      if not self._store_eager_variables and reuse:
        raise RuntimeError(
            "When eager execution is enabled variable reuse is only supported"
            " when an EagerVariableStore is active. See the documentation on"
            " EagerVariableStore for example usage.")
      if self._store_eager_variables:
        reuse = AUTO_REUSE

    # If a *_ref type is passed in, an error would be triggered further down
    # the stack. We prevent this by using base_dtype to get a non-ref version
    # of the type before doing anything else.
    try:
      dtype = dtype.base_dtype
    except AttributeError:
      # .base_dtype not existing means that we will try and use the raw dtype
      # which was passed in - this might be a NumPy type which is valid.
      pass

    # This is the main logic of get_variable.  However, custom_getter may
    # override this logic, so we save it as a callable and pass it to
    # custom_getter. Its parameters match this method's item-for-item.
    def _true_getter(name, shape=None, dtype=dtypes.float32, initializer=None,
                     regularizer=None, reuse=None, trainable=None,
                     collections=None, caching_device=None, partitioner=None,
                     validate_shape=True, use_resource=None, constraint=None,
                     synchronization=VariableSynchronization.AUTO,
                     aggregation=VariableAggregation.NONE):
      is_scalar = (shape is not None and
                   isinstance(shape, collections_abc.Sequence) and not shape)
      # Partitioned variable case.
      if partitioner is not None and not is_scalar:
        if not callable(partitioner):
          raise ValueError("Partitioner must be callable, but received: %s" %
                           partitioner)
        with ops.name_scope(None):
          return self._get_partitioned_variable(
              name=name, shape=shape, dtype=dtype, initializer=initializer,
              regularizer=regularizer, reuse=reuse, trainable=trainable,
              collections=collections, caching_device=caching_device,
              partitioner=partitioner, validate_shape=validate_shape,
              use_resource=use_resource, constraint=constraint,
              synchronization=synchronization, aggregation=aggregation)

      # Special case for partitioned variable to allow reuse without having
      # to specify the partitioner.
      if (reuse is True and partitioner is None and
          name in self._partitioned_vars):
        return self._get_partitioned_variable(
            name=name, shape=shape, dtype=dtype, initializer=initializer,
            regularizer=regularizer, reuse=reuse, trainable=trainable,
            collections=collections, caching_device=caching_device,
            partitioner=None, validate_shape=validate_shape,
            use_resource=use_resource, constraint=constraint,
            synchronization=synchronization, aggregation=aggregation)

      # Single variable case.
      if "%s/part_0" % name in self._vars:
        raise ValueError(
            "No partitioner was provided, but a partitioned version of the "
            "variable was found: %s/part_0. Perhaps a variable of the same "
            "name was already created with partitioning?" % name)

      return self._get_single_variable(
          name=name, shape=shape, dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          validate_shape=validate_shape, use_resource=use_resource,
          constraint=constraint, synchronization=synchronization,
          aggregation=aggregation)

    synchronization, aggregation, trainable = (
        variables.validate_synchronization_aggregation_trainable(
            synchronization, aggregation, trainable, name))

    if custom_getter is not None:
      # Handle backwards compatibility with getter arguments that were added
      # to the API after users started writing custom getters.
      custom_getter_kwargs = {
          "getter": _true_getter,
          "name": name,
          "shape": shape,
          "dtype": dtype,
          "initializer": initializer,
          "regularizer": regularizer,
          "reuse": reuse,
          "trainable": trainable,
          "collections": collections,
          "caching_device": caching_device,
          "partitioner": partitioner,
          "validate_shape": validate_shape,
          "use_resource": use_resource,
          "synchronization": synchronization,
          "aggregation": aggregation,
      }
      # `fn_args` and `has_kwargs` can handle functions, `functools.partial`,
      # and `lambda`.
      if ("constraint" in function_utils.fn_args(custom_getter) or
          function_utils.has_kwargs(custom_getter)):
        custom_getter_kwargs["constraint"] = constraint
      return custom_getter(**custom_getter_kwargs)
    else:
      return _true_getter(
          name, shape=shape, dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          partitioner=partitioner, validate_shape=validate_shape,
          use_resource=use_resource, constraint=constraint,
          synchronization=synchronization, aggregation=aggregation)
2-7\*2122
!!!'!'#)!# #s 4   s   EE' E$'	E32E3c                 (   |duxr t        |t        j                        }|| j                  v rt	        d|z        t        j                  |      }|r|j                  |j                               }d}|r|rt        |||      }|| j                  v r|du rt	        d|z        | j                  |   }|j                  |j                               s#t	        d|d|d|j                         d      |j                  |j                        s3t	        d|d	|j                  d
|j                  j                  d      |6|j                         |k7  r#t	        d|d|d|j                         d      |S |du rt	        d|z        t        |      \  }}d|z  | j                  v rPd||dz
  fz  | j                  vrt	        d||||dz
  fz        d||fz  | j                  v rt	        d||||fz        g }t!        t#        |j%                         ||            D ]  \  }\  }}t'        |j%                         |      }d||fz  }t)        j*                  |dz   d      5  || j-                  |||      \  }}|rd}n|}nt/        |      r|}|}nzt        |t        j                        r0t1        j2                  |||      }|j                  j4                  }d}n0t)        j6                  ||      }t1        j2                  |||      }d}ddd       t)        j*                  d      5  | j9                  |||||||	|
|||||      }ddd       j;                  t<        j>                  jA                  ||j%                         ||             |jC                  |        t=        jD                  |||||      }tG        jH                         r| jJ                  r|| j                  |<   |S # 1 sw Y   xY w# 1 sw Y   xY w)a  Gets or creates a sharded variable list with these parameters.

    The `partitioner` must be a callable that accepts a fully defined
    `TensorShape` and returns a sequence of integers (the `partitions`).
    These integers describe how to partition the given sharded `Variable`
    along the given dimension.  That is, `partitions[1] = 3` means split
    the `Variable` into 3 shards along dimension 1.  Currently, sharding along
    only one axis is supported.

    If the list of variables with the given name (prefix) is already stored,
    we return the stored variables. Otherwise, we create a new one.

    Set `reuse` to `True` when you only want to reuse existing Variables.
    Set `reuse` to `False` when you only want to create new Variables.
    Set `reuse` to None (the default) or tf.compat.v1.AUTO_REUSE when you want
    variables to be created if they don't exist or returned if they do.

    If initializer is `None` (the default), the default initializer passed in
    the constructor is used. If that one is `None` too, we use a new
    `glorot_uniform_initializer`. If initializer is a Tensor, we use
    it as a value and derive the shape from the initializer.

    If the initializer is a callable, then it will be called for each
    shard.  Otherwise the initializer should match the shape of the entire
    sharded Variable, and it will be sliced accordingly for each shard.

    Some useful partitioners are available.  See, e.g.,
    `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

    Args:
      name: the name of the new or existing sharded variable.
      partitioner: Optional callable that accepts a fully defined `TensorShape`
        and `dtype` of the Variable to be created, and returns a list of
        partitions for each axis (currently only one axis can be partitioned).
      shape: shape of the new or existing sharded variable.
      dtype: type of the new or existing sharded variable (defaults to
        `DT_FLOAT`).
      initializer: initializer for the sharded variable.
      regularizer: a (Tensor -> Tensor or None) function; the result of applying
        it on a newly created variable will be added to the collection
        GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
      reuse: a Boolean, None, or tf.AUTO_REUSE. Controls reuse or creation of
        variables.
      trainable: If `True` also add the variable to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
      collections: List of graph collections keys to add the Variable to.
        Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading.  Defaults to the Variable's
        device.  If not `None`, caches on another device.  Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      validate_shape: If False, allows the variable to be initialized with a
        value of unknown shape. If True, the default, the shape of initial_value
        must be known.
      use_resource: If False, creates a regular Variable. If True, creates an
        experimental ResourceVariable which has well-defined semantics. Defaults
        to False (will later change to True).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses when to
        synchronize.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

    Returns:
      A `PartitionedVariable` object.

    Raises:
      ValueError: when creating a new variable and shape is not declared,
        when reusing a variable and specifying a conflicting shape,
        when violating reuse during variable creation, or if an existing
        sharded variable exists for the given name but with different sharding.
    """
    initializing_from_value = initializer is not None and isinstance(
        initializer, tensor.Tensor)
    if name in self._vars:
      raise ValueError(
          "A partitioner was provided, but an unpartitioned version of the "
          "variable was found: %s.  Perhaps a variable of the same name was "
          "already created without partitioning?" % name)

    shape = tensor_shape.as_shape(shape)
    if initializing_from_value:
      shape = shape.merge_with(initializer.get_shape())

    partitions = None
    if not reuse or partitioner:
      partitions = _call_partitioner(partitioner, shape, dtype)

    if name in self._partitioned_vars:
      if reuse is False:
        raise ValueError(
            "Partitioned variable with name %s already exists. Did you mean "
            "to set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" % name)

      existing_var = self._partitioned_vars[name]
      if not shape.is_compatible_with(existing_var.get_shape()):
        raise ValueError(
            "Trying to reuse partitioned variable %s, but specified shape %s "
            "and found shape %s." % (name, shape, existing_var.get_shape()))
      if not dtype.is_compatible_with(existing_var.dtype):
        raise ValueError(
            "Trying to reuse partitioned variable %s, but specified dtype %s "
            "and found dtype %s." %
            (name, dtype.name, existing_var.dtype.name))

      # pylint: disable=protected-access
      if (partitions is not None and
          existing_var._get_partitions() != partitions):
        raise ValueError(
            "Trying to reuse partitioned variable %s, but specified "
            "partitions %s and found partitions %s." %
            (name, partitions, existing_var._get_partitions()))
      # pylint: enable=protected-access

      return existing_var

    if reuse is True:
      raise ValueError("PartitionedVariable %s does not exist, or was not "
                       "created with tf.get_variable(). Did you mean to set "
                       "reuse=False or reuse=tf.AUTO_REUSE in VarScope?" %
                       name)

    slice_dim, num_slices = _get_slice_dim_and_num_slices(partitions)

    if "%s/part_0" % name in self._vars:
      if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
        raise ValueError(
            "Partitioner returned a different partitioning than what was "
            "already found.  Partitioner returned %d shards, and shard "
            "%s/part_0 was found, but %s/part_%d was not." %
            (num_slices, name, name, num_slices - 1))
      if "%s/part_%d" % (name, num_slices) in self._vars:
        raise ValueError(
            "Partitioner returned a different partitioning than what was "
            "already found.  Partitioner returned %d shards, and shard "
            "%s/part_0 was found, but so was the extra shard %s/part_%d." %
            (num_slices, name, name, num_slices))

    vs = []
    for i, (var_offset, var_shape) in enumerate(
        _iter_slices(shape.as_list(), num_slices, slice_dim)):
      partition_info = _PartitionInfo(
          full_shape=shape.as_list(), var_offset=var_offset)
      var_full_name = "%s/part_%d" % (name, i)
      with ops.name_scope(
          var_full_name + "/PartitionedInitializer", skip_on_eager=False):
        # Create the tensor to initialize the variable with default value.
        if initializer is None:
          init, initializing_from_value = self._get_default_initializer(
              name=name, shape=shape, dtype=dtype)
          if initializing_from_value:
            init_shape = None
          else:
            init_shape = var_shape
        elif callable(initializer):
          init = initializer
          init_shape = var_shape
        elif isinstance(initializer, tensor.Tensor):
          init = array_ops.slice(initializer, var_offset, var_shape)
          # Use the dtype of the given tensor.
          dtype = init.dtype.base_dtype
          init_shape = None
        else:
          init = ops.convert_to_tensor(initializer, dtype=dtype)
          init = array_ops.slice(init, var_offset, var_shape)
          init_shape = None

      with ops.name_scope(None):
        var = self._get_single_variable(
            name=var_full_name,
            shape=init_shape,
            dtype=dtype,
            initializer=init,
            partition_info=partition_info,
            regularizer=regularizer,
            reuse=reuse,
            trainable=trainable,
            collections=collections,
            caching_device=caching_device,
            validate_shape=validate_shape,
            use_resource=use_resource,
            constraint=constraint,
            synchronization=synchronization,
            aggregation=aggregation)

      # pylint: disable=protected-access
      var._set_save_slice_info(
          variables.Variable.SaveSliceInfo(name, shape.as_list(), var_offset,
                                           var_shape))
      vs.append(var)
      # pylint: enable=protected-access

    partitioned_var = variables.PartitionedVariable(
        name=name,
        shape=shape,
        dtype=dtype,
        variable_list=vs,
        partitions=partitions)
    if not context.executing_eagerly() or self._store_eager_variables:
      self._partitioned_vars[name] = partitioned_var
    return partitioned_var

  def _get_single_variable(self, name, shape=None, dtype=dtypes.float32,
                           initializer=None, regularizer=None,
                           partition_info=None, reuse=None, trainable=None,
                           collections=None, caching_device=None,
                           validate_shape=True, use_resource=None,
                           constraint=None,
                           synchronization=VariableSynchronization.AUTO,
                           aggregation=VariableAggregation.NONE):
    """Get or create a single Variable (e.g.

    a shard or entire variable).

    See the documentation of get_variable above (ignore partitioning components)
    for details.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.
      initializer: see get_variable.
      regularizer: see get_variable.
      partition_info: _PartitionInfo object.
      reuse: see get_variable.
      trainable: see get_variable.
      collections: see get_variable.
      caching_device: see get_variable.
      validate_shape: see get_variable.
      use_resource: see get_variable.
      constraint: see get_variable.
      synchronization: see get_variable.
      aggregation: see get_variable.

    Returns:
      A Variable.  See documentation of get_variable above.

    Raises:
      ValueError: See documentation of get_variable above.
    """
    # Set to true when the initializer is a constant.
    initializing_from_value = False
    if initializer is not None and not callable(initializer):
      initializing_from_value = True
    if shape is not None and initializing_from_value:
      raise ValueError("If initializer is a constant, do not specify shape.")

    dtype = dtypes.as_dtype(dtype)
    shape = tensor_shape.as_shape(shape)

    if name in self._vars:
      # Here we handle the case when returning an existing variable.
      if reuse is False:
        var = self._vars[name]
        err_msg = ("Variable %s already exists, disallowed. Did you mean to "
                   "set reuse=True or reuse=tf.AUTO_REUSE in VarScope?" %
                   name)
        # ResourceVariables don't have an op associated with them, so no
        # traceback is available.
        if isinstance(var, resource_variable_ops.ResourceVariable):
          raise ValueError(err_msg)
        tb = var.op.traceback[::-1]
        # Throw away internal TF entries and only take a few lines.
        tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
        raise ValueError("%s Originally defined at:\n\n%s" %
                         (err_msg, "".join(traceback.format_list(tb))))
      found_var = self._vars[name]
      if not shape.is_compatible_with(found_var.get_shape()):
        raise ValueError("Trying to share variable %s, but specified shape "
                         "%s and found shape %s." %
                         (name, shape, found_var.get_shape()))
      if not dtype.is_compatible_with(found_var.dtype):
        dtype_str = dtype.name
        found_type_str = found_var.dtype.name
        raise ValueError("Trying to share variable %s, but specified dtype "
                         "%s and found dtype %s." %
                         (name, dtype_str, found_type_str))
      return found_var

    # The code below handles only the case of creating a new variable.
    if reuse is True:
      raise ValueError("Variable %s does not exist, or was not created with "
                       "tf.get_variable(). Did you mean to set "
                       "reuse=tf.AUTO_REUSE in VarScope?" % name)

    # Create the tensor to initialize the variable with default value.
    if initializer is None:
      if shape is None:
        raise ValueError("Variable %s did not get an initializer, so its "
                         "`shape` argument must be specified." % name)
      initializer, initializing_from_value = self._get_default_initializer(
          name=name, shape=shape, dtype=dtype)

    # Enter an init scope when creating the initializer.
    with ops.init_scope():
      if initializing_from_value:
        init_val = initializer
        variable_dtype = None
      else:
        # Instantiate initializer if provided initializer is a type object.
        if tf_inspect.isclass(initializer):
          initializer = initializer()
        if shape is not None and shape.is_fully_defined():
          if "partition_info" in tf_inspect.getargspec(initializer).args:
            init_val = functools.partial(initializer,
                                         shape.as_list(),
                                         dtype=dtype,
                                         partition_info=partition_info)
          else:
            init_val = functools.partial(initializer,
                                         shape.as_list(), dtype=dtype)
          variable_dtype = dtype.base_dtype
        elif _needs_no_arguments(initializer):
          init_val = initializer
          variable_dtype = None
        else:
          raise ValueError("The initializer passed is not valid. It should "
                           "be a callable with no arguments and the "
                           "shape should not be provided or an instance of "
                           "`tf.keras.initializers.*' and `shape` should be "
                           "fully defined.")

    # Create the variable.
    if use_resource is None:
      # Set the default value if unspecified.
      use_resource = resource_variables_toggle.resource_variables_enabled()
    v = _variable_v1(
        initial_value=init_val,
        name=name,
        trainable=trainable,
        collections=collections,
        caching_device=caching_device,
        dtype=variable_dtype,
        validate_shape=validate_shape,
        constraint=constraint,
        use_resource=use_resource,
        synchronization=synchronization,
        aggregation=aggregation)
    if context.executing_eagerly() and self._store_eager_variables:
      if collections:
        ops.add_to_collections(collections, v)
      else:
        ops.add_to_collection(ops.GraphKeys.GLOBAL_VARIABLES, v)
      if trainable:
        ops.add_to_collection(ops.GraphKeys.TRAINABLE_VARIABLES, v)

    if not context.executing_eagerly() or self._store_eager_variables:
      # In eager mode we do not want to keep default references to Variable
      # objects as this will prevent their memory from being released.
      self._vars[name] = v
    logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
                 format(shape), initializer)

    # Run the regularizer if requested and save the resulting loss.
    if regularizer:

      def make_regularizer_op():
        with ops.colocate_with(v):
          with ops.name_scope(name + "/Regularizer/"):
            return regularizer(v)

      if regularizer(v) is not None:
        lazy_eval_tensor = _LazyEvalTensor(make_regularizer_op)
        ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES,
                              lazy_eval_tensor)

    return v

  def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
    """Provide a default initializer and a corresponding value.

    Args:
      name: see get_variable.
      shape: see get_variable.
      dtype: see get_variable.

    Returns:
      initializer and initializing_from_value. See get_variable above.

    Raises:
      ValueError: When giving unsupported dtype.
    """
    del shape
    # If dtype is DT_FLOAT, provide a uniform unit scaling initializer.
    if dtype.is_floating:
      initializer = init_ops.glorot_uniform_initializer()
      initializing_from_value = False
    # If dtype is DT_INT/DT_UINT, provide a default value `zero`.
    # If dtype is DT_BOOL, provide a default value `FALSE`.
    elif (dtype.is_integer or dtype.is_unsigned or dtype.is_bool or
          dtype == dtypes.string):
      initializer = init_ops.zeros_initializer()
      initializing_from_value = False
    else:
      raise ValueError("An initializer for variable %s of %s is required" %
                       (name, dtype.base_dtype))

    return initializer, initializing_from_value
class _LazyEvalTensor(core.Tensor):
  """A Tensor-like object that only evaluates its thunk when used."""

  def __init__(self, thunk):
    """Initializes a _LazyEvalTensor object.
    Args:
      thunk: A callable. A thunk which computes the value of the tensor.
    N)_thunk_master_tensor)r/   thunks     r4   r5   z_LazyEvalTensor.__init__  s     DK'Dr6   Nc                 P    ~|rJ |d | j                   fv sJ | j                         S r8   )ra   r   )r/   ra   r`   as_refs       r4   
_as_tensorz_LazyEvalTensor._as_tensor  s0    :T4::&&&&;;=r6   )NNF)r*   rC   rD   rE   r5   r   rH   r6   r4   r   r     s    E"r6   r   c                 $     t          fd       }|S )Nc                 0    t        | j                        S r8   getattrr   )r/   r`   s    r4   propz#_make_master_property.<locals>.prop  s    4&&--r6   )rG   )r`   r   s   ` r4   _make_master_propertyr     s    . .	+r6   )devicera   graphr`   r   r3   value_indexc                       fd}|S )Nc                 <     t        | j                        |i |S r8   r   r/   rO   kwargsr`   s      r4   methodz#_make_master_method.<locals>.method  s"    -74&&-t>v>>r6   rH   r`   r   s   ` r4   _make_master_methodr     s    ?	-r6   )r   __str__shape_as_listc                       fd}|S )Nc                 D     t        | j                               |i |S r8   )r   r   r   s      r4   r   z_make_op_method.<locals>.method  s#    +74??$d+T<V<<r6   rH   r   s   ` r4   _make_op_methodr     s    =	-r6   )(__abs____add____and____bool____div____eq____floordiv____ge____getitem____gt__
__invert____iter____le____len____lt__
__matmul____mod____mul____ne____neg____nonzero____or____pow____radd____rand____rdiv____rfloordiv____rmatmul____rmod____rmul____ror____rpow____rsub____rtruediv____rxor____sub____truediv____xor__evalnumpyc                 (    | j                  |||      S r8   )r   )valra   r`   r   s       r4   <lambda>r#    s    S^^E4%H r6   c                 "    | j                   gd fS )Nc                     | d   S Nr   rH   )fetched_valss    r4   r#  z<lambda>.<locals>.<lambda>  s
    Q r6   )r   )fetchs    r4   r#  r#    s    E(()+OP r6   r   c                      y)z9Use this function to prevent regularization of variables.NrH   )_s    r4   r   r     s     
r6   r   c                      e Zd ZdZdddddddej
                  ddf
dZed        Zed        Z	ed        Z
ed        Zed	        Zed
        Zed        Zed        Zed        Zed        Zed        Zd Zd Zd Zd Zd Zd Zd Zd Zd Zd Zd Zd Zdddddddddddddej@                  e!jD                  fdZ#dddddddddddej@                  e!jD                  fdZ$y)r   a  Variable scope object to carry defaults to provide to `get_variable`.

  Many of the arguments we need for `get_variable` in a variable store are most
  easily handled with a context. This object is used for the defaults.

  Attributes:
    name: name of the current scope, used as prefix in get_variable.
    initializer: default initializer passed to get_variable.
    regularizer: default regularizer passed to get_variable.
    reuse: Boolean, None, or tf.compat.v1.AUTO_REUSE, setting the reuse in
      get_variable. When eager execution is enabled this argument is always
      forced to be False.
    caching_device: string, callable, or None: the caching device passed to
      get_variable.
    partitioner: callable or `None`: the partitioner passed to `get_variable`.
    custom_getter: default custom getter passed to get_variable.
    name_scope: The name passed to `tf.name_scope`.
    dtype: default type passed to get_variable (defaults to DT_FLOAT).
    use_resource: if False, create a normal Variable; if True create an
      experimental ResourceVariable with well-defined semantics. Defaults to
      False (will later change to True). When eager execution is enabled this
      argument is always forced to be True.
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
  """

  def __init__(self, reuse, name="", initializer=None, regularizer=None,
               caching_device=None, partitioner=None, custom_getter=None,
               name_scope="", dtype=dtypes.float32, use_resource=None,
               constraint=None):
    """Creates a new VariableScope with the given properties."""
    self._name = name
    self._initializer = initializer
    self._regularizer = regularizer
    self._reuse = reuse
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._name_scope = name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    if context.executing_eagerly():
      if self._caching_device is not None:
        raise NotImplementedError("Caching devices is not yet supported "
                                  "when eager execution is enabled.")
      self._reuse = AUTO_REUSE
      self._use_resource = True

  @property
  def name(self):
    return self._name

  @property
  def original_name_scope(self):
    return self._name_scope

  @property
  def reuse(self):
    return self._reuse

  @property
  def initializer(self):
    return self._initializer

  @property
  def dtype(self):
    return self._dtype

  @property
  def use_resource(self):
    return self._use_resource

  @property
  def regularizer(self):
    return self._regularizer

  @property
  def caching_device(self):
    return self._caching_device

  @property
  def partitioner(self):
    return self._partitioner

  @property
  def custom_getter(self):
    return self._custom_getter

  @property
  def constraint(self):
    return self._constraint

  def reuse_variables(self):
    """Reuse variables in this scope."""
    self._reuse = True

  def set_initializer(self, initializer):
    """Set initializer for this scope."""
    self._initializer = initializer

  def set_dtype(self, dtype):
    """Set data type for this scope."""
    self._dtype = dtype

  def set_use_resource(self, use_resource):
    """Sets whether to use ResourceVariables for this scope."""
    if context.executing_eagerly() and not use_resource:
      raise ValueError("When eager execution is enabled, "
                       "use_resource cannot be set to false.")
    self._use_resource = use_resource

  def set_regularizer(self, regularizer):
    """Set regularizer for this scope."""
    self._regularizer = regularizer

  def set_caching_device(self, caching_device):
    """Set caching_device for this scope."""
    if context.executing_eagerly():
      raise NotImplementedError("Caching devices are not yet supported "
                                "when eager execution is enabled.")
    self._caching_device = caching_device

  def set_partitioner(self, partitioner):
    """Set partitioner for this scope."""
    self._partitioner = partitioner

  def set_custom_getter(self, custom_getter):
    """Set custom getter for this scope."""
    self._custom_getter = custom_getter

  def get_collection(self, name):
    """Get this scope's variables."""
    scope = self._name + "/" if self._name else ""
    return ops.get_collection(name, scope)

  def trainable_variables(self):
    """Get this scope's trainable variables."""
    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)

  def global_variables(self):
    """Get this scope's global variables."""
    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)

  def local_variables(self):
    """Get this scope's local variables."""
    return self.get_collection(ops.GraphKeys.LOCAL_VARIABLES)

  def get_variable(self, var_store, name, shape=None, dtype=None,
                   initializer=None, regularizer=None, reuse=None,
                   trainable=None, collections=None, caching_device=None,
                   partitioner=None, validate_shape=True, use_resource=None,
                   custom_getter=None, constraint=None,
                   synchronization=VariableSynchronization.AUTO,
                   aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or create a new one."""
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter

    if context.executing_eagerly():
      reuse = False
      use_resource = True
    else:
      if reuse is None:
        reuse = self._reuse
      if use_resource is None:
        use_resource = self._use_resource

    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on the variable scope (full_name here), not
    # the name scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None, skip_on_eager=False):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if constraint is None:
        constraint = self._constraint
      if dtype is None:
        dtype = self._dtype
      return var_store.get_variable(
          full_name, shape=shape, dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          partitioner=partitioner, validate_shape=validate_shape,
          use_resource=use_resource, custom_getter=custom_getter,
          constraint=constraint, synchronization=synchronization,
          aggregation=aggregation)

  def _get_partitioned_variable(self, var_store, name, shape=None, dtype=None,
                                initializer=None, regularizer=None,
                                trainable=None, collections=None,
                                caching_device=None, partitioner=None,
                                validate_shape=True, use_resource=None,
                                constraint=None,
                                synchronization=VariableSynchronization.AUTO,
                                aggregation=VariableAggregation.NONE):
    """Gets an existing variable with this name or create a new one."""
    if initializer is None:
      initializer = self._initializer
    if regularizer is None:
      regularizer = self._regularizer
    if constraint is None:
      constraint = self._constraint
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if dtype is None:
      dtype = self._dtype
    if use_resource is None:
      use_resource = self._use_resource

    if self._custom_getter is not None:
      raise ValueError(
          "Private access to _get_partitioned_variable is not allowed when "
          "a custom getter is set.  Current custom getter: %s.  "
          "It is likely that you're using create_partitioned_variables.  "
          "If so, consider instead using get_variable with a non-empty "
          "partitioner parameter instead." % self._custom_getter)

    if partitioner is None:
      raise ValueError("No partitioner was specified")

    # This allows the variable scope name to be used as the variable name if
    # this function is invoked with an empty name arg, for backward
    # compatibility with create_partitioned_variables().
    full_name_list = []
    if self.name:
      full_name_list.append(self.name)
    if name:
      full_name_list.append(name)
    full_name = "/".join(full_name_list)

    # Variable names only depend on the variable scope (full_name here), not
    # the name scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None, skip_on_eager=False):
      # pylint: disable=protected-access
      return var_store._get_partitioned_variable(
          full_name, shape=shape, dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=self.reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          partitioner=partitioner, validate_shape=validate_shape,
          use_resource=use_resource, constraint=constraint,
          synchronization=synchronization, aggregation=aggregation)
      # pylint: enable=protected-access
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPESTORE_KEY = ("__varscope",)


class _VariableScopeStore(threading.local):
  """A thread local store for the current variable scope and scope counts."""

  def __init__(self):
    super(_VariableScopeStore, self).__init__()
    self.current_scope = VariableScope(False)
    self.variable_scopes_count = {}

  def open_variable_scope(self, scope_name):
    if scope_name in self.variable_scopes_count:
      self.variable_scopes_count[scope_name] += 1
    else:
      self.variable_scopes_count[scope_name] = 1

  def close_variable_subscopes(self, scope_name):
    if scope_name is None:
      for k in self.variable_scopes_count:
        self.variable_scopes_count[k] = 0
    else:
      startswith_check = scope_name + "/"
      startswith_len = len(startswith_check)
      for k in self.variable_scopes_count:
        if k[:startswith_len] == startswith_check:
          self.variable_scopes_count[k] = 0

  def variable_scope_count(self, scope_name):
    return self.variable_scopes_count.get(scope_name, 0)
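

# Illustrative example (added commentary, not part of the original TF module):
# the thread-local store tracks how often each scope name has been entered,
# which variable_scope uses to uniquify default scope names.
def _example_scope_store():
  store = get_variable_scope_store()
  store.open_variable_scope("foo")
  assert store.variable_scope_count("foo") == 1
  store.close_variable_subscopes(None)  # Resets every count to zero.
  assert store.variable_scope_count("foo") == 0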
def get_variable_scope_store():
  """Returns the variable scope store for current thread."""
  scope_store = ops.get_collection(_VARSCOPESTORE_KEY)

  if not scope_store:
    scope_store = _VariableScopeStore()
    ops.add_to_collection(_VARSCOPESTORE_KEY, scope_store)
  else:
    scope_store = scope_store[0]

  return scope_store


@tf_export(v1=["get_variable_scope"])
def get_variable_scope():
  """Returns the current variable scope.

  @compatibility(TF2)
  Although it is a legacy `compat.v1` api,
  `tf.compat.v1.get_variable` is compatible with eager
  execution and `tf.function`

  However, to maintain variable-scope based variable reuse
  you will need to combine it with
  `tf.compat.v1.keras.utils.track_tf1_style_variables`. (Though
  it will behave as if reuse is always set to `tf.compat.v1.AUTO_REUSE`.)

  See the
  [migration guide](https://www.tensorflow.org/guide/migrate/model_mapping)
  for more info.

  The TF2 equivalent, if you are just trying to track
  variable name prefixes and not control `get_variable`-based variable reuse,
  would be to use `tf.name_scope` and capture the output of opening the
  scope (which represents the current name prefix).

  For example:
  ```python
  with tf.name_scope('foo') as current_scope:
    ...
  ```
  @end_compatibility
  """
  return get_variable_scope_store().current_scope


def _get_default_variable_store():
  store = ops.get_collection(_VARSTORE_KEY)
  if store:
    return store[0]
  store = _VariableStore()
  ops.add_to_collection(_VARSTORE_KEY, store)
  return store


@tf_contextlib.contextmanager
def with_variable_store(store):
  store_collection = ops.get_collection_ref(_VARSTORE_KEY)
  old = list(store_collection)
  store_collection[:] = [store]
  try:
    yield
  finally:
    store_collection[:] = old
class EagerVariableStore(object):
  """Wrapper allowing functional layers to be used with eager execution.
EagerVariableStorea  Wrapper allowing functional layers to be used with eager execution.

  When eager execution is enabled Variables get deleted when they go out of
  scope, and are not stored in global collections by default. A lot of code
  (mostly the functional layers in tf.layers) assumes that variables are kept in
  a global list.

  EagerVariableStore can be used in conjunction with this code to make it
  eager-friendly. For example, to create a dense layer, use:

  ```
    container = tfe.EagerVariableStore()
    for input in dataset_iterator:
      with container.as_default():
        x = tf.compat.v1.layers.dense(input, name="l1")
    print(container.variables)  # Should print the variables used in the layer.
  ```
  """

  def __init__(self, store=None):
    if store is not None:
      if not store._store_eager_variables:  # pylint: disable=protected-access
        raise ValueError("Cannot construct EagerVariableStore from a "
                         "VariableStore object that does not hold eager "
                         "variables.")
      self._store = store
    else:
      self._store = _VariableStore()
    self._store._store_eager_variables = True  # pylint: disable=protected-access

  def as_default(self):
    return with_variable_store(self._store)

  def variables(self):
    return sorted(self._store._vars.values(), key=lambda x: x.name)  # pylint: disable=protected-access

  def trainable_variables(self):
    # pylint: disable=protected-access
    return sorted([x for x in self._store._vars.values() if x.trainable],
                  key=lambda x: x.name)
    # pylint: enable=protected-access

  def non_trainable_variables(self):
    # pylint: disable=protected-access
    return sorted([x for x in self._store._vars.values() if not x.trainable],
                  key=lambda x: x.name)
    # pylint: enable=protected-access

  def copy(self):
    """Copy this variable store and all of its contents.
    Variables contained in this store will be copied over to the new variable
    store, meaning that they can be modified without affecting the variables in
    this store.

    Returns:
      A new EagerVariableStore instance containing copied variables.
    :N)r`   re   )r  r  rZ   itemsr`   indexr,   r   r   
    """
    # pylint: disable=protected-access
    new_store = EagerVariableStore()
    for key, var in self._store._vars.items():
      # Strip device out of variable name.
      try:
        index = var.name.index(":")
      except ValueError:
        stripped_var_name = var.name
      else:
        stripped_var_name = var.name[:index]

      # Create new variable with same value, name, and "trainable" flag.
      new_var = resource_variable_ops.ResourceVariable(
          var.read_value(), name=stripped_var_name, trainable=var.trainable)
      new_store._store._vars[key] = new_var
    return new_store
    # pylint: enable=protected-access
# The argument list for get_variable must match arguments to
# get_local_variable. So, if you are updating the arguments, also update
# arguments to get_local_variable below.
@tf_export(v1=["get_variable"])
def get_variable(name, shape=None, dtype=None, initializer=None,
                 regularizer=None, trainable=None, collections=None,
                 caching_device=None, partitioner=None, validate_shape=True,
                 use_resource=None, custom_getter=None, constraint=None,
                 synchronization=VariableSynchronization.AUTO,
                 aggregation=VariableAggregation.NONE):
  return get_variable_scope().get_variable(
      _get_default_variable_store(), name, shape=shape, dtype=dtype,
      initializer=initializer, regularizer=regularizer, trainable=trainable,
      collections=collections, caching_device=caching_device,
      partitioner=partitioner, validate_shape=validate_shape,
      use_resource=use_resource, custom_getter=custom_getter,
      constraint=constraint, synchronization=synchronization,
      aggregation=aggregation)


get_variable_or_local_docstring = ("""%s
@compatibility(TF2)
Although it is a legacy `compat.v1` api,
`tf.compat.v1.get_variable` is mostly compatible with eager
execution and `tf.function` but only if you combine it with the
`tf.compat.v1.keras.utils.track_tf1_style_variables` decorator. (Though
it will behave as if reuse is always set to `AUTO_REUSE`.)

See the
[model migration guide](https://www.tensorflow.org/guide/migrate/model_mapping)
for more info.

If you do not combine it with
`tf.compat.v1.keras.utils.track_tf1_style_variables`, `get_variable` will create
a brand new variable every single time it is called and will never reuse
variables, regardless of variable names or `reuse` arguments.

The TF2 equivalent of this symbol would be `tf.Variable`, but note
that when using `tf.Variable` you must make sure you track your variables
(and regularizer arguments) either manually or via `tf.Module` or
`tf.keras.layers.Layer` mechanisms.

A section of the
[migration guide](https://www.tensorflow.org/guide/migrate/model_mapping#incremental_migration_to_native_tf2)
provides more details on incrementally migrating these usages to `tf.Variable`
as well.

Note: The `partitioner` arg is not compatible with TF2 behaviors even when
using `tf.compat.v1.keras.utils.track_tf1_style_variables`. It can be replaced
by using `ParameterServerStrategy` and its partitioners. See the
[multi-gpu migration guide](https://www.tensorflow.org/guide/migrate/multi_worker_cpu_gpu_training)
and the ParameterServerStrategy guides it references for more info.
@end_compatibility

%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](https://tensorflow.org/guide/variables)
for an extensive description of how reusing works. Here is a basic example:

```python
def foo():
  with tf.variable_scope("foo", reuse=tf.AUTO_REUSE):
    v = tf.get_variable("v", [1])
  return v

v1 = foo()  # Creates v.
v2 = foo()  # Gets the same, existing v.
assert v1 == v2
```

If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.

Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).

If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.

Some useful partitioners are available.  See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.

Args:
  name: The name of the new or existing variable.
  shape: Shape of the new or existing variable.
  dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
  initializer: Initializer for the variable if one is created. Can either be
    an initializer object or a Tensor. If it's a Tensor, its shape must be known
    unless validate_shape is False.
  regularizer: A (Tensor -> Tensor or None) function; the result of
    applying it on a newly created variable will be added to the collection
    `tf.GraphKeys.REGULARIZATION_LOSSES` and can be used for regularization.
  %scollections: List of graph collections keys to add the Variable to.
    Defaults to `[%s]` (see `tf.Variable`).
  caching_device: Optional device string or function describing where the
    Variable should be cached for reading.  Defaults to the Variable's
    device.  If not `None`, caches on another device.  Typical use is to
    cache on the device where the Ops using the Variable reside, to
    deduplicate copying through `Switch` and other conditional statements.
  partitioner: Optional callable that accepts a fully defined `TensorShape`
    and `dtype` of the Variable to be created, and returns a list of
    partitions for each axis (currently only one axis can be partitioned).
  validate_shape: If False, allows the variable to be initialized with a
      value of unknown shape. If True, the default, the shape of initial_value
      must be known. For this to be used the initializer must be a Tensor and
      not an initializer object.
  use_resource: If False, creates a regular Variable. If true, creates an
    experimental ResourceVariable instead with well-defined semantics.
    Defaults to False (will later change to True). When eager execution is
    enabled this argument is always forced to be True.
  custom_getter: Callable that takes as a first argument the true getter, and
    allows overwriting the internal get_variable method.
    The signature of `custom_getter` should match that of this method,
    but the most future-proof version will allow for changes:
    `def custom_getter(getter, *args, **kwargs)`.  Direct access to
    all `get_variable` parameters is also allowed:
    `def custom_getter(getter, name, *args, **kwargs)`.  A simple identity
    custom getter that simply creates variables with modified names is:
    ```python
    def custom_getter(getter, name, *args, **kwargs):
      return getter(name + '_suffix', *args, **kwargs)
    ```
  constraint: An optional projection function to be applied to the variable
    after being updated by an `Optimizer` (e.g. used to implement norm
    constraints or value constraints for layer weights). The function must
    take as input the unprojected Tensor representing the value of the
    variable and return the Tensor for the projected value
    (which must have the same shape). Constraints are not safe to
    use when doing asynchronous distributed training.
  synchronization: Indicates when a distributed variable will be
    aggregated. Accepted values are constants defined in the class
    `tf.VariableSynchronization`. By default the synchronization is set to
    `AUTO` and the current `DistributionStrategy` chooses
    when to synchronize.
  aggregation: Indicates how a distributed variable will be aggregated.
    Accepted values are constants defined in the class
    `tf.VariableAggregation`.

Returns:
  The created or existing `Variable` (or `PartitionedVariable`, if a
  partitioner was used).

Raises:
  ValueError: when creating a new variable and shape is not declared,
    when violating reuse during variable creation, or when `initializer` dtype
    and `dtype` don't match. Reuse is set inside `variable_scope`.
""")

get_variable.__doc__ = get_variable_or_local_docstring % (
    "Gets an existing variable with these parameters or create a new one.",
    "",
    "trainable: If `True` also add the variable to the graph collection\n"
    "    `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n  ",
    "GraphKeys.GLOBAL_VARIABLES")


# The argument list for get_local_variable must match arguments to
# get_variable, except that trainable is fixed to False and collections
# defaults to GraphKeys.LOCAL_VARIABLES.
@tf_export(v1=["get_local_variable"])
def get_local_variable(  # pylint: disable=missing-docstring
    name,
    shape=None,
    dtype=None,
    initializer=None,
    regularizer=None,
    trainable=False,  # pylint: disable=unused-argument
    collections=None,
    caching_device=None,
    partitioner=None,
    validate_shape=True,
    use_resource=None,
    custom_getter=None,
    constraint=None,
    synchronization=VariableSynchronization.AUTO,
    aggregation=VariableAggregation.NONE):
  if collections:
    collections += [ops.GraphKeys.LOCAL_VARIABLES]
  else:
    collections = [ops.GraphKeys.LOCAL_VARIABLES]
  return get_variable(
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=False,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      custom_getter=custom_getter,
      constraint=constraint,
      synchronization=synchronization,
      aggregation=aggregation)


get_local_variable.__doc__ = get_variable_or_local_docstring % (
    "Gets an existing *local* variable or creates a new one.",
    "Behavior is the same as in `get_variable`, except that variables are\n"
    "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
    "`False`.\n",
    "",
    "GraphKeys.LOCAL_VARIABLES")

|||      S )a  Gets or creates a sharded variable list with these parameters.

  The `partitioner` must be a callable that accepts a fully defined
  `TensorShape` and returns a sequence of integers (the `partitions`).
  These integers describe how to partition the given sharded `Variable`
  along the given dimension.  That is, `partitions[1] = 3` means split
  the `Variable` into 3 shards along dimension 1.  Currently, sharding along
  only one axis is supported.

  If the list of variables with the given name (prefix) is already stored,
  we return the stored variables. Otherwise, we create a new one.

  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `glorot_uniform_initializer`. If initializer is a Tensor, we use
  it as a value and derive the shape from the initializer.

  If the initializer is a callable, then it will be called for each
  shard.  Otherwise the initializer should match the shape of the entire
  sharded Variable, and it will be sliced accordingly for each shard.

  Some useful partitioners are available.  See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: Initializer for the variable if one is created.
    regularizer: A (Tensor -> Tensor or None) function; the result of applying
      it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to. Defaults
      to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading.  Defaults to the Variable's device.
      If not `None`, caches on another device.  Typical use is to cache on the
      device where the Ops using the Variable reside, to deduplicate copying
      through `Switch` and other conditional statements.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    validate_shape: If False, allows the variable to be initialized with a value
      of unknown shape. If True, the default, the shape of initial_value must be
      known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable instead which has well-defined semantics.
      Defaults to False (will later change to True).
    constraint: An optional projection function to be applied to the variable
      after being updated by an `Optimizer` (e.g. used to implement norm
      constraints or value constraints for layer weights). The function must
      take as input the unprojected Tensor representing the value of the
      variable and return the Tensor for the projected value (which must have
      the same shape). Constraints are not safe to use when doing asynchronous
      distributed training.
    synchronization: Indicates when a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableSynchronization`. By default the synchronization is set to
      `AUTO` and the current `DistributionStrategy` chooses when to synchronize.
    aggregation: Indicates how a distributed variable will be aggregated.
      Accepted values are constants defined in the class
      `tf.VariableAggregation`.

  Returns:
    A tuple `(shards, partitions)` where `shards` is the list of `Variable`
    shards and `partitions` is the output of the partitioner on the input
    shape.

  Raises:
    ValueError: when creating a new variable and shape is not declared,
      or when violating reuse during variable creation. Reuse is set inside
      `variable_scope`.
  """
  # pylint: disable=protected-access
  scope = get_variable_scope()
  if scope.custom_getter is not None:
    raise ValueError(
        "Private access to _get_partitioned_variable is not allowed when "
        "a custom getter is set.  Current custom getter: %s.  "
        "It is likely that you're using create_partitioned_variables.  "
        "If so, consider instead using get_variable with a non-empty "
        "partitioner parameter instead." % scope.custom_getter)
  return scope._get_partitioned_variable(
      _get_default_variable_store(),
      name,
      shape=shape,
      dtype=dtype,
      initializer=initializer,
      regularizer=regularizer,
      trainable=trainable,
      collections=collections,
      caching_device=caching_device,
      partitioner=partitioner,
      validate_shape=validate_shape,
      use_resource=use_resource,
      constraint=constraint,
      synchronization=synchronization,
      aggregation=aggregation)
  # pylint: enable=protected-access
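
# Illustrative sketch (hypothetical sizes): a partitioner maps the full shape
# and dtype to a per-axis partition count, and `get_variable` then returns a
# `PartitionedVariable` holding one shard per partition:
#
#   part = tf.compat.v1.variable_axis_size_partitioner(max_shard_bytes=1 << 20)
#   with tf.compat.v1.variable_scope("embeddings", partitioner=part):
#     emb = tf.compat.v1.get_variable("table", shape=[100000, 64])
#   # `emb` reads like a single [100000, 64] tensor, but is stored as
#   # multiple shards split along axis 0.
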

class _pure_variable_scope(object):
  """A context for the variable_scope, see `variable_scope` for docs."""

  def __init__(self,
               name_or_scope,
               reuse=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               old_name_scope=None,
               dtype=dtypes.float32,
               use_resource=None,
               constraint=None):
    """Creates a context for the variable_scope, see `variable_scope` for docs.

    Note: this does not create a name scope.

    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      reuse: `True` or None, or tf.compat.v1.AUTO_REUSE; if `None`, we inherit
        the parent scope's reuse flag.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      old_name_scope: the original name scope when re-entering a variable scope.
      dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
      use_resource: If False, variables in this scope will be regular Variables.
        If True, experimental ResourceVariables will be created instead, with
        well-defined semantics. Defaults to False (will later change to True).
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
    """
    self._name_or_scope = name_or_scope
    self._reuse = reuse
    self._initializer = initializer
    self._regularizer = regularizer
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._old_name_scope = old_name_scope
    self._dtype = dtype
    self._use_resource = use_resource
    self._constraint = constraint
    self._var_store = _get_default_variable_store()
    self._var_scope_store = get_variable_scope_store()
    self._last_variable_scope_object = None
    if isinstance(self._name_or_scope, VariableScope):
      self._new_name = self._name_or_scope.name
      name_scope = self._name_or_scope._name_scope  # pylint: disable=protected-access
      # Handler for the case when we jump to a shared scope.  We create a new
      # VariableScope that contains a copy of the provided shared scope,
      # possibly with changed reuse and initializer, if the user requested
      # this.
      variable_scope_object = VariableScope(
          self._name_or_scope.reuse if not self._reuse else self._reuse,
          name=self._new_name,
          initializer=self._name_or_scope.initializer,
          regularizer=self._name_or_scope.regularizer,
          caching_device=self._name_or_scope.caching_device,
          partitioner=self._name_or_scope.partitioner,
          dtype=self._name_or_scope.dtype,
          custom_getter=self._name_or_scope.custom_getter,
          name_scope=name_scope,
          use_resource=self._name_or_scope.use_resource,
          constraint=self._constraint)
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(self._custom_getter,
                                      self._name_or_scope.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._cached_variable_scope_object = variable_scope_object

  def __enter__(self):
    """Begins the scope block.

    Returns:
      A VariableScope.
    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope, or if reuse is not `None` or `True`.
      TypeError: when the types of some arguments are not appropriate.
    """
    self._old = self._var_scope_store.current_scope
    if isinstance(self._name_or_scope, VariableScope):
      self._var_scope_store.open_variable_scope(self._new_name)
      self._old_subscopes = copy.copy(
          self._var_scope_store.variable_scopes_count)
      variable_scope_object = self._cached_variable_scope_object
    else:
      # Handler for the case when we just prolong current variable scope.
      # VariableScope with name extended by the provided one, and inherited
      # reuse and initializer (except if the user provided values to set).
      self._new_name = (
          self._old.name + "/" +
          self._name_or_scope if self._old.name else self._name_or_scope)
      self._reuse = (self._reuse or
                     self._old.reuse)  # Re-using is inherited by sub-scopes.
      if self._old_name_scope is None:
        name_scope = self._name_or_scope
      else:
        name_scope = self._old_name_scope
      variable_scope_object = VariableScope(
          self._reuse,
          name=self._new_name,
          initializer=self._old.initializer,
          regularizer=self._old.regularizer,
          caching_device=self._old.caching_device,
          partitioner=self._old.partitioner,
          dtype=self._old.dtype,
          use_resource=self._old.use_resource,
          custom_getter=self._old.custom_getter,
          name_scope=name_scope,
          constraint=self._constraint)
      if self._initializer is not None:
        variable_scope_object.set_initializer(self._initializer)
      if self._regularizer is not None:
        variable_scope_object.set_regularizer(self._regularizer)
      if self._caching_device is not None:
        variable_scope_object.set_caching_device(self._caching_device)
      if self._partitioner is not None:
        variable_scope_object.set_partitioner(self._partitioner)
      if self._custom_getter is not None:
        variable_scope_object.set_custom_getter(
            _maybe_wrap_custom_getter(self._custom_getter,
                                      self._old.custom_getter))
      if self._dtype is not None:
        variable_scope_object.set_dtype(self._dtype)
      if self._use_resource is not None:
        variable_scope_object.set_use_resource(self._use_resource)
      self._var_scope_store.open_variable_scope(self._new_name)
    self._var_scope_store.current_scope = variable_scope_object
    self._last_variable_scope_object = variable_scope_object
    return variable_scope_object

  def __exit__(self, type_arg, value_arg, traceback_arg):
    if (self._var_scope_store.current_scope is
        not self._last_variable_scope_object):
      raise RuntimeError("Improper nesting of variable_scope.")
    # If jumping out of an existing scope, restore the previous scope.
    if isinstance(self._name_or_scope, VariableScope):
      self._var_scope_store.variable_scopes_count = self._old_subscopes
    else:
      self._var_scope_store.close_variable_subscopes(self._new_name)
    self._var_scope_store.current_scope = self._old


def _maybe_wrap_custom_getter(custom_getter, old_getter):
  """Wrap a call to a custom_getter to use the old_getter internally."""
  if old_getter is None:
    return custom_getter

  # The new custom_getter should call the old one.
  def wrapped_custom_getter(getter, *args, **kwargs):
    # Call the old getter to make the custom getter composable.
    return custom_getter(functools.partial(old_getter, getter), *args,
                         **kwargs)

  return wrapped_custom_getter


def _get_unique_variable_scope(prefix):
  """Get a name with the given prefix unique in the current variable scope."""
  var_scope_store = get_variable_scope_store()
  current_scope = get_variable_scope()
  name = current_scope.name + "/" + prefix if current_scope.name else prefix
  if var_scope_store.variable_scope_count(name) == 0:
    return prefix
  idx = 1
  while var_scope_store.variable_scope_count(name + ("_%d" % idx)) > 0:
    idx += 1
  return prefix + ("_%d" % idx)


# Named like a function for backwards compatibility with the
# @tf_contextlib.contextmanager version, which was switched to a class to
# avoid some object creation overhead.
@tf_export(v1=["variable_scope"])  # pylint: disable=invalid-name
class variable_scope(object):
  """A context manager for defining ops that creates variables (layers).

  @compatibility(TF2)
  Although it is a legacy `compat.v1` api,
  `tf.compat.v1.variable_scope` is mostly compatible with eager
  execution and `tf.function` as long as you combine it with the
  `tf.compat.v1.keras.utils.track_tf1_style_variables` decorator (though
  it will behave as if reuse is always set to `AUTO_REUSE`.)

  See the
  [model migration guide](
      https://www.tensorflow.org/guide/migrate/model_mapping)
  for more info on
  migrating code that relies on `variable_scope`-based variable reuse.

  When you use it with eager execution enabled but without
  `tf.compat.v1.keras.utils.track_tf1_style_variables`,
  `tf.compat.v1.variable_scope` will still be able to prefix the names
  of variables created within the scope but it will not enable variable reuse
  or error-raising checks around variable reuse (`get_variable` calls within
  it would always create new variables).

  Once you have switched away from `get_variable`-based variable reuse
  mechanisms, to switch to TF2 APIs you can just use
  `tf.name_scope` to prefix variable names.
  @end_compatibility

  This context manager validates that the (optional) `values` are from the same
  graph, ensures that graph is the default graph, and pushes a name scope and a
  variable scope.

  If `name_or_scope` is not None, it is used as is. If `name_or_scope` is None,
  then `default_name` is used.  In that case, if the same name has been
  previously used in the same scope, it will be made unique by appending `_N`
  to it.

  Variable scope allows you to create new variables and to share already created
  ones while providing checks to not create or share by accident. For details,
  see the [Variable Scope How To](https://tensorflow.org/guide/variables), here
  we present only a few basic examples.

  The variable scope works as expected when eager execution is disabled:

  ```python
  tf.compat.v1.disable_eager_execution()
  ```

  Simple example of how to create a new variable:

  ```python
  with tf.compat.v1.variable_scope("foo"):
      with tf.compat.v1.variable_scope("bar"):
          v = tf.compat.v1.get_variable("v", [1])
          assert v.name == "foo/bar/v:0"
  ```

  Simple example of how to reenter a premade variable scope safely:

  ```python
  with tf.compat.v1.variable_scope("foo") as vs:
    pass

  # Re-enter the variable scope.
  with tf.compat.v1.variable_scope(vs,
                         auxiliary_name_scope=False) as vs1:
    # Restore the original name_scope.
    with tf.name_scope(vs1.original_name_scope):
        v = tf.compat.v1.get_variable("v", [1])
        assert v.name == "foo/v:0"
        c = tf.constant([1], name="c")
        assert c.name == "foo/c:0"
  ```

  Keep in mind that the counters for `default_name` are discarded once the
  parent scope is exited. Therefore when the code re-enters the scope (for
  instance by saving it), all nested default_name counters will be restarted.

  For instance:

  ```python
  with tf.compat.v1.variable_scope("foo") as vs:
    with tf.compat.v1.variable_scope(None, default_name="bar"):
      v = tf.compat.v1.get_variable("a", [1])
      assert v.name == "foo/bar/a:0", v.name
    with tf.compat.v1.variable_scope(None, default_name="bar"):
      v = tf.compat.v1.get_variable("b", [1])
      assert v.name == "foo/bar_1/b:0"

  with tf.compat.v1.variable_scope(vs):
    with tf.compat.v1.variable_scope(None, default_name="bar"):
      v = tf.compat.v1.get_variable("c", [1])
      assert v.name == "foo/bar/c:0"   # Uses bar instead of bar_2!
  ```

  Basic example of sharing a variable with AUTO_REUSE:

  ```python
  def foo():
    with tf.compat.v1.variable_scope("foo", reuse=tf.compat.v1.AUTO_REUSE):
      v = tf.compat.v1.get_variable("v", [1])
    return v

  v1 = foo()  # Creates v.
  v2 = foo()  # Gets the same, existing v.
  assert v1 == v2
  ```

  Basic example of sharing a variable with reuse=True:

  ```python
  with tf.compat.v1.variable_scope("foo"):
      v = tf.compat.v1.get_variable("v", [1])
  with tf.compat.v1.variable_scope("foo", reuse=True):
      v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```

  Sharing a variable by capturing a scope and setting reuse:

  ```python
  with tf.compat.v1.variable_scope("foo") as scope:
      v = tf.compat.v1.get_variable("v", [1])
      scope.reuse_variables()
      v1 = tf.compat.v1.get_variable("v", [1])
  assert v1 == v
  ```

  To prevent accidental sharing of variables, we raise an exception when getting
  an existing variable in a non-reusing scope.

  ```python
  with tf.compat.v1.variable_scope("foo"):
      v = tf.compat.v1.get_variable("v", [1])
      v1 = tf.compat.v1.get_variable("v", [1])
      #  Raises ValueError("... v already exists ...").
  ```

  Similarly, we raise an exception when trying to get a variable that does not
  exist in reuse mode.

  ```python
  with tf.compat.v1.variable_scope("foo", reuse=True):
      v = tf.compat.v1.get_variable("v", [1])
      #  Raises ValueError("... v does not exist ...").
  ```

  Note that the `reuse` flag is inherited: if we open a reusing scope, then all
  its sub-scopes become reusing as well.

  A note about name scoping: Setting `reuse` does not impact the naming of other
  ops such as `tf.multiply`. See the related discussion on
  [github#6189](https://github.com/tensorflow/tensorflow/issues/6189)

  Note that up to and including version 1.0, it was allowed (though explicitly
  discouraged) to pass False to the reuse argument, yielding undocumented
  behaviour slightly different from None. Starting at 1.1.0 passing None and
  False as reuse has exactly the same effect.

  A note about using variable scopes in multi-threaded environment: Variable
  scopes are thread local, so one thread will not see another thread's current
  scope. Also, when using `default_name`, unique scopes names are also generated
  only on a per thread basis. If the same name was used within a different
  thread, that doesn't prevent a new thread from creating the same scope.
  However, the underlying variable store is shared across threads (within the
  same graph). As such, if another thread tries to create a new variable with
  the same name as a variable created by a previous thread, it will fail unless
  reuse is True.

  Further, each thread starts with an empty variable scope. So if you wish to
  preserve name prefixes from a scope from the main thread, you should capture
  the main thread's scope and re-enter it in each thread. For e.g.

  ```
  main_thread_scope = variable_scope.get_variable_scope()

  # Thread's target function:
  def thread_target_fn(captured_scope):
    with variable_scope.variable_scope(captured_scope):
      # .... regular code for this thread


  thread = threading.Thread(target=thread_target_fn, args=(main_thread_scope,))
  ```
  """

  def __init__(self,
               name_or_scope,
               default_name=None,
               values=None,
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               reuse=None,
               dtype=None,
               use_resource=None,
               constraint=None,
               auxiliary_name_scope=True):
    """Initialize the context manager.
        || _        | j                  | j                   t        d      | j                  du rd| _        | j                  du s)| j                  | j                  t        u st        d      | j                  g | _        t        j                           | _        | j"                  r$t%        j&                  | j                        | _        d| _        d| _        t/        |t0              st        dj3                  |            || _        y)a
  Initialize the context manager.

    Args:
      name_or_scope: `string` or `VariableScope`: the scope to open.
      default_name: The default name to use if the `name_or_scope` argument is
        `None`, this name will be uniquified. If name_or_scope is provided it
        won't be used and therefore it is not required and can be None.
      values: The list of `Tensor` arguments that are passed to the op function.
      initializer: default initializer for variables within this scope.
      regularizer: default regularizer for variables within this scope.
      caching_device: default caching device for variables within this scope.
      partitioner: default partitioner for variables within this scope.
      custom_getter: default custom getter for variables within this scope.
      reuse: `True`, None, or tf.compat.v1.AUTO_REUSE; if `True`, we go into
        reuse mode for this scope as well as all sub-scopes; if
        tf.compat.v1.AUTO_REUSE, we create variables if they do not exist, and
        return them otherwise; if None, we inherit the parent scope's reuse
        flag. When eager execution is enabled, new variables are always created
        unless an EagerVariableStore or template is currently active.
      dtype: type of variables created in this scope (defaults to the type in
        the passed scope, or inherited from parent scope).
      use_resource: If False, all variables will be regular Variables. If True,
        experimental ResourceVariables with well-defined semantics will be used
        instead. Defaults to False (will later change to True). When eager
        execution is enabled this argument is always forced to be True.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value (which must have
        the same shape). Constraints are not safe to use when doing asynchronous
        distributed training.
      auxiliary_name_scope: If `True`, we create an auxiliary name scope with
        the scope. If `False`, we don't create it. Note that the argument is not
        inherited, and it only takes effect once, when the scope is created.
        You should
        only use it for re-entering a premade variable scope.

    Returns:
      A scope that can be captured and reused.

    Raises:
      ValueError: when trying to reuse within a create scope, or create within
        a reuse scope.
      TypeError: when the types of some arguments are not appropriate.
    Nz6If default_name is None then name_or_scope is requiredFTz2The reuse parameter must be True or False or None.z@The auxiliary_name_scope must be `True` or `False`, while get {})r  _default_name_valuesr.  r/  r1  r2  r3  r0  r5  r6  r7  r(   r   r,   r   ry   _in_graph_moder   _get_graph_from_inputs_graph_cached_pure_variable_scope_current_name_scoper%   boolr-   _auxiliary_name_scope)r/   r  default_namer  rb   rc   rg   rh   r   rd   ra   rj   rk   auxiliary_name_scopes                 r4   r5   zvariable_scope.__init__  sL   v (D%DDL#D#D)D#D'DDKDK%D!D!d&9&9&ANOO{{edkKK4{{"{{j(KLL||dl%7799D..t||<dk'+D$#D*D1 %%+V,@%AC C!5Dr6   c                 b   t        j                         j                  rd| _        nd| _        | j                  rE| j                  s9| j
                  j                         | _        | j                  j                          | j                  @| j                  | j                  j                          | j                  j                         S 	 | j                         S #  | j                  rC| j                  s7| j                  + | j                  j                  t        j                            xY w)NTF)r   get_default_graphbuilding_function_building_functionr  r  r  _graph_context_managerr  r  r  _enter_scope_uncachedr  sysexc_infor9   s    r4   r  zvariable_scope.__enter__B	  s     00 $d %d4#:#:$(KK$:$:$<d!
!!++-''3 
	!	!	-  **,--7799''))


d&=&=

%
%
1,##,,clln=s   C AD.c                 H   | j                   rd}nJt        j                         }|r|dz  }t        j                  |d      }nt        j                  |d      }| j                  t        | j                  t        t        f      st        d      t        | j                  t              r| j                  }n(| j                  j                  j                  d      d   }|s|r|xs t        j                  |d      }	 |j                         }|| _        t        | j                  t              r|}n| j                  j                   }t#        | j                  | j$                  | j&                  | j(                  | j*                  | j,                  | j.                  || j0                  | j2                  | j4                        }	 |j                         }|| _        |S d| _        t#        | j                  | j$                  | j&                  | j(                  | j*                  | j,                  | j.                  | j0                  | j2                  | j4                  
      }	 |j                         }|| _        |S | j$                  rt9        d	      |xs! t        j                  | j:                  d      }	 |j                         }|| _        t=        | j:                        }t#        || j&                  | j(                  | j*                  | j,                  | j.                  || j0                  | j2                  | j4                  

      }	 |j                         }|| _        |S #   |j                  t        j                            xY w#   |j                  t        j                            xY w#   |j                  t        j                            xY w#   |j                  t        j                            xY w#   |j                  t        j                            xY w)zEnters the context manager when there is no cached scope yet.

    Returns:
      The entered variable scope.

    Raises:
      TypeError: A wrong type is passed as `scope` at __init__().
      ValueError: `reuse` is incorrectly set at __init__().
    """
    if self._auxiliary_name_scope:
      # Create a new name scope later.
      current_name_scope = None
    else:
      # Reenter the current name scope.
      name_scope = ops.get_name_scope()
      if name_scope:
        # Hack to reenter.
        name_scope += "/"
        current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)
      else:
        # Root scope.
        current_name_scope = ops.name_scope(name_scope, skip_on_eager=False)

    # IMPORTANT: Only assign to self._cached_pure_variable_scope and
    # self._current_name_scope after successful __enter__() calls.
    if self._name_or_scope is not None:
      if not isinstance(self._name_or_scope, (VariableScope, str)):
        raise TypeError("VariableScope: name_or_scope must be a string or "
                        "VariableScope.")
      if isinstance(self._name_or_scope, str):
        name_scope = self._name_or_scope
      else:
        name_scope = self._name_or_scope.name.split("/")[-1]
      if name_scope or current_name_scope:
        current_name_scope = current_name_scope or ops.name_scope(
            name_scope, skip_on_eager=False)
        try:
          current_name_scope_name = current_name_scope.__enter__()
        except:
          current_name_scope.__exit__(*sys.exc_info())
          raise
        self._current_name_scope = current_name_scope
        if isinstance(self._name_or_scope, str):
          old_name_scope = current_name_scope_name
        else:
          old_name_scope = self._name_or_scope.original_name_scope
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            old_name_scope=old_name_scope,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope
      else:
        self._current_name_scope = None
        # This can only happen if someone is entering the root variable scope.
        pure_variable_scope = _pure_variable_scope(
            self._name_or_scope,
            reuse=self._reuse,
            initializer=self._initializer,
            regularizer=self._regularizer,
            caching_device=self._caching_device,
            partitioner=self._partitioner,
            custom_getter=self._custom_getter,
            dtype=self._dtype,
            use_resource=self._use_resource,
            constraint=self._constraint)
        try:
          entered_pure_variable_scope = pure_variable_scope.__enter__()
        except:
          pure_variable_scope.__exit__(*sys.exc_info())
          raise
        self._cached_pure_variable_scope = pure_variable_scope
        return entered_pure_variable_scope

    else:  # Here name_or_scope is None. Using default name, but made unique.
      if self._reuse:
        raise ValueError("reuse=True cannot be used without a name_or_scope")
      current_name_scope = current_name_scope or ops.name_scope(
          self._default_name, skip_on_eager=False)
      try:
        current_name_scope_name = current_name_scope.__enter__()
      except:
        current_name_scope.__exit__(*sys.exc_info())
        raise
      self._current_name_scope = current_name_scope
      unique_default_name = _get_unique_variable_scope(self._default_name)
      pure_variable_scope = _pure_variable_scope(
          unique_default_name,
          initializer=self._initializer,
          regularizer=self._regularizer,
          caching_device=self._caching_device,
          partitioner=self._partitioner,
          custom_getter=self._custom_getter,
          old_name_scope=current_name_scope_name,
          dtype=self._dtype,
          use_resource=self._use_resource,
          constraint=self._constraint)
      try:
        entered_pure_variable_scope = pure_variable_scope.__enter__()
      except:
        pure_variable_scope.__exit__(*sys.exc_info())
        raise
      self._cached_pure_variable_scope = pure_variable_scope
      return entered_pure_variable_scope

  def __exit__(self, type_arg, value_arg, traceback_arg):
    try:
      self._cached_pure_variable_scope.__exit__(type_arg, value_arg,
                                                traceback_arg)
    finally:
      try:
        if self._current_name_scope:
          self._current_name_scope.__exit__(type_arg, value_arg,
                                            traceback_arg)
      finally:
        if self._in_graph_mode and not self._building_function:
          self._graph_context_manager.__exit__(type_arg, value_arg,
                                               traceback_arg)

@tf_export(v1=["variable_op_scope"])
@tf_contextlib.contextmanager
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None,
                      use_resource=None,
                      constraint=None):
  """Deprecated: context manager for defining an op that creates variables."""
  logging.warn("tf.variable_op_scope(values, name, default_name) is "
               "deprecated, use tf.variable_scope(name, default_name, values)")
  with variable_scope(
      name_or_scope,
      default_name=default_name,
      values=values,
      initializer=initializer,
      regularizer=regularizer,
      caching_device=caching_device,
      partitioner=partitioner,
      custom_getter=custom_getter,
      reuse=reuse,
      dtype=dtype,
      use_resource=use_resource,
      constraint=constraint) as scope:
    yield scope


def _call_partitioner(partitioner, shape, dtype):
  """Call partitioner validating its inputs/output.
  Args:
    partitioner: a function mapping `Tensor` shape and dtype to a list of
      partitions.
    shape: shape of the `Tensor` to partition, must have at least two
      dimensions.
    dtype: dtype of the elements in the `Tensor`.

  Returns:
    A list with elements >=1 and exactly one >1. The index of that
    element corresponds to the partitioning axis.
  zKShape of a new partitioned variable must be fully defined, but instead was r   rK   z;A partitioned Variable must have rank at least 1, shape: %s)r3   ra   z/Partitioner must return a sequence, but saw: %szOPartitioner returned a partition list that does not match the Variable's rank: z vs. c              3   &   K   | ]	  }|d k    ywrK   NrH   .0ps     r4   	<genexpr>z$_call_partitioner.<locals>.<genexpr>!
        1Q    z6Partitioner returned zero partitions for some axes: %sc              3   &   K   | ]	  }|d kD    ywr  rH   r  s     r4   r  z$_call_partitioner.<locals>.<genexpr>$
  r  r  z6Can only slice a variable along one dimension: shape: z, partitioning: )	r   r,   ndimsr%   r   ro   r+   anysum)rh   r3   ra   slicings       r4   r   r   
  s    
			!
=BE F F
[[1_
 !#() * * e51'	G_55	6
F  \U[[ 
(/	89 9 	   
M     1$
6;WF G G	.r6   c                 L    t        |       D ]  \  }}|dkD  s ||fS  d}d}||fS )zGGet slicing dimension and number of slices from the partitioner output.rK   r   )r   )r  rB   r   s      r4   r   r   -
  sI    (1 iA~
 
J	
 IJ	J	r6   c              #      K   | |   |z  }dgt        |       z  }| |   |z  }t        |      D ]4  }| dd }|t        ||k        z   ||<   |dd |f ||xx   ||   z  cc<   6 yw)z5Slices a given a shape along the specified dimension.r   N)r+   r@   r  )r0   r   rB   num_slices_with_excessr2   min_slice_lenrA   r3   s           r4   r   r   9
  s     %i0:=3Z &Y':5- *aqME$tA0F,F'GGE)
)U

9y))	*s   A#A%c                       fdS )z<Gets around capturing loop variables in python being broken.c                       fi | S r8   rH   )r   captured_gettercaptured_previouss    r4   r#  z_make_getter.<locals>.<lambda>G
  s    /*;FvF r6   rH   )r  r  s   ``r4   _make_getterr  E
  s	    	FFr6   c                     | a y)z+Sets a reference to variable_v1.VariableV1.N)r   )variable_v1s    r4   set_variable_v1r  M
  s	     ,r6   variable_creator_scopec              #      K   t        j                         j                  |       5  d ddd       y# 1 sw Y   yxY ww)a  Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
    def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator may
  choose to create multiple variables, return already existing variables, or
  simply register that a variable was created and defer to the next creators in
  line. Creators can also modify the keyword arguments seen by the next
  creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwds are:

   * initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called. In
        that case, `dtype` must be specified. (Note that initializer functions
        from init_ops.py must first be bound to a shape before being used here.)
   * trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
        `trainable` defaults to `True`, unless `synchronization` is
        set to `ON_READ`, in which case it defaults to `False`.
   * collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
   * validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
   * caching_device: Optional device string describing where the Variable
        should be cached for reading.  Defaults to the Variable's device.
        If not `None`, caches on another device.  Typical use is to cache
        on the device where the Ops using the Variable reside, to deduplicate
        copying through `Switch` and other conditional statements.
   * name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
   * dtype: If set, initial_value will be converted to the given type.
        If `None`, either the datatype will be kept (if `initial_value` is
        a Tensor), or `convert_to_tensor` will decide.
   * constraint: A constraint function to be applied to the variable after
        updates by some algorithms.
   * use_resource: if True, a ResourceVariable is always created.
   * synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize.
   * aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

  This set may grow over time, so it's important the signature of creators is as
  mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  Nr   r  _variable_creator_scopevariable_creators    r4   variable_creator_scope_v1r  S
  s8     N 667GH 
	
 
 
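
# Illustrative sketch (hypothetical creator, not part of the module): a
# creator can rewrite kwargs before deferring to `next_creator`, e.g. forcing
# every variable created in the scope to cache its reads on the CPU:
#
#   def caching_creator(next_creator, **kwargs):
#     kwargs["caching_device"] = "/cpu:0"
#     return next_creator(**kwargs)
#
#   with tf.compat.v1.variable_creator_scope(caching_creator):
#     v = tf.Variable(1.0)  # created with caching_device="/cpu:0"
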

@tf_export("variable_creator_scope", v1=[])
@tf_contextlib.contextmanager
def variable_creator_scope(variable_creator):
  """Scope which defines a variable creation function to be used by variable().

  variable_creator is expected to be a function with the following signature:

  ```
    def variable_creator(next_creator, **kwargs)
  ```

  The creator is supposed to eventually call the next_creator to create a
  variable if it does want to create a variable and not call Variable or
  ResourceVariable directly. This helps make creators composable. A creator may
  choose to create multiple variables, return already existing variables, or
  simply register that a variable was created and defer to the next creators in
  line. Creators can also modify the keyword arguments seen by the next
  creators.

  Custom getters in the variable scope will eventually resolve down to these
  custom creators when they do create variables.

  The valid keyword arguments in kwds are:

   * initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called. In
        that case, `dtype` must be specified. (Note that initializer functions
        from init_ops.py must first be bound to a shape before being used here.)
   * trainable: If `True`, the default, GradientTapes automatically watch
        uses of this Variable.
   * validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
   * caching_device: Optional device string describing where the Variable
        should be cached for reading.  Defaults to the Variable's device.
        If not `None`, caches on another device.  Typical use is to cache
        on the device where the Ops using the Variable reside, to deduplicate
        copying through `Switch` and other conditional statements.
   * name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
   * dtype: If set, initial_value will be converted to the given type.
        If `None`, either the datatype will be kept (if `initial_value` is
        a Tensor), or `convert_to_tensor` will decide.
   * constraint: A constraint function to be applied to the variable after
        updates by some algorithms.
   * synchronization: Indicates when a distributed variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize.
   * aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.

  This set may grow over time, so it's important the signature of creators is as
  mentioned above.

  Args:
    variable_creator: the passed creator

  Yields:
    A scope in which the creator is active
  Nr  r  s    r4   r  r  
  s8     B 667GH 
	
 
 
r  )
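
# Illustrative sketch (assumed example, not from this module): the
# `custom_getter` hook documented above composes across nested scopes via
# `_maybe_wrap_custom_getter`. A getter that renames variables looks like:
#
#   def suffix_getter(getter, name, *args, **kwargs):
#     return getter(name + "_suffix", *args, **kwargs)
#
#   with tf.compat.v1.variable_scope("scope", custom_getter=suffix_getter):
#     v = tf.compat.v1.get_variable("v", [1])
#   # v.name == "scope/v_suffix:0"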