
"""Contains the base PreprocessingLayer and a subclass that uses Combiners."""

import abc
import collections

import numpy as np

from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils import version_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.trackable import base as trackable


class PreprocessingLayer(Layer, metaclass=abc.ABCMeta):
  """Base class for Preprocessing Layers.

  **Don't use this class directly: it's an abstract base class!** You may
  be looking for one of the many built-in
  [preprocessing layers](https://keras.io/guides/preprocessing_layers/)
  instead.

  Preprocessing layers are layers whose state gets computed before model
  training starts. They do not get updated during training.
  Most preprocessing layers implement an `adapt()` method for state computation.

  The `PreprocessingLayer` class is the base class you would subclass to
  implement your own preprocessing layers.

  Attributes:
    streaming: Whether a layer can be adapted multiple times without resetting
      the state of the layer.
  """
  _must_restore_from_config = True

  def __init__(self, streaming=True, **kwargs):
    super(PreprocessingLayer, self).__init__(**kwargs)
    self._streaming = streaming
    self._is_compiled = False
    self._is_adapted = False

    # `reset_state` is wrapped so that `is_adapted` flips back to `False`
    # whenever the layer's statistics are cleared.
    self._reset_state_impl = self.reset_state
    self.reset_state = self._reset_state_wrapper

    self._adapt_function = None

  @property
  def streaming(self):
    """Whether `adapt` can be called twice without resetting the state."""
    return self._streaming

  @property
  def is_adapted(self):
    """Whether the layer has been fit to data already."""
    return self._is_adapted

  def update_state(self, data):
    """Accumulates statistics for the preprocessing layer.

    Arguments:
      data: A mini-batch of inputs to the layer.
    """
    raise NotImplementedError

  def reset_state(self):  # pylint: disable=method-hidden
    """Resets the statistics of the preprocessing layer."""
    raise NotImplementedError

  def merge_state(self, layers):
    """Merge the statistics of multiple preprocessing layers.

    This layer will contain the merged state.

    Arguments:
      layers: Layers whose statistics should be merged with the statistics of
        this layer.
    """
    raise NotImplementedError

  def finalize_state(self):
    """Finalize the statistics for the preprocessing layer.

    This method is called at the end of `adapt` or after restoring a serialized
    preprocessing layer's state. This method handles any one-time operations
    that should occur on the layer's state before `Layer.__call__`.
    """
    pass

  def make_adapt_function(self):
    """Creates a function to execute one step of `adapt`.

    This method can be overridden to support custom adapt logic.
    This method is called by `PreprocessingLayer.adapt`.

    Typically, this method directly controls `tf.function` settings,
    and delegates the actual state update logic to
    `PreprocessingLayer.update_state`.

    This function is cached the first time `PreprocessingLayer.adapt`
    is called. The cache is cleared whenever `PreprocessingLayer.compile`
    is called.

    Returns:
      Function. The function created by this method should accept a
      `tf.data.Iterator`, retrieve a batch, and update the state of the
      layer.
    """
    if self._adapt_function is not None:
      return self._adapt_function

    def adapt_step(iterator):
      data = next(iterator)
      self._adapt_maybe_build(data)
      self.update_state(data)

    if self._steps_per_execution.numpy().item() == 1:
      adapt_fn = adapt_step
    else:

      def adapt_fn(iterator):
        for _ in math_ops.range(self._steps_per_execution):
          adapt_step(iterator)

    if not self._run_eagerly:
      adapt_fn = def_function.function(adapt_fn)

    self._adapt_function = adapt_fn
    return self._adapt_function

  def compile(self, run_eagerly=None, steps_per_execution=None):
    """Configures the layer for `adapt`.

    Arguments:
      run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s logic
        will not be wrapped in a `tf.function`. Recommended to leave this as
        `None` unless your `Model` cannot be run inside a `tf.function`.
      steps_per_execution: Int. Defaults to 1. The number of batches to run
        during each `tf.function` call. Running multiple batches inside a
        single `tf.function` call can greatly improve performance on TPUs or
        small models with a large Python overhead.
    """
    if steps_per_execution is None:
      steps_per_execution = 1
    self._configure_steps_per_execution(steps_per_execution)

    if run_eagerly is None:
      run_eagerly = self.dynamic
    self._run_eagerly = run_eagerly

    self._is_compiled = True

  def adapt(self, data, batch_size=None, steps=None, reset_state=True):
    """Fits the state of the preprocessing layer to the data being passed.

    After calling `adapt` on a layer, a preprocessing layer's state will not
    update during training. In order to make preprocessing layers efficient in
    any distribution context, they are kept constant with respect to any
    compiled `tf.Graph`s that call the layer. This does not affect the layer's use
    when adapting each layer only once, but if you adapt a layer multiple times
    you will need to take care to re-compile any compiled functions as follows:

     * If you are adding a preprocessing layer to a `keras.Model`, you need to
       call `model.compile` after each subsequent call to `adapt`.
     * If you are calling a preprocessing layer inside `tf.data.Dataset.map`,
       you should call `map` again on the input `tf.data.Dataset` after each
       `adapt`.
     * If you are using a `tf.function` directly which calls a preprocessing
       layer, you need to call `tf.function` again on your callable after
       each subsequent call to `adapt`.

    `tf.keras.Model` example with multiple adapts:

    >>> layer = tf.keras.layers.experimental.preprocessing.Normalization(
    ...     axis=None)
    >>> layer.adapt([0, 2])
    >>> model = tf.keras.Sequential(layer)
    >>> model.predict([0, 1, 2])
    array([-1.,  0.,  1.], dtype=float32)
    >>> layer.adapt([-1, 1])
    >>> model.compile() # This is needed to re-compile model.predict!
    >>> model.predict([0, 1, 2])
    array([0., 1., 2.], dtype=float32)

    `tf.data.Dataset` example with multiple adapts:

    >>> layer = tf.keras.layers.experimental.preprocessing.Normalization(
    ...     axis=None)
    >>> layer.adapt([0, 2])
    >>> input_ds = tf.data.Dataset.range(3)
    >>> normalized_ds = input_ds.map(layer)
    >>> list(normalized_ds.as_numpy_iterator())
    [array([-1.], dtype=float32),
     array([0.], dtype=float32),
     array([1.], dtype=float32)]
    >>> layer.adapt([-1, 1])
    >>> normalized_ds = input_ds.map(layer) # Re-map over the input dataset.
    >>> list(normalized_ds.as_numpy_iterator())
    [array([0.], dtype=float32),
     array([1.], dtype=float32),
     array([2.], dtype=float32)]

    Arguments:
        data: The data to train on. It can be passed either as a tf.data
          Dataset, or as a numpy array.
        batch_size: Integer or `None`.
            Number of samples per state update.
            If unspecified, `batch_size` will default to 32.
            Do not specify the `batch_size` if your data is in the
            form of datasets, generators, or `keras.utils.Sequence` instances
            (since they generate batches).
        steps: Integer or `None`.
            Total number of steps (batches of samples).
            When training with input tensors such as
            TensorFlow data tensors, the default `None` is equal to
            the number of samples in your dataset divided by
            the batch size, or 1 if that cannot be determined. If x is a
            `tf.data` dataset, and 'steps' is None, the epoch will run until
            the input dataset is exhausted. When passing an infinitely
            repeating dataset, you must specify the `steps` argument. This
            argument is not supported with array inputs.
        reset_state: Optional argument specifying whether to clear the state of
          the layer at the start of the call to `adapt`, or whether to start
          from the existing state. This argument may not be relevant to all
          preprocessing layers: a subclass of PreprocessingLayer may choose to
          raise an error if 'reset_state' is set to False.
    """
    _disallow_inside_tf_function('adapt')
    if not version_utils.should_use_v2():
      raise RuntimeError('`adapt` is only supported in tensorflow v2.')
    if not self.streaming and self._is_adapted and not reset_state:
      raise ValueError(
          '{} does not support calling `adapt` twice without resetting the '
          'state.'.format(self.__class__.__name__))
    if not self._is_compiled:
      self.compile()  # Provide the same default args as `Model.compile`.
    if self.built and reset_state:
      self.reset_state()

    data_handler = data_adapter.DataHandler(
        data,
        batch_size=batch_size,
        steps_per_epoch=steps,
        epochs=1,
        steps_per_execution=self._steps_per_execution,
        distribute=False)
    self._adapt_function = self.make_adapt_function()
    for _, iterator in data_handler.enumerate_epochs():
      with data_handler.catch_stop_iteration():
        for _ in data_handler.steps():
          self._adapt_function(iterator)
          if data_handler.should_sync:
            context.async_wait()
    self.finalize_state()
    self._is_adapted = True

  def _reset_state_wrapper(self):
    """Calls `reset_state` and sets `adapted` to `False`."""
    self._reset_state_impl()
    self._is_adapted = False

  @trackable.no_automatic_dependency_tracking
  def _configure_steps_per_execution(self, steps_per_execution):
    self._steps_per_execution = variables.Variable(
        steps_per_execution,
        dtype='int64',
        aggregation=variables.VariableAggregationV2.ONLY_FIRST_REPLICA)

  def _adapt_maybe_build(self, data):
    if not self.built:
      try:
        # If this is a Numpy array or tensor, we can get its shape from
        # `.shape`. If not, an AttributeError is raised.
        data_shape = data.shape
        data_shape_nones = tuple([None] * len(data.shape))
      except AttributeError:
        # The input has an unknown number of dimensions.
        data_shape = None
        data_shape_nones = None

      batch_input_shape = getattr(self, '_batch_input_shape', None)
      if batch_input_shape is None:
        # Set the number of dimensions.
        self._batch_input_shape = data_shape_nones

      self.build(data_shape)
      self.built = True


class CombinerPreprocessingLayer(PreprocessingLayer):
  """Base class for PreprocessingLayers that do computation using a Combiner.

  This class provides several helper methods to make creating a
  PreprocessingLayer easier. It assumes that the core of your computation will
  be done via a Combiner object. Subclassing this class to create a
  PreprocessingLayer allows your layer to be compatible with distributed
  computation.

  This class is compatible with Tensorflow 2.0+.
  """

  def __init__(self, combiner, **kwargs):
    super(CombinerPreprocessingLayer, self).__init__(**kwargs)
    self.state_variables = collections.OrderedDict()
    self._combiner = combiner
    self._adapt_accumulator = None

  def reset_state(self):  # pylint: disable=method-hidden
    self._adapt_accumulator = None

  @trackable.no_automatic_dependency_tracking
  def update_state(self, data):
    if self._adapt_accumulator is None:
      self._adapt_accumulator = self._get_accumulator()
    self._adapt_accumulator = self._combiner.compute(data,
                                                     self._adapt_accumulator)

  def merge_state(self, layers):
    accumulators = ([self._get_accumulator()] +
                    [l._get_accumulator() for l in layers])  # pylint: disable=protected-access
    merged_accumulator = self._combiner.merge(accumulators)
    self._set_accumulator(merged_accumulator)

  def finalize_state(self):
    if self._adapt_accumulator is not None:
      self._set_accumulator(self._adapt_accumulator)

  def compile(self, run_eagerly=None, steps_per_execution=None):
    # Combiners are stateful Python objects, so these layers run their adapt
    # step eagerly unless the caller explicitly requests otherwise.
    if run_eagerly is None:
      run_eagerly = True
    super(CombinerPreprocessingLayer, self).compile(
        run_eagerly=run_eagerly, steps_per_execution=steps_per_execution)

  def adapt(self, data, batch_size=None, steps=None, reset_state=True):
    if not reset_state:
      self._adapt_accumulator = self._combiner.restore(self._restore_updates())
    super(CombinerPreprocessingLayer, self).adapt(
        data, batch_size=batch_size, steps=steps, reset_state=reset_state)

  def _add_state_variable(self,
                          name,
                          shape,
                          dtype,
                          initializer=None,
                          partitioner=None,
                          use_resource=None,
                          **kwargs):
    """Add a variable that can hold state which is updated during adapt().

    Args:
      name: Variable name.
      shape: Variable shape. Defaults to scalar if unspecified.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      initializer: initializer instance (callable).
      partitioner: Partitioner to be passed to the `Trackable` API.
      use_resource: Whether to use `ResourceVariable`.
      **kwargs: Additional keyword arguments. Accepted values are `getter` and
        `collections`.

    Returns:
      The created variable.
    """
    weight = self.add_weight(
        name=name,
        shape=shape,
        dtype=dtype,
        initializer=initializer,
        regularizer=None,
        trainable=False,
        constraint=None,
        partitioner=partitioner,
        use_resource=use_resource,
        **kwargs)
    self.state_variables[name] = weight
    return weight

  def _restore_updates(self):
    """Recreates a dict of updates from the layer's weights."""
    data_dict = {}
    for name, var in self.state_variables.items():
      data_dict[name] = var.numpy()
    return data_dict

  def _get_accumulator(self):
    if self._is_adapted:
      return self._combiner.restore(self._restore_updates())
    else:
      return None

  def _set_accumulator(self, accumulator):
    updates = self._combiner.extract(accumulator)
    self._set_state_variables(updates)
    self._adapt_accumulator = None  # Reset accumulator from adapt.

  def _set_state_variables(self, updates):
    """Directly update the internal state of this Layer.

    This method expects a string-keyed dict of {state_variable_name: state}.
    The precise nature of the state, and the names associated, are described
    by the subclasses of CombinerPreprocessingLayer.

    Args:
      updates: A string keyed dict of weights to update.

    Raises:
      RuntimeError: if `build()` was not called before `_set_state_variables()`.
    """
    if not self.built:
      raise RuntimeError('_set_state_variables() must be called after build().')
    with ops.init_scope():
      for var_name, value in updates.items():
        self.state_variables[var_name].assign(value)


def convert_to_list(values, sparse_default_value=None):
  """Convert a TensorLike, CompositeTensor, or ndarray into a Python list."""
  if tf_utils.is_ragged(values):
    # Non-eager RaggedTensors must be evaluated through a session before
    # `to_list()` can be called on them.
    if (isinstance(values, ragged_tensor.RaggedTensor) and
        not context.executing_eagerly()):
      values = backend.get_session(values).run(values)
    values = values.to_list()

  if isinstance(values,
                (sparse_tensor.SparseTensor, sparse_tensor.SparseTensorValue)):
    if sparse_default_value is None:
      if dtypes.as_dtype(values.values.dtype) == dtypes.string:
        sparse_default_value = ''
      else:
        sparse_default_value = -1
    dense_tensor = sparse_ops.sparse_tensor_to_dense(
        values, default_value=sparse_default_value)
    values = backend.get_value(dense_tensor)

  if isinstance(values, tensor.Tensor):
    values = backend.get_value(values)

  # We may have a ndarray at this point, either passed in directly or produced
  # by the conversions above; force it into a standard Python list.
  if isinstance(values, np.ndarray):
    values = values.tolist()

  return values


class Combiner(object):
  """Functional object that defines a shardable computation.

  This object defines functions required to create and manipulate data objects.
  These data objects, referred to below as 'accumulators', are computation-
  specific and may be implemented alongside concrete subclasses of Combiner
  (if necessary - some computations may be simple enough that standard Python
  types can be used as accumulators).

  The intent for this class is that by describing computations in this way, we
  can arbitrarily shard a dataset, perform computations on a subset, and then
  merge the computation into a final result. This enables distributed
  computation.

  The combiner itself does not own any state - all computational state is owned
  by the accumulator objects. This is so that we can have an arbitrary number of
  Combiners (thus sharding the computation N ways) without risking any change
  to the underlying computation. These accumulator objects are uniquely
  associated with each Combiner; a Combiner defines what the accumulator object
  should be and will only work with accumulators of that type.
  """
  __metaclass__ = abc.ABCMeta

  def __repr__(self):
    return '<{}>'.format(self.__class__.__name__)

  @abc.abstractmethod
  def compute(self, batch_values, accumulator=None):
    """Compute a step in this computation, returning a new accumulator.

    This method computes a step of the computation described by this Combiner.
    If an accumulator is passed, the data in that accumulator is also used; so
    compute(batch_values) results in f(batch_values), while
    compute(batch_values, accumulator) results in
    merge(f(batch_values), accumulator).

    Args:
      batch_values: A list of ndarrays representing the values of the inputs for
        this step of the computation.
      accumulator: the current accumulator. Can be None.

    Returns:
      An accumulator that includes the passed batch of inputs.
    """
    pass

  @abc.abstractmethod
  def merge(self, accumulators):
    """Merge several accumulators to a single accumulator.

    This method takes the partial values in several accumulators and combines
    them into a single accumulator. This computation must not be order-specific
    (that is, merge([a, b]) must return the same result as merge([b, a])).

    Args:
      accumulators: the accumulators to merge, as a list.

    Returns:
      A merged accumulator.
    """
    pass

  @abc.abstractmethod
  def extract(self, accumulator):
    """Convert an accumulator into a dict of output values.

    Args:
      accumulator: The accumulator to convert.

    Returns:
      A dict of ndarrays representing the data in this accumulator.
    """
    pass

  @abc.abstractmethod
  def restore(self, output):
    """Create an accumulator based on 'output'.

    This method creates a new accumulator with identical internal state to the
    one used to create the data in 'output'. This means that if you do

    output_data = combiner.extract(accumulator_1)
    accumulator_2 = combiner.restore(output_data)

    then accumulator_1 and accumulator_2 will have identical internal state, and
    computations using either of them will be equivalent.

    Args:
      output: The data output from a previous computation. Should be in the same
        form as provided by 'extract'.

    Returns:
      A new accumulator.
    """
    pass

  @abc.abstractmethod
  def serialize(self, accumulator):
    """Serialize an accumulator for a remote call.

    This function serializes an accumulator to be sent to a remote process.

    Args:
      accumulator: The accumulator to serialize.

    Returns:
      A byte string representing the passed accumulator.
    """
    pass

  @abc.abstractmethod
  def deserialize(self, encoded_accumulator):
    """Deserialize an accumulator received from 'serialize()'.

    This function deserializes an accumulator serialized by 'serialize()'.

    Args:
      encoded_accumulator: A byte string representing an accumulator.

    Returns:
      The accumulator represented by the passed byte_string.
    """
    pass


def _disallow_inside_tf_function(method_name):
  """Disallow calling a method inside a `tf.function`."""
  if ops.inside_function():
    error_msg = (
        'Detected a call to `PreprocessingLayer.{method_name}` inside a '
        '`tf.function`. `PreprocessingLayer.{method_name}` is a high-level '
        'endpoint that manages its own `tf.function`. Please move the call '
        'to `PreprocessingLayer.{method_name}` outside of all enclosing '
        '`tf.function`s. Note that you can call a `PreprocessingLayer` '
        'directly on `Tensor`s inside a `tf.function` like: `layer(x)`, '
        'or update its state like: `layer.update_state(x)`.').format(
            method_name=method_name)
    raise RuntimeError(error_msg)