
    2Vh>                        d dl Z d dlZd dlZd dlmZ d dlmZ d dlmZ d dlm	Z	 d dl
mZ dadZdZd	Zd
ZdZdZdZdZ e	d      dd       Zej-                  ede      e_         e	d      dd       Zd Zd Z	 ddZd Zd Zy)    N)activations)backend)ops)keras_export)
file_utilszUhttps://storage.googleapis.com/download.tensorflow.org/data/imagenet_class_index.jsona:  
  Preprocesses a tensor or Numpy array encoding a batch of images.

  Usage example with `applications.MobileNet`:

  ```python
  i = keras.layers.Input([None, None, 3], dtype="uint8")
  x = ops.cast(i, "float32")
  x = keras.applications.mobilenet.preprocess_input(x)
  core = keras.applications.MobileNet()
  x = core(x)
  model = keras.Model(inputs=[i], outputs=[x])
  result = model(image)
  ```

  Args:
        x: A floating point `numpy.array` or a backend-native tensor,
            3D or 4D with 3 color
            channels, with values in the range [0, 255].
            The preprocessed data are written over the input data
        if the data types are compatible. To avoid this
        behaviour, `numpy.copy(x)` can be used.
        data_format: Optional data format of the image tensor/array. None, means
        the global setting `keras.backend.image_data_format()` is used
        (unless you changed it, it uses "channels_last").{mode}
        Defaults to `None`.

  Returns:
      Preprocessed array with type `float32`.
      {ret}

  Raises:
      {error}
  a  
    mode: One of "caffe", "tf" or "torch".
      - caffe: will convert the images from RGB to BGR,
          then will zero-center each color channel with
          respect to the ImageNet dataset,
          without scaling.
      - tf: will scale pixels between -1 and 1,
          sample-wise.
      - torch: will scale pixels between 0 and 1 and then
          will normalize each channel with respect to the
          ImageNet dataset.
      Defaults to `"caffe"`.
  zE
    ValueError: In case of unknown `mode` or `data_format` argument.z;
    ValueError: In case of unknown `data_format` argument.zH
      The inputs pixel values are scaled between -1 and 1, sample-wise.z
      The input pixels values are scaled between 0 and 1 and each channel is
      normalized with respect to the ImageNet dataset.z
      The images are converted from RGB to BGR, then each color channel is
      zero-centered with respect to the ImageNet dataset, without scaling.z2keras.applications.imagenet_utils.preprocess_inputc                     |dvrt        d|       |t        j                         }n|dvrt        d|       t        | t        j
                        rt        | ||      S t        | ||      S )z@Preprocesses a tensor or Numpy array encoding a batch of images.>   tfcaffetorchzDExpected mode to be one of `caffe`, `tf` or `torch`. Received: mode=>   channels_lastchannels_firstz]Expected data_format to be one of `channels_first` or `channels_last`. Received: data_format=)data_formatmode)
ValueErrorr   image_data_format
isinstancenpndarray_preprocess_numpy_input_preprocess_tensor_input)xr   r   s      U/home/dcms/DCMS/lib/python3.12/site-packages/keras/src/applications/imagenet_utils.pypreprocess_inputr   V   s     ++"V%
 	

 //1	?	?66A]D
 	

 !RZZ &qkMM'{NN     )r   reterrorz4keras.applications.imagenet_utils.decode_predictionsc           
      \   t        | j                        dk7  s| j                  d   dk7  rt        d| j                         t        Ft	        j
                  dt        dd	      }t        |      5 }t        j                  |      addd       g }t        j                  |       } | D ]p  }|j                         | d ddd
   }|D cg c]$  }t        t        t        |               ||   fz   & }}|j                  d d       |j!                  |       r |S # 1 sw Y   xY wc c}w )a  Decodes the prediction of an ImageNet model.

    Args:
        preds: NumPy array encoding a batch of predictions.
        top: Integer, how many top-guesses to return. Defaults to `5`.

    Returns:
        A list of lists of top class prediction tuples
        `(class_name, class_description, score)`.
        One list of tuples per sample in batch input.

    Raises:
        ValueError: In case of invalid shape of the `pred` array
            (must be 2D).
          i  z{`decode_predictions` expects a batch of predictions (i.e. a 2D array of shape (samples, 1000)). Received array with shape: Nzimagenet_class_index.jsonmodels c2c37ea517e94d9795004a39431a14cb)cache_subdir	file_hashc                     | d   S )Nr    )r   s    r   <lambda>z$decode_predictions.<locals>.<lambda>   s
    !A$ r   T)keyreverse)lenshaper   CLASS_INDEXr   get_fileCLASS_INDEX_PATHopenjsonloadr   convert_to_numpyargsorttuplestrsortappend)	predstopfpathfresultspredtop_indicesiresults	            r   decode_predictionsrB   t   s,   & 5;;1A$ 6* +0++8
 	
 ##'!8	
 %[ 	'A))A,K	'G  'E llncTU+DbD1CNOa%CF+,Qz9OO5v	
 N	' 	' Ps   1D	)D)D&c                    t        | j                  j                  t        j                        s%| j                  t        j                         d      } |dk(  r| dz  } | dz  } | S |dk(  r| dz  } g d}g d	}nF|d
k(  r1t        | j                        dk(  r| ddddf   } n| ddddddf   } n
| ddddf   } g d}d}|d
k(  rFt        | j                        dk(  r| dddddfxx   |d   z  cc<   | dddddfxx   |d   z  cc<   | dddddfxx   |d   z  cc<   || dddddfxx   |d   z  cc<   | dddddfxx   |d   z  cc<   | dddddfxx   |d   z  cc<   | S | dddddddfxx   |d   z  cc<   | dddddddfxx   |d   z  cc<   | dddddddfxx   |d   z  cc<   |N| dddddddfxx   |d   z  cc<   | dddddddfxx   |d   z  cc<   | dddddddfxx   |d   z  cc<   | S | dxx   |d   z  cc<   | dxx   |d   z  cc<   | dxx   |d   z  cc<   |0| dxx   |d   z  cc<   | dxx   |d   z  cc<   | dxx   |d   z  cc<   | S )a  Preprocesses a NumPy array encoding a batch of images.

    Args:
      x: Input array, 3D or 4D.
      data_format: Data format of the image array.
      mode: One of "caffe", "tf" or "torch".
        - caffe: will convert the images from RGB to BGR,
            then will zero-center each color channel with
            respect to the ImageNet dataset,
            without scaling.
        - tf: will scale pixels between -1 and 1,
            sample-wise.
        - torch: will scale pixels between 0 and 1 and then
            will normalize each channel with respect to the
            ImageNet dataset.

    Returns:
        Preprocessed Numpy array.
    F)copyr	        _@      ?r        o@g
ףp=
?gv/?gCl?gZd;O?gy&1?g?r      Nr%   .gjtY@g`"1]@gQ^@r   r    r   ).r   ).r    ).r   )

issubclassdtypetyper   floatingastyper   floatxr+   r,   )r   r   r   meanstds        r   r   r      s   ( aggllBKK0HHW^^%EH2t|	U
	S		U
$#**177|q ddCiLa2slO #tt)A) &&qww<1aAgJ$q'!JaAgJ$q'!JaAgJ$q'!J!Q'
c!f$
!Q'
c!f$
!Q'
c!f$
" H aAqjMT!W$MaAqjMT!W$MaAqjMT!W$M!Q1*Q'!Q1*Q'!Q1*Q' H 	
&	T!W		&	T!W		&	T!W	?fIQIfIQIfIQIHr   c           
         t        | j                        }|dk(  r| dz  } | dz  } | S |dk(  r| dz  } g d}g d}n|dk(  rst        | j                        d	k(  r+t        j                  d
D cg c]	  }| |df    c}d      } nZt        j                  d
D cg c]  }| dd|ddf    c}d      } n*t        j                  d
D cg c]	  }| d|f    c}d      } g d}d}t        j                  t        j                  |       | j                        }|dk(  r t        j                  |dd|dz
  z  z         }nt        j                  |d|dz
  z  dz         }| |z  } |Tt        j                  t        j                  |      | j                        }|dk(  rt        j                  |d      }| |z  } | S c c}w c c}w c c}w )a  Preprocesses a tensor encoding a batch of images.

    Args:
      x: Input tensor, 3D or 4D.
      data_format: Data format of the image tensor.
      mode: One of "caffe", "tf" or "torch".
        - caffe: will convert the images from RGB to BGR,
            then will zero-center each color channel with
            respect to the ImageNet dataset,
            without scaling.
        - tf: will scale pixels between -1 and 1,
            sample-wise.
        - torch: will scale pixels between 0 and 1 and then
            will normalize each channel with respect to the
            ImageNet dataset.

    Returns:
        Preprocessed tensor.
    r	   rE   rF   r   rG   rH   rI   r   rJ   )r   r    r   .r   )axisNr    r%   rK   )rM   )r    rJ   )r    r   )rJ   )r%   r    r    )	r+   r,   r   stackconvert_to_tensorr   arrayrM   reshape)	r   r   r   ndimrR   rS   r@   mean_tensor
std_tensors	            r   r   r      s   ( qww<Dt|	U
	S		U
$#**177|q II)<QqCy<1EII9=aqAqz=AF 		i81S!V98rBA)''$qwwGK &&kk+vq8I/IJkk+ttax/@4/GHA
**288C=H
**Z<J	ZH- == 9s   *F8F=Gc                 `   |dk7  r| rt        |       dk(  rs|dk(  r@t        |       dk(  rdnd}| |   dvrt        j                  d| d    d	d
       | d   ||f}n>| d   dvrt        j                  d| d    d	d
       ||| d   f}n|dk(  rd||f}n||df}|dk(  r|r| | |k7  rt        d| d|        |S | r|dk(  re| t        |       dk7  rt        d      | d   dk7  r|dk(  rt        d|  d      | d   | d   |k  s| d
   | d
   |k  rt        d| d| d|        | rt        |       dk7  rt        d      | d   dk7  r|dk(  rt        d|  d      | d   | d   |k  s| d   +| d   |k  r#t        d| d| d|        |r|} n
|dk(  rd} nd} |rd| v rt        d|        | S )a  Internal utility to compute/validate a model's input shape.

    Args:
      input_shape: Either None (will return the default network input shape),
        or a user-provided shape to be validated.
      default_size: Default input width/height for the model.
      min_size: Minimum input width/height accepted by the model.
      data_format: Image data format to use.
      require_flatten: Whether the model is expected to
        be linked to a classifier via a Flatten layer.
      weights: One of `None` (random initialization)
        or 'imagenet' (pre-training on ImageNet).
        If weights='imagenet' input channels must be equal to 3.

    Returns:
      An integer shape tuple (may include None entries).

    Raises:
      ValueError: In case of invalid argument values.
    imagenetrJ   r      r    r   >   r    rJ   z]This model usually expects 1 or 3 input channels. However, it was passed an input_shape with z input channels.r   )
stacklevelr%   NzXWhen setting `include_top=True` and loading `imagenet` weights, `input_shape` should be z.  Received: input_shape=z0`input_shape` must be a tuple of three integers.z6The input must have 3 channels; Received `input_shape=`zInput size must be at least r   z; Received: input_shape=)rJ   NN)NNrJ   z[If `include_top` is True, you should specify a static `input_shape`. Received: input_shape=)r+   warningswarnr   )input_shapedefault_sizemin_sizer   require_flattenweightscorrect_channel_axisdefault_shapes           r   obtain_input_shaperk   %  s   8 *[1AQ1F**(+K(8A(=11 /0>'N++;=  !	 )^\<HM2f,'O,,<>  !	 *<RIM**l;M)<;M*"m+ //<o >--8M;  **&{#q($J  q>Q&7j+@$((3}A7 
  N.;q>H3L!!n0[^h5N$6xjH: &''2m5  &{#q($J  r?a'Gz,A$((3}A7 
  N.;q>H3L!!n0[^h5N$6#*AhZ 0''2m5  'K..--;))47 
 r   c                     t        j                         dk(  rdnd}| j                  ||dz    }t        |t              r||f}|d   d}nd|d   dz  z
  d|d   dz  z
  f}|d   dz  |d   dz  f}|d   |d   z
  |d   f|d   |d   z
  |d   ffS )zReturns a tuple for zero-padding for 2D convolution with downsampling.

    Args:
      inputs: Input tensor.
      kernel_size: An integer or tuple/list of 2 integers.

    Returns:
      A tuple.
    r   r   r    r   )r    r    )r   r   r,   r   int)inputskernel_sizeimg_dim
input_sizeadjustcorrects         r   correct_padrt     s     ,,.2BBaGg16J+s#"K0!}jma''Z]Q->)>?1~"KNa$78G	fQi	,	fQi	, r   c                     |yt        j                  |       } | t        j                  d      t        j                  d      hvrt        d|        y)a@  validates that the classifer_activation is compatible with the weights.

    Args:
      classifier_activation: str or callable activation function
      weights: The pretrained weights to load.

    Raises:
      ValueError: if an activation other than `None` or `softmax` are used with
        pretrained weights.
    NsoftmaxzOnly `None` and `softmax` activations are allowed for the `classifier_activation` argument when using pretrained weights, with `include_top=True`; Received: classifier_activation=)r   getr   )classifier_activationrh   s     r   validate_activationry     sg     'OO,AB	"%  % &;$;=
 	
	r   )Nr
   )   )N)r1   rb   numpyr   	keras.srcr   r   r   keras.src.api_exportr   keras.src.utilsr   r-   r/   PREPROCESS_INPUT_DOCPREPROCESS_INPUT_MODE_DOC"PREPROCESS_INPUT_DEFAULT_ERROR_DOCPREPROCESS_INPUT_ERROR_DOCPREPROCESS_INPUT_RET_DOC_TFPREPROCESS_INPUT_RET_DOC_TORCHPREPROCESS_INPUT_RET_DOC_CAFFEr   format__doc__rB   r   r   rk   rt   ry   r'   r   r   <module>r      s       !   - &% ! F &H "> K ": "N 
 BCO DO, 066	"

, 7    DE) F)XFR8B up2
r   