"""Utilities for ImageNet data preprocessing & prediction decoding."""

import json
import warnings

import numpy as np

from keras import activations
from keras import backend
from keras.utils import data_utils

from tensorflow.python.util.tf_export import keras_export

CLASS_INDEX = None
CLASS_INDEX_PATH = (
    "https://storage.googleapis.com/download.tensorflow.org/"
    "data/imagenet_class_index.json"
)

PREPROCESS_INPUT_DOC = """
  Preprocesses a tensor or Numpy array encoding a batch of images.

  Usage example with `applications.MobileNet`:

  ```python
  i = tf.keras.layers.Input([None, None, 3], dtype=tf.uint8)
  x = tf.cast(i, tf.float32)
  x = tf.keras.applications.mobilenet.preprocess_input(x)
  core = tf.keras.applications.MobileNet()
  x = core(x)
  model = tf.keras.Model(inputs=[i], outputs=[x])

  image = tf.image.decode_png(tf.io.read_file('file.png'))
  result = model(image)
  ```

  Args:
    x: A floating point `numpy.array` or a `tf.Tensor`, 3D or 4D with 3 color
      channels, with values in the range [0, 255].
      The preprocessed data are written over the input data
      if the data types are compatible. To avoid this
      behaviour, `numpy.copy(x)` can be used.
    data_format: Optional data format of the image tensor/array. Defaults to
      None, in which case the global setting
      `tf.keras.backend.image_data_format()` is used (unless you changed it,
      it defaults to "channels_last").{mode}

  Returns:
      Preprocessed `numpy.array` or a `tf.Tensor` with type `float32`.
      {ret}

  Raises:
      {error}
  """

PREPROCESS_INPUT_MODE_DOC = """
    mode: One of "caffe", "tf" or "torch". Defaults to "caffe".
      - caffe: will convert the images from RGB to BGR,
          then will zero-center each color channel with
          respect to the ImageNet dataset,
          without scaling.
      - tf: will scale pixels between -1 and 1,
          sample-wise.
      - torch: will scale pixels between 0 and 1 and then
          will normalize each channel with respect to the
          ImageNet dataset.
  """

PREPROCESS_INPUT_DEFAULT_ERROR_DOC = """
    ValueError: In case of unknown `mode` or `data_format` argument."""

PREPROCESS_INPUT_ERROR_DOC = """
    ValueError: In case of unknown `data_format` argument."""

PREPROCESS_INPUT_RET_DOC_TF = """
      The input pixel values are scaled between -1 and 1, sample-wise."""

PREPROCESS_INPUT_RET_DOC_TORCH = """
      The input pixel values are scaled between 0 and 1 and each channel is
      normalized with respect to the ImageNet dataset."""

PREPROCESS_INPUT_RET_DOC_CAFFE = """
      The images are converted from RGB to BGR, then each color channel is
      zero-centered with respect to the ImageNet dataset, without scaling."""


@keras_export("keras.applications.imagenet_utils.preprocess_input")
def preprocess_input(x, data_format=None, mode="caffe"):
    """Preprocesses a tensor or Numpy array encoding a batch of images."""
    if mode not in {"caffe", "tf", "torch"}:
        raise ValueError(
            "Expected mode to be one of `caffe`, `tf` or `torch`. "
            f"Received: mode={mode}"
        )

    if data_format is None:
        data_format = backend.image_data_format()
    elif data_format not in {"channels_first", "channels_last"}:
        raise ValueError(
            "Expected data_format to be one of `channels_first` or "
            f"`channels_last`. Received: data_format={data_format}"
        )

    if isinstance(x, np.ndarray):
        return _preprocess_numpy_input(x, data_format=data_format, mode=mode)
    else:
        return _preprocess_symbolic_input(x, data_format=data_format, mode=mode)


preprocess_input.__doc__ = PREPROCESS_INPUT_DOC.format(
    mode=PREPROCESS_INPUT_MODE_DOC,
    ret="",
    error=PREPROCESS_INPUT_DEFAULT_ERROR_DOC,
)


@keras_export("keras.applications.imagenet_utils.decode_predictions")
def decode_predictions(preds, top=5):
    """Decodes the prediction of an ImageNet model.

    Args:
      preds: Numpy array encoding a batch of predictions.
      top: Integer, how many top-guesses to return. Defaults to 5.

    Returns:
      A list of lists of top class prediction tuples
      `(class_name, class_description, score)`.
      One list of tuples per sample in batch input.

    Raises:
      ValueError: In case of invalid shape of the `preds` array
        (must be 2D).
    """
    global CLASS_INDEX

    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError(
            "`decode_predictions` expects "
            "a batch of predictions "
            "(i.e. a 2D array of shape (samples, 1000)). "
            f"Found array with shape: {preds.shape}"
        )
    if CLASS_INDEX is None:
        fpath = data_utils.get_file(
            "imagenet_class_index.json",
            CLASS_INDEX_PATH,
            cache_subdir="models",
            file_hash="c2c37ea517e94d9795004a39431a14cb",
        )
        with open(fpath) as f:
            CLASS_INDEX = json.load(f)
    results = []
    for pred in preds:
        top_indices = pred.argsort()[-top:][::-1]
        result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]
        result.sort(key=lambda x: x[2], reverse=True)
        results.append(result)
    return results


def _preprocess_numpy_input(x, data_format, mode):
    """Preprocesses a Numpy array encoding a batch of images.

    Args:
      x: Input array, 3D or 4D.
      data_format: Data format of the image array.
      mode: One of "caffe", "tf" or "torch".
        - caffe: will convert the images from RGB to BGR,
            then will zero-center each color channel with
            respect to the ImageNet dataset,
            without scaling.
        - tf: will scale pixels between -1 and 1,
            sample-wise.
        - torch: will scale pixels between 0 and 1 and then
            will normalize each channel with respect to the
            ImageNet dataset.

    Returns:
        Preprocessed Numpy array.
    """
    if not issubclass(x.dtype.type, np.floating):
        x = x.astype(backend.floatx(), copy=False)

    if mode == "tf":
        x /= 127.5
        x -= 1.0
        return x
    elif mode == "torch":
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if x.ndim == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None

    # Zero-center by mean pixel (and scale by std for the "torch" mode).
    if data_format == "channels_first":
        if x.ndim == 3:
            x[0, :, :] -= mean[0]
            x[1, :, :] -= mean[1]
            x[2, :, :] -= mean[2]
            if std is not None:
                x[0, :, :] /= std[0]
                x[1, :, :] /= std[1]
                x[2, :, :] /= std[2]
        else:
            x[:, 0, :, :] -= mean[0]
            x[:, 1, :, :] -= mean[1]
            x[:, 2, :, :] -= mean[2]
            if std is not None:
                x[:, 0, :, :] /= std[0]
                x[:, 1, :, :] /= std[1]
                x[:, 2, :, :] /= std[2]
    else:
        x[..., 0] -= mean[0]
        x[..., 1] -= mean[1]
        x[..., 2] -= mean[2]
        if std is not None:
            x[..., 0] /= std[0]
            x[..., 1] /= std[1]
            x[..., 2] /= std[2]
    return x


def _preprocess_symbolic_input(x, data_format, mode):
    """Preprocesses a tensor encoding a batch of images.

    Args:
      x: Input tensor, 3D or 4D.
      data_format: Data format of the image tensor.
      mode: One of "caffe", "tf" or "torch".
        - caffe: will convert the images from RGB to BGR,
            then will zero-center each color channel with
            respect to the ImageNet dataset,
            without scaling.
        - tf: will scale pixels between -1 and 1,
            sample-wise.
        - torch: will scale pixels between 0 and 1 and then
            will normalize each channel with respect to the
            ImageNet dataset.

    Returns:
        Preprocessed tensor.
    """
    if mode == "tf":
        x /= 127.5
        x -= 1.0
        return x
    elif mode == "torch":
        x /= 255.0
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
    else:
        if data_format == "channels_first":
            # 'RGB'->'BGR'
            if backend.ndim(x) == 3:
                x = x[::-1, ...]
            else:
                x = x[:, ::-1, ...]
        else:
            # 'RGB'->'BGR'
            x = x[..., ::-1]
        mean = [103.939, 116.779, 123.68]
        std = None

    mean_tensor = backend.constant(-np.array(mean))

    # Zero-center by mean pixel.
    if backend.dtype(x) != backend.dtype(mean_tensor):
        x = backend.bias_add(
            backend.cast(x, backend.dtype(mean_tensor)),
            mean_tensor,
            data_format=data_format,
        )
    else:
        x = backend.bias_add(x, mean_tensor, data_format)
    if std is not None:
        std_tensor = backend.constant(np.array(std), dtype=backend.dtype(x))
        if data_format == "channels_first":
            std_tensor = backend.reshape(std_tensor, (-1, 1, 1))
        x /= std_tensor
    return x

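
# Illustrative sketch (the helper below and its name are hypothetical, not part
# of the upstream module): how the three `mode` options of `preprocess_input`
# transform a dummy batch, assuming the default "channels_last" image data
# format. It is defined but never called at import time.
def _example_preprocessing_modes():
    batch = np.random.uniform(0.0, 255.0, size=(2, 32, 32, 3)).astype("float32")
    # "tf": pixel values end up in [-1, 1], sample-wise.
    as_tf = preprocess_input(batch.copy(), mode="tf")
    # "torch": values scaled to [0, 1], then normalized per ImageNet channel stats.
    as_torch = preprocess_input(batch.copy(), mode="torch")
    # "caffe" (default): RGB -> BGR, then ImageNet mean subtraction, no scaling.
    # `.copy()` is used because the array may be modified in place.
    as_caffe = preprocess_input(batch.copy(), mode="caffe")
    return as_tf, as_torch, as_caffe
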
def obtain_input_shape(
    input_shape, default_size, min_size, data_format, require_flatten, weights=None
):
    """Internal utility to compute/validate a model's input shape.

    Args:
      input_shape: Either None (will return the default network input shape),
        or a user-provided shape to be validated.
      default_size: Default input width/height for the model.
      min_size: Minimum input width/height accepted by the model.
      data_format: Image data format to use.
      require_flatten: Whether the model is expected to
        be linked to a classifier via a Flatten layer.
      weights: One of `None` (random initialization)
        or 'imagenet' (pre-training on ImageNet).
        If weights='imagenet' input channels must be equal to 3.

    Returns:
      An integer shape tuple (may include None entries).

    Raises:
      ValueError: In case of invalid argument values.
    """
    if weights != "imagenet" and input_shape and len(input_shape) == 3:
        if data_format == "channels_first":
            if input_shape[0] not in {1, 3}:
                warnings.warn(
                    "This model usually expects 1 or 3 input channels. "
                    "However, it was passed an input_shape with "
                    f"{input_shape[0]} input channels.",
                    stacklevel=2,
                )
            default_shape = (input_shape[0], default_size, default_size)
        else:
            if input_shape[-1] not in {1, 3}:
                warnings.warn(
                    "This model usually expects 1 or 3 input channels. "
                    "However, it was passed an input_shape with "
                    f"{input_shape[-1]} input channels.",
                    stacklevel=2,
                )
            default_shape = (default_size, default_size, input_shape[-1])
    else:
        if data_format == "channels_first":
            default_shape = (3, default_size, default_size)
        else:
            default_shape = (default_size, default_size, 3)
    if weights == "imagenet" and require_flatten:
        if input_shape is not None:
            if input_shape != default_shape:
                raise ValueError(
                    "When setting `include_top=True` and loading `imagenet` "
                    f"weights, `input_shape` should be {default_shape}.  "
                    f"Received: input_shape={input_shape}"
                )
        return default_shape
    if input_shape:
        if data_format == "channels_first":
            if input_shape is not None:
                if len(input_shape) != 3:
                    raise ValueError(
                        "`input_shape` must be a tuple of three integers."
                    )
                if input_shape[0] != 3 and weights == "imagenet":
                    raise ValueError(
                        "The input must have 3 channels; Received "
                        f"`input_shape={input_shape}`"
                    )
                if (input_shape[1] is not None and input_shape[1] < min_size) or (
                    input_shape[2] is not None and input_shape[2] < min_size
                ):
                    raise ValueError(
                        f"Input size must be at least {min_size}x{min_size}; "
                        f"Received: input_shape={input_shape}"
                    )
        else:
            if input_shape is not None:
                if len(input_shape) != 3:
                    raise ValueError(
                        "`input_shape` must be a tuple of three integers."
                    )
                if input_shape[-1] != 3 and weights == "imagenet":
                    raise ValueError(
                        "The input must have 3 channels; Received "
                        f"`input_shape={input_shape}`"
                    )
                if (input_shape[0] is not None and input_shape[0] < min_size) or (
                    input_shape[1] is not None and input_shape[1] < min_size
                ):
                    raise ValueError(
                        f"Input size must be at least {min_size}x{min_size}; "
                        f"Received: input_shape={input_shape}"
                    )
    else:
        if require_flatten:
            input_shape = default_shape
        else:
            if data_format == "channels_first":
                input_shape = (3, None, None)
            else:
                input_shape = (None, None, 3)
    if require_flatten:
        if None in input_shape:
            raise ValueError(
                "If `include_top` is True, you should specify a static "
                f"`input_shape`. Received: input_shape={input_shape}"
            )
    return input_shape

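
# Illustrative sketch (hypothetical helper, not part of the public API): how an
# application builder might call `obtain_input_shape` to validate a
# user-supplied `input_shape`. Defined but never called at import time.
def _example_obtain_input_shape():
    # No shape given and a classifier head requested: the default static shape
    # is returned, here (224, 224, 3) for "channels_last".
    with_top = obtain_input_shape(
        input_shape=None,
        default_size=224,
        min_size=32,
        data_format="channels_last",
        require_flatten=True,
        weights="imagenet",
    )
    # Custom shape without the classifier head: only the channel count and the
    # minimum spatial size are checked, so (160, 160, 3) is accepted as-is.
    custom = obtain_input_shape(
        input_shape=(160, 160, 3),
        default_size=224,
        min_size=32,
        data_format="channels_last",
        require_flatten=False,
        weights="imagenet",
    )
    return with_top, custom
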
def correct_pad(inputs, kernel_size):
    """Returns a tuple for zero-padding for 2D convolution with downsampling.

    Args:
      inputs: Input tensor.
      kernel_size: An integer or tuple/list of 2 integers.

    Returns:
      A tuple.
    """
    img_dim = 2 if backend.image_data_format() == "channels_first" else 1
    input_size = backend.int_shape(inputs)[img_dim : img_dim + 2]
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)
    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)
    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    return (
        (correct[0] - adjust[0], correct[0]),
        (correct[1] - adjust[1], correct[1]),
    )

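# Illustrative sketch (hypothetical helper, not part of the public API):
# `correct_pad` computes the asymmetric padding that models such as MobileNetV2
# apply before a strided convolution, assuming the default "channels_last"
# image data format. Defined but never called at import time.
def _example_correct_pad():
    from keras import layers  # imported locally so the sketch stays self-contained

    images = layers.Input(shape=(224, 224, 3))
    # For an even 224x224 input and a 3x3 kernel this evaluates to
    # ((0, 1), (0, 1)): pad one row/column on the bottom/right only.
    padding = correct_pad(images, kernel_size=3)
    return layers.ZeroPadding2D(padding=padding)(images)
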
def validate_activation(classifier_activation, weights):
    """Validates that the classifier_activation is compatible with the weights.

    Args:
      classifier_activation: str or callable activation function
      weights: The pretrained weights to load.

    Raises:
      ValueError: if an activation other than `None` or `softmax` is used with
        pretrained weights.
    """
    if weights is None:
        return

    classifier_activation = activations.get(classifier_activation)
    if classifier_activation not in {
        activations.get("softmax"),
        activations.get(None),
    }:
        raise ValueError(
            "Only `None` and `softmax` activations are allowed "
            "for the `classifier_activation` argument when using "
            "pretrained weights, with `include_top=True`; "
            f"Received: classifier_activation={classifier_activation}"
        )
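
# Illustrative sketch (hypothetical helper, not part of the public API): the
# behaviour of `validate_activation` for a few classifier heads. Defined but
# never called at import time.
def _example_validate_activation():
    validate_activation("softmax", weights="imagenet")  # accepted
    validate_activation(None, weights="imagenet")  # accepted
    validate_activation("sigmoid", weights=None)  # accepted: no pretrained weights
    try:
        validate_activation("sigmoid", weights="imagenet")
    except ValueError as exc:
        # Any other activation combined with pretrained weights raises.
        return exc
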
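
# Illustrative sketch (hypothetical helper, not part of the public API):
# decoding a synthetic prediction vector. `decode_predictions` downloads the
# ImageNet class-index file on first use, so running this assumes network
# access or a warm Keras cache. Defined but never called at import time.
def _example_decode_predictions():
    preds = np.zeros((1, 1000), dtype="float32")
    preds[0, 281] = 0.9  # in the standard mapping, index 281 is a cat class
    preds[0, 285] = 0.1
    # Each entry is a `(class_name, class_description, score)` tuple, e.g.
    # roughly [("n02123045", "tabby", 0.9), ("n02124075", "Egyptian_cat", 0.1)].
    return decode_predictions(preds, top=2)[0]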