"""Utilities related to image handling."""

import io
import pathlib
import warnings

import numpy as np
import tensorflow.compat.v2 as tf

from keras import backend

from tensorflow.python.util.tf_export import keras_export

try:
    from PIL import Image as pil_image

    try:
        pil_image_resampling = pil_image.Resampling
    except AttributeError:
        # Pillow < 9.1 exposes the resampling filters directly on the module.
        pil_image_resampling = pil_image
except ImportError:
    pil_image = None
    pil_image_resampling = None


if pil_image_resampling is not None:
    _PIL_INTERPOLATION_METHODS = {
        "nearest": pil_image_resampling.NEAREST,
        "bilinear": pil_image_resampling.BILINEAR,
        "bicubic": pil_image_resampling.BICUBIC,
        "hamming": pil_image_resampling.HAMMING,
        "box": pil_image_resampling.BOX,
        "lanczos": pil_image_resampling.LANCZOS,
    }

ResizeMethod = tf.image.ResizeMethod

_TF_INTERPOLATION_METHODS = {
    "bilinear": ResizeMethod.BILINEAR,
    "nearest": ResizeMethod.NEAREST_NEIGHBOR,
    "bicubic": ResizeMethod.BICUBIC,
    "area": ResizeMethod.AREA,
    "lanczos3": ResizeMethod.LANCZOS3,
    "lanczos5": ResizeMethod.LANCZOS5,
    "gaussian": ResizeMethod.GAUSSIAN,
    "mitchellcubic": ResizeMethod.MITCHELLCUBIC,
}


@keras_export("keras.preprocessing.image.smart_resize", v1=[])
def smart_resize(x, size, interpolation="bilinear"):
    """Resize images to a target size without aspect ratio distortion.

    Warning: `tf.keras.preprocessing.image.smart_resize` is not recommended for
    new code. Prefer `tf.keras.layers.Resizing`, which provides the same
    functionality as a preprocessing layer and adds `tf.RaggedTensor` support.
    See the [preprocessing layer guide](
    https://www.tensorflow.org/guide/keras/preprocessing_layers)
    for an overview of preprocessing layers.
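
    For instance, an equivalent preprocessing-layer setup might look like this
    (a sketch; `crop_to_aspect_ratio=True` approximates the center-crop
    behavior of this function):

    ```python
    resize = tf.keras.layers.Resizing(200, 200, crop_to_aspect_ratio=True)
    ds = ds.map(resize)
    ```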

    TensorFlow image datasets typically yield images that each have a different
    size. However, these images need to be batched before they can be
    processed by Keras layers. To be batched, images need to share the same
    height and width.

    You could simply do:

    ```python
    size = (200, 200)
    ds = ds.map(lambda img: tf.image.resize(img, size))
    ```

    However, if you do this, you distort the aspect ratio of your images, since
    in general they do not all have the same aspect ratio as `size`. This is
    fine in many cases, but not always (e.g. for GANs this can be a problem).

    Note that passing the argument `preserve_aspect_ratio=True` to `resize`
    will preserve the aspect ratio, but at the cost of no longer respecting the
    provided target size. Because `tf.image.resize` doesn't crop images,
    your output images will still have different sizes.

    This calls for:

    ```python
    size = (200, 200)
    ds = ds.map(lambda img: smart_resize(img, size))
    ```

    Your output images will actually be `(200, 200)`, and will not be distorted.
    Instead, the parts of the image that do not fit within the target size
    get cropped out.

    The resizing process is:

    1. Take the largest centered crop of the image that has the same aspect
    ratio as the target size. For instance, if `size=(200, 200)` and the input
    image has size `(340, 500)`, we take a crop of `(340, 340)` centered along
    the width.
    2. Resize the cropped image to the target size. In the example above,
    we resize the `(340, 340)` crop to `(200, 200)`.
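
    For example, with illustrative shapes:

    ```python
    import numpy as np

    batch = np.random.random((8, 340, 500, 3))  # batch of 340x500 RGB images
    out = smart_resize(batch, size=(200, 200))
    # Each image is center-cropped to (340, 340), then resized, so
    # `out.shape` should be (8, 200, 200, 3).
    ```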

    Args:
      x: Input image or batch of images (as a tensor or NumPy array). Must be in
        format `(height, width, channels)` or `(batch_size, height, width,
        channels)`.
      size: Tuple of `(height, width)` integers. Target size.
      interpolation: String, interpolation to use for resizing. Defaults to
        `'bilinear'`. Supports `bilinear`, `nearest`, `bicubic`, `area`,
        `lanczos3`, `lanczos5`, `gaussian`, `mitchellcubic`.

    Returns:
      Array with shape `(size[0], size[1], channels)`. If the input image was a
      NumPy array, the output is a NumPy array, and if it was a TF tensor,
      the output is a TF tensor.
    """
    if len(size) != 2:
        raise ValueError(
            f"Expected `size` to be a tuple of 2 integers, but got: {size}."
        )
    img = tf.convert_to_tensor(x)
    if img.shape.rank is not None:
        if img.shape.rank < 3 or img.shape.rank > 4:
            raise ValueError(
                "Expected an image array with shape `(height, width, "
                "channels)`, or `(batch_size, height, width, channels)`, but "
                f"got input with incorrect rank, of shape {img.shape}."
            )
    shape = tf.shape(img)
    height, width = shape[-3], shape[-2]
    target_height, target_width = size
    if img.shape.rank is not None:
        static_num_channels = img.shape[-1]
    else:
        static_num_channels = None

    crop_height = tf.cast(
        tf.cast(width * target_height, "float32") / target_width, "int32"
    )
    crop_width = tf.cast(
        tf.cast(height * target_width, "float32") / target_height, "int32"
    )

    # Fall back to the input height / width if the computed crop is not
    # smaller.
    crop_height = tf.minimum(height, crop_height)
    crop_width = tf.minimum(width, crop_width)

    crop_box_hstart = tf.cast(
        tf.cast(height - crop_height, "float32") / 2, "int32"
    )
    crop_box_wstart = tf.cast(
        tf.cast(width - crop_width, "float32") / 2, "int32"
    )

    if img.shape.rank == 4:
        crop_box_start = tf.stack([0, crop_box_hstart, crop_box_wstart, 0])
        crop_box_size = tf.stack([-1, crop_height, crop_width, -1])
    else:
        crop_box_start = tf.stack([crop_box_hstart, crop_box_wstart, 0])
        crop_box_size = tf.stack([crop_height, crop_width, -1])

    img = tf.slice(img, crop_box_start, crop_box_size)
    img = tf.image.resize(images=img, size=size, method=interpolation)
    # Re-apply the static channel count whenever it is known.
    if img.shape.rank is not None:
        if img.shape.rank == 4:
            img.set_shape((None, None, None, static_num_channels))
        if img.shape.rank == 3:
            img.set_shape((None, None, static_num_channels))

    if isinstance(x, np.ndarray):
        return img.numpy()
    return img


def get_interpolation(interpolation):
    interpolation = interpolation.lower()
    if interpolation not in _TF_INTERPOLATION_METHODS:
        raise NotImplementedError(
            "Value not recognized for `interpolation`: {}. Supported values "
            "are: {}".format(interpolation, _TF_INTERPOLATION_METHODS.keys())
        )
    return _TF_INTERPOLATION_METHODS[interpolation]


@keras_export(
    "keras.utils.array_to_img", "keras.preprocessing.image.array_to_img"
)
def array_to_img(x, data_format=None, scale=True, dtype=None):
    """Converts a 3D Numpy array to a PIL Image instance.

    Usage:

    ```python
    from PIL import Image
    img = np.random.random(size=(100, 100, 3))
    pil_img = tf.keras.preprocessing.image.array_to_img(img)
    ```


    Args:
        x: Input data, in any form that can be converted to a Numpy array.
        data_format: Image data format, can be either `"channels_first"` or
          `"channels_last"`. Defaults to `None`, in which case the global
          setting `tf.keras.backend.image_data_format()` is used (unless you
          changed it, it defaults to `"channels_last"`).
        scale: Whether to rescale the image such that minimum and maximum values
          are 0 and 255 respectively. Defaults to `True`.
        dtype: Dtype to use. Defaults to `None`, in which case the global
          setting `tf.keras.backend.floatx()` is used (unless you changed it,
          it defaults to `"float32"`).

    Returns:
        A PIL Image instance.

    Raises:
        ImportError: if PIL is not available.
        ValueError: if invalid `x` or `data_format` is passed.
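
    For instance (an illustrative sketch; the values are random), a
    `"channels_first"` array is converted the same way:

    ```python
    import numpy as np

    x = np.random.random((3, 64, 64))  # channels-first input
    pil_img = tf.keras.preprocessing.image.array_to_img(
        x, data_format="channels_first"
    )
    # `pil_img` is a 64x64 RGB PIL Image.
    ```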
    """
    if data_format is None:
        data_format = backend.image_data_format()
    if dtype is None:
        dtype = backend.floatx()
    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. "
            "The use of `array_to_img` requires PIL."
        )
    x = np.asarray(x, dtype=dtype)
    if x.ndim != 3:
        raise ValueError(
            "Expected image array to have rank 3 (single image). "
            f"Got array with shape: {x.shape}"
        )

    if data_format not in {"channels_first", "channels_last"}:
        raise ValueError(f"Invalid data_format: {data_format}")

    # The Numpy array x has format (height, width, channel) or
    # (channel, height, width), but the target PIL image has
    # format (width, height, channel).
    if data_format == "channels_first":
        x = x.transpose(1, 2, 0)
    if scale:
        x = x - np.min(x)
        x_max = np.max(x)
        if x_max != 0:
            x /= x_max
        x *= 255
    if x.shape[2] == 4:
        # RGBA
        return pil_image.fromarray(x.astype("uint8"), "RGBA")
    elif x.shape[2] == 3:
        # RGB
        return pil_image.fromarray(x.astype("uint8"), "RGB")
    elif x.shape[2] == 1:
        # Grayscale
        if np.max(x) > 255:
            # 32-bit signed integer grayscale image. PIL mode "I".
            return pil_image.fromarray(x[:, :, 0].astype("int32"), "I")
        return pil_image.fromarray(x[:, :, 0].astype("uint8"), "L")
    else:
        raise ValueError(f"Unsupported channel number: {x.shape[2]}")


@keras_export(
    "keras.utils.img_to_array", "keras.preprocessing.image.img_to_array"
)
def img_to_array(img, data_format=None, dtype=None):
    """Converts a PIL Image instance to a Numpy array.

    Usage:

    ```python
    from PIL import Image
    img_data = np.random.random(size=(100, 100, 3))
    img = tf.keras.preprocessing.image.array_to_img(img_data)
    array = tf.keras.preprocessing.image.img_to_array(img)
    ```


    Args:
        img: Input PIL Image instance.
        data_format: Image data format, can be either `"channels_first"` or
          `"channels_last"`. Defaults to `None`, in which case the global
          setting `tf.keras.backend.image_data_format()` is used (unless you
          changed it, it defaults to `"channels_last"`).
        dtype: Dtype to use. Defaults to `None`, in which case the global setting
          `tf.keras.backend.floatx()` is used (unless you changed it, it
          defaults to `"float32"`).

    Returns:
        A 3D Numpy array.

    Raises:
        ValueError: if invalid `img` or `data_format` is passed.
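
    The returned array mirrors the requested data format (shapes shown are
    illustrative):

    ```python
    from PIL import Image

    img = Image.new("RGB", (100, 100))
    arr = tf.keras.preprocessing.image.img_to_array(img)
    # arr.shape == (100, 100, 3)
    arr_cf = tf.keras.preprocessing.image.img_to_array(
        img, data_format="channels_first"
    )
    # arr_cf.shape == (3, 100, 100)
    ```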
    """
    if data_format is None:
        data_format = backend.image_data_format()
    if dtype is None:
        dtype = backend.floatx()
    if data_format not in {"channels_first", "channels_last"}:
        raise ValueError(f"Unknown data_format: {data_format}")
    # The Numpy array x has format (height, width, channel) or
    # (channel, height, width), but the original PIL image has
    # format (width, height, channel).
    x = np.asarray(img, dtype=dtype)
    if len(x.shape) == 3:
        if data_format == "channels_first":
            x = x.transpose(2, 0, 1)
    elif len(x.shape) == 2:
        if data_format == "channels_first":
            x = x.reshape((1, x.shape[0], x.shape[1]))
        else:
            x = x.reshape((x.shape[0], x.shape[1], 1))
    else:
        raise ValueError(f"Unsupported image shape: {x.shape}")
    return x


@keras_export("keras.utils.save_img", "keras.preprocessing.image.save_img")
def save_img(
    path, x, data_format=None, file_format=None, scale=True, **kwargs
):
    """Saves an image stored as a Numpy array to a path or file object.

    Args:
        path: Path or file object.
        x: Numpy array.
        data_format: Image data format, either `"channels_first"` or
          `"channels_last"`.
        file_format: Optional file format override. If omitted, the format to
          use is determined from the filename extension. If a file object was
          used instead of a filename, this parameter should always be used.
        scale: Whether to rescale image values to be within `[0, 255]`.
        **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
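
    Usage (a minimal sketch; the file name is illustrative):

    ```python
    import numpy as np

    x = np.random.random((100, 100, 3))
    tf.keras.preprocessing.image.save_img("sample.png", x)
    ```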
    """
    if data_format is None:
        data_format = backend.image_data_format()
    img = array_to_img(x, data_format=data_format, scale=scale)
    if img.mode == "RGBA" and (file_format == "jpg" or file_format == "jpeg"):
        warnings.warn(
            "The JPG format does not support RGBA images, converting to RGB."
        )
        img = img.convert("RGB")
    img.save(path, format=file_format, **kwargs)


@keras_export("keras.utils.load_img", "keras.preprocessing.image.load_img")
def load_img(
    path,
    grayscale=False,
    color_mode="rgb",
    target_size=None,
    interpolation="nearest",
    keep_aspect_ratio=False,
):
    """Loads an image into PIL format.

    Usage:

    ```
    image = tf.keras.preprocessing.image.load_img(image_path)
    input_arr = tf.keras.preprocessing.image.img_to_array(image)
    input_arr = np.array([input_arr])  # Convert single image to a batch.
    predictions = model.predict(input_arr)
    ```

    Args:
        path: Path to image file.
        grayscale: DEPRECATED; use `color_mode="grayscale"` instead.
        color_mode: One of `"grayscale"`, `"rgb"`, `"rgba"`. Default: `"rgb"`.
          The desired image format.
        target_size: Either `None` (default to original size) or tuple of ints
          `(img_height, img_width)`.
        interpolation: Interpolation method used to resample the image if the
          target size is different from that of the loaded image. Supported
          methods are `"nearest"`, `"bilinear"`, and `"bicubic"`. If PIL version
          1.1.3 or newer is installed, `"lanczos"` is also supported. If PIL
          version 3.4.0 or newer is installed, `"box"` and `"hamming"` are also
          supported. By default, `"nearest"` is used.
        keep_aspect_ratio: Boolean, whether to resize images to a target
                size without aspect ratio distortion. The image is cropped in
                the center with target aspect ratio before resizing (see the
                example below).

    Returns:
        A PIL Image instance.

    Raises:
        ImportError: if PIL is not available.
        ValueError: if interpolation method is not supported.
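
    For example, loading a center-cropped square thumbnail (the path is
    illustrative):

    ```
    image = tf.keras.preprocessing.image.load_img(
        "photo.jpg", target_size=(200, 200), keep_aspect_ratio=True
    )
    # `image` is a 200x200 PIL Image, center-cropped before resizing.
    ```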
    """
    if grayscale:
        warnings.warn(
            'grayscale is deprecated. Please use color_mode = "grayscale"'
        )
        color_mode = "grayscale"
    if pil_image is None:
        raise ImportError(
            "Could not import PIL.Image. The use of `load_img` requires PIL."
        )
    if isinstance(path, io.BytesIO):
        img = pil_image.open(path)
    elif isinstance(path, (pathlib.Path, bytes, str)):
        if isinstance(path, pathlib.Path):
            path = str(path.resolve())
        with open(path, "rb") as f:
            img = pil_image.open(io.BytesIO(f.read()))
    else:
        raise TypeError(
            f"path should be path-like or io.BytesIO, not {type(path)}"
        )
    if color_mode == "grayscale":
        # If the image is not already an 8-bit, 16-bit or 32-bit grayscale
        # image, convert it to an 8-bit grayscale image.
        if img.mode not in ("L", "I;16", "I"):
            img = img.convert("L")
    elif color_mode == "rgba":
        if img.mode != "RGBA":
            img = img.convert("RGBA")
    elif color_mode == "rgb":
        if img.mode != "RGB":
            img = img.convert("RGB")
    else:
        raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
    if target_size is not None:
        width_height_tuple = (target_size[1], target_size[0])
        if img.size != width_height_tuple:
            if interpolation not in _PIL_INTERPOLATION_METHODS:
                raise ValueError(
                    "Invalid interpolation method {} specified. Supported "
                    "methods are {}".format(
                        interpolation,
                        ", ".join(_PIL_INTERPOLATION_METHODS.keys()),
                    )
                )
            resample = _PIL_INTERPOLATION_METHODS[interpolation]

            if keep_aspect_ratio:
                width, height = img.size
                target_width, target_height = width_height_tuple

                crop_height = (width * target_height) // target_width
                crop_width = (height * target_width) // target_height

                # Fall back to the input height / width if the crop is not
                # smaller.
                crop_height = min(height, crop_height)
                crop_width = min(width, crop_width)

                crop_box_hstart = (height - crop_height) // 2
                crop_box_wstart = (width - crop_width) // 2
                crop_box_wend = crop_box_wstart + crop_width
                crop_box_hend = crop_box_hstart + crop_height
                crop_box = [
                    crop_box_wstart,
                    crop_box_hstart,
                    crop_box_wend,
                    crop_box_hend,
                ]
                img = img.resize(width_height_tuple, resample, box=crop_box)
            else:
                img = img.resize(width_height_tuple, resample)
    return img