a
    Sic                     @   s  d Z ddlZddlZddlm  mZ ddlmZ ddlm	Z	 ddl
mZ ddlmZ ddlmZ ddlmZ dd	lmZ d
ZddddddddZddddddddddddddddddddddddddddddddddddddddd d!dddddd!dd"d#dddgddddddddddd$ddddddddd$dd%ddddddd&d%d!ddddddd'd!d(ddddddd)d(d*ddddddd$d*d+dddddgddd,d,ddddddd&d,dddddddd&dd-ddddddd.d-d/ddddddd0d/d1ddddddd2d1d3ddddddd&d3d4dddddgddd,d5dddddddd5d,dddddddd,dddddddddd-ddddddd$d-d6ddddddd7d6d/dddddgddd,d5dddddddd5d,dddddddd,dddddddddd-ddddddd$d-d6ddddddd7d6d/dddddgddd,d5dddddddd5d,dddddddd,dddddddddd-ddddddd$d-d6ddddddd7d6d/dddddgddd,d5dddddddd5d,dddddddd,dddddddddd-ddddddd$d-d6ddddddd7d6d/dddddgd8Zd9d:d;d<d=d>Zd9d?d;d@d=d>ZdAZdBdC ZdDdE ZdzeeedIdJdKZd{eeedIdLdMZd|dUdVZedWdXd}dYdZZ ed[d\d~d]d^Z!ed_d`ddadbZ"edcddddedfZ#edgdhddidjZ$edkdlddmdnZ%edodpddqdrZ&ej'dZdse _ ej'd^dse!_ ej'dbdse"_ ej'dfdse#_ ej'djdse$_ ej'dndse%_ ej'drdse&_ edtddudvZ(edwddxdyZ)ej)j e)_ dS )zEfficientNet V2 models for Keras.

Reference:
- [EfficientNetV2: Smaller Models and Faster Training](
    https://arxiv.org/abs/2104.00298) (ICML 2021)
    N)backend)layers)imagenet_utils)training)
data_utils)layer_utils)keras_exportzMhttps://storage.googleapis.com/tensorflow/keras-applications/efficientnet_v2/)Z 21ecbf6da12460d5c40bb2f29ceb2188Z 893217f2bb855e2983157299931e43ff)Z 069f0534ff22adf035c89e2d9547a9dcZ 0e80663031ca32d657f9caa404b6ec37)Z 424e49f28180edbde1e94797771950a7Z 1dfe2e7a5d45b6632553a8961ea609eb)Z 1f1fc43bd98a6e4fd8fdfd551e02c7a0Z f6abf7b5849ac99a89b50dd3fd532856)Z e1d88a8495beba45748fedd0cecbe016Z af0682fb74e8c54910f2d4393339c070)Z a3bf6aa3276309f4fc6a34aa114c95cdZ 1b8dc055df72dde80d614482840fe342)Z 27e6d408b53c7ebc868fefa357689935Z b0b66b5c863aef5b46e8608fe1711615)b0b1b2b3z-sz-mz-l                    )kernel_size
num_repeatinput_filtersoutput_filtersexpand_ratiose_ratiostrides	conv_type   0   @   )r   r   r   r   r   r   r   r         g      ?	               P               i0  i       `   
                 i     p      )efficientnetv2-sefficientnetv2-mefficientnetv2-lefficientnetv2-b0efficientnetv2-b1efficientnetv2-b2efficientnetv2-b3VarianceScalingg       @fan_outtruncated_normal)scalemodedistribution)
class_nameconfiggUUUUUU?uniforma  Instantiates the {name} architecture.

  Reference:
  - [EfficientNetV2: Smaller Models and Faster Training](
      https://arxiv.org/abs/2104.00298) (ICML 2021)

  This function returns a Keras image classification model,
  optionally loaded with weights pre-trained on ImageNet.

  For image classification use cases, see
  [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).

  For transfer learning use cases, make sure to read the
  [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).

  Note: each Keras Application expects a specific kind of input preprocessing.
  For EfficientNetV2, by default input preprocessing is included as a part of
  the model (as a `Rescaling` layer), and thus
  `tf.keras.applications.efficientnet_v2.preprocess_input` is actually a
  pass-through function. In this use case, EfficientNetV2 models expect their
  inputs to be float tensors of pixels with values in the [0-255] range.
  At the same time, preprocessing as a part of the model (i.e. `Rescaling`
  layer) can be disabled by setting `include_preprocessing` argument to False.
  With preprocessing disabled EfficientNetV2 models expect their inputs to be
  float tensors of pixels with values in the [-1, 1] range.

  Args:
    include_top: Boolean, whether to include the fully-connected
      layer at the top of the network. Defaults to True.
    weights: One of `None` (random initialization),
      `"imagenet"` (pre-training on ImageNet),
      or the path to the weights file to be loaded. Defaults to `"imagenet"`.
    input_tensor: Optional Keras tensor
      (i.e. output of `layers.Input()`)
      to use as image input for the model.
    input_shape: Optional shape tuple, only to be specified
      if `include_top` is False.
      It should have exactly 3 inputs channels.
    pooling: Optional pooling mode for feature extraction
      when `include_top` is `False`. Defaults to None.
      - `None` means that the output of the model will be
          the 4D tensor output of the
          last convolutional layer.
      - `"avg"` means that global average pooling
          will be applied to the output of the
          last convolutional layer, and thus
          the output of the model will be a 2D tensor.
      - `"max"` means that global max pooling will
          be applied.
    classes: Optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified. Defaults to 1000 (number of
      ImageNet classes).
    classifier_activation: A string or callable. The activation function to use
      on the `"top"` layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
      Defaults to `"softmax"`.
      When loading pretrained weights, `classifier_activation` can only
      be `None` or `"softmax"`.

  Returns:
    A `keras.Model` instance.
c                 C   s6   | |9 } |p|}t |t| |d  | | }t|S )z2Round number of filters based on depth multiplier.r   )maxint)filterswidth_coefficient	min_depthdepth_divisorZminimum_depthnew_filters rK   ^/var/www/html/django/DPS/env/lib/python3.9/site-packages/keras/applications/efficientnet_v2.pyround_filtersf  s    rM   c                 C   s   t t||  S )z2Round number of repeats based on depth multiplier.)rE   mathceilrepeatsdepth_coefficientrK   rK   rL   round_repeatsq  s    rS   ?swish皙?)r   r   survival_probabilityc
                    sJ   t  dkrdnddu r&t d 	
fdd}
|
S )z2MBConv block: Mobile Inverted Residual Bottleneck.channels_lastr   r   Nblock0c              
      s   }dkrbt j|ddtdddd d| }t jd d|}t j d	 d
|}n| }t j	tdddd d|}t jd d|}t j d d
|}d  k rdkr n n0tdt }t jd d
|}dkr|ddf}n
dd|f}t j	|d d
|}t j|dd td d|}t j|dddtd d|}t j
||gd d
}t jddtdddd d|}t jd d|}	dkr kr 
rt j
dd d|}t j|| gd d
}|S )Nr   samerX   Fexpand_convrF   r   r   kernel_initializerpaddingdata_formatuse_biasname	expand_bnaxismomentumra   expand_activationra   Zdwconv2)r   r   depthwise_initializerr^   r_   r`   ra   bn
activationr   
se_squeeze
se_reshape	se_reducer^   rj   r]   ra   sigmoid	se_expand	se_exciteproject_conv
project_bnNr   r   r   dropnoise_shapera   add)r   Conv2DCONV_KERNEL_INITIALIZERBatchNormalization
ActivationDepthwiseConv2DrD   rE   GlobalAveragePooling2DReshapemultiplyDropoutrx   inputsrF   x
filters_sesese_shaperj   bn_axisbn_momentumr   r   r   ra   r   r   r   rW   rK   rL   apply  s    	
	


		

zMBConvBlock.<locals>.applyr   image_data_formatget_uidr   r   r   r   r   r   r   rj   rW   ra   r   rK   r   rL   MBConvBlockv  s
    
 ^r   c
                    sJ   t  dkrdnddu r&t d 	
fdd}
|
S )zQFused MBConv Block: Fusing the proj conv1x1 and depthwise_conv into a
    conv2d.rX   r   r   NrY   c              
      s   }dkrbt j|	tdddd d| }t jd d|}t j d	 d
|}n| }d  k r|dkr,n ntdt }t jd d|}dkr|ddf}n
dd|f}t j|d d|}t j|dd td d|}t j|dddtd d|}t j	||gd d}t jdkr@dndkrPdn	tddd d|}t jd d|}dkrt j d d
|}	dkrkr
rt j

dd d|}t j|| gd d}|S )Nr   rX   rZ   Fr[   )r   r   r]   r_   r^   r`   ra   rb   rc   rf   rj   ra   r   rk   rg   rl   rm   rn   ro   rp   rq   rr   )r   r   r]   r^   r`   ra   rs   Zproject_activationrt   ru   rv   rx   )r   ry   rz   r{   r|   rD   rE   r~   r   r   r   rx   r   r   rK   rL   r     s    	


		

zFusedMBConvBlock.<locals>.applyr   r   rK   r   rL   FusedMBConvBlock  s
    
 Qr   皙?defaultefficientnetv2Timagenet  softmaxc           &      C   s,  |	dkrt |
 }	|dv s4tjj|s4td| |dkrV|rV|dkrVtd| tj||dt	 ||d}|d	u rt
j|d
}nt|st
j||d}n|}t	 dkrdnd}|}|r.||d  }|
dd dr|dkrt
jdd|}t
jg dg d|d|}nt
jddd|}t|	d d | ||d}t
j|ddtdddd |}t
j||d!d"|}t
j|d#d$|}t|	}	d}ttd%d& |	D }t|	D ]\}}|d' dksJ t|d | ||d|d< t|d( | ||d|d(< ttd)|d* }t|d'|d+}t|D ]d}|dkrNd|d,< |d( |d< |f |||| | d- |d t!|d. d/||}|d7 }q,qtd0| ||d}t
j|ddtdddd1d2|}t
j||d3d"|}t
j|d4d5|}|rHt
j"d6d$|}|dkrt
j#|d7d$|}t$|| t
j%||t&t'dd8d9|}n6|d:krdt
j"d6d$|}n|d;kr~t
j(d<d$|}|d	urt)*|} n|} t+j,| ||
d$}!|dkr|rd=}"t-|
d>d	  d }#nd?}"t-|
d>d	  d }#|
|" }$t.j/|$t0|$ d@|#dA}%|!1|% n|d	ur(|!1| |!S )Ba
  Instantiates the EfficientNetV2 architecture using given scaling
    coefficients.

    Args:
      width_coefficient: float, scaling coefficient for network width.
      depth_coefficient: float, scaling coefficient for network depth.
      default_size: integer, default input image size.
      dropout_rate: float, dropout rate before final classifier layer.
      drop_connect_rate: float, dropout rate at skip connections.
      depth_divisor: integer, a unit of network width.
      min_depth: integer, minimum number of filters.
      bn_momentum: float. Momentum parameter for Batch Normalization layers.
      activation: activation function.
      blocks_args: list of dicts, parameters to construct block modules.
      model_name: string, model name.
      include_top: whether to include the fully-connected layer at the top of
        the network.
      weights: one of `None` (random initialization), `"imagenet"` (pre-training
        on ImageNet), or the path to the weights file to be loaded.
      input_tensor: optional Keras tensor (i.e. output of `layers.Input()`) or
        numpy array to use as image input for the model.
      input_shape: optional shape tuple, only to be specified if `include_top`
        is False. It should have exactly 3 inputs channels.
      pooling: optional pooling mode for feature extraction when `include_top`
        is `False`.
        - `None` means that the output of the model will be the 4D tensor output
          of the last convolutional layer.
        - "avg" means that global average pooling will be applied to the output
          of the last convolutional layer, and thus the output of the model will
          be a 2D tensor.
        - `"max"` means that global max pooling will be applied.
      classes: optional number of classes to classify images into, only to be
        specified if `include_top` is True, and if no `weights` argument is
        specified.
      classifier_activation: A string or callable. The activation function to
        use on the `"top"` layer. Ignored unless `include_top=True`. Set
        `classifier_activation=None` to return the logits of the `"top"` layer.
      include_preprocessing: Boolean, whether to include the preprocessing layer
        (`Rescaling`) at the bottom of the network. Defaults to `True`.

    Returns:
      A `keras.Model` instance.

    Raises:
      ValueError: in case of invalid argument for `weights`,
        or invalid input shape.
      ValueError: if `classifier_activation` is not `"softmax"` or `None` when
        using a pretrained top layer.
    r   >   Nr   zThe `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.Received: weights=r   r   ziIf using `weights` as `'imagenet'` with `include_top` as true, `classes` should be 1000Received: classes=r)   )default_sizemin_sizer_   require_flattenweightsN)shape)tensorr   rX   r   r   -bgp?)r>   )g
ףp=
?gv/?gCl?)gg٪?g.?gQ?)meanvariancerd   g      ?)r>   offsetr   r   )rF   rG   rH   rI   r   rZ   F	stem_conv)rF   r   r   r]   r^   r`   ra   stem_bnrc   stem_activationrg   c                 s   s   | ]}|d  V  qdS )r   NrK   ).0argsrK   rK   rL   	<genexpr>      z!EfficientNetV2.<locals>.<genexpr>r   r   )r   r   r   rP   r   z
block{}{}_a   )rj   r   rW   ra   i   top_convr\   top_bntop_activationr   avg_pooltop_dropoutpredictions)rj   r]   bias_initializerra   avgrD   max_poolz.h5z	_notop.h5models)cache_subdir	file_hash)2DEFAULT_BLOCKS_ARGStfiogfileexists
ValueErrorr   obtain_input_shaper   r   r   Inputis_keras_tensorsplit
startswith	RescalingNormalizationrM   ry   rz   r{   r|   copydeepcopyfloatsum	enumerater   r   poprS   rangeformatchrr~   r   validate_activationDenseDENSE_KERNEL_INITIALIZERconstant_initializerGlobalMaxPooling2Dr   get_source_inputsr   ModelWEIGHTS_HASHESr   get_fileBASE_WEIGHTS_PATHload_weights)&rG   rR   r   dropout_ratedrop_connect_raterI   rH   r   rj   blocks_args
model_nameinclude_topr   input_tensorinput_shapepoolingclassesclassifier_activationinclude_preprocessing	img_inputr   r   num_channelsZstem_filtersr   blocksir   blockrQ   jZtop_filtersr   modelfile_suffixr   	file_nameweights_pathrK   rK   rL   EfficientNetV2P  sN   G	
 
	





	







r   z3keras.applications.efficientnet_v2.EfficientNetV2B0z#keras.applications.EfficientNetV2B0c                 C   s    t dddd| |||||||dS )N      ?r.   r7   rG   rR   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   rK   rK   rL   EfficientNetV2B0Z  s    r   z3keras.applications.efficientnet_v2.EfficientNetV2B1z#keras.applications.EfficientNetV2B1c                 C   s    t dddd| |||||||dS )Nr   皙?   r8   r   r   r   rK   rK   rL   EfficientNetV2B1x  s    r   z3keras.applications.efficientnet_v2.EfficientNetV2B2z#keras.applications.EfficientNetV2B2c                 C   s    t dddd| |||||||dS )Nr   333333?i  r9   r   r   r   rK   rK   rL   EfficientNetV2B2  s    r   z3keras.applications.efficientnet_v2.EfficientNetV2B3z#keras.applications.EfficientNetV2B3c                 C   s    t dddd| |||||||dS )Nr   gffffff?i,  r:   r   r   r   rK   rK   rL   EfficientNetV2B3  s    r   z2keras.applications.efficientnet_v2.EfficientNetV2Sz"keras.applications.EfficientNetV2Sc                 C   s    t dddd| |||||||dS )Nr   r0   r4   r   r   r   rK   rK   rL   EfficientNetV2S  s    r   z2keras.applications.efficientnet_v2.EfficientNetV2Mz"keras.applications.EfficientNetV2Mc                 C   s    t dddd| |||||||dS )Nr     r5   r   r   r   rK   rK   rL   EfficientNetV2M  s    r   z2keras.applications.efficientnet_v2.EfficientNetV2Lz"keras.applications.EfficientNetV2Lc                 C   s    t dddd| |||||||dS )Nr   r   r6   r   r   r   rK   rK   rL   EfficientNetV2L  s    r   rg   z3keras.applications.efficientnet_v2.preprocess_inputc                 C   s   | S )a  A placeholder method for backward compatibility.

    The preprocessing logic has been included in the EfficientNetV2 model
    implementation. Users are no longer required to call this method to
    normalize the input data. This method does nothing and only kept as a
    placeholder to align the API surface between old and new version of model.

    Args:
      x: A floating point `numpy.array` or a `tf.Tensor`.
      data_format: Optional data format of the image tensor/array. Defaults to
        None, in which case the global setting
        `tf.keras.backend.image_data_format()` is used (unless you changed it,
        it defaults to "channels_last").{mode}

    Returns:
      Unchanged `numpy.array` or `tf.Tensor`.
    rK   )r   r_   rK   rK   rL   preprocess_input5  s    r   z5keras.applications.efficientnet_v2.decode_predictionsc                 C   s   t j| |dS )N)top)r   decode_predictions)predsr  rK   rK   rL   r  K  s    r  )r   r   r   r   rT   rU   rV   N)r   r   r   r   rT   rU   rV   N)r   r   r3   r3   rT   rU   r   r   Tr   NNNr   r   T)Tr   NNNr   r   T)Tr   NNNr   r   T)Tr   NNNr   r   T)Tr   NNNr   r   T)Tr   NNNr   r   T)Tr   NNNr   r   T)Tr   NNNr   r   T)N)r#   )*__doc__r   rN   Ztensorflow.compat.v2compatv2r   kerasr   r   Zkeras.applicationsr   keras.enginer   keras.utilsr   r    tensorflow.python.util.tf_exportr   r   r   r   rz   r   BASE_DOCSTRINGrM   rS   rE   r   r   r   r   r   r   r   r   r   r   r   r   r   r  rK   rK   rK   rL   <module>   s  "@JJ@@@      M
	C        v        k                
                                                          