import copy
import math
import warnings
from dataclasses import dataclass
from functools import partial
from typing import Any, Callable, Dict, Optional, List, Sequence, Tuple, Union

import torch
from torch import nn, Tensor
from torchvision.ops import StochasticDepth

from ..ops.misc import Conv2dNormActivation, SqueezeExcitation
from ..transforms._presets import ImageClassification, InterpolationMode
from ..utils import _log_api_usage_once
from ._api import WeightsEnum, Weights
from ._meta import _IMAGENET_CATEGORIES
from ._utils import handle_legacy_interface, _ovewrite_named_param, _make_divisible


__all__ = [
    "EfficientNet",
    "EfficientNet_B0_Weights",
    "EfficientNet_B1_Weights",
    "EfficientNet_B2_Weights",
    "EfficientNet_B3_Weights",
    "EfficientNet_B4_Weights",
    "EfficientNet_B5_Weights",
    "EfficientNet_B6_Weights",
    "EfficientNet_B7_Weights",
    "EfficientNet_V2_S_Weights",
    "EfficientNet_V2_M_Weights",
    "EfficientNet_V2_L_Weights",
    "efficientnet_b0",
    "efficientnet_b1",
    "efficientnet_b2",
    "efficientnet_b3",
    "efficientnet_b4",
    "efficientnet_b5",
    "efficientnet_b6",
    "efficientnet_b7",
    "efficientnet_v2_s",
    "efficientnet_v2_m",
    "efficientnet_v2_l",
]


@dataclass
class _MBConvConfig:
    expand_ratio: float
    kernel: int
    stride: int
    input_channels: int
    out_channels: int
    num_layers: int
    block: Callable[..., nn.Module]

    @staticmethod
    def adjust_channels(channels: int, width_mult: float, min_value: Optional[int] = None) -> int:
        return _make_divisible(channels * width_mult, 8, min_value)


class MBConvConfig(_MBConvConfig):
    # Stores information listed at Table 1 of the EfficientNet paper & Table 4 of the EfficientNetV2 paper
    def __init__(
        self,
        expand_ratio: float,
        kernel: int,
        stride: int,
        input_channels: int,
        out_channels: int,
        num_layers: int,
        width_mult: float = 1.0,
        depth_mult: float = 1.0,
        block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        input_channels = self.adjust_channels(input_channels, width_mult)
        out_channels = self.adjust_channels(out_channels, width_mult)
        num_layers = self.adjust_depth(num_layers, depth_mult)
        if block is None:
            block = MBConv
        super().__init__(expand_ratio, kernel, stride, input_channels, out_channels, num_layers, block)

    @staticmethod
    def adjust_depth(num_layers: int, depth_mult: float):
        return int(math.ceil(num_layers * depth_mult))


class FusedMBConvConfig(_MBConvConfig):
    # Stores information listed at Table 4 of the EfficientNetV2 paper
    def __init__(
        self,
        expand_ratio: float,
        kernel: int,
        stride: int,
        input_channels: int,
        out_channels: int,
        num_layers: int,
        block: Optional[Callable[..., nn.Module]] = None,
    ) -> None:
        if block is None:
            block = FusedMBConv
        super().__init__(expand_ratio, kernel, stride, input_channels, out_channels, num_layers, block)


class MBConv(nn.Module):
    def __init__(
        self,
        cnf: MBConvConfig,
        stochastic_depth_prob: float,
        norm_layer: Callable[..., nn.Module],
        se_layer: Callable[..., nn.Module] = SqueezeExcitation,
    ) -> None:
        super().__init__()

        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.SiLU

        # expand
        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        if expanded_channels != cnf.input_channels:
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    expanded_channels,
                    kernel_size=1,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

        # depthwise
        layers.append(
            Conv2dNormActivation(
                expanded_channels,
                expanded_channels,
                kernel_size=cnf.kernel,
                stride=cnf.stride,
                groups=expanded_channels,
                norm_layer=norm_layer,
                activation_layer=activation_layer,
            )
        )

        # squeeze and excitation
        squeeze_channels = max(1, cnf.input_channels // 4)
        layers.append(se_layer(expanded_channels, squeeze_channels, activation=partial(nn.SiLU, inplace=True)))

        # project
        layers.append(
            Conv2dNormActivation(
                expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
            )
        )

        self.block = nn.Sequential(*layers)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result = self.stochastic_depth(result)
            result += input
        return result


class FusedMBConv(nn.Module):
    def __init__(
        self,
        cnf: FusedMBConvConfig,
        stochastic_depth_prob: float,
        norm_layer: Callable[..., nn.Module],
    ) -> None:
        super().__init__()

        if not (1 <= cnf.stride <= 2):
            raise ValueError("illegal stride value")

        self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels

        layers: List[nn.Module] = []
        activation_layer = nn.SiLU

        expanded_channels = cnf.adjust_channels(cnf.input_channels, cnf.expand_ratio)
        if expanded_channels != cnf.input_channels:
            # fused expand
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    expanded_channels,
                    kernel_size=cnf.kernel,
                    stride=cnf.stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

            # project
            layers.append(
                Conv2dNormActivation(
                    expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, activation_layer=None
                )
            )
        else:
            layers.append(
                Conv2dNormActivation(
                    cnf.input_channels,
                    cnf.out_channels,
                    kernel_size=cnf.kernel,
                    stride=cnf.stride,
                    norm_layer=norm_layer,
                    activation_layer=activation_layer,
                )
            )

        self.block = nn.Sequential(*layers)
        self.stochastic_depth = StochasticDepth(stochastic_depth_prob, "row")
        self.out_channels = cnf.out_channels

    def forward(self, input: Tensor) -> Tensor:
        result = self.block(input)
        if self.use_res_connect:
            result = self.stochastic_depth(result)
            result += input
        return result


class EfficientNet(nn.Module):
    def __init__(
        self,
        inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
        dropout: float,
        stochastic_depth_prob: float = 0.2,
        num_classes: int = 1000,
        norm_layer: Optional[Callable[..., nn.Module]] = None,
        last_channel: Optional[int] = None,
        **kwargs: Any,
    ) -> None:
        """
        EfficientNet V1 and V2 main class

        Args:
            inverted_residual_setting (Sequence[Union[MBConvConfig, FusedMBConvConfig]]): Network structure
            dropout (float): The dropout probability
            stochastic_depth_prob (float): The stochastic depth probability
            num_classes (int): Number of classes
            norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            last_channel (int): The number of channels on the penultimate layer
        """
        super().__init__()
        _log_api_usage_once(self)

        if not inverted_residual_setting:
            raise ValueError("The inverted_residual_setting should not be empty")
        elif not (
            isinstance(inverted_residual_setting, Sequence)
            and all([isinstance(s, _MBConvConfig) for s in inverted_residual_setting])
        ):
            raise TypeError("The inverted_residual_setting should be List[MBConvConfig]")

        if "block" in kwargs:
            warnings.warn(
                "The parameter 'block' is deprecated since 0.13 and will be removed in 0.15. "
                "Please pass this information on 'MBConvConfig.block' instead."
            )
            if kwargs["block"] is not None:
                for s in inverted_residual_setting:
                    if isinstance(s, MBConvConfig):
                        s.block = kwargs["block"]

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        layers: List[nn.Module] = []

        # building first layer
        firstconv_output_channels = inverted_residual_setting[0].input_channels
        layers.append(
            Conv2dNormActivation(
                3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, activation_layer=nn.SiLU
            )
        )

        # building inverted residual blocks
        total_stage_blocks = sum(cnf.num_layers for cnf in inverted_residual_setting)
        stage_block_id = 0
        for cnf in inverted_residual_setting:
            stage: List[nn.Module] = []
            for _ in range(cnf.num_layers):
                # copy to avoid modifications. shallow copy is enough
                block_cnf = copy.copy(cnf)

                # overwrite info if not the first conv in the stage
                if stage:
                    block_cnf.input_channels = block_cnf.out_channels
                    block_cnf.stride = 1

                # adjust stochastic depth probability based on the depth of the stage block
                sd_prob = stochastic_depth_prob * float(stage_block_id) / total_stage_blocks

                stage.append(block_cnf.block(block_cnf, sd_prob, norm_layer))
                stage_block_id += 1

            layers.append(nn.Sequential(*stage))

        # building last several layers
        lastconv_input_channels = inverted_residual_setting[-1].out_channels
        lastconv_output_channels = last_channel if last_channel is not None else 4 * lastconv_input_channels
        layers.append(
            Conv2dNormActivation(
                lastconv_input_channels,
                lastconv_output_channels,
                kernel_size=1,
                norm_layer=norm_layer,
                activation_layer=nn.SiLU,
            )
        )

        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout, inplace=True),
            nn.Linear(lastconv_output_channels, num_classes),
        )

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                init_range = 1.0 / math.sqrt(m.out_features)
                nn.init.uniform_(m.weight, -init_range, init_range)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)

        x = self.classifier(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)


def _efficientnet(
    inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]],
    dropout: float,
    last_channel: Optional[int],
    weights: Optional[WeightsEnum],
    progress: bool,
    **kwargs: Any,
) -> EfficientNet:
    if weights is not None:
        _ovewrite_named_param(kwargs, "num_classes", len(weights.meta["categories"]))

    model = EfficientNet(inverted_residual_setting, dropout, last_channel=last_channel, **kwargs)

    if weights is not None:
        model.load_state_dict(weights.get_state_dict(progress=progress))

    return model


def _efficientnet_conf(
    arch: str,
    **kwargs: Any,
) -> Tuple[Sequence[Union[MBConvConfig, FusedMBConvConfig]], Optional[int]]:
    inverted_residual_setting: Sequence[Union[MBConvConfig, FusedMBConvConfig]]
    if arch.startswith("efficientnet_b"):
        bneck_conf = partial(MBConvConfig, width_mult=kwargs.pop("width_mult"), depth_mult=kwargs.pop("depth_mult"))
        inverted_residual_setting = [
            bneck_conf(1, 3, 1, 32, 16, 1),
            bneck_conf(6, 3, 2, 16, 24, 2),
            bneck_conf(6, 5, 2, 24, 40, 2),
            bneck_conf(6, 3, 2, 40, 80, 3),
            bneck_conf(6, 5, 1, 80, 112, 3),
            bneck_conf(6, 5, 2, 112, 192, 4),
            bneck_conf(6, 3, 1, 192, 320, 1),
        ]
        last_channel = None
    elif arch.startswith("efficientnet_v2_s"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 24, 24, 2),
            FusedMBConvConfig(4, 3, 2, 24, 48, 4),
            FusedMBConvConfig(4, 3, 2, 48, 64, 4),
            MBConvConfig(4, 3, 2, 64, 128, 6),
            MBConvConfig(6, 3, 1, 128, 160, 9),
            MBConvConfig(6, 3, 2, 160, 256, 15),
        ]
        last_channel = 1280
    elif arch.startswith("efficientnet_v2_m"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 24, 24, 3),
            FusedMBConvConfig(4, 3, 2, 24, 48, 5),
            FusedMBConvConfig(4, 3, 2, 48, 80, 5),
            MBConvConfig(4, 3, 2, 80, 160, 7),
            MBConvConfig(6, 3, 1, 160, 176, 14),
            MBConvConfig(6, 3, 2, 176, 304, 18),
            MBConvConfig(6, 3, 1, 304, 512, 5),
        ]
        last_channel = 1280
    elif arch.startswith("efficientnet_v2_l"):
        inverted_residual_setting = [
            FusedMBConvConfig(1, 3, 1, 32, 32, 4),
            FusedMBConvConfig(4, 3, 2, 32, 64, 7),
            FusedMBConvConfig(4, 3, 2, 64, 96, 7),
            MBConvConfig(4, 3, 2, 96, 192, 10),
            MBConvConfig(6, 3, 1, 192, 224, 19),
            MBConvConfig(6, 3, 2, 224, 384, 25),
            MBConvConfig(6, 3, 1, 384, 640, 7),
        ]
        last_channel = 1280
    else:
        raise ValueError(f"Unsupported model type {arch}")

    return inverted_residual_setting, last_channel


_COMMON_META: Dict[str, Any] = {
    "categories": _IMAGENET_CATEGORIES,
}


_COMMON_META_V1 = {
    **_COMMON_META,
    "min_size": (1, 1),
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v1",
}


_COMMON_META_V2 = {
    **_COMMON_META,
    "min_size": (33, 33),
    "recipe": "https://github.com/pytorch/vision/tree/main/references/classification#efficientnet-v2",
}


class EfficientNet_B0_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b0_rwightman-3dd342df.pth",
        transforms=partial(ImageClassification, crop_size=224, resize_size=256, interpolation=InterpolationMode.BICUBIC),
        meta={
            **_COMMON_META_V1,
            "num_params": 5288548,
            "_metrics": {"ImageNet-1K": {"acc@1": 77.692, "acc@5": 93.532}},
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B1_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b1_rwightman-533bc792.pth",
        transforms=partial(ImageClassification, crop_size=240, resize_size=256, interpolation=InterpolationMode.BICUBIC),
        meta={
            **_COMMON_META_V1,
            "num_params": 7794184,
            "_metrics": {"ImageNet-1K": {"acc@1": 78.642, "acc@5": 94.186}},
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    IMAGENET1K_V2 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b1-c27df63c.pth",
        transforms=partial(ImageClassification, crop_size=240, resize_size=255, interpolation=InterpolationMode.BILINEAR),
        meta={
            **_COMMON_META_V1,
            "num_params": 7794184,
            "recipe": "https://github.com/pytorch/vision/issues/3995#new-recipe-with-lr-wd-crop-tuning",
            "_metrics": {"ImageNet-1K": {"acc@1": 79.838, "acc@5": 94.934}},
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V2


class EfficientNet_B2_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b2_rwightman-bcdf34b7.pth",
        transforms=partial(ImageClassification, crop_size=288, resize_size=288, interpolation=InterpolationMode.BICUBIC),
        meta={
            **_COMMON_META_V1,
            "num_params": 9109994,
            "_metrics": {"ImageNet-1K": {"acc@1": 80.608, "acc@5": 95.310}},
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B3_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b3_rwightman-cf984f9c.pth",
        transforms=partial(ImageClassification, crop_size=300, resize_size=320, interpolation=InterpolationMode.BICUBIC),
        meta={
            **_COMMON_META_V1,
            "num_params": 12233232,
            "_metrics": {"ImageNet-1K": {"acc@1": 82.008, "acc@5": 96.054}},
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B4_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b4_rwightman-7eb33cd5.pth",
        transforms=partial(ImageClassification, crop_size=380, resize_size=384, interpolation=InterpolationMode.BICUBIC),
        meta={
            **_COMMON_META_V1,
            "num_params": 19341616,
            "_metrics": {"ImageNet-1K": {"acc@1": 83.384, "acc@5": 96.594}},
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B5_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b5_lukemelas-b6417697.pth",
        transforms=partial(ImageClassification, crop_size=456, resize_size=456, interpolation=InterpolationMode.BICUBIC),
        meta={
            **_COMMON_META_V1,
            "num_params": 30389784,
            "_metrics": {"ImageNet-1K": {"acc@1": 83.444, "acc@5": 96.628}},
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B6_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b6_lukemelas-c76e70fd.pth",
        transforms=partial(ImageClassification, crop_size=528, resize_size=528, interpolation=InterpolationMode.BICUBIC),
        meta={
            **_COMMON_META_V1,
            "num_params": 43040704,
            "_metrics": {"ImageNet-1K": {"acc@1": 84.008, "acc@5": 96.916}},
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_B7_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_b7_lukemelas-dcc49843.pth",
        transforms=partial(ImageClassification, crop_size=600, resize_size=600, interpolation=InterpolationMode.BICUBIC),
        meta={
            **_COMMON_META_V1,
            "num_params": 66347960,
            "_metrics": {"ImageNet-1K": {"acc@1": 84.122, "acc@5": 96.908}},
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_V2_S_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_s-dd5fe13b.pth",
        transforms=partial(ImageClassification, crop_size=384, resize_size=384, interpolation=InterpolationMode.BILINEAR),
        meta={
            **_COMMON_META_V2,
            "num_params": 21458488,
            "_metrics": {"ImageNet-1K": {"acc@1": 84.228, "acc@5": 96.878}},
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_V2_M_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_m-dc08266a.pth",
        transforms=partial(ImageClassification, crop_size=480, resize_size=480, interpolation=InterpolationMode.BILINEAR),
        meta={
            **_COMMON_META_V2,
            "num_params": 54139356,
            "_metrics": {"ImageNet-1K": {"acc@1": 85.112, "acc@5": 97.156}},
            "_docs": """
                These weights improve upon the results of the original paper by using a modified version of TorchVision's
                `new training recipe
                <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
            """,
        },
    )
    DEFAULT = IMAGENET1K_V1


class EfficientNet_V2_L_Weights(WeightsEnum):
    IMAGENET1K_V1 = Weights(
        url="https://download.pytorch.org/models/efficientnet_v2_l-59c71312.pth",
        transforms=partial(
            ImageClassification,
            crop_size=480,
            resize_size=480,
            interpolation=InterpolationMode.BICUBIC,
            mean=(0.5, 0.5, 0.5),
            std=(0.5, 0.5, 0.5),
        ),
        meta={
            **_COMMON_META_V2,
            "num_params": 118515272,
            "_metrics": {"ImageNet-1K": {"acc@1": 85.808, "acc@5": 97.788}},
            "_docs": """These weights are ported from the original paper.""",
        },
    )
    DEFAULT = IMAGENET1K_V1


@handle_legacy_interface(weights=("pretrained", EfficientNet_B0_Weights.IMAGENET1K_V1))
def efficientnet_b0(
    *, weights: Optional[EfficientNet_B0_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B0 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B0_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B0_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_B0_Weights
        :members:
    """
    weights = EfficientNet_B0_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b0", width_mult=1.0, depth_mult=1.0)
    return _efficientnet(inverted_residual_setting, 0.2, last_channel, weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", EfficientNet_B1_Weights.IMAGENET1K_V1))
def efficientnet_b1(
    *, weights: Optional[EfficientNet_B1_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B1 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B1_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B1_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_B1_Weights
        :members:
    """
    weights = EfficientNet_B1_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b1", width_mult=1.0, depth_mult=1.1)
    return _efficientnet(inverted_residual_setting, 0.2, last_channel, weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", EfficientNet_B2_Weights.IMAGENET1K_V1))
def efficientnet_b2(
    *, weights: Optional[EfficientNet_B2_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B2 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B2_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B2_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_B2_Weights
        :members:
    """
    weights = EfficientNet_B2_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b2", width_mult=1.1, depth_mult=1.2)
    return _efficientnet(inverted_residual_setting, 0.3, last_channel, weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", EfficientNet_B3_Weights.IMAGENET1K_V1))
def efficientnet_b3(
    *, weights: Optional[EfficientNet_B3_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B3 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B3_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B3_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_B3_Weights
        :members:
    """
    weights = EfficientNet_B3_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b3", width_mult=1.2, depth_mult=1.4)
    return _efficientnet(inverted_residual_setting, 0.3, last_channel, weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", EfficientNet_B4_Weights.IMAGENET1K_V1))
def efficientnet_b4(
    *, weights: Optional[EfficientNet_B4_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B4 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B4_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B4_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_B4_Weights
        :members:
    """
    weights = EfficientNet_B4_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b4", width_mult=1.4, depth_mult=1.8)
    return _efficientnet(inverted_residual_setting, 0.4, last_channel, weights, progress, **kwargs)


@handle_legacy_interface(weights=("pretrained", EfficientNet_B5_Weights.IMAGENET1K_V1))
def efficientnet_b5(
    *, weights: Optional[EfficientNet_B5_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B5 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B5_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B5_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_B5_Weights
        :members:
    """
    weights = EfficientNet_B5_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b5", width_mult=1.6, depth_mult=2.2)
    return _efficientnet(
        inverted_residual_setting,
        0.4,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )


@handle_legacy_interface(weights=("pretrained", EfficientNet_B6_Weights.IMAGENET1K_V1))
def efficientnet_b6(
    *, weights: Optional[EfficientNet_B6_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B6 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B6_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B6_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_B6_Weights
        :members:
    """
    weights = EfficientNet_B6_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b6", width_mult=1.8, depth_mult=2.6)
    return _efficientnet(
        inverted_residual_setting,
        0.5,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )


@handle_legacy_interface(weights=("pretrained", EfficientNet_B7_Weights.IMAGENET1K_V1))
def efficientnet_b7(
    *, weights: Optional[EfficientNet_B7_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """EfficientNet B7 model architecture from the `EfficientNet: Rethinking Model Scaling for Convolutional
    Neural Networks <https://arxiv.org/abs/1905.11946>`_ paper.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_B7_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_B7_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_B7_Weights
        :members:
    """
    weights = EfficientNet_B7_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_b7", width_mult=2.0, depth_mult=3.1)
    return _efficientnet(
        inverted_residual_setting,
        0.5,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=0.001, momentum=0.01),
        **kwargs,
    )


@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_S_Weights.IMAGENET1K_V1))
def efficientnet_v2_s(
    *, weights: Optional[EfficientNet_V2_S_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-S architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_S_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_S_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_V2_S_Weights
        :members:
    """
    weights = EfficientNet_V2_S_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_s")
    return _efficientnet(
        inverted_residual_setting,
        0.2,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )


@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_M_Weights.IMAGENET1K_V1))
def efficientnet_v2_m(
    *, weights: Optional[EfficientNet_V2_M_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-M architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_M_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_M_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_V2_M_Weights
        :members:
    """
    weights = EfficientNet_V2_M_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_m")
    return _efficientnet(
        inverted_residual_setting,
        0.3,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )


@handle_legacy_interface(weights=("pretrained", EfficientNet_V2_L_Weights.IMAGENET1K_V1))
def efficientnet_v2_l(
    *, weights: Optional[EfficientNet_V2_L_Weights] = None, progress: bool = True, **kwargs: Any
) -> EfficientNet:
    """
    Constructs an EfficientNetV2-L architecture from
    `EfficientNetV2: Smaller Models and Faster Training <https://arxiv.org/abs/2104.00298>`_.

    Args:
        weights (:class:`~torchvision.models.EfficientNet_V2_L_Weights`, optional): The
            pretrained weights to use. See
            :class:`~torchvision.models.EfficientNet_V2_L_Weights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the
            download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.efficientnet.EfficientNet``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/efficientnet.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.EfficientNet_V2_L_Weights
        :members:
    """
    weights = EfficientNet_V2_L_Weights.verify(weights)

    inverted_residual_setting, last_channel = _efficientnet_conf("efficientnet_v2_l")
    return _efficientnet(
        inverted_residual_setting,
        0.4,
        last_channel,
        weights,
        progress,
        norm_layer=partial(nn.BatchNorm2d, eps=1e-03),
        **kwargs,
    )


# The dictionary below is internal implementation detail and will be removed in v0.15
from ._utils import _ModelURLs


model_urls = _ModelURLs(
    {
        "efficientnet_b0": EfficientNet_B0_Weights.IMAGENET1K_V1.url,
        "efficientnet_b1": EfficientNet_B1_Weights.IMAGENET1K_V1.url,
        "efficientnet_b2": EfficientNet_B2_Weights.IMAGENET1K_V1.url,
        "efficientnet_b3": EfficientNet_B3_Weights.IMAGENET1K_V1.url,
        "efficientnet_b4": EfficientNet_B4_Weights.IMAGENET1K_V1.url,
        "efficientnet_b5": EfficientNet_B5_Weights.IMAGENET1K_V1.url,
        "efficientnet_b6": EfficientNet_B6_Weights.IMAGENET1K_V1.url,
        "efficientnet_b7": EfficientNet_B7_Weights.IMAGENET1K_V1.url,
    }
)