a
    PSic{:                     @   s*  d dl mZ d dlmZmZmZmZmZ d dlZd dlm	Z	m
Z
 d dlmZ ddlmZmZ ddlmZ dd	lmZ dd
lmZ ddlmZmZ ddlmZ ddlmZmZ g dZG dd de	j Z!G dd de	j"Z#G dd dZ$G dd de	j"Z%ee$ e&ee e'ee%dddZ(dedddZ)G dd  d eZ*G d!d" d"eZ+G d#d$ d$eZ,G d%d& d&eZ-ed'e*j.fd(dd)d*ee* e'ee%d+d,d-Z/ed'e+j.fd(dd)d*ee+ e'ee%d+d.d/Z0ed'e,j.fd(dd)d*ee, e'ee%d+d0d1Z1ed'e-j.fd(dd)d*ee- e'ee%d+d2d3Z2dS )4    )partial)AnyCallableListOptionalSequenceN)nnTensor)
functional   )Conv2dNormActivationPermute)StochasticDepth)ImageClassification)_log_api_usage_once   )WeightsEnumWeights)_IMAGENET_CATEGORIES)handle_legacy_interface_ovewrite_named_param)	ConvNeXtConvNeXt_Tiny_WeightsConvNeXt_Small_WeightsConvNeXt_Base_WeightsConvNeXt_Large_Weightsconvnext_tinyconvnext_smallconvnext_baseconvnext_largec                   @   s   e Zd ZeedddZdS )LayerNorm2dxreturnc                 C   s>   | dddd}t|| j| j| j| j}| dddd}|S )Nr   r      r   )permuteF
layer_normnormalized_shapeweightbiasepsselfr"    r.   W/var/www/html/django/DPS/env/lib/python3.9/site-packages/torchvision/models/convnext.pyforward   s    zLayerNorm2d.forwardN)__name__
__module____qualname__r	   r0   r.   r.   r.   r/   r       s   r    c                       sH   e Zd Zd	eeeedejf  dd fddZe	e	dddZ
  ZS )
CNBlockN.)layer_scalestochastic_depth_prob
norm_layerr#   c                    s   t    |d u r ttjdd}ttj||dd|ddtg d||tj|d| dd	t	 tjd| |dd	tg d
| _
tt|dd| | _t|d| _d S )Nư>r+      r$   T)kernel_sizepaddinggroupsr*   )r   r   r$   r      )in_featuresout_featuresr*   )r   r$   r   r   r   row)super__init__r   r   	LayerNorm
SequentialConv2dr   LinearGELUblock	Parametertorchonesr5   r   stochastic_depth)r-   dimr5   r6   r7   	__class__r.   r/   rC   '   s    


	zCNBlock.__init__)inputr#   c                 C   s&   | j | | }| |}||7 }|S N)r5   rI   rM   )r-   rQ   resultr.   r.   r/   r0   >   s    
zCNBlock.forward)N)r1   r2   r3   floatr   r   r   ModulerC   r	   r0   __classcell__r.   r.   rO   r/   r4   &   s    r4   c                   @   s2   e Zd Zeee eddddZedddZdS )CNBlockConfigN)input_channelsout_channels
num_layersr#   c                 C   s   || _ || _|| _d S rR   )rX   rY   rZ   )r-   rX   rY   rZ   r.   r.   r/   rC   G   s    zCNBlockConfig.__init__)r#   c                 C   s>   | j jd }|d7 }|d7 }|d7 }|d7 }|jf i | jS )N(zinput_channels={input_channels}z, out_channels={out_channels}z, num_layers={num_layers}))rP   r1   format__dict__)r-   sr.   r.   r/   __repr__Q   s    zCNBlockConfig.__repr__)r1   r2   r3   intr   rC   strr`   r.   r.   r.   r/   rW   E   s   
rW   c                
       sr   e Zd Zdee eeeeede	j
f  eede	j
f  edd fddZeed	d
dZeed	ddZ  ZS )r           r8     N.)block_settingr6   r5   num_classesrI   r7   kwargsr#   c                    s  t    t|  |s tdn$t|tr<tdd |D sDtd|d u rPt}|d u rdt	t
dd}g }|d j}	|td|	d	d	d|d d
d tdd |D }
d}|D ]}g }t|jD ]0}|| |
d  }|||j|| |d7 }q|tj|  |jd ur|t||jtj|j|jddd qtj| | _td| _|d }|jd urf|jn|j}t||tdt||| _|  D ]F}t|tjtjfrtjj|jdd |jd urtj |j qd S )Nz%The block_setting should not be emptyc                 S   s   g | ]}t |tqS r.   )
isinstancerW   ).0r_   r.   r.   r/   
<listcomp>j       z%ConvNeXt.__init__.<locals>.<listcomp>z/The block_setting should be List[CNBlockConfig]r8   r9   r   r$   r>   T)r;   strider<   r7   Zactivation_layerr*   c                 s   s   | ]}|j V  qd S rR   )rZ   )ri   cnfr.   r.   r/   	<genexpr>   rk   z$ConvNeXt.__init__.<locals>.<genexpr>g      ?r   r   )r;   rl   g{Gz?)std)!rB   rC   r   
ValueErrorrh   r   all	TypeErrorr4   r   r    rX   appendr   sumrangerZ   r   rE   rY   rF   featuresAdaptiveAvgPool2davgpoolFlattenrG   
classifiermodulesinittrunc_normal_r)   r*   zeros_)r-   re   r6   r5   rf   rI   r7   rg   layersZfirstconv_output_channelsZtotal_stage_blocksZstage_block_idrm   stage_Zsd_probZ	lastblockZlastconv_output_channelsmrO   r.   r/   rC   [   sj    





zConvNeXt.__init__r!   c                 C   s"   |  |}| |}| |}|S rR   )rw   ry   r{   r,   r.   r.   r/   _forward_impl   s    


zConvNeXt._forward_implc                 C   s
   |  |S rR   )r   r,   r.   r.   r/   r0      s    zConvNeXt.forward)rc   r8   rd   NN)r1   r2   r3   r   rW   rT   ra   r   r   r   rU   r   rC   r	   r   r0   rV   r.   r.   rO   r/   r   Z   s"        Nr   )re   r6   weightsprogressrg   r#   c                 K   sP   |d urt |dt|jd  t| fd|i|}|d urL||j|d |S )Nrf   
categoriesr6   )r   )r   lenmetar   load_state_dictget_state_dict)re   r6   r   r   rg   modelr.   r.   r/   	_convnext   s    r   )    r   zNhttps://github.com/pytorch/vision/tree/main/references/classification#convnexta  
        These weights improve upon the results of the original paper by using a modified version of TorchVision's
        `new training recipe
        <https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/>`_.
    )min_sizer   recipe_docsc                	   @   s@   e Zd Zedeedddi eddddd	id
dZeZdS )r   z>https://download.pytorch.org/models/convnext_tiny-983f1562.pth      	crop_sizeresize_sizeiH<ImageNet-1KgzGT@gMbX	X@zacc@1zacc@5
num_params_metricsurl
transformsr   N	r1   r2   r3   r   r   r   _COMMON_METAIMAGENET1K_V1DEFAULTr.   r.   r.   r/   r      s   r   c                	   @   s@   e Zd Zedeedddi eddddd	id
dZeZdS )r   z?https://download.pytorch.org/models/convnext_small-0c510722.pthr      r   iHZr   gClT@g)X@r   r   r   Nr   r.   r.   r.   r/   r      s   r   c                	   @   s@   e Zd Zedeedddi eddddd	id
dZeZdS )r   z>https://download.pytorch.org/models/convnext_base-6075fbad.pthr      r   ihGr   gU@gHz7X@r   r   r   Nr   r.   r.   r.   r/   r      s   r   c                	   @   s@   e Zd Zedeedddi eddddd	id
dZeZdS )r   z?https://download.pytorch.org/models/convnext_large-ea097f82.pthr   r   r   ir   g"~U@gX9v>X@r   r   r   Nr   r.   r.   r.   r/   r     s   r   
pretrained)r   T)r   r   )r   r   rg   r#   c                 K   sX   t | } tdddtdddtdddtdddg}|dd	}t||| |fi |S )
a  ConvNeXt Tiny model architecture from the
    `A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.

    Args:
        weights (:class:`~torchvision.models.convnext.ConvNeXt_Tiny_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Tiny_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ConvNeXt_Tiny_Weights
        :members:
    `      r$        	   Nr6   g?)r   verifyrW   popr   r   r   rg   re   r6   r.   r.   r/   r     s    




r   c                 K   sX   t | } tdddtdddtdddtdddg}|dd	}t||| |fi |S )
a  ConvNeXt Small model architecture from the
    `A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.

    Args:
        weights (:class:`~torchvision.models.convnext.ConvNeXt_Small_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Small_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ConvNeXt_Small_Weights
        :members:
    r   r   r$   r   r      Nr6   g?)r   r   rW   r   r   r   r.   r.   r/   r   7  s    




r   c                 K   sX   t | } tdddtdddtdddtdddg}|dd	}t||| |fi |S )
a  ConvNeXt Base model architecture from the
    `A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.

    Args:
        weights (:class:`~torchvision.models.convnext.ConvNeXt_Base_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Base_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ConvNeXt_Base_Weights
        :members:
          r$   i   i   r   Nr6         ?)r   r   rW   r   r   r   r.   r.   r/   r   W  s    




r   c                 K   sX   t | } tdddtdddtdddtdddg}|dd	}t||| |fi |S )
a  ConvNeXt Large model architecture from the
    `A ConvNet for the 2020s <https://arxiv.org/abs/2201.03545>`_ paper.

    Args:
        weights (:class:`~torchvision.models.convnext.ConvNeXt_Large_Weights`, optional): The pretrained
            weights to use. See :class:`~torchvision.models.convnext.ConvNeXt_Large_Weights`
            below for more details and possible values. By default, no pre-trained weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        **kwargs: parameters passed to the ``torchvision.models.convnext.ConvNext``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/convnext.py>`_
            for more details about this class.

    .. autoclass:: torchvision.models.ConvNeXt_Large_Weights
        :members:
    r   r   r$   r   i   r   Nr6   r   )r   r   rW   r   r   r   r.   r.   r/   r   u  s    




r   )3	functoolsr   typingr   r   r   r   r   rK   r   r	   torch.nnr
   r&   Zops.miscr   r   Zops.stochastic_depthr   Ztransforms._presetsr   utilsr   _apir   r   _metar   _utilsr   r   __all__rD   r    rU   r4   rW   r   rT   boolr   r   r   r   r   r   r   r   r   r   r   r.   r.   r.   r/   <module>   s`   Z"
"
