from functools import partial
from typing import Any, Optional, Union

from torch import Tensor
from torch import nn
from torch.ao.quantization import QuantStub, DeQuantStub
from torchvision.models.mobilenetv2 import InvertedResidual, MobileNetV2, MobileNet_V2_Weights

from ...ops.misc import Conv2dNormActivation
from ...transforms._presets import ImageClassification
from .._api import WeightsEnum, Weights
from .._meta import _IMAGENET_CATEGORIES
from .._utils import handle_legacy_interface, _ovewrite_named_param
from .utils import _fuse_modules, _replace_relu, quantize_model


__all__ = [
    "QuantizableMobileNetV2",
    "MobileNet_V2_QuantizedWeights",
    "mobilenet_v2",
]


class QuantizableInvertedResidual(InvertedResidual):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # FloatFunctional wraps the residual addition in a module so eager-mode quantization
        # can attach observers to it (quantized tensors do not support the plain `+` operator).
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x: Tensor) -> Tensor:
        if self.use_res_connect:
            return self.skip_add.add(x, self.conv(x))
        else:
            return self.conv(x)

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        # Fuse the plain projection Conv2d with the BatchNorm2d that follows it; the
        # Conv2dNormActivation sub-blocks are fused by QuantizableMobileNetV2.fuse_model.
        for idx in range(len(self.conv)):
            if type(self.conv[idx]) is nn.Conv2d:
                _fuse_modules(self.conv, [str(idx), str(idx + 1)], is_qat, inplace=True)


class QuantizableMobileNetV2(MobileNetV2):
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """
        MobileNet V2 main class

        Args:
           Inherits args from floating point MobileNetV2
        """
        super().__init__(*args, **kwargs)
        # Stubs mark where activations enter (quant) and leave (dequant) the quantized domain.
        self.quant = QuantStub()
        self.dequant = DeQuantStub()

    def forward(self, x: Tensor) -> Tensor:
        x = self.quant(x)
        x = self._forward_impl(x)
        x = self.dequant(x)
        return x

    def fuse_model(self, is_qat: Optional[bool] = None) -> None:
        for m in self.modules():
            if type(m) is Conv2dNormActivation:
                # Fuse Conv2d + BatchNorm2d + ReLU within each Conv2dNormActivation block.
                _fuse_modules(m, ["0", "1", "2"], is_qat, inplace=True)
            if type(m) is QuantizableInvertedResidual:
                m.fuse_model(is_qat)
dS )r   zOhttps://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth   )	crop_sizeiz5 )r   r   qnnpackzUhttps://github.com/pytorch/vision/tree/main/references/classification#qat-mobilenetv2zImageNet-1Kg'1Q@gV@)zacc@1zacc@5z
                These weights were produced by doing Quantization Aware Training (eager mode) on top of the unquantized
                weights listed below.
            )
num_paramsmin_size
categoriesbackendrecipeunquantized_metrics_docs)url
transformsmetaN)rA   rB   rC   r   r   r   r   r   IMAGENET1K_V1IMAGENET1K_QNNPACK_V1DEFAULTr,   r,   r,   r-   r   C   s$   
r   
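

# --- Illustrative sketch (added for exposition; not part of torchvision) ---
# The ``_docs`` entry above notes that the checkpoint comes from eager-mode Quantization Aware
# Training; a QAT loop has roughly the shape below. The helper name, optimizer settings, and the
# ``data_loader`` argument are assumptions for the sketch, not torchvision's actual training
# recipe (that lives in the references/classification scripts linked in ``meta["recipe"]``).
def _example_qat_flow(model: QuantizableMobileNetV2, data_loader, num_epochs: int = 1) -> QuantizableMobileNetV2:
    import torch
    from torch.ao.quantization import convert, get_default_qat_qconfig, prepare_qat

    model.train()
    model.fuse_model(is_qat=True)                        # QAT-style fusion keeps fused Conv+BN trainable
    model.qconfig = get_default_qat_qconfig("qnnpack")   # fake-quantize + observer configuration
    prepare_qat(model, inplace=True)                     # insert fake-quant modules for training

    optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
    criterion = torch.nn.CrossEntropyLoss()
    for _ in range(num_epochs):
        for images, targets in data_loader:
            optimizer.zero_grad()
            loss = criterion(model(images), targets)
            loss.backward()
            optimizer.step()

    model.eval()
    return convert(model, inplace=False)                 # materialize the final int8 model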


@handle_legacy_interface(
    weights=(
        "pretrained",
        lambda kwargs: MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1
        if kwargs.get("quantize", False)
        else MobileNet_V2_Weights.IMAGENET1K_V1,
    )
)
def mobilenet_v2(
    *,
    weights: Optional[Union[MobileNet_V2_QuantizedWeights, MobileNet_V2_Weights]] = None,
    progress: bool = True,
    quantize: bool = False,
    **kwargs: Any,
) -> QuantizableMobileNetV2:
    """
    Constructs a MobileNetV2 architecture from
    `MobileNetV2: Inverted Residuals and Linear Bottlenecks
    <https://arxiv.org/abs/1801.04381>`_.

    .. note::
        Note that ``quantize = True`` returns a quantized model with 8 bit
        weights. Quantized models only support inference and run on CPUs.
        GPU inference is not yet supported.

    Args:
        weights (:class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` or :class:`~torchvision.models.MobileNet_V2_Weights`, optional): The
            pretrained weights for the model. See
            :class:`~torchvision.models.quantization.MobileNet_V2_QuantizedWeights` below for
            more details, and possible values. By default, no pre-trained
            weights are used.
        progress (bool, optional): If True, displays a progress bar of the download to stderr. Default is True.
        quantize (bool, optional): If True, returns a quantized version of the model. Default is False.
        **kwargs: parameters passed to the ``torchvision.models.quantization.QuantizableMobileNetV2``
            base class. Please refer to the `source code
            <https://github.com/pytorch/vision/blob/main/torchvision/models/quantization/mobilenetv2.py>`_
            for more details about this class.
    .. autoclass:: torchvision.models.quantization.MobileNet_V2_QuantizedWeights
        :members:
    .. autoclass:: torchvision.models.MobileNet_V2_Weights
        :members:
        :noindex:
    Nnum_classesrS   rT   rP   block)rd   )r   r   verifyr   r:   r[   popr   r   r   r   load_state_dictget_state_dict)rc   rd   r`   r    rT   modelr,   r,   r-   r   ]   s    +

r   )


# Backwards compatibility for the legacy ``model_urls``/``quant_model_urls`` dictionaries.
from .._utils import _ModelURLs
from ..mobilenetv2 import model_urls  # noqa: F401


quant_model_urls = _ModelURLs(
    {
        "mobilenet_v2_qnnpack": MobileNet_V2_QuantizedWeights.IMAGENET1K_QNNPACK_V1.url,
    }
)