# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Model head modules."""

import copy
import math

import torch
import torch.nn as nn
from torch.nn.init import constant_, xavier_uniform_

from ultralytics.utils.tal import TORCH_1_10, dist2bbox, dist2rbox, make_anchors

from .block import DFL, BNContrastiveHead, ContrastiveHead, Proto
from .conv import Conv, DWConv
from .transformer import MLP, DeformableTransformerDecoder, DeformableTransformerDecoderLayer
from .utils import bias_init_with_prob, linear_init

__all__ = "Detect", "Segment", "Pose", "Classify", "OBB", "RTDETRDecoder", "v10Detect"


class Detect(nn.Module):
    """YOLOv8 Detect head for detection models."""

    dynamic = False  # force grid reconstruction
    export = False  # export mode
    end2end = False  # end2end
    max_det = 300  # max_det
    shape = None
    anchors = torch.empty(0)  # init
    strides = torch.empty(0)  # init

    def __init__(self, nc=80, ch=()):
        """Initializes the YOLOv8 detection layer with specified number of classes and channels."""
        super().__init__()
        self.nc = nc  # number of classes
        self.nl = len(ch)  # number of detection layers
        self.reg_max = 16  # DFL channels
        self.no = nc + self.reg_max * 4  # number of outputs per anchor
        self.stride = torch.zeros(self.nl)  # strides computed during build
        c2, c3 = max((16, ch[0] // 4, self.reg_max * 4)), max(ch[0], min(self.nc, 100))  # channels
        self.cv2 = nn.ModuleList(
            nn.Sequential(Conv(x, c2, 3), Conv(c2, c2, 3), nn.Conv2d(c2, 4 * self.reg_max, 1)) for x in ch
        )
        self.cv3 = nn.ModuleList(
            nn.Sequential(
                nn.Sequential(DWConv(x, x, 3), Conv(x, c3, 1)),
                nn.Sequential(DWConv(c3, c3, 3), Conv(c3, c3, 1)),
                nn.Conv2d(c3, self.nc, 1),
            )
            for x in ch
        )
        self.dfl = DFL(self.reg_max) if self.reg_max > 1 else nn.Identity()

        if self.end2end:
            self.one2one_cv2 = copy.deepcopy(self.cv2)
            self.one2one_cv3 = copy.deepcopy(self.cv3)

    def forward(self, x):
        """Concatenates and returns predicted bounding boxes and class probabilities."""
        if self.end2end:
            return self.forward_end2end(x)

        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
        if self.training:  # Training path
            return x
        y = self._inference(x)
        return y if self.export else (y, x)

    def forward_end2end(self, x):
        """
        Performs forward pass of the v10Detect module.

        Args:
            x (tensor): Input tensor.

        Returns:
            (dict, tensor): If not in training mode, returns a dictionary containing the outputs of both one2many and one2one detections.
                           If in training mode, returns a dictionary containing the outputs of one2many and one2one detections separately.
        """
        x_detach = [xi.detach() for xi in x]
        one2one = [
            torch.cat((self.one2one_cv2[i](x_detach[i]), self.one2one_cv3[i](x_detach[i])), 1) for i in range(self.nl)
        ]
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv3[i](x[i])), 1)
        if self.training:  # Training path
            return {"one2many": x, "one2one": one2one}

        y = self._inference(one2one)
        y = self.postprocess(y.permute(0, 2, 1), self.max_det, self.nc)
        return y if self.export else (y, {"one2many": x, "one2one": one2one})

    def _inference(self, x):
        """Decode predicted bounding boxes and class probabilities based on multiple-level feature maps."""
        shape = x[0].shape  # BCHW
        x_cat = torch.cat([xi.view(shape[0], self.no, -1) for xi in x], 2)
        if self.dynamic or self.shape != shape:
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape

        if self.export and self.format in {"saved_model", "pb", "tflite", "edgetpu", "tfjs"}:  # avoid TF FlexSplitV ops
            box = x_cat[:, : self.reg_max * 4]
            cls = x_cat[:, self.reg_max * 4 :]
        else:
            box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)

        if self.export and self.format in {"tflite", "edgetpu"}:
            # Precompute the normalization factor to increase numerical stability
            grid_h = shape[2]
            grid_w = shape[3]
            grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
            norm = self.strides / (self.stride[0] * grid_size)
            dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
        else:
            dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides

        return torch.cat((dbox, cls.sigmoid()), 1)

    def bias_init(self):
        """Initialize Detect() biases, WARNING: requires stride availability."""
        m = self  # self.model[-1]  # Detect() module
        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
            a[-1].bias.data[:] = 1.0  # box
            b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)
        if self.end2end:
            for a, b, s in zip(m.one2one_cv2, m.one2one_cv3, m.stride):  # from
                a[-1].bias.data[:] = 1.0  # box
                b[-1].bias.data[: m.nc] = math.log(5 / m.nc / (640 / s) ** 2)  # cls (.01 objects, 80 classes, 640 img)

    def decode_bboxes(self, bboxes, anchors):
        """Decode bounding boxes."""
        return dist2bbox(bboxes, anchors, xywh=not self.end2end, dim=1)

    @staticmethod
    def postprocess(preds: torch.Tensor, max_det: int, nc: int = 80):
        """
        Post-processes YOLO model predictions.

        Args:
            preds (torch.Tensor): Raw predictions with shape (batch_size, num_anchors, 4 + nc) with last dimension
                format [x, y, w, h, class_probs].
            max_det (int): Maximum detections per image.
            nc (int, optional): Number of classes. Default: 80.

        Returns:
            (torch.Tensor): Processed predictions with shape (batch_size, min(max_det, num_anchors), 6) and last
                dimension format [x, y, w, h, max_class_prob, class_index].
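
        Examples:
            Illustrative only; the input is random and the shapes assume a 640x640 image at strides 8/16/32
            (8400 anchors) with the default 80 classes.

            >>> import torch
            >>> preds = torch.randn(2, 8400, 84)  # simulated raw output: [x, y, w, h] + 80 class scores
            >>> out = Detect.postprocess(preds, max_det=300, nc=80)
            >>> out.shape
            torch.Size([2, 300, 6])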
        """
        batch_size, anchors, _ = preds.shape  # i.e. shape(16, 8400, 84)
        boxes, scores = preds.split([4, nc], dim=-1)
        index = scores.amax(dim=-1).topk(min(max_det, anchors))[1].unsqueeze(-1)
        boxes = boxes.gather(dim=1, index=index.repeat(1, 1, 4))
        scores = scores.gather(dim=1, index=index.repeat(1, 1, nc))
        scores, index = scores.flatten(1).topk(min(max_det, anchors))
        i = torch.arange(batch_size)[..., None]  # batch indices
        return torch.cat([boxes[i, index // nc], scores[..., None], (index % nc)[..., None].float()], dim=-1)


class Segment(Detect):
    """YOLOv8 Segment head for segmentation models."""

    def __init__(self, nc=80, nm=32, npr=256, ch=()):
        """Initialize the YOLO model attributes such as the number of masks, prototypes, and the convolution layers."""
        super().__init__(nc, ch)
        self.nm = nm  # number of masks
        self.npr = npr  # number of protos
        self.proto = Proto(ch[0], self.npr, self.nm)  # protos

        c4 = max(ch[0] // 4, self.nm)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nm, 1)) for x in ch)

    def forward(self, x):
        """Return model outputs and mask coefficients if training, otherwise return outputs and mask coefficients."""
        p = self.proto(x[0])  # mask protos
        bs = p.shape[0]  # batch size

        mc = torch.cat([self.cv4[i](x[i]).view(bs, self.nm, -1) for i in range(self.nl)], 2)  # mask coefficients
        x = Detect.forward(self, x)
        if self.training:
            return x, mc, p
        return (torch.cat([x, mc], 1), p) if self.export else (torch.cat([x[0], mc], 1), (x[1], mc, p))


class OBB(Detect):
    """YOLOv8 OBB detection head for detection with rotation models."""

    def __init__(self, nc=80, ne=1, ch=()):
        """Initialize OBB with number of classes `nc` and layer channels `ch`."""
        super().__init__(nc, ch)
        self.ne = ne  # number of extra parameters

        c4 = max(ch[0] // 4, self.ne)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.ne, 1)) for x in ch)

    def forward(self, x):
        """Concatenates and returns predicted bounding boxes and class probabilities."""
        bs = x[0].shape[0]  # batch size
        angle = torch.cat([self.cv4[i](x[i]).view(bs, self.ne, -1) for i in range(self.nl)], 2)  # OBB theta logits
        # NOTE: set `angle` as an attribute so that `decode_bboxes` could use it.
        angle = (angle.sigmoid() - 0.25) * math.pi  # [-pi/4, 3pi/4]
        if not self.training:
            self.angle = angle
        x = Detect.forward(self, x)
        if self.training:
            return x, angle
        return torch.cat([x, angle], 1) if self.export else (torch.cat([x[0], angle], 1), (x[1], angle))

    def decode_bboxes(self, bboxes, anchors):
        """Decode rotated bounding boxes."""
        return dist2rbox(bboxes, self.angle, anchors, dim=1)


class Pose(Detect):
    """YOLOv8 Pose head for keypoints models."""

    def __init__(self, nc=80, kpt_shape=(17, 3), ch=()):
        """Initialize YOLO network with default parameters and Convolutional Layers."""
        super().__init__(nc, ch)
        self.kpt_shape = kpt_shape  # number of keypoints, number of dims (2 for x,y or 3 for x,y,visible)
        self.nk = kpt_shape[0] * kpt_shape[1]  # number of keypoints total

        c4 = max(ch[0] // 4, self.nk)
        self.cv4 = nn.ModuleList(nn.Sequential(Conv(x, c4, 3), Conv(c4, c4, 3), nn.Conv2d(c4, self.nk, 1)) for x in ch)

    def forward(self, x):
        """Perform forward pass through YOLO model and return predictions."""
        bs = x[0].shape[0]  # batch size
        kpt = torch.cat([self.cv4[i](x[i]).view(bs, self.nk, -1) for i in range(self.nl)], -1)  # (bs, 17*3, h*w)
        x = Detect.forward(self, x)
        if self.training:
            return x, kpt
        pred_kpt = self.kpts_decode(bs, kpt)
        return torch.cat([x, pred_kpt], 1) if self.export else (torch.cat([x[0], pred_kpt], 1), (x[1], kpt))

    def kpts_decode(self, bs, kpts):
        """Decodes keypoints."""
        ndim = self.kpt_shape[1]
        if self.export:  # required for TFLite export to avoid 'PLACEHOLDER_FOR_GREATER_OP_CODES' bug
            y = kpts.view(bs, *self.kpt_shape, -1)
            a = (y[:, :, :2] * 2.0 + (self.anchors - 0.5)) * self.strides
            if ndim == 3:
                a = torch.cat((a, y[:, :, 2:3].sigmoid()), 2)
            return a.view(bs, self.nk, -1)
        else:
            y = kpts.clone()
            if ndim == 3:
                y[:, 2::3] = y[:, 2::3].sigmoid()  # sigmoid (WARNING: inplace .sigmoid_() fails TFLite export)
            y[:, 0::ndim] = (y[:, 0::ndim] * 2.0 + (self.anchors[0] - 0.5)) * self.strides
            y[:, 1::ndim] = (y[:, 1::ndim] * 2.0 + (self.anchors[1] - 0.5)) * self.strides
            return y


class Classify(nn.Module):
    """YOLOv8 classification head, i.e. x(b,c1,20,20) to x(b,c2)."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1):
        """Initializes YOLOv8 classification head to transform input tensor from (b,c1,20,20) to (b,c2) shape."""
        super().__init__()
        c_ = 1280  # efficientnet_b0 size
        self.conv = Conv(c1, c_, k, s, p, g)
        self.pool = nn.AdaptiveAvgPool2d(1)  # to x(b,c_,1,1)
        self.drop = nn.Dropout(p=0.0, inplace=True)
        self.linear = nn.Linear(c_, c2)  # to x(b,c2)

    def forward(self, x):
        """Performs a forward pass of the YOLO model on input image data."""
        if isinstance(x, list):
            x = torch.cat(x, 1)
        x = self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
        return x if self.training else x.softmax(1)


class WorldDetect(Detect):
    """Head for integrating YOLOv8 detection models with semantic understanding from text embeddings."""

    def __init__(self, nc=80, embed=512, with_bn=False, ch=()):
        """Initialize YOLOv8 detection layer with nc classes and layer channels ch."""
        super().__init__(nc, ch)
        c3 = max(ch[0], min(self.nc, 100))
        self.cv3 = nn.ModuleList(nn.Sequential(Conv(x, c3, 3), Conv(c3, c3, 3), nn.Conv2d(c3, embed, 1)) for x in ch)
        self.cv4 = nn.ModuleList(BNContrastiveHead(embed) if with_bn else ContrastiveHead() for _ in ch)

    def forward(self, x, text):
        """Concatenates and returns predicted bounding boxes and class probabilities."""
        for i in range(self.nl):
            x[i] = torch.cat((self.cv2[i](x[i]), self.cv4[i](self.cv3[i](x[i]), text)), 1)
        if self.training:
            return x

        # Inference path
        shape = x[0].shape  # BCHW
        x_cat = torch.cat([xi.view(shape[0], self.nc + self.reg_max * 4, -1) for xi in x], 2)
        if self.dynamic or self.shape != shape:
            self.anchors, self.strides = (x.transpose(0, 1) for x in make_anchors(x, self.stride, 0.5))
            self.shape = shape

        if self.export and self.format in {"saved_model", "pb", "tflite", "edgetpu", "tfjs"}:  # avoid TF FlexSplitV ops
            box = x_cat[:, : self.reg_max * 4]
            cls = x_cat[:, self.reg_max * 4 :]
        else:
            box, cls = x_cat.split((self.reg_max * 4, self.nc), 1)

        if self.export and self.format in {"tflite", "edgetpu"}:
            # Precompute the normalization factor to increase numerical stability
            grid_h = shape[2]
            grid_w = shape[3]
            grid_size = torch.tensor([grid_w, grid_h, grid_w, grid_h], device=box.device).reshape(1, 4, 1)
            norm = self.strides / (self.stride[0] * grid_size)
            dbox = self.decode_bboxes(self.dfl(box) * norm, self.anchors.unsqueeze(0) * norm[:, :2])
        else:
            dbox = self.decode_bboxes(self.dfl(box), self.anchors.unsqueeze(0)) * self.strides

        y = torch.cat((dbox, cls.sigmoid()), 1)
        return y if self.export else (y, x)

    def bias_init(self):
        """Initialize Detect() biases, WARNING: requires stride availability."""
        m = self  # self.model[-1]  # Detect() module
        for a, b, s in zip(m.cv2, m.cv3, m.stride):  # from
            a[-1].bias.data[:] = 1.0  # box


class RTDETRDecoder(nn.Module):
    """
    Real-Time Deformable Transformer Decoder (RTDETRDecoder) module for object detection.

    This decoder module utilizes Transformer architecture along with deformable convolutions to predict bounding boxes
    and class labels for objects in an image. It integrates features from multiple layers and runs through a series of
    Transformer decoder layers to output the final predictions.
    """

    export = False  # export mode

    def __init__(
        self,
        nc=80,
        ch=(512, 1024, 2048),
        hd=256,  # hidden dim
        nq=300,  # num queries
        ndp=4,  # num decoder points
        nh=8,  # num head
        ndl=6,  # num decoder layers
        d_ffn=1024,  # dim of feedforward
        dropout=0.0,
        act=nn.ReLU(),
        eval_idx=-1,
        # Training args
        nd=100,  # num denoising
        label_noise_ratio=0.5,
        box_noise_scale=1.0,
        learnt_init_query=False,
    ):
        """
        Initializes the RTDETRDecoder module with the given parameters.

        Args:
            nc (int): Number of classes. Default is 80.
            ch (tuple): Channels in the backbone feature maps. Default is (512, 1024, 2048).
            hd (int): Dimension of hidden layers. Default is 256.
            nq (int): Number of query points. Default is 300.
            ndp (int): Number of decoder points. Default is 4.
            nh (int): Number of heads in multi-head attention. Default is 8.
            ndl (int): Number of decoder layers. Default is 6.
            d_ffn (int): Dimension of the feed-forward networks. Default is 1024.
            dropout (float): Dropout rate. Default is 0.
            act (nn.Module): Activation function. Default is nn.ReLU.
            eval_idx (int): Evaluation index. Default is -1.
            nd (int): Number of denoising. Default is 100.
            label_noise_ratio (float): Label noise ratio. Default is 0.5.
            box_noise_scale (float): Box noise scale. Default is 1.0.
            learnt_init_query (bool): Whether to learn initial query embeddings. Default is False.
        """
        super().__init__()
        self.hidden_dim = hd
        self.nhead = nh
        self.nl = len(ch)  # num level
        self.nc = nc
        self.num_queries = nq
        self.num_decoder_layers = ndl

        # Backbone feature projection
        self.input_proj = nn.ModuleList(nn.Sequential(nn.Conv2d(x, hd, 1, bias=False), nn.BatchNorm2d(hd)) for x in ch)

        # Transformer module
        decoder_layer = DeformableTransformerDecoderLayer(hd, nh, d_ffn, dropout, act, self.nl, ndp)
        self.decoder = DeformableTransformerDecoder(hd, decoder_layer, ndl, eval_idx)

        # Denoising part
        self.denoising_class_embed = nn.Embedding(nc, hd)
        self.num_denoising = nd
        self.label_noise_ratio = label_noise_ratio
        self.box_noise_scale = box_noise_scale

        # Decoder embedding
        self.learnt_init_query = learnt_init_query
        if learnt_init_query:
            self.tgt_embed = nn.Embedding(nq, hd)
        self.query_pos_head = MLP(4, 2 * hd, hd, num_layers=2)

        # Encoder head
        self.enc_output = nn.Sequential(nn.Linear(hd, hd), nn.LayerNorm(hd))
        self.enc_score_head = nn.Linear(hd, nc)
        self.enc_bbox_head = MLP(hd, hd, 4, num_layers=3)

        # Decoder head
        self.dec_score_head = nn.ModuleList([nn.Linear(hd, nc) for _ in range(ndl)])
        self.dec_bbox_head = nn.ModuleList([MLP(hd, hd, 4, num_layers=3) for _ in range(ndl)])

        self._reset_parameters()

    def forward(self, x, batch=None):
        """Runs the forward pass of the module, returning bounding box and classification scores for the input."""
        from ultralytics.models.utils.ops import get_cdn_group

        # Input projection and embedding
        feats, shapes = self._get_encoder_input(x)

        # Prepare denoising training
        dn_embed, dn_bbox, attn_mask, dn_meta = get_cdn_group(
            batch,
            self.nc,
            self.num_queries,
            self.denoising_class_embed.weight,
            self.num_denoising,
            self.label_noise_ratio,
            self.box_noise_scale,
            self.training,
        )

        embed, refer_bbox, enc_bboxes, enc_scores = self._get_decoder_input(feats, shapes, dn_embed, dn_bbox)

        # Decoder
        dec_bboxes, dec_scores = self.decoder(
            embed,
            refer_bbox,
            feats,
            shapes,
            self.dec_bbox_head,
            self.dec_score_head,
            self.query_pos_head,
            attn_mask=attn_mask,
        )
        x = dec_bboxes, dec_scores, enc_bboxes, enc_scores, dn_meta
        if self.training:
            return x
        # (bs, 300, 4+nc)
        y = torch.cat((dec_bboxes.squeeze(0), dec_scores.squeeze(0).sigmoid()), -1)
        return y if self.export else (y, x)

    def _generate_anchors(self, shapes, grid_size=0.05, dtype=torch.float32, device="cpu", eps=1e-2):
        """Generates anchor bounding boxes for given shapes with specific grid size and validates them."""
        anchors = []
        for i, (h, w) in enumerate(shapes):
            sy = torch.arange(end=h, dtype=dtype, device=device)
            sx = torch.arange(end=w, dtype=dtype, device=device)
            grid_y, grid_x = torch.meshgrid(sy, sx, indexing="ij") if TORCH_1_10 else torch.meshgrid(sy, sx)
            grid_xy = torch.stack([grid_x, grid_y], -1)  # (h, w, 2)

            valid_WH = torch.tensor([w, h], dtype=dtype, device=device)
            grid_xy = (grid_xy.unsqueeze(0) + 0.5) / valid_WH  # (1, h, w, 2)
            wh = torch.ones_like(grid_xy, dtype=dtype, device=device) * grid_size * (2.0**i)
            anchors.append(torch.cat([grid_xy, wh], -1).view(-1, h * w, 4))  # (1, h*w, 4)

        anchors = torch.cat(anchors, 1)  # (1, h*w*nl, 4)
        valid_mask = ((anchors > eps) & (anchors < 1 - eps)).all(-1, keepdim=True)  # (1, h*w*nl, 1)
        anchors = torch.log(anchors / (1 - anchors))
        anchors = anchors.masked_fill(~valid_mask, float("inf"))
        return anchors, valid_mask

    def _get_encoder_input(self, x):
        """Processes and returns encoder inputs by getting projection features from input and concatenating them."""
        # Get projection features
        x = [self.input_proj[i](feat) for i, feat in enumerate(x)]
        # Get encoder inputs
        feats = []
        shapes = []
        for feat in x:
            h, w = feat.shape[2:]
            # [b, c, h, w] -> [b, h*w, c]
            feats.append(feat.flatten(2).permute(0, 2, 1))
            # [nl, 2]
            shapes.append([h, w])

        # [b, h*w, c]
        feats = torch.cat(feats, 1)
        return feats, shapes

    def _get_decoder_input(self, feats, shapes, dn_embed=None, dn_bbox=None):
        """Generates and prepares the input required for the decoder from the provided features and shapes."""
        bs = feats.shape[0]
        # Prepare input for decoder
        anchors, valid_mask = self._generate_anchors(shapes, dtype=feats.dtype, device=feats.device)
        features = self.enc_output(valid_mask * feats)  # bs, h*w, 256

        enc_outputs_scores = self.enc_score_head(features)  # (bs, h*w, nc)

        # Query selection
        # (bs, num_queries)
        topk_ind = torch.topk(enc_outputs_scores.max(-1).values, self.num_queries, dim=1).indices.view(-1)
        # (bs, num_queries)
        batch_ind = torch.arange(end=bs, dtype=topk_ind.dtype).unsqueeze(-1).repeat(1, self.num_queries).view(-1)

        # (bs, num_queries, 256)
        top_k_features = features[batch_ind, topk_ind].view(bs, self.num_queries, -1)
        # (bs, num_queries, 4)
        top_k_anchors = anchors[:, topk_ind].view(bs, self.num_queries, -1)

        # Dynamic anchors + static content
        refer_bbox = self.enc_bbox_head(top_k_features) + top_k_anchors

        enc_bboxes = refer_bbox.sigmoid()
        if dn_bbox is not None:
            refer_bbox = torch.cat([dn_bbox, refer_bbox], 1)
        enc_scores = enc_outputs_scores[batch_ind, topk_ind].view(bs, self.num_queries, -1)

        embeddings = self.tgt_embed.weight.unsqueeze(0).repeat(bs, 1, 1) if self.learnt_init_query else top_k_features
        if self.training:
            refer_bbox = refer_bbox.detach()
            if not self.learnt_init_query:
                embeddings = embeddings.detach()
        if dn_embed is not None:
            embeddings = torch.cat([dn_embed, embeddings], 1)

        return embeddings, refer_bbox, enc_bboxes, enc_scores

    def _reset_parameters(self):
        """Initializes or resets the parameters of the model's various components with predefined weights and biases."""
        # Class and bbox head init
        bias_cls = bias_init_with_prob(0.01) / 80 * self.nc
        constant_(self.enc_score_head.bias, bias_cls)
        constant_(self.enc_bbox_head.layers[-1].weight, 0.0)
        constant_(self.enc_bbox_head.layers[-1].bias, 0.0)
        for cls_, reg_layer in zip(self.dec_score_head, self.dec_bbox_head):
            constant_(cls_.bias, bias_cls)
            constant_(reg_layer.layers[-1].weight, 0.0)
            constant_(reg_layer.layers[-1].bias, 0.0)

        linear_init(self.enc_output[0])
        xavier_uniform_(self.enc_output[0].weight)
        if self.learnt_init_query:
            xavier_uniform_(self.tgt_embed.weight)
        xavier_uniform_(self.query_pos_head.layers[0].weight)
        xavier_uniform_(self.query_pos_head.layers[1].weight)
        for layer in self.input_proj:
            xavier_uniform_(layer[0].weight)


class v10Detect(Detect):
    """
    v10 Detection head from https://arxiv.org/pdf/2405.14458.

    Args:
        nc (int): Number of classes.
        ch (tuple): Tuple of channel sizes.

    Attributes:
        max_det (int): Maximum number of detections.

    Methods:
        __init__(self, nc=80, ch=()): Initializes the v10Detect object.
        forward(self, x): Performs forward pass of the v10Detect module.
        bias_init(self): Initializes biases of the Detect module.

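    Examples:
        An illustrative sketch; the channel widths and feature-map sizes below are hypothetical. In training
        mode the head returns a dict with the "one2many" and "one2one" branch outputs.

        >>> import torch
        >>> head = v10Detect(nc=80, ch=(256, 512, 512))
        >>> feats = [torch.rand(1, c, s, s) for c, s in zip((256, 512, 512), (80, 40, 20))]
        >>> out = head(feats)  # {"one2many": [...], "one2one": [...]} while head.training is True
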
    """

    end2end = True

    def __init__(self, nc=80, ch=()):
        """Initializes the v10Detect object with the specified number of classes and input channels."""
        super().__init__(nc, ch)
        c3 = max(ch[0], min(self.nc, 100))  # channels
        # Light cls head
        self.cv3 = nn.ModuleList(
            nn.Sequential(
                nn.Sequential(Conv(x, x, 3, g=x), Conv(x, c3, 1)),
                nn.Sequential(Conv(c3, c3, 3, g=c3), Conv(c3, c3, 1)),
                nn.Conv2d(c3, self.nc, 1),
            )
            for x in ch
        )
        self.one2one_cv3 = copy.deepcopy(self.cv3)
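

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): wires a bare Detect head to
# random feature maps to show the expected input/output contract. The channel
# widths, feature-map sizes, and strides below are assumptions that mimic a
# YOLOv8-style backbone at strides 8/16/32 on a 640x640 input; in a real model
# the builder computes `stride` and calls `bias_init()` for you.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    head = Detect(nc=80, ch=(64, 128, 256))
    head.stride = torch.tensor([8.0, 16.0, 32.0])  # normally set during model build
    head.eval()  # switch to the inference path
    feats = [torch.randn(1, c, s, s) for c, s in zip((64, 128, 256), (80, 40, 20))]
    y, raw = head(feats)  # decoded predictions plus raw per-level outputs
    print(y.shape)  # torch.Size([1, 84, 8400]): 4 box coords + 80 class scores per anchor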