"""Functions for estimating the best YOLO batch size to use a fraction of the available CUDA memory in PyTorch."""

from copy import deepcopy

import numpy as np
import torch

from ultralytics.utils import DEFAULT_CFG, LOGGER, colorstr
from ultralytics.utils.torch_utils import autocast, profile


def check_train_batch_size(model, imgsz=640, amp=True, batch=-1):
    """
    Compute optimal YOLO training batch size using the autobatch() function.

    Args:
        model (torch.nn.Module): YOLO model to check batch size for.
        imgsz (int, optional): Image size used for training.
        amp (bool, optional): Use automatic mixed precision if True.
        batch (float, optional): Fraction of GPU memory to use. If -1, use default.

    Returns:
        (int): Optimal batch size computed using the autobatch() function.

    Note:
        If 0.0 < batch < 1.0, it's used as the fraction of GPU memory to use.
        Otherwise, a default fraction of 0.6 is used.
    """
    with autocast(enabled=amp):
        return autobatch(deepcopy(model).train(), imgsz, fraction=batch if 0.0 < batch < 1.0 else 0.6)


def autobatch(model, imgsz=640, fraction=0.60, batch_size=DEFAULT_CFG.batch):
    """
    Automatically estimate the best YOLO batch size to use a fraction of the available CUDA memory.

    Args:
        model (torch.nn.Module): YOLO model to compute batch size for.
        imgsz (int, optional): The image size used as input for the YOLO model. Defaults to 640.
        fraction (float, optional): The fraction of available CUDA memory to use. Defaults to 0.60.
        batch_size (int, optional): The default batch size to use if an error is detected. Defaults to 16.

    Returns:
        (int): The optimal batch size.
    """
    # Check device
    prefix = colorstr("AutoBatch: ")
    LOGGER.info(f"{prefix}Computing optimal batch size for imgsz={imgsz} at {fraction * 100}% CUDA memory utilization.")
    device = next(model.parameters()).device  # get model device
    if device.type in {"cpu", "mps"}:
        LOGGER.info(f"{prefix} ⚠️ intended for CUDA devices, using default batch-size {batch_size}")
        return batch_size
    if torch.backends.cudnn.benchmark:
        LOGGER.info(f"{prefix} ⚠️ Requires torch.backends.cudnn.benchmark=False, using default batch-size {batch_size}")
        return batch_size

    # Inspect CUDA memory
    gb = 1 << 30  # bytes to GiB (1024 ** 3)
    d = str(device).upper()  # e.g. 'CUDA:0'
    properties = torch.cuda.get_device_properties(device)  # device properties
    t = properties.total_memory / gb  # GiB total
    r = torch.cuda.memory_reserved(device) / gb  # GiB reserved
    a = torch.cuda.memory_allocated(device) / gb  # GiB allocated
    f = t - (r + a)  # GiB free
    LOGGER.info(f"{prefix}{d} ({properties.name}) {t:.2f}G total, {r:.2f}G reserved, {a:.2f}G allocated, {f:.2f}G free")

    # Profile memory usage at a few batch sizes
    batch_sizes = [1, 2, 4, 8, 16]
    try:
        img = [torch.empty(b, 3, imgsz, imgsz) for b in batch_sizes]
        results = profile(img, model, n=3, device=device)

        # Fit a solution
        y = [x[2] for x in results if x]  # memory usage [2] of each successful profile
        p = np.polyfit(batch_sizes[: len(y)], y, deg=1)  # first-degree polynomial fit of memory vs batch size
        b = int((f * fraction - p[1]) / p[0])  # batch size at which the fit reaches the target fraction of free memory
        if None in results:  # some sizes failed
            i = results.index(None)  # first fail index
            if b >= batch_sizes[i]:  # intercept above failure point
                b = batch_sizes[max(i - 1, 0)]  # select prior safe point
        if b < 1 or b > 1024:  # b outside of safe range
            b = batch_size
            LOGGER.info(f"{prefix}WARNING ⚠️ CUDA anomaly detected, using default batch-size {batch_size}.")

        fraction = (np.polyval(p, b) + r + a) / t  # predicted fraction of total memory actually used
        LOGGER.info(f"{prefix}Using batch-size {b} for {d} {t * fraction:.2f}G/{t:.2f}G ({fraction * 100:.0f}%) ✅")
        return b
    except Exception as e:
        LOGGER.warning(f"{prefix}WARNING ⚠️ error detected: {e},  using default batch-size {batch_size}.")
        return batch_size

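
# Minimal usage sketch (illustrative, not part of the original module): it assumes the
# `ultralytics` package and a CUDA device are available, and "yolov8n.pt" is only an
# example checkpoint name. check_train_batch_size() deep-copies the model, profiles a
# few batch sizes, and returns the size predicted to fill ~60% of free CUDA memory.
if __name__ == "__main__":
    from ultralytics import YOLO

    nn_model = YOLO("yolov8n.pt").model.cuda()  # underlying torch.nn.Module on CUDA
    best_batch = check_train_batch_size(nn_model, imgsz=640, amp=True, batch=-1)
    print(f"Estimated training batch size: {best_batch}")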