# Ultralytics YOLO 🚀, AGPL-3.0 license

"""
SAM model interface.

This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image
segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis,
and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new
image distributions and tasks without prior knowledge.

Key Features:
    - Promptable segmentation
    - Real-time performance
    - Zero-shot transfer capabilities
    - Trained on SA-1B dataset
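
Examples:
    Typical usage (assuming a local "sam_b.pt" checkpoint and an "image.jpg" source are available):
    >>> from ultralytics import SAM
    >>> sam = SAM("sam_b.pt")
    >>> results = sam("image.jpg", points=[[500, 375]])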
    )Path)Model)
model_info   )	build_sam)	PredictorSAM2Predictorc                       sf   e Zd ZdZddd fddZdeddd	Zd fdd	ZdddZdddZ	e
dd Z  ZS )SAMa  
    SAM (Segment Anything Model) interface class for real-time image segmentation tasks.

    This class provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for
    promptable segmentation with versatility in image analysis. It supports various prompts such as bounding
    boxes, points, or labels, and features zero-shot performance capabilities.

    Attributes:
        model (torch.nn.Module): The loaded SAM model.
        is_sam2 (bool): Indicates whether the model is SAM2 variant.
        task (str): The task type, set to "segment" for SAM models.

    Methods:
        predict: Performs segmentation prediction on the given image or video source.
        info: Logs information about the SAM model.

    Examples:
        >>> sam = SAM("sam_b.pt")
        >>> results = sam.predict("image.jpg", points=[[500, 375]])
        >>> for r in results:
        ...     print(f"Detected {len(r.masks)} masks")
    """

    def __init__(self, model="sam_b.pt") -> None:
        """
        Initializes the SAM (Segment Anything Model) instance.

        Args:
            model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension.

        Raises:
            NotImplementedError: If the model file extension is not .pt or .pth.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> print(sam.is_sam2)
        >   z.ptz.pthz8SAM prediction requires pre-trained *.pt or *.pth model.Zsam2segment)modeltaskN)r   suffixNotImplementedErrorstemis_sam2super__init__)selfr   	__class__ X/var/www/html/django/DPS/env/lib/python3.9/site-packages/ultralytics/models/sam/model.pyr   2   s    zSAM.__init__)weightsc                 C   s   t || _dS )aK  
        Loads the specified weights into the SAM model.

        This method initializes the SAM model with the provided weights file, setting up the model architecture
        and loading the pre-trained parameters.

        Args:
            weights (str): Path to the weights file. Should be a .pt or .pth file containing the model parameters.
            task (str | None): Task name. If provided, it specifies the particular task the model is being loaded for.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> sam._load("path/to/custom_weights.pt")
        N)r   r   )r   r   r   r   r   r   _loadE   s    z	SAM._loadFc           	         sD   t ddddd}i ||}t |||d}t j||fd|i|S )a  
        Performs segmentation prediction on the given image or video source.

        Args:
            source (str | PIL.Image | numpy.ndarray): Path to the image or video file, or a PIL.Image object, or
                a numpy.ndarray object.
            stream (bool): If True, enables real-time streaming.
            bboxes (List[List[float]] | None): List of bounding box coordinates for prompted segmentation.
            points (List[List[float]] | None): List of points for prompted segmentation.
            labels (List[int] | None): List of labels for prompted segmentation.
            **kwargs (Any): Additional keyword arguments for prediction.

        Returns:
            (List): The model predictions.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> results = sam.predict("image.jpg", points=[[500, 375]])
            >>> for r in results:
            ...     print(f"Detected {len(r.masks)} masks")
        """
        overrides = dict(conf=0.25, task="segment", mode="predict", imgsz=1024)
        kwargs = {**overrides, **kwargs}
        prompts = dict(bboxes=bboxes, points=points, labels=labels)
        return super().predict(source, stream, prompts=prompts, **kwargs)

    def __call__(self, source=None, stream=False, bboxes=None, points=None, labels=None, **kwargs):
        """
        Performs segmentation prediction on the given image or video source.

        This method is an alias for the 'predict' method, providing a convenient way to call the SAM model
        for segmentation tasks.

        Args:
            source (str | PIL.Image | numpy.ndarray | None): Path to the image or video file, or a PIL.Image
                object, or a numpy.ndarray object.
            stream (bool): If True, enables real-time streaming.
            bboxes (List[List[float]] | None): List of bounding box coordinates for prompted segmentation.
            points (List[List[float]] | None): List of points for prompted segmentation.
            labels (List[int] | None): List of labels for prompted segmentation.
            **kwargs (Any): Additional keyword arguments to be passed to the predict method.

        Returns:
            (List): The model predictions, typically containing segmentation masks and other relevant information.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> results = sam("image.jpg", points=[[500, 375]])
            >>> print(f"Detected {len(results[0].masks)} masks")
        """
        return self.predict(source, stream, bboxes, points, labels, **kwargs)

    def info(self, detailed=False, verbose=True):
        """
        Logs information about the SAM model.

        This method provides details about the Segment Anything Model (SAM), including its architecture,
        parameters, and computational requirements.

        Args:
            detailed (bool): If True, displays detailed information about the model layers and operations.
            verbose (bool): If True, prints the information to the console.

        Returns:
            (Tuple): A tuple containing the model's information (string representations of the model).

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> info = sam.info()
            >>> print(info[0])  # Print summary information
        """
        return model_info(self.model, detailed=detailed, verbose=verbose)

    @property
    def task_map(self):
        """
        Provides a mapping from the 'segment' task to its corresponding 'Predictor'.

        Returns:
            (Dict[str, Type[Predictor]]): A dictionary mapping the 'segment' task to its corresponding Predictor
                class. For SAM2 models, it maps to SAM2Predictor, otherwise to the standard Predictor.

        Examples:
            >>> sam = SAM("sam_b.pt")
            >>> task_map = sam.task_map
            >>> print(task_map)
            {'segment': <class 'ultralytics.models.sam.predict.Predictor'>}
        r   Z	predictor)r   r   r   )r   r   r   r   task_map   s    zSAM.task_map)r
   )N)FNNN)NFNNN)FT)__name__
__module____qualname____doc__r   strr   r   r'   r*   propertyr+   __classcell__r   r   r   r   r	      s   

r	   N)r/   pathlibr   Zultralytics.engine.modelr   Zultralytics.utils.torch_utilsr   buildr   r   r   r   r	   r   r   r   r   <module>   s   
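

# Minimal usage sketch: "sam_b.pt" and "image.jpg" below are placeholder
# filenames (assumptions, not files this module ships). Running the file
# directly exercises prompted segmentation with a single foreground point.
if __name__ == "__main__":
    sam = SAM("sam_b.pt")  # SAM2 weights such as "sam2_b.pt" route to SAM2Predictor
    sam.info(verbose=True)  # log a model summary
    # Prompt with one point; bboxes and labels are passed the same way.
    results = sam.predict("image.jpg", points=[[500, 375]])
    for r in results:
        print(f"Detected {len(r.masks)} masks")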