import os
from typing import Any, Dict, List, Tuple, Optional, Callable

from torch import Tensor

from .folder import find_classes, make_dataset
from .video_utils import VideoClips
from .vision import VisionDataset


class UCF101(VisionDataset):
    """
    `UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset.

    UCF101 is an action recognition video dataset.
    This dataset considers every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``. The dataset itself can be downloaded from the dataset website;
    annotations that ``annotation_path`` should be pointing to can be downloaded from `here
    <https://www.crcv.ucf.edu/data/UCF101/UCF101TrainTestSplits-RecognitionTask.zip>`_.

    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.

    Internally, it uses a VideoClips object to handle clip creation.

    Args:
        root (string): Root directory of the UCF101 Dataset.
        annotation_path (str): path to the folder containing the split files;
            see docstring above for download instructions of these files
        frames_per_clip (int): number of frames in a clip.
        step_between_clips (int, optional): number of frames between each clip.
        fold (int, optional): which fold to use. Should be between 1 and 3.
        train (bool, optional): if ``True``, creates a dataset from the train split,
            otherwise from the ``test`` split.
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
        output_format (str, optional): The format of the output video tensors (before transforms).
            Can be either "THWC" (default) or "TCHW".

    Returns:
        tuple: A 3-tuple with the following entries:

            - video (Tensor[T, H, W, C] or Tensor[T, C, H, W]): The `T` video frames
            - audio (Tensor[K, L]): the audio frames, where `K` is the number of channels
               and `L` is the number of points
            - label (int): class of the video clip
    """

    def __init__(
        self,
        root: str,
        annotation_path: str,
        frames_per_clip: int,
        step_between_clips: int = 1,
        frame_rate: Optional[int] = None,
        fold: int = 1,
        train: bool = True,
        transform: Optional[Callable] = None,
        _precomputed_metadata: Optional[Dict[str, Any]] = None,
        num_workers: int = 1,
        _video_width: int = 0,
        _video_height: int = 0,
        _video_min_dimension: int = 0,
        _audio_samples: int = 0,
        output_format: str = "THWC",
    ) -> None:
        super().__init__(root)
        if not 1 <= fold <= 3:
            raise ValueError(f"fold should be between 1 and 3, got {fold}")

        extensions = ("avi",)
        self.fold = fold
        self.train = train

        self.classes, class_to_idx = find_classes(self.root)
        self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None)
        video_list = [x[0] for x in self.samples]
        video_clips = VideoClips(
            video_list,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
            output_format=output_format,
        )
        # Keep the full (un-subset) VideoClips around so that `metadata` can
        # report on every video, not just the ones in the selected fold.
        self.full_video_clips = video_clips
        self.indices = self._select_fold(video_list, annotation_path, fold, train)
        self.video_clips = video_clips.subset(self.indices)
        self.transform = transform

    @property
    def metadata(self) -> Dict[str, Any]:
        return self.full_video_clips.metadata

    def _select_fold(self, video_list: List[str], annotation_path: str, fold: int, train: bool) -> List[int]:
        # Split files are named e.g. trainlist01.txt / testlist03.txt.
        name = "train" if train else "test"
        name = f"{name}list{fold:02d}.txt"
        f = os.path.join(annotation_path, name)
        selected_files = set()
        with open(f) as fid:
            data = fid.readlines()
            # Each line is "<class>/<video>.avi [label]"; keep only the path
            # and resolve it against the dataset root.
            data = [x.strip().split(" ")[0] for x in data]
            data = [os.path.join(self.root, *x.split("/")) for x in data]
            selected_files.update(data)
        indices = [i for i in range(len(video_list)) if video_list[i] in selected_files]
        return indices

    def __len__(self) -> int:
        return self.video_clips.num_clips()

    def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
        video, audio, info, video_idx = self.video_clips.get_clip(idx)
        label = self.samples[self.indices[video_idx]][1]

        if self.transform is not None:
            video = self.transform(video)

        return video, audio, label
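

# ---------------------------------------------------------------------------
# Usage sketch (an addition, not part of the upstream module): a minimal,
# hedged example of building the dataset and reading one clip. The paths are
# placeholders -- point ``root`` at the extracted class folders of .avi files
# and ``annotation_path`` at the official split folder. With the data in
# place, this runs via ``python -m torchvision.datasets.ucf101``.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dataset = UCF101(
        root="/data/UCF101/videos",  # placeholder: one subfolder per class
        annotation_path="/data/UCF101/splits",  # placeholder: trainlist01.txt etc.
        frames_per_clip=5,
        step_between_clips=5,
        fold=1,
        train=True,
    )
    print(f"clips in fold 1 (train): {len(dataset)}")

    video, audio, label = dataset[0]
    # With the default output_format="THWC", video has shape [T, H, W, C].
    print(video.shape, audio.shape, dataset.classes[label])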