import bisect
import warnings
from typing import (
    Generic,
    Iterable,
    Iterator,
    List,
    Optional,
    Sequence,
    Tuple,
    TypeVar,
)

from torch import default_generator, randperm
from torch._utils import _accumulate

from ... import Generator, Tensor

__all__ = [
    "Dataset",
    "IterableDataset",
    "TensorDataset",
    "ConcatDataset",
    "ChainDataset",
    "Subset",
    "random_split",
]

T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')


class Dataset(Generic[T_co]):
S )r   a  An abstract class representing a :class:`Dataset`.

    All datasets that represent a map from keys to data samples should subclass
    it. All subclasses should overwrite :meth:`__getitem__`, supporting fetching a
    data sample for a given key. Subclasses could also optionally overwrite
    :meth:`__len__`, which is expected to return the size of the dataset by many
    :class:`~torch.utils.data.Sampler` implementations and the default options
    of :class:`~torch.utils.data.DataLoader`.

    .. note::
      :class:`~torch.utils.data.DataLoader` by default constructs an index
      sampler that yields integral indices.  To make it work with a map-style
      dataset with non-integral indices/keys, a custom sampler must be provided.
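
    Example (an illustrative sketch; ``SquaresDataset`` is a made-up subclass,
    shown only to demonstrate the map-style interface)::

        >>> class SquaresDataset(torch.utils.data.Dataset):
        ...     def __init__(self, n):
        ...         self.n = n
        ...     def __getitem__(self, index):
        ...         return index ** 2
        ...     def __len__(self):
        ...         return self.n
        ...
        >>> ds = SquaresDataset(4)
        >>> [ds[i] for i in range(len(ds))]
        [0, 1, 4, 9]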
    """

    def __getitem__(self, index) -> T_co:
        raise NotImplementedError

    def __add__(self, other: 'Dataset[T_co]') -> 'ConcatDataset[T_co]':
        return ConcatDataset([self, other])


class IterableDataset(Dataset[T_co]):
    r"""An iterable Dataset.

    All datasets that represent an iterable of data samples should subclass it.
    This form of dataset is particularly useful when data come from a stream.

    All subclasses should overwrite :meth:`__iter__`, which would return an
    iterator of samples in this dataset.

    When a subclass is used with :class:`~torch.utils.data.DataLoader`, each
    item in the dataset will be yielded from the :class:`~torch.utils.data.DataLoader`
    iterator. When :attr:`num_workers > 0`, each worker process will have a
    different copy of the dataset object, so it is often desired to configure
    each copy independently to avoid having duplicate data returned from the
    workers. :func:`~torch.utils.data.get_worker_info`, when called in a worker
    process, returns information about the worker. It can be used in either the
    dataset's :meth:`__iter__` method or the :class:`~torch.utils.data.DataLoader` 's
    :attr:`worker_init_fn` option to modify each copy's behavior.

    Example 1: splitting workload across all workers in :meth:`__iter__`::

        >>> class MyIterableDataset(torch.utils.data.IterableDataset):
        ...     def __init__(self, start, end):
        ...         super().__init__()
        ...         assert end > start, "this example code only works with end > start"
        ...         self.start = start
        ...         self.end = end
        ...
        ...     def __iter__(self):
        ...         worker_info = torch.utils.data.get_worker_info()
        ...         if worker_info is None:  # single-process data loading, return the full iterator
        ...             iter_start = self.start
        ...             iter_end = self.end
        ...         else:  # in a worker process
        ...             # split workload
        ...             per_worker = int(math.ceil((self.end - self.start) / float(worker_info.num_workers)))
        ...             worker_id = worker_info.id
        ...             iter_start = self.start + worker_id * per_worker
        ...             iter_end = min(iter_start + per_worker, self.end)
        ...         return iter(range(iter_start, iter_end))
        ...
        >>> # should give the same set of data as range(3, 7), i.e., [3, 4, 5, 6].
        >>> ds = MyIterableDataset(start=3, end=7)

        >>> # Single-process loading
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
        [3, 4, 5, 6]

        >>> # Multi-process loading with two worker processes
        >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
        [3, 5, 4, 6]

        >>> # With even more workers
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=20)))
        [3, 4, 5, 6]

    Example 2: splitting workload across all workers using :attr:`worker_init_fn`::

        >>> class MyIterableDataset(torch.utils.data.IterableDataset):
        ...     def __init__(self, start, end):
        ...         super().__init__()
        ...         assert end > start, "this example code only works with end > start"
        ...         self.start = start
        ...         self.end = end
        ...
        ...     def __iter__(self):
        ...         return iter(range(self.start, self.end))
        ...
        >>> # should give the same set of data as range(3, 7), i.e., [3, 4, 5, 6].
        >>> ds = MyIterableDataset(start=3, end=7)

        >>> # Single-process loading
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=0)))
        [3, 4, 5, 6]
        >>>
        >>> # Directly doing multi-process loading yields duplicate data
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2)))
        [3, 3, 4, 4, 5, 5, 6, 6]

        >>> # Define a `worker_init_fn` that configures each dataset copy differently
        >>> def worker_init_fn(worker_id):
        ...     worker_info = torch.utils.data.get_worker_info()
        ...     dataset = worker_info.dataset  # the dataset copy in this worker process
        ...     overall_start = dataset.start
        ...     overall_end = dataset.end
        ...     # configure the dataset to only process the split workload
        ...     per_worker = int(math.ceil((overall_end - overall_start) / float(worker_info.num_workers)))
        ...     worker_id = worker_info.id
        ...     dataset.start = overall_start + worker_id * per_worker
        ...     dataset.end = min(dataset.start + per_worker, overall_end)
        ...

        >>> # Multi-process loading with the custom `worker_init_fn`
        >>> # Worker 0 fetched [3, 4].  Worker 1 fetched [5, 6].
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=2, worker_init_fn=worker_init_fn)))
        [3, 5, 4, 6]

        >>> # With even more workers
        >>> print(list(torch.utils.data.DataLoader(ds, num_workers=20, worker_init_fn=worker_init_fn)))
        [3, 4, 5, 6]
    """

    def __iter__(self) -> Iterator[T_co]:
        raise NotImplementedError

    def __add__(self, other: Dataset[T_co]):
        return ChainDataset([self, other])


class TensorDataset(Dataset[Tuple[Tensor, ...]]):
    r"""Dataset wrapping tensors.

    Each sample will be retrieved by indexing tensors along the first dimension.

    Args:
        *tensors (Tensor): tensors that have the same size in the first dimension.
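
    Example (an illustrative sketch; the tensor shapes are arbitrary)::

        >>> inputs = torch.randn(5, 3)   # 5 samples with 3 features each
        >>> targets = torch.arange(5)    # 5 matching labels
        >>> ds = torch.utils.data.TensorDataset(inputs, targets)
        >>> len(ds)
        5
        >>> sample, label = ds[1]        # the tuple (inputs[1], targets[1])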
    """
    tensors: Tuple[Tensor, ...]

    def __init__(self, *tensors: Tensor) -> None:
        assert all(tensors[0].size(0) == tensor.size(0) for tensor in tensors), \
            "Size mismatch between tensors"
        self.tensors = tensors

    def __getitem__(self, index):
        return tuple(tensor[index] for tensor in self.tensors)

    def __len__(self):
        return self.tensors[0].size(0)


class ConcatDataset(Dataset[T_co]):
    r"""Dataset as a concatenation of multiple datasets.

    This class is useful for assembling different existing datasets.

    Args:
        datasets (sequence): List of datasets to be concatenated
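
    Example (an illustrative sketch; the constituent datasets are arbitrary)::

        >>> ds1 = torch.utils.data.TensorDataset(torch.zeros(2, 1))
        >>> ds2 = torch.utils.data.TensorDataset(torch.ones(3, 1))
        >>> ds = torch.utils.data.ConcatDataset([ds1, ds2])
        >>> len(ds)   # 2 + 3
        5
        >>> ds[2]     # global index 2 falls in ds2; it is ds2's first sample
        (tensor([1.]),)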
    datasetscumulative_sizesc                 C   s6   g d }}| D ]"}t |}|||  ||7 }q|S r:   )lenappend)sequencerselr"   r"   r#   cumsum   s    

zConcatDataset.cumsumNr=   r   c                    s^   t t|   t|| _t| jdks.J d| jD ]}t|tr4J dq4| | j| _	d S )Nr   z(datasets should not be an empty iterablez.ConcatDataset does not support IterableDataset)
superr   r7   listr=   r?   
isinstancer   rF   r>   )r    r=   d	__class__r"   r#   r7      s    

zConcatDataset.__init__c                 C   s
   | j d S )N)r>   r,   r"   r"   r#   r;      s    zConcatDataset.__len__c                 C   sf   |dk r*| t | krtdt | | }t| j|}|dkrF|}n|| j|d   }| j| | S )Nr   z8absolute value of index should not exceed dataset length   )r?   
ValueErrorbisectbisect_rightr>   r=   )r    idxZdataset_idxZ
sample_idxr"   r"   r#   r$      s    zConcatDataset.__getitem__c                 C   s   t jdtdd | jS )Nz:cummulative_sizes attribute is renamed to cumulative_sizes   )
stacklevel)warningswarnDeprecationWarningr>   r,   r"   r"   r#   cummulative_sizes   s    zConcatDataset.cummulative_sizes)r(   r)   r*   r+   r   r   r   r<   intstaticmethodrF   r   r7   r;   r$   propertyrY   __classcell__r"   r"   rL   r#   r      s   

r   c                       s<   e Zd ZdZee dd fddZdd Zdd	 Z  Z	S )
r   a_  Dataset for chaining multiple :class:`IterableDataset` s.

    This class is useful for assembling different existing dataset streams. The
    chaining operation is done on-the-fly, so concatenating large-scale
    datasets with this class will be efficient.

    Args:
        datasets (iterable of IterableDataset): datasets to be chained together
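
    Example (an illustrative sketch; ``RangeIterable`` is a made-up helper
    defined only for the demonstration)::

        >>> class RangeIterable(torch.utils.data.IterableDataset):
        ...     def __init__(self, start, end):
        ...         self.start, self.end = start, end
        ...     def __iter__(self):
        ...         return iter(range(self.start, self.end))
        ...
        >>> ds = torch.utils.data.ChainDataset([RangeIterable(0, 2), RangeIterable(2, 4)])
        >>> list(ds)   # streams the first dataset, then the second
        [0, 1, 2, 3]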
    """
    def __init__(self, datasets: Iterable[Dataset]) -> None:
        super(ChainDataset, self).__init__()
        self.datasets = datasets

    def __iter__(self):
        for d in self.datasets:
            assert isinstance(d, IterableDataset), \
                "ChainDataset only supports IterableDataset"
            for x in d:
                yield x

    def __len__(self):
        total = 0
        for d in self.datasets:
            assert isinstance(d, IterableDataset), \
                "ChainDataset only supports IterableDataset"
            total += len(d)
        return total


class Subset(Dataset[T_co]):
    r"""
    Subset of a dataset at specified indices.

    Args:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
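
    Example (an illustrative sketch)::

        >>> base = torch.utils.data.TensorDataset(torch.arange(10))
        >>> evens = torch.utils.data.Subset(base, indices=[0, 2, 4, 6, 8])
        >>> len(evens)
        5
        >>> evens[1]   # maps to base[2]
        (tensor(2),)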
    datasetindicesN)ra   rb   r   c                 C   s   || _ || _d S r   ra   rb   )r    ra   rb   r"   r"   r#   r7     s    zSubset.__init__c                    s2   t |tr" j fdd|D  S  j j|  S )Nc                    s   g | ]} j | qS r"   )rb   )r1   ir,   r"   r#   
<listcomp>!  r5   z&Subset.__getitem__.<locals>.<listcomp>)rJ   rI   ra   rb   )r    rS   r"   r,   r#   r$     s    
zSubset.__getitem__c                 C   s
   t | jS r   )r?   rb   r,   r"   r"   r#   r;   $  s    zSubset.__len__)r(   r)   r*   r+   r   r   r<   r   rZ   r7   r$   r;   r"   r"   r"   r#   r     s   
r   )ra   lengths	generatorr   c                    sJ   t |t krtdtt ||d  fddtt||D S )a  
    Randomly split a dataset into non-overlapping new datasets of given lengths.
    Optionally fix the generator for reproducible results, e.g.:

    >>> random_split(range(10), [3, 7], generator=torch.Generator().manual_seed(42))

    Args:
        dataset (Dataset): Dataset to be split
        lengths (sequence): lengths of splits to be produced
        generator (Generator): Generator used for the random permutation.
    """
    if sum(lengths) != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")

    # Shuffle all indices once, then hand out contiguous slices of the
    # permutation, one per requested split length.
    indices = randperm(sum(lengths), generator=generator).tolist()
    return [Subset(dataset, indices[offset - length: offset])
            for offset, length in zip(_accumulate(lengths), lengths)]