r"""Definition of the DataLoader and associated iterators that subclass _BaseDataLoaderIter

To support these two classes, in `./_utils` we define many utility methods and
functions to be run in multiprocessing. E.g., the data loading worker loop is
in `./_utils/worker.py`.
"""

import functools
import itertools
import logging
import os
import queue
import threading
import time
import warnings

from datetime import timedelta
from typing import Any, Callable, Iterable, TypeVar, Generic, Sequence, List, Optional, Union

import multiprocessing as python_multiprocessing
import torch
import torch.distributed as dist
import torch.multiprocessing as multiprocessing
import torch.utils.data.graph_settings

from torch._utils import ExceptionWrapper
from torch._six import string_classes

from . import (
    IterDataPipe,
    MapDataPipe,
    IterableDataset,
    Sampler,
    SequentialSampler,
    RandomSampler,
    BatchSampler,
    Dataset,
)

from torch.utils.data.datapipes.datapipe import (
    _IterDataPipeSerializationWrapper,
    _MapDataPipeSerializationWrapper,
)

from . import _utils

__all__ = [
    "DataLoader",
    "get_worker_info",
    "default_collate",
    "default_convert",
]

T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')
_worker_init_fn_t = Callable[[int], None]

# Ideally we would parameterize `DataLoader` by the return type of `collate_fn`,
# but there is currently no way to have that type parameter set to a default
# value if the user doesn't pass in a custom `collate_fn`.
# See https://github.com/python/mypy/issues/3737.
_collate_fn_t = Callable[[List[T]], Any]

# These functions used to be defined in this file; they are re-exported here
# from `_utils/collate.py` for backward compatibility.
default_collate: _collate_fn_t = _utils.collate.default_collate
default_convert = _utils.collate.default_convert

get_worker_info = _utils.worker.get_worker_info

logger = logging.getLogger(__name__)


class _DatasetKind(object):
    Map = 0
    Iterable = 1

    @staticmethod
    def create_fetcher(kind, dataset, auto_collation, collate_fn, drop_last):
        if kind == _DatasetKind.Map:
            return _utils.fetch._MapDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)
        else:
            return _utils.fetch._IterableDatasetFetcher(dataset, auto_collation, collate_fn, drop_last)


class _InfiniteConstantSampler(Sampler):
    r"""Analogous to ``itertools.repeat(None, None)``.
    Used as sampler for :class:`~torch.utils.data.IterableDataset`.

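
    A minimal sketch of the behaviour (the sampler yields ``None`` forever,
    and the fetcher for an iterable-style dataset ignores the index values)::

        >>> it = iter(_InfiniteConstantSampler())
        >>> next(it), next(it)
        (None, None)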
    """

    def __init__(self):
        super(_InfiniteConstantSampler, self).__init__(None)

    def __iter__(self):
        while True:
            yield None


def _get_distributed_settings():
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size(), dist.get_rank()
    else:
        return 1, 0


def _sharding_worker_init_fn(worker_init_fn, world_size, rank_id, worker_id):
    global_worker_id = worker_id
    info = torch.utils.data.get_worker_info()
    total_workers = info.num_workers
    datapipe = info.dataset
    # To distribute elements across a distributed process group, all workers of
    # all ranks form one flat pool: rank ``r``'s worker ``w`` becomes shard
    # ``w * world_size + r`` out of ``num_workers * world_size`` shards.
    total_workers *= world_size
    global_worker_id = global_worker_id * world_size + rank_id
    torch.utils.data.graph_settings.apply_sharding(datapipe, total_workers, global_worker_id)
    if worker_init_fn is not None:
        worker_init_fn(worker_id)


class DataLoader(Generic[T_co]):
    r"""
    Data loader. Combines a dataset and a sampler, and provides an iterable over
    the given dataset.

    The :class:`~torch.utils.data.DataLoader` supports both map-style and
    iterable-style datasets with single- or multi-process loading, customizing
    loading order and optional automatic batching (collation) and memory pinning.

    See :py:mod:`torch.utils.data` documentation page for more details.

    Args:
        dataset (Dataset): dataset from which to load the data.
        batch_size (int, optional): how many samples per batch to load
            (default: ``1``).
        shuffle (bool, optional): set to ``True`` to have the data reshuffled
            at every epoch (default: ``False``).
        sampler (Sampler or Iterable, optional): defines the strategy to draw
            samples from the dataset. Can be any ``Iterable`` with ``__len__``
            implemented. If specified, :attr:`shuffle` must not be specified.
        batch_sampler (Sampler or Iterable, optional): like :attr:`sampler`, but
            returns a batch of indices at a time. Mutually exclusive with
            :attr:`batch_size`, :attr:`shuffle`, :attr:`sampler`,
            and :attr:`drop_last`.
        num_workers (int, optional): how many subprocesses to use for data
            loading. ``0`` means that the data will be loaded in the main process.
            (default: ``0``)
        collate_fn (callable, optional): merges a list of samples to form a
            mini-batch of Tensor(s).  Used when using batched loading from a
            map-style dataset.
        pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
            into device/CUDA pinned memory before returning them.  If your data elements
            are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
            see the memory-pinning example in the :py:mod:`torch.utils.data` docs.
        drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
            if the dataset size is not divisible by the batch size. If ``False`` and
            the size of dataset is not divisible by the batch size, then the last batch
            will be smaller. (default: ``False``)
        timeout (numeric, optional): if positive, the timeout value for collecting a batch
            from workers. Should always be non-negative. (default: ``0``)
        worker_init_fn (callable, optional): If not ``None``, this will be called on each
            worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as
            input, after seeding and before data loading. (default: ``None``)
        generator (torch.Generator, optional): If not ``None``, this RNG will be used
            by RandomSampler to generate random indexes and multiprocessing to generate
            `base_seed` for workers. (default: ``None``)
        prefetch_factor (int, optional, keyword-only arg): Number of batches loaded
            in advance by each worker. ``2`` means there will be a total of
            2 * num_workers batches prefetched across all workers. (default: ``2``)
        persistent_workers (bool, optional): If ``True``, the data loader will not shutdown
                 the worker processes after a dataset has been consumed once. This keeps
                 the workers' `Dataset` instances alive. (default: ``False``)
        pin_memory_device (str, optional): the data loader will copy Tensors
            into device pinned memory before returning them if pin_memory is set to true.


    .. warning:: If the ``spawn`` start method is used, :attr:`worker_init_fn`
                 cannot be an unpicklable object, e.g., a lambda function. See
                 :ref:`multiprocessing-best-practices` for more details related
                 to multiprocessing in PyTorch.

    .. warning:: ``len(dataloader)`` heuristic is based on the length of the sampler used.
                 When :attr:`dataset` is an :class:`~torch.utils.data.IterableDataset`,
                 it instead returns an estimate based on ``len(dataset) / batch_size``, with proper
                 rounding depending on :attr:`drop_last`, regardless of multi-process loading
                 configurations. This represents the best guess PyTorch can make because PyTorch
                 trusts user :attr:`dataset` code in correctly handling multi-process
                 loading to avoid duplicate data.

                 However, if sharding results in multiple workers having incomplete last batches,
                 this estimate can still be inaccurate, because (1) an otherwise complete batch can
                 be broken into multiple ones and (2) more than one batch worth of samples can be
                 dropped when :attr:`drop_last` is set. Unfortunately, PyTorch can not detect such
                 cases in general.

                 See `Dataset Types`_ for more details on these two types of datasets and how
                 :class:`~torch.utils.data.IterableDataset` interacts with
                 `Multi-process data loading`_.

    .. warning:: See the :ref:`reproducibility`, :ref:`dataloader-workers-random-seed`, and
                 :ref:`data-loading-randomness` notes for random-seed-related questions.
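
    Example::

        >>> # A minimal usage sketch. ``ds`` is assumed to be any map-style
        >>> # dataset (e.g. a ``TensorDataset``); it is not defined here.
        >>> loader = DataLoader(ds, batch_size=4, shuffle=True, num_workers=2)
        >>> for batch in loader:
        ...     pass  # each ``batch`` is ``collate_fn`` applied to 4 samples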
    """
    dataset: Dataset[T_co]
    batch_size: Optional[int]
    num_workers: int
    pin_memory: bool
    drop_last: bool
    timeout: float
    sampler: Union[Sampler, Iterable]
    pin_memory_device: str
    prefetch_factor: int
    _iterator: Optional['_BaseDataLoaderIter']
    __initialized = False

    def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1,
                 shuffle: Optional[bool] = None, sampler: Union[Sampler, Iterable, None] = None,
                 batch_sampler: Union[Sampler[Sequence], Iterable[Sequence], None] = None,
                 num_workers: int = 0, collate_fn: Optional[_collate_fn_t] = None,
                 pin_memory: bool = False, drop_last: bool = False,
                 timeout: float = 0, worker_init_fn: Optional[_worker_init_fn_t] = None,
                 multiprocessing_context=None, generator=None,
                 *, prefetch_factor: int = 2,
                 persistent_workers: bool = False,
                 pin_memory_device: str = ""):
        torch._C._log_api_usage_once("python.data_loader")

        if num_workers < 0:
            raise ValueError('num_workers option should be non-negative; '
                             'use num_workers=0 to disable multiprocessing.')

        if timeout < 0:
            raise ValueError('timeout option should be non-negative')

        if num_workers == 0 and prefetch_factor != 2:
            raise ValueError('prefetch_factor option could only be specified in multiprocessing. '
                             'Let num_workers > 0 to enable multiprocessing.')
        assert prefetch_factor > 0

        if persistent_workers and num_workers == 0:
            raise ValueError('persistent_workers option needs num_workers > 0')

        self.dataset = dataset
        self.num_workers = num_workers
        self.prefetch_factor = prefetch_factor
        self.pin_memory = pin_memory
        self.pin_memory_device = pin_memory_device
        self.timeout = timeout
        self.worker_init_fn = worker_init_fn
        self.multiprocessing_context = multiprocessing_context

        # For DataPipes, wrap the pipe in a serialization wrapper (so it can be
        # sent to worker processes) and, under torch.distributed, install a
        # sharding-aware `worker_init_fn` so every worker on every rank reads a
        # distinct shard.
        if isinstance(self.dataset, IterDataPipe):
            self.dataset = _IterDataPipeSerializationWrapper(self.dataset)
            ws, rank = _get_distributed_settings()
            if num_workers > 0:
                self.worker_init_fn = functools.partial(
                    _sharding_worker_init_fn, self.worker_init_fn, ws, rank)
            else:
                torch.utils.data.graph_settings.apply_sharding(self.dataset, ws, rank)
        elif isinstance(self.dataset, MapDataPipe):
            self.dataset = _MapDataPipeSerializationWrapper(self.dataset)
            ws, rank = _get_distributed_settings()
            if num_workers > 0:
                self.worker_init_fn = functools.partial(
                    _sharding_worker_init_fn, self.worker_init_fn, ws, rank)
            else:
                torch.utils.data.graph_settings.apply_sharding(self.dataset, ws, rank)

        # Arg-check dataset related before checking samplers because we want to
        # tell users that iterable-style datasets are incompatible with custom
        # samplers first, so that they don't learn that this combo doesn't work
        # after spending time fixing the custom sampler errors.
        if isinstance(dataset, IterableDataset):
            self._dataset_kind = _DatasetKind.Iterable
            # An iterable-style dataset produces its own stream of samples, so
            # there are no indices to permute: `shuffle`, `sampler` and
            # `batch_sampler` must all be left unspecified.
            if isinstance(dataset, IterDataPipe):
                if shuffle is not None:
                    dataset = torch.utils.data.graph_settings.apply_shuffle_settings(dataset, shuffle=shuffle)
            # We cannot check `shuffle is not None` here, since previously
            # `shuffle=False` was the default.
            elif shuffle not in {False, None}:
                raise ValueError(
                    "DataLoader with IterableDataset: expected unspecified "
                    "shuffle option, but got shuffle={}".format(shuffle))

            if sampler is not None:
                raise ValueError(
                    "DataLoader with IterableDataset: expected unspecified "
                    "sampler option, but got sampler={}".format(sampler))
            elif batch_sampler is not None:
                raise ValueError(
                    "DataLoader with IterableDataset: expected unspecified "
                    "batch_sampler option, but got batch_sampler={}".format(batch_sampler))
        else:
            shuffle = bool(shuffle)
            self._dataset_kind = _DatasetKind.Map

        if sampler is not None and shuffle:
            raise ValueError('sampler option is mutually exclusive with shuffle')

        if batch_sampler is not None:
            # auto_collation with custom batch_sampler
            if batch_size != 1 or shuffle or sampler is not None or drop_last:
                raise ValueError('batch_sampler option is mutually exclusive '
                                 'with batch_size, shuffle, sampler, and drop_last')
            batch_size = None
            drop_last = False
        elif batch_size is None:
            # no auto_collation
            if drop_last:
                raise ValueError('batch_size=None option disables auto-batching '
                                 'and is mutually exclusive with drop_last')

        if sampler is None:  # give default samplers
            if self._dataset_kind == _DatasetKind.Iterable:
                sampler = _InfiniteConstantSampler()
            else:  # map-style
                if shuffle:
                    sampler = RandomSampler(dataset, generator=generator)
                else:
                    sampler = SequentialSampler(dataset)

        if batch_size is not None and batch_sampler is None:
            # auto_collation without custom batch_sampler
            batch_sampler = BatchSampler(sampler, batch_size, drop_last)

        self.batch_size = batch_size
        self.drop_last = drop_last
        self.sampler = sampler
        self.batch_sampler = batch_sampler
        self.generator = generator

        if collate_fn is None:
            if self._auto_collation:
                collate_fn = _utils.collate.default_collate
            else:
                collate_fn = _utils.collate.default_convert

        self.collate_fn = collate_fn
        self.persistent_workers = persistent_workers

        self.__initialized = True
        self._IterableDataset_len_called = None  # see the length check in _BaseDataLoaderIter.__next__

        self._iterator = None

        self.check_worker_number_rationality()

        torch.set_vital('Dataloader', 'enabled', 'True')  # type: ignore[attr-defined]
zDataLoader._get_iteratorc                 C   s   | j S r"   )$_DataLoader__multiprocessing_contextr6   r,   r,   r-   rc     s    z"DataLoader.multiprocessing_contextc                 C   s~   |d urt| j dkrdt|trFt }||vr<td||t|}t|tj	j
sttd|ntd| j || _d S )Nr   zpmultiprocessing_context option should specify a valid start method in {!r}, but got multiprocessing_context={!r}zmultiprocessing_context option should be a valid context object or a string specifying the start method, but got multiprocessing_context={}zmmultiprocessing_context can only be used with multi-process loading (num_workers > 0), but got num_workers={})rF   rd   r   multiprocessingget_all_start_methodsrb   rh   get_contextpython_multiprocessingcontextBaseContext	TypeErrorrx   )r7   rc   Zvalid_start_methodsr,   r,   r-   rc     s&    


c                    s8   | j r"|dv r"td|| jjtt| || d S )N)rO   r[   rR   r+   r(   rY   z6{} attribute should not be set after {} is initialized)rl   rb   rh   r9   r/   r4   r   __setattr__)r7   attrvalr8   r,   r-   r     s
    zDataLoader.__setattr__c                 C   sD   | j r8| jdkr8| jd u r&|  | _n| j|  | jS |  S d S rt   )rY   rF   rV   rw   _resetr6   r,   r,   r-   r:     s    
zDataLoader.__iter__c                 C   s
   | j d uS r"   )r[   r6   r,   r,   r-   rj     s    zDataLoader._auto_collationc                 C   s   | j r| jS | jS d S r"   )rj   r[   rR   r6   r,   r,   r-   _index_sampler  s    zDataLoader._index_samplerc                 C   sd   | j tjkrVt| j }| _| jd urRddlm} | j	rD|| j }n||| j }|S t| j
S d S )Nr   )ceil)rg   r!   r   lenr(   rm   rO   mathr   r+   r   )r7   lengthr   r,   r,   r-   __len__  s    
zDataLoader.__len__c                 C   s   dd }| j r| j dkrd S d }d}ttdrXzttd}d}W n tyV   Y n0 |d u rtt }|d urt|}|d u rt||| j | d S | j |krt||| j | d S )Nc                 S   s0   | d urd | |rdndnd}d ||}|S )Nz|Our suggested max number of worker in current system is {}{}, which is smaller than what this DataLoader is going to create.rX   z% (`cpuset` is not taken into account)zUDataLoader is not able to compute a suggested max number of worker in current system.zThis DataLoader will create {} worker processes in total. {} Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.)rh   )Znum_worker_suggestZnum_worker_createdcpuset_checkedZsuggested_max_worker_msgwarn_msgr,   r,   r-   _create_warning_msg  s    
	zGDataLoader.check_worker_number_rationality.<locals>._create_warning_msgr   Fsched_getaffinityT)	rF   hasattrosr   r   	Exception	cpu_countwarningswarn)r7   r   Zmax_num_worker_suggestr   r   r,   r,   r-   rn     s8    

z*DataLoader.check_worker_number_rationalityc                 C   s  t | jtrtjdtjdj| jd }t	
 rt	 rt	 }t	 }t	j }|dkr2t|}|tj| td| d |tjd}t }||k rttj |tjd}tt | dttjdkrtd	| d
| dtj dq|tjd |tj| }|dksJ nd}t }t|dkrttj |tj}tt | dttjdkr>td| d| dtj dq>td| d|  |tjd}|dkrttj |tjd}qt |}|S d S d S )Nr,   dtyper\   r   zShared seed (z) sent to store on rank 0r   )secondszTimed out receiving the signal from the distribtued store on Rank 0 that all other Ranks have received the shared seed. (world_size=z, received=z
, timeout=)rX   zGTimed out receiving the shared seed from the distribtued store on Rank z. (world_size=z) received from store on rank )!rd   r(   r   rC   emptyint64random_r]   itemr=   r>   r?   rA   r@   distributed_c10d_get_default_storestrsetr   DATAPIPE_SHARED_SEEDloggerrL   addDATAPIPE_SHARED_SEED_COUNTERtimesleep#DATAPIPE_SHARED_SEED_CHECK_INTERVALr   $DATAPIPE_SHARED_SEED_DEFAULT_TIMEOUTRuntimeErrorr   getint)r7   _shared_seedrq   rp   storeZ_shared_seed_strZ_shared_seed_recv_cntstartr,   r,   r-   _get_shared_seed8  sf    





zDataLoader._get_shared_seed)r   NNNr   NFFr   NNN) r/   r0   r1   r;   r   r   __annotations__r
   r   ri   floatr   r   r   r   rl   r   _collate_fn_t_worker_init_fn_tr5   rw   propertyrc   setterr   r:   rj   r   r   rn   r   r<   r,   r,   r8   r-   r   z   s`   
Q        *




class _BaseDataLoaderIter(object):
    def __init__(self, loader: DataLoader) -> None:
        self._dataset = loader.dataset
        self._shared_seed = loader._get_shared_seed()
        if isinstance(self._dataset, IterDataPipe):
            shared_rng = torch.Generator()
            shared_rng.manual_seed(self._shared_seed)
            self._dataset = torch.utils.data.graph_settings.apply_shuffle_seed(self._dataset, shared_rng)
        self._dataset_kind = loader._dataset_kind
        self._IterableDataset_len_called = loader._IterableDataset_len_called
        self._auto_collation = loader._auto_collation
        self._drop_last = loader.drop_last
        self._index_sampler = loader._index_sampler
        self._num_workers = loader.num_workers
        self._prefetch_factor = loader.prefetch_factor
        # For non-CUDA backends, pin_memory_device needs to be set; if it is
        # not set, the default behaviour is to pin for the CUDA device.
        if len(loader.pin_memory_device) == 0:
            self._pin_memory = loader.pin_memory and torch.cuda.is_available()
            self._pin_memory_device = None
        else:
            if not loader.pin_memory:
                warn_msg = ("pin memory device is set and pin_memory flag is not used; "
                            "device pinned memory won't be used. Please set pin_memory "
                            "to True if you need to use the device pin memory.")
                warnings.warn(warn_msg)

            self._pin_memory = loader.pin_memory
            self._pin_memory_device = loader.pin_memory_device
        self._timeout = loader.timeout
        self._collate_fn = loader.collate_fn
        self._sampler_iter = iter(self._index_sampler)
        self._base_seed = torch.empty((), dtype=torch.int64).random_(generator=loader.generator).item()
        self._persistent_workers = loader.persistent_workers
        self._num_yielded = 0
        self._profile_name = "enumerate(DataLoader)#{}.__next__".format(self.__class__.__name__)

    def __iter__(self) -> '_BaseDataLoaderIter':
        return self

    def _reset(self, loader, first_iter=False):
        self._sampler_iter = iter(self._index_sampler)
        self._num_yielded = 0
        self._IterableDataset_len_called = loader._IterableDataset_len_called
        self._shared_seed = loader._get_shared_seed()
        if isinstance(self._dataset, IterDataPipe):
            shared_rng = torch.Generator()
            shared_rng.manual_seed(self._shared_seed)
            self._dataset = torch.utils.data.graph_settings.apply_shuffle_seed(self._dataset, shared_rng)

    def _next_index(self):
        return next(self._sampler_iter)  # may raise StopIteration

    def _next_data(self):
        raise NotImplementedError

    def __next__(self) -> Any:
        with torch.autograd.profiler.record_function(self._profile_name):
            if self._sampler_iter is None:
                # TODO(https://github.com/pytorch/pytorch/issues/76750)
                self._reset()  # type: ignore[call-arg]
            data = self._next_data()
            self._num_yielded += 1
            if self._dataset_kind == _DatasetKind.Iterable and \
                    self._IterableDataset_len_called is not None and \
                    self._num_yielded > self._IterableDataset_len_called:
                warn_msg = ("Length of IterableDataset {} was reported to be {} "
                            "(when accessing len(dataloader)), but {} samples have "
                            "been fetched. ").format(self._dataset, self._IterableDataset_len_called,
                                                     self._num_yielded)
                if self._num_workers > 0:
                    warn_msg += ("For multiprocessing data-loading, this could be caused by not "
                                 "properly configuring the IterableDataset replica at each worker. "
                                 "Please see https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset "
                                 "for examples.")
                warnings.warn(warn_msg)
            return data

    next = __next__  # Python 2 compatibility

    def __len__(self) -> int:
        return len(self._index_sampler)

    def __getstate__(self):
        # Iterators hold OS resources (worker processes, queues, threads), so
        # they cannot be pickled.
        raise NotImplementedError("{} cannot be pickled".format(self.__class__.__name__))
class _SingleProcessDataLoaderIter(_BaseDataLoaderIter):
    def __init__(self, loader):
        super(_SingleProcessDataLoaderIter, self).__init__(loader)
        assert self._timeout == 0
        assert self._num_workers == 0

        self._dataset_fetcher = _DatasetKind.create_fetcher(
            self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last)

    def _next_data(self):
        index = self._next_index()  # may raise StopIteration
        data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
        if self._pin_memory:
            data = _utils.pin_memory.pin_memory(data, self._pin_memory_device)
        return data


class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter):
    r"""Iterates once over the DataLoader's dataset, as specified by the sampler"""

    def __init__(self, loader):
        super(_MultiProcessingDataLoaderIter, self).__init__(loader)

        assert self._num_workers > 0
        assert self._prefetch_factor > 0

        if loader.multiprocessing_context is None:
            multiprocessing_context = multiprocessing
        else:
            multiprocessing_context = loader.multiprocessing_context

        self._worker_init_fn = loader.worker_init_fn
        self._worker_result_queue = multiprocessing_context.Queue()  # type: ignore[var-annotated]
        self._worker_pids_set = False
        self._shutdown = False
        self._workers_done_event = multiprocessing_context.Event()

        self._index_queues = []
        self._workers = []
        for i in range(self._num_workers):
            index_queue = multiprocessing_context.Queue()  # type: ignore[var-annotated]
            # `cancel_join_thread` is needed so that a dying main process does
            # not block on flushing these queues during shutdown.
            index_queue.cancel_join_thread()
            w = multiprocessing_context.Process(
                target=_utils.worker._worker_loop,
                args=(self._dataset_kind, self._dataset, index_queue,
                      self._worker_result_queue, self._workers_done_event,
                      self._auto_collation, self._collate_fn, self._drop_last,
                      self._base_seed, self._worker_init_fn, i, self._num_workers,
                      self._persistent_workers, self._shared_seed))
            w.daemon = True
            # NB: Process.start() takes some time as it needs to start a
            #     process and pass the arguments over via a pipe. A worker is
            #     added to self._workers only after it has started, so __del__
            #     never tries to join an unstarted process.
            w.start()
            self._index_queues.append(index_queue)
            self._workers.append(w)

        if self._pin_memory:
            self._pin_memory_thread_done_event = threading.Event()

            self._data_queue = queue.Queue()  # type: ignore[var-annotated]
            pin_memory_thread = threading.Thread(
                target=_utils.pin_memory._pin_memory_loop,
                args=(self._worker_result_queue, self._data_queue,
                      torch.cuda.current_device(),
                      self._pin_memory_thread_done_event, self._pin_memory_device))
            pin_memory_thread.daemon = True
            pin_memory_thread.start()
            # As with workers above, the thread is registered on self only
            # after it has started.
            self._pin_memory_thread = pin_memory_thread
        else:
            self._data_queue = self._worker_result_queue

        # In some rare cases, persistent workers (daemonic processes) would be
        # terminated before `__del__` of the iterator is invoked when the main
        # process exits, causing the pin_memory_thread to read corrupted data
        # from worker_result_queue. Registering a cleanup hook avoids that.
        if self._persistent_workers and self._pin_memory:
            import atexit
            for w in self._workers:
                atexit.register(_MultiProcessingDataLoaderIter._clean_up_worker, w)

        # .pid can be None only before the process is spawned (not the case here).
        _utils.signal_handling._set_worker_pids(id(self), tuple(w.pid for w in self._workers))  # type: ignore[misc]
        _utils.signal_handling._set_SIGCHLD_handler()
        self._worker_pids_set = True
        self._reset(loader, first_iter=True)

    def _reset(self, loader, first_iter=False):
        super()._reset(loader, first_iter)
        self._send_idx = 0  # idx of the next task to be sent to workers
        self._rcvd_idx = 0  # idx of the next task to be returned in __next__
        # Information about data not yet yielded, i.e., tasks w/ indices in
        # range [rcvd_idx, send_idx):
        #   task idx => (worker_id,)       if data isn't fetched yet (outstanding)
        #            => (worker_id, data)  if data arrived out of order
        self._task_info = {}
        self._tasks_outstanding = 0  # always equal to count(v for v in task_info.values() if len(v) == 1)
        # Whether each worker still has work to do, i.e., has not exhausted its
        # iterable-style dataset replica. All `True` for map-style datasets.
        self._workers_status = [True for i in range(self._num_workers)]
        # Reset the worker queue cycle so it resumes next epoch at worker 0.
        self._worker_queue_idx_cycle = itertools.cycle(range(self._num_workers))
        # Resume prefetching for persistent workers.
        if not first_iter:
            for idx in range(self._num_workers):
                self._index_queues[idx].put(_utils.worker._ResumeIteration(self._shared_seed))
            resume_iteration_cnt = self._num_workers
            while resume_iteration_cnt > 0:
                return_idx, return_data = self._get_data()
                if isinstance(return_idx, _utils.worker._ResumeIteration):
                    assert return_data is None
                    resume_iteration_cnt -= 1
        # Prime the prefetch loop.
        for _ in range(self._prefetch_factor * self._num_workers):
            self._try_put_index()

    def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL):
        # Tries to fetch data from `self._data_queue` once for the given
        # timeout. Returns a 2-tuple (success: bool, data or None). Raises a
        # `RuntimeError` if any worker died unexpectedly; on Windows this
        # manual liveness check is the only failure-detection mechanism, on
        # other platforms a SIGCHLD handler also fires.
        try:
            data = self._data_queue.get(timeout=timeout)
            return (True, data)
        except Exception as e:
            failed_workers = []
            for worker_id, w in enumerate(self._workers):
                if self._workers_status[worker_id] and not w.is_alive():
                    failed_workers.append(w)
                    self._mark_worker_as_unavailable(worker_id)
            if len(failed_workers) > 0:
                pids_str = ', '.join(str(w.pid) for w in failed_workers)
                raise RuntimeError('DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) from e
            if isinstance(e, queue.Empty):
                return (False, None)
            import errno
            import tempfile
            try:
                # Raise an exception if we are this close to the FDs limit;
                # trying to open only one file is not a sufficient test.
                fds_limit_margin = 10
                fs = [tempfile.NamedTemporaryFile() for i in range(fds_limit_margin)]
            except OSError as e:
                if e.errno == errno.EMFILE:
                    raise RuntimeError(
                        "Too many open files. Communication with the"
                        " workers is no longer possible. Please increase the"
                        " limit using `ulimit -n` in the shell or change the"
                        " sharing strategy by calling"
                        " `torch.multiprocessing.set_sharing_strategy('file_system')`"
                        " at the beginning of your code") from None
            raise

    def _get_data(self):
        # Fetches data from `self._data_queue`, checking worker status every
        # `MP_STATUS_CHECK_INTERVAL` seconds via `_try_get_data`.
        if self._timeout > 0:
            success, data = self._try_get_data(self._timeout)
            if success:
                return data
            else:
                raise RuntimeError('DataLoader timed out after {} seconds'.format(self._timeout))
        elif self._pin_memory:
            while self._pin_memory_thread.is_alive():
                success, data = self._try_get_data()
                if success:
                    return data
            else:
                # while condition is false, i.e., pin_memory_thread died.
                raise RuntimeError('Pin memory thread exited unexpectedly')
        else:
            while True:
                success, data = self._try_get_data()
                if success:
                    return data

    def _next_data(self):
        while True:
            # If the worker responsible for `self._rcvd_idx` has already ended
            # (e.g., it exhausted an `IterableDataset` replica) and cannot
            # fulfill this task, advance `self._rcvd_idx` to the next valid index.
            while self._rcvd_idx < self._send_idx:
                info = self._task_info[self._rcvd_idx]
                worker_id = info[0]
                if len(info) == 2 or self._workers_status[worker_id]:  # has data or is still active
                    break
                del self._task_info[self._rcvd_idx]
                self._rcvd_idx += 1
            else:
                # no valid `self._rcvd_idx` found (i.e., didn't break)
                if not self._persistent_workers:
                    self._shutdown_workers()
                raise StopIteration

            # Now `self._rcvd_idx` is the batch index we want to fetch.
            # Check whether this sample has already arrived (out of order).
            if len(self._task_info[self._rcvd_idx]) == 2:
                data = self._task_info.pop(self._rcvd_idx)[1]
                return self._process_data(data)

            assert not self._shutdown and self._tasks_outstanding > 0
            idx, data = self._get_data()
            self._tasks_outstanding -= 1
            if self._dataset_kind == _DatasetKind.Iterable:
                # Check for _IterableDatasetStopIteration
                if isinstance(data, _utils.worker._IterableDatasetStopIteration):
                    if self._persistent_workers:
                        self._workers_status[data.worker_id] = False
                    else:
                        self._mark_worker_as_unavailable(data.worker_id)
                    self._try_put_index()
                    continue

            if idx != self._rcvd_idx:
                # store out-of-order samples
                self._task_info[idx] += (data,)
            else:
                del self._task_info[idx]
                return self._process_data(data)

    def _try_put_index(self):
        assert self._tasks_outstanding < self._prefetch_factor * self._num_workers

        try:
            index = self._next_index()
        except StopIteration:
            return
        for _ in range(self._num_workers):  # find the next active worker, if any
            worker_queue_idx = next(self._worker_queue_idx_cycle)
            if self._workers_status[worker_queue_idx]:
                break
        else:
            # not found (i.e., didn't break)
            return

        self._index_queues[worker_queue_idx].put((self._send_idx, index))
        self._task_info[self._send_idx] = (worker_queue_idx,)
        self._tasks_outstanding += 1
        self._send_idx += 1

    def _process_data(self, data):
        self._rcvd_idx += 1
        self._try_put_index()
        if isinstance(data, ExceptionWrapper):
            data.reraise()
        return data

    def _mark_worker_as_unavailable(self, worker_id, shutdown=False):
        # Marks a worker as having finished its work, e.g., due to exhausting
        # an `IterableDataset` replica. Should only be used when this iterator
        # is going to keep running.
        assert self._workers_status[worker_id] or (self._persistent_workers and shutdown)

        # Signal termination to that specific worker; `None` means no more
        # work will be put on its queue.
        q = self._index_queues[worker_id]
        q.put(None)

        # The worker is not joined here: joining may be slow, and the worker
        # may still raise errors we prefer to capture rather than ignore.
        # Joining is deferred to `_shutdown_workers`.
        self._workers_status[worker_id] = False

        assert self._workers_done_event.is_set() == shutdown

    def _shutdown_workers(self):
        # Called when shutting down this `_MultiProcessingDataLoaderIter`.
        python_exit_status = _utils.python_exit_status
        if python_exit_status is True or python_exit_status is None:
            # Python is shutting down; do nothing.
            return
        # Normal exit when the last reference is gone or the iterator is depleted.
        if not self._shutdown:
            self._shutdown = True
            try:
                # Exit `pin_memory_thread` first because exiting workers may
                # leave corrupted data in `worker_result_queue`, which
                # `pin_memory_thread` reads from.
                if hasattr(self, '_pin_memory_thread'):
                    # Use hasattr in case an error happened before the attribute was set.
                    self._pin_memory_thread_done_event.set()
                    # Send something to wake the thread up so it can check the
                    # done event.
                    self._worker_result_queue.put((None, None))
                    self._pin_memory_thread.join()
                    self._worker_result_queue.cancel_join_thread()
                    self._worker_result_queue.close()

                # Exit workers now.
                self._workers_done_event.set()
                for worker_id in range(len(self._workers)):
                    # Use `len(self._workers)` instead of `self._num_workers` in
                    # case an error happened before all workers were started.
                    # A paused persistent worker must also be shut down.
                    if self._persistent_workers or self._workers_status[worker_id]:
                        self._mark_worker_as_unavailable(worker_id, shutdown=True)
                for w in self._workers:
                    # Joining should normally succeed, but a timeout is used so
                    # that stuck workers get killed in the `finally` block.
                    w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
                for q in self._index_queues:
                    q.cancel_join_thread()
                    q.close()
            finally:
                # A worker killed by a signal can hang, e.g., in `Event.set()`,
                # so a timeout-based join plus terminate is used as a last resort.
                if self._worker_pids_set:
                    _utils.signal_handling._remove_worker_pids(id(self))
                    self._worker_pids_set = False
                for w in self._workers:
                    if w.is_alive():
                        w.terminate()

    # staticmethod is used to remove the reference to `_MultiProcessingDataLoaderIter`
    @staticmethod
    def _clean_up_worker(w):
        try:
            w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)
        finally:
            if w.is_alive():
                w.terminate()

    def __del__(self):
        self._shutdown_workers()