import contextlib
from typing import Generator
import warnings

from torch._C import default_generator
import torch


def set_rng_state(new_state: torch.Tensor) -> None:
    r"""Sets the random number generator state.

    .. note:: This function only works for CPU. For CUDA, please use
             torch.manual_seed(seed), which works for both CPU and CUDA.

    Args:
        new_state (torch.ByteTensor): The desired state
    N)r   	set_state)r    r   H/var/www/html/django/DPS/env/lib/python3.9/site-packages/torch/random.pyset_rng_state	   s    	r	   )r   c                   C   s   t  S )zBReturns the random number generator state as a `torch.ByteTensor`.)r   	get_stater   r   r   r   get_rng_state   s    r   c                 C   s0   t | } ddl}|j s&|j|  t| S )a  Sets the seed for generating random numbers. Returns a


def manual_seed(seed) -> torch._C.Generator:
    r"""Sets the seed for generating random numbers. Returns a
    `torch.Generator` object.

    Args:
        seed (int): The desired seed. Value must be within the inclusive range
            `[-0x8000_0000_0000_0000, 0xffff_ffff_ffff_ffff]`. Otherwise, a RuntimeError
            is raised. Negative inputs are remapped to positive values with the formula
            `0xffff_ffff_ffff_ffff + seed`.
    """
    seed = int(seed)
    import torch.cuda

    if not torch.cuda._is_in_bad_fork():
        torch.cuda.manual_seed_all(seed)

    return default_generator.manual_seed(seed)
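

# Illustrative usage (editor's sketch, not part of the upstream module):
# re-seeding reproduces the same sequence of draws. Note that `manual_seed`
# also seeds every visible CUDA device, so one call covers CPU and GPU.
#
#     torch.manual_seed(42)
#     a = torch.rand(3)
#     torch.manual_seed(42)
#     b = torch.rand(3)
#     assert torch.equal(a, b)     # same seed -> identical sequence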


def seed() -> int:
    r"""Sets the seed for generating random numbers to a non-deterministic
    random number. Returns a 64 bit number used to seed the RNG.
    """
    seed = default_generator.seed()
    import torch.cuda

    if not torch.cuda._is_in_bad_fork():
        torch.cuda.manual_seed_all(seed)

    return seed


def initial_seed() -> int:
    r"""Returns the initial seed for generating random numbers as a
    Python `long`.
    """
    return default_generator.initial_seed()


_fork_rng_warned_already = False


@contextlib.contextmanager
def fork_rng(devices=None, enabled=True, _caller="fork_rng",
             _devices_kw="devices") -> Generator:
    """
    Forks the RNG, so that when you return, the RNG is reset
    to the state that it was previously in.

    Args:
        devices (iterable of CUDA IDs): CUDA devices for which to fork
            the RNG.  CPU RNG state is always forked.  By default, :meth:`fork_rng` operates
            on all devices, but will emit a warning if your machine has a lot
            of devices, since this function will run very slowly in that case.
            If you explicitly specify devices, this warning will be suppressed.
        enabled (bool): if ``False``, the RNG is not forked.  This is a convenience
            argument for easily disabling the context manager without having
            to delete it and unindent your Python code under it.
    """

    import torch.cuda
    global _fork_rng_warned_already

    if not enabled:
        yield
        return

    if devices is None:
        num_devices = torch.cuda.device_count()
        if num_devices > 1 and not _fork_rng_warned_already:
            warnings.warn(
                ("CUDA reports that you have {num_devices} available devices, and you "
                 "have used {caller} without explicitly specifying which devices are "
                 "being used. For safety, we initialize *every* CUDA device by default, "
                 "which can be quite slow if you have a lot of GPUs.  If you know that "
                 "you are only making use of a few CUDA devices, set the environment "
                 "variable CUDA_VISIBLE_DEVICES or the '{devices_kw}' keyword argument "
                 "of {caller} with the set of devices you are actually using.  For "
                 "example, if you are using CPU only, set CUDA_VISIBLE_DEVICES= or "
                 "devices=[]; if you are using GPU 0 only, set CUDA_VISIBLE_DEVICES=0 "
                 "or devices=[0].  To initialize all devices and suppress this warning, "
                 "set the '{devices_kw}' keyword argument to "
                 "`range(torch.cuda.device_count())`."
                 ).format(num_devices=num_devices, caller=_caller,
                          devices_kw=_devices_kw))
            _fork_rng_warned_already = True
        devices = list(range(num_devices))
    else:
        # Protect against the user passing us a generator; we need to traverse
        # this multiple times, but a generator is exhausted on first traversal.
        devices = list(devices)

    cpu_rng_state = torch.get_rng_state()
    gpu_rng_states = []
    for device in devices:
        gpu_rng_states.append(torch.cuda.get_rng_state(device))

    try:
        yield
    finally:
        torch.set_rng_state(cpu_rng_state)
        for device, gpu_rng_state in zip(devices, gpu_rng_states):
            torch.cuda.set_rng_state(gpu_rng_state, device)
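

# Illustrative usage (editor's sketch, not part of the upstream module):
# `fork_rng` lets a block consume randomness without disturbing the caller's
# RNG stream; `devices=[]` skips CUDA state entirely, which avoids both the
# warning above and the per-device overhead when only CPU randomness is used.
#
#     torch.manual_seed(0)
#     with torch.random.fork_rng(devices=[]):
#         scratch = torch.rand(100)   # consumes randomness inside the fork
#     x = torch.rand(1)               # unaffected: CPU state was restored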