import torch
from typing import Optional, List, DefaultDict, Any
import warnings
from collections import defaultdict
import sys
import traceback


def _type(self, dtype=None, non_blocking=False, **kwargs):
    """Returns the type if `dtype` is not provided, else casts this object to
    the specified type.

    If this is already of the correct type, no copy is performed and the
    original object is returned.

    Args:
        dtype (type or string): The desired type
        non_blocking (bool): If ``True``, and the source is in pinned memory
            and destination is on the GPU or vice versa, the copy is performed
            asynchronously with respect to the host. Otherwise, the argument
            has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    """
    non_blocking = _get_async_or_non_blocking('type', non_blocking, kwargs)
    if dtype is None:
        return self.__module__ + '.' + self.__class__.__name__

    if isinstance(dtype, str):
        dtype = _import_dotted_name(dtype)
    if dtype == type(self):
        return self

    if self.is_sparse:
        if not dtype.is_sparse:
            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
        new_module_name = dtype.__module__.replace('.sparse', '')
        new_values_type_name = new_module_name + '.' + dtype.__name__
        new_values = torch.Tensor._values(self).type(new_values_type_name, non_blocking)
        new_indices_type_name = new_module_name + '.LongTensor'
        new_indices = torch.Tensor._indices(self).type(new_indices_type_name, non_blocking)
        return dtype(new_indices, new_values, self.size())
    if dtype.is_sparse:
        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    return dtype(self.size()).copy_(self, non_blocking)
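
# A minimal usage sketch (illustrative, not part of the module): `_type` is
# bound as the `.type()` method of tensor and storage classes, so:
#   >>> x = torch.zeros(2)
#   >>> x.type()                      # no dtype: returns the type string
#   'torch.FloatTensor'
#   >>> x.type('torch.DoubleTensor')  # with dtype: casts, copying the data
#   tensor([0., 0.], dtype=torch.float64)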


def _cuda(self, device=None, non_blocking=False, **kwargs):
    """Returns a copy of this object in CUDA memory.

    If this object is already in CUDA memory and on the correct device, then
    no copy is performed and the original object is returned.

    Args:
        device (int): The destination GPU id. Defaults to the current device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument.
    """
    non_blocking = _get_async_or_non_blocking('cuda', non_blocking, kwargs)
    if self.is_cuda:
        if device is None:
            device = torch.cuda.current_device()
        if self.get_device() == device:
            return self
    else:
        if device is None:
            device = -1
    with torch.cuda.device(device):
        if self.is_sparse:
            new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
            indices = torch.Tensor._indices(self).cuda(device, non_blocking)
            values = torch.Tensor._values(self).cuda(device, non_blocking)
            return new_type(indices, values, self.size())
        else:
            untyped_storage = torch._UntypedStorage(
                self.size(), device=torch.device('cuda'))
            untyped_storage.copy_(self, non_blocking)
            return untyped_storage
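
# A minimal usage sketch (illustrative; requires a CUDA-enabled build): `_cuda`
# backs the `.cuda()` method of storage classes, so moving a storage is simply:
#   >>> s = torch.FloatTensor([1., 2.]).storage()
#   >>> s_gpu = s.cuda()   # no-op if already on the target CUDA device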


def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
    if not kwargs:
        return non_blocking
    if len(kwargs) != 1 or 'async' not in kwargs:
        message = "{}() got an unexpected keyword argument '{}'"
        argument = list(kwargs.keys()).pop()
        raise TypeError(message.format(function_name, argument))
    warnings.warn("'async' is deprecated; use 'non_blocking'")
    return kwargs['async']


def _rebuild_tensor(storage, storage_offset, size, stride):
    # first construct a tensor with the correct dtype/device
    t = torch.tensor([], dtype=storage.dtype, device=storage._untyped().device)
    return t.set_(storage._untyped(), storage_offset, size, stride)


def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
    tensor = _rebuild_tensor(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor


_sparse_tensors_to_validate: List["torch.Tensor"] = []


# Sparse tensors loaded from a checkpoint are collected here and validated only
# after their storages (indices and values) have been unpickled as well.
def _validate_loaded_sparse_tensors():
    try:
        for t in _sparse_tensors_to_validate:
            if t.is_sparse:
                torch._validate_sparse_coo_tensor_args(t._indices(), t._values(), t.size())
            elif t.is_sparse_csr:
                torch._validate_sparse_csr_tensor_args(
                    t.crow_indices(), t.col_indices(), t.values(), t.size())
            else:
                raise NotImplementedError(
                    '_validate_loaded_sparse_tensors for layout `%s`' % (t.layout))
    finally:
        _sparse_tensors_to_validate.clear()
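
# A minimal round-trip sketch (illustrative): torch.save() pickles a tensor via
# Tensor.__reduce_ex__, which records _rebuild_tensor_v2 as the reconstruction
# function that torch.load() later invokes.
#   >>> import io
#   >>> buf = io.BytesIO()
#   >>> torch.save(torch.arange(4.), buf)
#   >>> _ = buf.seek(0)
#   >>> torch.load(buf)
#   tensor([0., 1., 2., 3.])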


def _rebuild_sparse_tensor(layout, data):
    if layout == torch.sparse_coo:
        indices, values, size = data
        result = torch._sparse_coo_tensor_unsafe(indices, values, size)
        _sparse_tensors_to_validate.append(result)
        return result

    raise NotImplementedError("rebuilding sparse tensor for layout %s" % (layout))


def _rebuild_sparse_csr_tensor(layout, data):
    if layout == torch.sparse_csr:
        crow_indices, col_indices, values, size = data
        result = torch._sparse_csr_tensor_unsafe(crow_indices, col_indices, values, size)
        _sparse_tensors_to_validate.append(result)
        return result

    raise NotImplementedError("rebuilding sparse tensor for layout %s" % (layout))
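
# A minimal round-trip sketch (illustrative): loading a sparse COO tensor goes
# through _rebuild_sparse_tensor, and _validate_loaded_sparse_tensors later
# checks the deferred indices/values.
#   >>> import io
#   >>> i, v = torch.tensor([[0, 1], [1, 0]]), torch.tensor([2., 3.])
#   >>> buf = io.BytesIO()
#   >>> torch.save(torch.sparse_coo_tensor(i, v, (2, 2)), buf)
#   >>> _ = buf.seek(0)
#   >>> torch.load(buf).to_dense()
#   tensor([[0., 2.],
#           [3., 0.]])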
from_numpytorC   )rP   r   r$   rC   r;   r   r   r    !_rebuild_device_tensor_from_numpy   s    rU   c                 C   s   t j||| d|dS )Nmeta)r   r$   rC   )r   Zempty_strided)r   r   r?   rC   r   r   r    _rebuild_meta_tensor_no_storage   s    rW   c              	   C   s   t jj| ||||||dS )N)stridesr>   rK   r$   rC   )r   r   Z_make_wrapper_subclass)clsr   r   r?   r>   rK   r$   rC   r   r   r    _rebuild_wrapper_subclass   s    


def _rebuild_qtensor(storage, storage_offset, size, stride, quantizer_params, requires_grad, backward_hooks):
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(
            size, scale=scale, zero_point=zero_point, dtype=storage.dtype)
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(scales, dtype=torch.double)
                zero_points = torch.tensor(zero_points, dtype=torch.long)
            else:
                scales = torch.tensor(scales, dtype=torch.float)
                zero_points = torch.tensor(zero_points, dtype=torch.float)
        tensor = torch._empty_per_channel_affine_quantized(
            size, scales=scales, zero_points=zero_points, axis=axis, dtype=storage.dtype)
    else:
        raise RuntimeError("Can't deserialize quantized tensor with qscheme {}".format(qscheme))
    tensor.set_(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor


def _rebuild_parameter(data, requires_grad, backward_hooks):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    param._backward_hooks = backward_hooks
    return param


def _import_dotted_name(name):
    components = name.split('.')
    obj = __import__(components[0])
    for component in components[1:]:
        obj = getattr(obj, component)
    return obj
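
# A minimal usage sketch (illustrative): resolves a dotted path to an object.
#   >>> _import_dotted_name('torch.nn.Linear') is torch.nn.Linear
#   True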


# Taken from python 3.5 docs
def _accumulate(iterable, fn=lambda x, y: x + y):
    'Return running totals'
    # _accumulate([1, 2, 3, 4, 5]) --> 1 3 6 10 15
    it = iter(iterable)
    try:
        total = next(it)
    except StopIteration:
        return
    yield total
    for element in it:
        total = fn(total, element)
        yield total


def _flatten_dense_tensors(tensors):
    """Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
    same dense type.

    Since inputs are dense, the resulting tensor will be a concatenated 1D
    buffer. Element-wise operation on this buffer will be equivalent to
    operating individually.

    Args:
        tensors (Iterable[Tensor]): dense tensors to flatten.

    Returns:
        A contiguous 1D buffer containing input tensors.
    """
    return torch._C._nn.flatten_dense_tensors(tensors)


def _flatten_sparse_tensors(tensors):
    """Flatten sparse tensors into two contiguous 1D buffers, one of indices and
    one of values. Assume tensors are of same sparse type.

    Args:
        tensors (Iterable[Tensor]): sparse tensors to flatten.

    Returns:
        A tuple of two contiguous 1D buffers, one containing input tensors'
        indices and the other containing the values.
    """
    flat_indices = torch._C._nn.flatten_dense_tensors([torch.Tensor._indices(t) for t in tensors])
    flat_values = torch._C._nn.flatten_dense_tensors([torch.Tensor._values(t) for t in tensors])
    return flat_indices, flat_values


def _unflatten_dense_tensors(flat, tensors):
    """View a flat buffer using the sizes of tensors. Assume that tensors are of
    same dense type, and that flat is given by _flatten_dense_tensors.

    Args:
        flat (Tensor): flattened dense tensors to unflatten.
        tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
          unflatten flat.

    Returns:
        Unflattened dense tensors with sizes same as tensors and values from
        flat.
    """
    return torch._C._nn.unflatten_dense_tensors(flat, tensors)


def _unflatten_sparse_tensors(flat, tensors):
    """View flat buffer (containing indices and values) using the sizes of
    tensors. Assume that tensors are of same sparse type, and that flat is given
    by _flatten_sparse_tensors.

    Args:
        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
          tensors to unflatten.
        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
          unflatten flat.

    Returns:
        Unflattened sparse tensors with sizes same as tensors and values from
        flat.
    """
    flat_indices, flat_values = flat
    indices = torch._C._nn.unflatten_dense_tensors(
        flat_indices, [torch.Tensor._indices(t) for t in tensors])
    values = torch._C._nn.unflatten_dense_tensors(
        flat_values, [torch.Tensor._values(t) for t in tensors])
    outputs = []
    for t, i, v in zip(tensors, indices, values):
        outputs.append(t.new(i, v, t.size()))
    return tuple(outputs)


def _reorder_tensors_as(tensors, ordered_tensors):
    """Assume that tensors are of same order as ordered_tensors within their
    types, e.g., from _take_tensors. Reorder them to be of same order as
    ordered_tensors.

    Args:
        tensors (Iterable[Tensor]): tensors to be reordered. They should be of
          the same order as ordered_tensors within their own types.
        ordered_tensors (Iterable[Tensor]): tensors whose order will be the
          reference.

    Returns:
        Ordered tuple of tensors with contents from tensors and order of
        ordered_tensors.
    """
    type_dict = defaultdict(list)
    for tensor in tensors:
        type_dict[tensor.type()].append(tensor)
    type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}
    return tuple(next(type_dict_[tensor.type()]) for tensor in ordered_tensors)


def _take_tensors(tensors, size_limit):
    """Group tensors into chunks. This generator yields a chunk at each time,
    each containing tensors of same type up to certain byte limit in total size.

    Args:
        tensors (Sequence): A sequence of tensors to be separated into chunks.
        size_limit (int): The limit of each chunk in bytes.

    Yields:
        Blocks of tensors of same type and within size_limit. The yielded
        tensors are only ordered as the original sequence within its types.
    """
    buf_dict: DefaultDict[str, List] = defaultdict(lambda: [[], 0])
    for tensor in tensors:
        t = tensor.type()
        if tensor.is_sparse:
            indices = torch.Tensor._indices(tensor)
            values = torch.Tensor._values(tensor)
            size = indices.numel() * indices.element_size() + values.numel() * values.element_size()
        else:
            size = tensor.numel() * tensor.element_size()
        buf_and_size = buf_dict[t]
        if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
            yield buf_and_size[0]
            buf_and_size = buf_dict[t] = [[], 0]
        buf_and_size[0].append(tensor)
        buf_and_size[1] += size
    for buf, _ in buf_dict.values():
        if len(buf) > 0:
            yield buf
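
# A minimal usage sketch (illustrative): these helpers are commonly combined
# for bucketing, e.g. chunk tensors by a byte budget, flatten each chunk into
# one contiguous buffer, operate on it, then unflatten back into views.
#   >>> ts = [torch.ones(3), torch.ones(5), torch.ones(7)]
#   >>> for chunk in _take_tensors(ts, size_limit=32):   # 32-byte buckets
#   ...     flat = _flatten_dense_tensors(chunk)
#   ...     outs = _unflatten_dense_tensors(flat * 2., chunk)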


def annotate(ret, **kwargs):
    def dec(fun):
        fun.__annotations__ = dict(kwargs)
        fun.__annotations__['return'] = ret
        return fun
    return dec


class KeyErrorMessage(str):
    r"""str subclass that returns itself in repr"""

    def __repr__(self):
        return self


class ExceptionWrapper(object):
    r"""Wraps an exception plus traceback to communicate across threads"""

    def __init__(self, exc_info=None, where="in background"):
        # It is important that we don't store exc_info, see
        # NOTE [ Python Traceback Reference Cycle Problem ]
        if exc_info is None:
            exc_info = sys.exc_info()
        self.exc_type = exc_info[0]
        self.exc_msg = "".join(traceback.format_exception(*exc_info))
        self.where = where

    def reraise(self):
        r"""Reraises the wrapped exception in the current thread"""
        # Format a message such as: "Caught ValueError in DataLoader worker
        # process 2. Original Traceback:", followed by the traceback.
        msg = "Caught {} {}.\nOriginal {}".format(
            self.exc_type.__name__, self.where, self.exc_msg)
        if self.exc_type == KeyError:
            # KeyError calls repr() on its argument (usually a dict key). This
            # makes stack traces unreadable. It will not be changed in Python
            # (https://bugs.python.org/issue2651), so we work around it.
            msg = KeyErrorMessage(msg)
        elif getattr(self.exc_type, "message", None):
            # Some exceptions have first argument as non-str but explicitly
            # have message field
            raise self.exc_type(message=msg)
        try:
            exception = self.exc_type(msg)
        except TypeError:
            # If the exception takes multiple arguments, don't try to
            # instantiate since we don't know how to
            raise RuntimeError(msg) from None
        raise exception


def _get_available_device_type():
    if torch.cuda.is_available():
        return "cuda"
    # add more available device types here
    return None


def _get_device_attr(get_member):
    device_type = _get_available_device_type()
    if device_type and device_type.lower() == "cuda":
        return get_member(torch.cuda)
    # add more available device types here
    return None


def _get_current_device_index():
    # current device index
    return _get_device_attr(lambda m: m.current_device())


def _get_all_device_indices():
    # all device index
    return _get_device_attr(lambda m: list(range(m.device_count())))


def _get_devices_properties(device_ids):
    # all device properties
    return [_get_device_attr(lambda m: m.get_device_properties(i)) for i in device_ids]


def get_current_device_index() -> int:
    r"""Checks if there are CUDA devices available and
    returns the device index of the current default CUDA device.
    Returns -1 in case there are no CUDA devices available.
    Arguments: ``None``
    """
    if torch.cuda.device_count() > 0:
        return torch.cuda.current_device()
    return -1


def _get_device_index(device: Any, optional: bool = False, allow_cpu: bool = False) -> int:
    r"""Gets the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    has index. Note that for a device without a specified index,
    i.e., ``torch.device('xxx')``, this will return the current default
    device of that type if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default
    device of the supported runtime platform if :attr:`optional` is ``True``.
    i.e., the current default CUDA device will be returned if CUDA runtime is supported.
    Ncpuz&Expected a non cpu device, but got: {}r#   zHExpected a torch.device with a specified index or an integer, but got:{})r   r   r   r$   r   
ValueErrorr4   indexintjitZis_scriptingr   r   )r$   r   r   Z
device_idxr   r   r    _get_device_index  s$    



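
# A minimal usage sketch (illustrative):
#   >>> _get_device_index(torch.device('cuda', 1))
#   1
#   >>> _get_device_index('cpu', allow_cpu=True)
#   -1
#   >>> _get_device_index(3)
#   3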


def _handle_complex(tensor):
    """
    Returns a real view of a tensor if it has a complex dtype, else the tensor itself.
    We must check for UninitializedParameter because calling is_complex() on the
    uninitialized parameter of a LazyModule is an error.
    """
    return torch.view_as_real(tensor) if not isinstance(
        tensor, torch.nn.UninitializedParameter) and tensor.is_complex() else tensor


def _element_size(dtype):
    """
    Returns the element size for a dtype, in bytes
    """
    if not isinstance(dtype, torch.dtype):
        raise RuntimeError(f'expected torch.dtype, but got {type(dtype)}')

    if dtype.is_complex:
        return torch.finfo(dtype).bits >> 2
    elif dtype.is_floating_point:
        return torch.finfo(dtype).bits >> 3
    elif dtype == torch.bool:
        # NOTE: torch.bool is not supported in torch.iinfo()
        return 1
    else:
        return torch.iinfo(dtype).bits >> 3


class _ClassPropertyDescriptor:
    def __init__(self, fget, fset=None):
        self.fget = fget

    def __get__(self, instance, owner=None):
        if owner is None:
            owner = type(instance)
        return self.fget.__get__(instance, owner)()


def classproperty(func):
    if not isinstance(func, (classmethod, staticmethod)):
        func = classmethod(func)
    return _ClassPropertyDescriptor(func)
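
# A minimal usage sketch (illustrative): a read-only property at class scope.
#   >>> class Config:
#   ...     @classproperty
#   ...     def name(cls):
#   ...         return cls.__name__
#   >>> Config.name
#   'Config'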