from __future__ import annotations

from typing import Any, Union, Sequence, Optional, Callable, Dict, Tuple, List
from enum import Enum
from functools import reduce, cmp_to_key

import torch
import operator

# nvFuser is only available in builds that ship torch._C._nvfuser; fall back to
# an empty mapping otherwise.
if hasattr(torch._C, "_nvfuser"):
    from torch._C._nvfuser import DataType  # type: ignore[import]

    _torch_dtype_to_nvfuser_dtype_map = {
        torch.cdouble: DataType.ComplexDouble,
        torch.cfloat: DataType.ComplexFloat,
        torch.double: DataType.Double,
        torch.float: DataType.Float,
        torch.half: DataType.Half,
        torch.bfloat16: DataType.BFloat16,
        torch.long: DataType.Int,
        torch.int: DataType.Int32,
        torch.bool: DataType.Bool,
    }
else:
    _torch_dtype_to_nvfuser_dtype_map = {}
def getnvFuserDtype(dtype: torch.dtype):
    """
    Translates from torch.dtype to nvFuser's DataType enum
    """
    return _torch_dtype_to_nvfuser_dtype_map[dtype]


ShapeType = Union[torch.Size, List[int], Tuple[int, ...]]
StrideType = Union[List[int], Tuple[int, ...]]
DimsType = Union[int, List[int], Tuple[int, ...]]
DimsSequenceType = Union[List[int], Tuple[int, ...]]
NumberType = Union[bool, int, float, complex]
Number = (bool, int, float, complex)
class TensorMeta(torch.Tensor):
    """
    Model tensor metadata.  Not a stock meta tensor because device is modeled
    as the original device (not meta device), also we have different behavior
    for some high level Python bindings
    """

    node: Optional[Any]
    tname: str

    @staticmethod
    def __new__(
        cls,
        tensorlike: Optional[Union[TensorMeta, NumberType, torch.Tensor]] = None,
        *,
        shape: Optional[ShapeType] = None,
        strides: Optional[StrideType] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[Union[torch.device, str]] = None,
    ):
        if isinstance(tensorlike, Number):
            assert not shape and (shape is None or isinstance(shape, Sequence))
            assert not strides and (strides is None or isinstance(strides, Sequence))
            inferred_shape: Tuple[int, ...] = ()
            inferred_strides: Tuple[int, ...] = ()
            inferred_dtype = type_to_dtype(type(tensorlike))
            inferred_device = torch.device("cpu")
        elif tensorlike is not None:
            assert isinstance(tensorlike, (TensorMeta, torch.Tensor))
            inferred_shape = tuple(tensorlike.shape)
            inferred_strides = tuple(tensorlike.stride())
            inferred_dtype = tensorlike.dtype
            inferred_device = tensorlike.device
        else:
            # If no tensorlike "example" is given then all metadata
            # must be provided explicitly
            assert shape is not None
            assert strides is not None
            assert dtype is not None
            assert device is not None

        shape = inferred_shape if shape is None else tuple(shape)
        strides = inferred_strides if strides is None else tuple(strides)
        dtype = inferred_dtype if dtype is None else dtype
        device = inferred_device if device is None else device

        if isinstance(device, str):
            device = torch.device(device)

        r = torch.Tensor._make_wrapper_subclass(  # type: ignore[attr-defined]
            cls,
            shape,
            strides=strides,
            storage_offset=0,
            dtype=dtype,
            device=device,
            requires_grad=False,
        )

        r.tname = ""
        r.node = None
        return r

    @classmethod
    def __torch_function__(
        cls,
        func: Callable,
        types: Sequence,
        args: Sequence[Any] = (),
        kwargs: Optional[Dict] = None,
    ):
        if kwargs is None:
            kwargs = {}

        # A handful of inspection calls are forwarded to the regular tensor
        # machinery; everything else must provide a meta function.
        if func in {
            torch.Tensor.ndim.__get__,  # type: ignore[attr-defined]
            torch.Tensor.numel,
            torch.Tensor.stride,
            torch.Tensor.dtype.__get__,  # type: ignore[attr-defined]
            torch.Tensor.shape.__get__,  # type: ignore[attr-defined]
            torch.Tensor.device.__get__,  # type: ignore[attr-defined]
        }:
            return super().__torch_function__(func, types, args, kwargs)

        if not hasattr(func, "meta"):
            raise ValueError(f"Callable {func} has no meta function!")

        return func.meta(*args, **kwargs)  # type: ignore[attr-defined]

    @classmethod
    def __torch_dispatch__(cls, func, types, args=(), kwargs=None):
        raise RuntimeError("this should be unreachable")

    def __repr__(self):
        return self.tname

    def __format__(self, format_spec):
        return self.tname


TensorLikeType = Union[torch.Tensor, TensorMeta]
TensorLike = (torch.Tensor, TensorMeta)
TensorSequenceType = Union[List[TensorLikeType], Tuple[TensorLikeType, ...]]
def compare_tensor_meta(a: TensorLikeType, b: TensorLikeType):
    """
    Checks that two tensor likes have the same shape,
    dtype and device.

    In the future this will validate additional metadata, like
    strides.
    """
    assert isinstance(a, TensorLike)
    assert isinstance(b, TensorLike)

    for x, y in zip(a.shape, b.shape):
        if x != y:
            msg = "Shapes {0} and {1} are not equal!".format(a.shape, b.shape)
            raise AssertionError(msg)

    if a.dtype != b.dtype:
        msg = "Dtypes {0} and {1} are not equal!".format(a.dtype, b.dtype)
        raise AssertionError(msg)

    if a.device != b.device:
        # Treats "cuda:0" and "cuda" as the same device
        if (str(a.device) == "cuda:0" or str(a.device) == "cuda") and (
            str(b.device) == "cuda:0" or str(b.device) == "cuda"
        ):
            pass
        else:
            msg = "Devices {0} and {1} are not equal!".format(a.device, b.device)
            raise AssertionError(msg)

    same_strides, idx = check_significant_strides(a, b)
    if not same_strides:
        msg = "Stride mismatch! Strides are {0} and {1} (mismatched at {2})!".format(
            a.stride(), b.stride(), idx
        )
        raise RuntimeError(msg)


def check_significant_strides(
    a: TensorLikeType, b: TensorLikeType
) -> Tuple[bool, Optional[int]]:
    # Only checks on CUDA tensors with at least one element; strides of
    # length-one dimensions are not considered significant.
    if (a.device.type == "cuda" or b.device.type == "cuda") and a.numel() > 0:
        for idx in range(a.ndim):
            if a.stride()[idx] != b.stride()[idx] and a.shape[idx] > 1:
                return False, idx

    return True, None
def is_contiguous(a: TensorLikeType) -> bool:
    """
    Tests whether a tensor is contiguous or not.

    Tensors are contiguous when they have no elements,
    or when they have "nested" strides.
    """
    if a.numel() == 0:
        return True

    expected_stride = 1
    for x, y in reversed(tuple(zip(a.shape, a.stride()))):
        # Skips checking strides when a dimension has length one
        if x == 1:
            continue

        if y != expected_stride:
            return False
        expected_stride = y * x

    return True
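# A minimal usage sketch of is_contiguous (illustrative addition; the _example_*
# helper below is not part of the module's API and is never called here).
def _example_is_contiguous() -> None:
    a = torch.empty(2, 3, 4)
    assert is_contiguous(a)  # freshly allocated tensors have "nested" strides

    # Transposing swaps strides without copying, so the view is not contiguous.
    assert not is_contiguous(a.transpose(0, 2))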
def compute_elementwise_output_strides(*tensors) -> Tuple[int, ...]:
    """
    Computes the output strides for elementwise operations.
    """

    if len(tensors) == 0:
        msg = "Can't compute elementwise output strides for zero tensors!"
        raise ValueError(msg)

    check_same_shape(*tensors, allow_cpu_scalar_tensors=True)

    # Filters to actual tensors, ignoring CPU scalar tensors
    tensors = tuple(
        a for a in tensors if isinstance(a, TensorLike) and not is_cpu_scalar_tensor(a)
    )

    # Short-circuits when there are no tensors or the shape has zero or one dimensions
    if len(tensors) == 0:
        return ()

    ndim = tensors[0].ndim
    if ndim == 0:
        return ()
    if ndim == 1:
        return (1,)

    shape = tensors[0].shape

    def _cmp(idx_a, idx_b):
        for tensor in tensors:
            stride_a = tensor.stride()[idx_a]
            stride_b = tensor.stride()[idx_b]

            if stride_a == 0 or stride_b == 0:
                continue

            if stride_a < stride_b:
                return -1

            if stride_a > stride_b:
                return 1

            # stride_a == stride_b: fall back to comparing lengths
            if tensor.shape[idx_a] > tensor.shape[idx_b]:
                return 1

            if tensor.shape[idx_a] < tensor.shape[idx_b]:
                return -1

        # Hit when all strides are zero, or all strides and lengths are equal
        return 0

    perm = tuple(range(ndim))
    perm = tuple(sorted(perm, key=cmp_to_key(_cmp), reverse=True))

    permuted_shape = [-1] * ndim
    for idx, x in enumerate(perm):
        permuted_shape[idx] = shape[x]

    new_strides = make_contiguous_strides_for(permuted_shape)

    permuted_strides = [-1] * ndim
    for idx, x in enumerate(perm):
        permuted_strides[x] = new_strides[idx]

    return tuple(permuted_strides)
def validate_dim_length(length: int):
    """
    Validates that an object represents a valid
    dimension length.
    """
    assert isinstance(length, int)
    assert length >= 0
def validate_shape(shape: ShapeType):
    """
    Validates that a sequence represents a valid shape.
    """
    assert isinstance(shape, Sequence)
    for l in shape:
        validate_dim_length(l)
def validate_strides(strides: StrideType):
    """
    Verifies the object specifies valid strides.
    """
    assert isinstance(strides, Sequence)
    for stride in strides:
        assert stride >= 0
def validate_idx(rank: int, idx: int):
    """
    Validates that idx is a valid index for the given shape.
    Assumes the index is already canonicalized.
    """
    assert isinstance(idx, int)
    assert isinstance(rank, int)
    assert (idx >= 0 and idx < rank) or idx == 0


def validate_dimension_indices(rank: int, indices: DimsSequenceType):
    for idx in indices:
        validate_idx(rank, idx)
def validate_exclusive_idx(rank: int, ex_idx: int):
    """
    Validates that ex_idx is a valid exclusive index
    for the given shape.
    """
    assert isinstance(ex_idx, int)
    assert isinstance(rank, int)
    assert ex_idx > 0 and ex_idx <= rank


def canonicalize_dim(rank: int, idx: int) -> int:
    # Treats rank-0 tensors as having one dimension for wrapping purposes
    _rank = rank if rank != 0 else 1

    if idx >= 0 and idx < _rank:
        return idx

    if idx < 0:
        _idx = idx + _rank
    else:
        _idx = idx

    if _idx < 0 or _idx >= _rank:
        msg = "Received out of bounds index {0} for tensor of rank {1}!".format(
            idx, rank
        )
        raise ValueError(msg)

    return _idx


def canonicalize_dims(rank: int, indices: DimsType) -> DimsType:
    if isinstance(indices, int):
        return canonicalize_dim(rank, indices)

    return tuple(canonicalize_dim(rank, x) for x in indices)
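# A minimal usage sketch of canonicalize_dim/canonicalize_dims (illustrative
# addition; the helper below is not called anywhere in this module).
def _example_canonicalize_dim() -> None:
    # Negative dims wrap around, so -1 names the last dimension of a rank-4 tensor.
    assert canonicalize_dim(4, -1) == 3
    assert canonicalize_dim(4, 2) == 2
    # Sequences of dims are wrapped elementwise and returned as a tuple.
    assert canonicalize_dims(4, (0, -2)) == (0, 2)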
def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool:
    """
    Validates that perm is a permutation of length rank.
    """
    if not isinstance(perm, Sequence):
        return False

    if not (tuple(sorted(perm)) == tuple(range(0, rank))):
        return False

    return True
def is_same_shape(a: Sequence, b: Sequence) -> bool:
    """
    Compares two shapes a and b, returning True if they are the same
    (their ranks and corresponding lengths match) and False otherwise.
    """
    return tuple(a) == tuple(b)


def is_cpu_scalar_tensor(a: Any) -> bool:
    return isinstance(a, TensorLike) and a.ndim == 0 and a.device.type == "cpu"
def check_same_device(*args, allow_cpu_scalar_tensors):
    """
    Checks that all Tensors in args have the same device.

    Raises a RuntimeError when:
      - args contains an object whose type is not Tensor or Number
      - two Tensor objects in args have different devices, unless one is a CPU scalar tensor and allow_cpu_scalar_tensors is True
    """
    # Short-circuits if all (one or fewer) arguments are trivially on the same device
    if len(args) <= 1:
        return

    # Note: cannot initialize device to the first arg's device (it may not have one)
    device = None
    for arg in args:
        if isinstance(arg, Number):
            continue
        elif isinstance(arg, TensorLike):
            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
                continue

            if device is None:
                device = arg.device

            if device != arg.device:
                msg = (
                    "Tensor on device "
                    + str(arg.device)
                    + " is not on the expected device "
                    + str(device)
                    + "!"
                )
                raise RuntimeError(msg)
        else:
            msg = (
                "Unexpected type when checking for same device, " + str(type(arg)) + "!"
            )
            raise RuntimeError(msg)
def check_same_shape(*args, allow_cpu_scalar_tensors):
    """
    Checks that all Tensors in args have the same shape.

    Raises a RuntimeError when:
      - args contains an object whose type is not Tensor or Number
      - two Tensor objects in args have different shapes
    """
    shape = None

    for arg in args:
        if isinstance(arg, Number):
            continue
        elif isinstance(arg, TensorLike):
            if allow_cpu_scalar_tensors and is_cpu_scalar_tensor(arg):
                continue

            if shape is None:
                shape = arg.shape

            if not is_same_shape(shape, arg.shape):
                msg = "Shape {0} is not the expected shape {1}!".format(
                    arg.shape, shape
                )
                raise RuntimeError(msg)
        else:
            msg = (
                "Unexpected type when checking for same shape, " + str(type(arg)) + "!"
            )
            raise RuntimeError(msg)


_integer_dtypes = (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
_float_dtypes = (torch.float16, torch.bfloat16, torch.float32, torch.float64)
_complex_dtypes = (torch.complex32, torch.complex64, torch.complex128)


def is_boolean_dtype(dtype: torch.dtype) -> bool:
    assert isinstance(dtype, torch.dtype)
    return dtype is torch.bool


def is_integer_dtype(dtype: torch.dtype) -> bool:
    assert isinstance(dtype, torch.dtype)
    return dtype in _integer_dtypes


def is_float_dtype(dtype: torch.dtype) -> bool:
    assert isinstance(dtype, torch.dtype)
    return dtype in _float_dtypes


def is_complex_dtype(dtype: torch.dtype) -> bool:
    assert isinstance(dtype, torch.dtype)
    return dtype in _complex_dtypes


_complex_to_real_dtype_map = {
    torch.complex128: torch.float64,
    torch.complex64: torch.float32,
    torch.complex32: torch.float16,
}

_real_to_complex_dtype_map = {
    torch.float16: torch.complex32,
    torch.bfloat16: torch.complex64,
    torch.float32: torch.complex64,
    torch.float64: torch.complex128,
}


def corresponding_real_dtype(dtype: torch.dtype) -> torch.dtype:
    return _complex_to_real_dtype_map[dtype]


def corresponding_complex_dtype(dtype: torch.dtype) -> torch.dtype:
    return _real_to_complex_dtype_map[dtype]
def dtype_to_type(dtype: torch.dtype) -> type:
    """
    Computes the corresponding Python type (AKA "type kind") for the
    given dtype.
    """
    assert isinstance(dtype, torch.dtype)

    if dtype is torch.bool:
        return bool
    if dtype in _integer_dtypes:
        return int
    if dtype in _float_dtypes:
        return float
    if dtype in _complex_dtypes:
        return complex

    raise ValueError("Invalid dtype!")


_type_to_dtype_map = {
    bool: torch.bool,
    int: torch.int64,
    float: torch.float64,
    complex: torch.complex128,
}
def type_to_dtype(typ: type) -> torch.dtype:
    """
    Computes the corresponding dtype for a Number type.
    """
    return _type_to_dtype_map[typ]


_ordered_types = (bool, int, float, complex)
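# A small illustrative check of the dtype/type-kind helpers above (a minimal
# sketch; the helper below is not used elsewhere in this module).
def _example_type_kinds() -> None:
    assert dtype_to_type(torch.int32) is int
    assert type_to_dtype(float) is torch.float64
    assert is_integer_dtype(torch.uint8) and is_float_dtype(torch.bfloat16)
    # Real and complex dtypes correspond width-for-width.
    assert corresponding_complex_dtype(torch.float64) is torch.complex128
    assert corresponding_real_dtype(torch.complex64) is torch.float32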
def get_higher_type(a: type, b: type) -> type:
    """
    Returns the higher of the two given Number types.

    The types are ordered bool -> int -> float -> complex.
    """
    # Type checking
    assert a in _ordered_types
    assert b in _ordered_types

    if a is b:
        return a

    for typ in _ordered_types:
        if a is typ:
            return b
        if b is typ:
            return a

    raise ValueError("Unknown Python scalar type!")
def get_higher_dtype(
    a: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
    b: Optional[Union[torch.dtype, TensorLikeType, NumberType]],
) -> Optional[torch.dtype]:
    """
    Computes the "lowest" datatype that is weakly
    "higher" than both a and b.
    """

    # Type checking
    assert a is None or isinstance(a, (torch.dtype, TensorLike, Number))
    assert b is None or isinstance(b, (torch.dtype, TensorLike, Number))

    def _extract_dtype(x: Any) -> Optional[torch.dtype]:
        if x is None:
            return None
        if isinstance(x, torch.dtype):
            return x
        if isinstance(x, TensorLike):
            return x.dtype
        if isinstance(x, Number):
            return type_to_dtype(type(x))

        raise RuntimeError("Unexpected type given to _extract_dtype!")

    a, b = _extract_dtype(a), _extract_dtype(b)

    if a is b:
        return a

    if a is None:
        return b

    if b is None:
        return a

    ordered_datatypes = (
        (torch.bool,),
        (torch.uint8, torch.int8),
        (torch.int16,),
        (torch.int32,),
        (torch.int64,),
        (torch.float16, torch.bfloat16),
        (torch.float32,),
        (torch.float64,),
        (torch.complex32,),
        (torch.complex64,),
        (torch.complex128,),
    )

    for idx, dtypes in enumerate(ordered_datatypes):
        if a in dtypes and b in dtypes:
            return ordered_datatypes[idx + 1][0]
        if a in dtypes:
            return b
        if b in dtypes:
            return a

    raise RuntimeError("Unexpected termination!")
def is_weakly_lesser_type(a: type, b: type) -> bool:
    """
    Compares two types, a and b, returning True if a is weakly "less" than b.

    The comparison is determined by the following type ordering: bool, int, float, complex.
    """
    ordered_types = (bool, int, float, complex)

    assert a in ordered_types
    assert b in ordered_types

    for typ in ordered_types:
        if a is typ:
            return True
        if b is typ:
            return False

    raise RuntimeError("Unexpected termination!")


def can_safe_cast_to(*, cast_to: torch.dtype, cast_from: torch.dtype) -> bool:
    # Checks type kinds from "highest" to "lowest": casting to an equal or
    # higher kind is considered safe.
    for fn in (is_complex_dtype, is_float_dtype, is_integer_dtype, is_boolean_dtype):
        if fn(cast_to):
            return True
        if fn(cast_from):
            return False

    raise ValueError("Received unknown dtypes {0}, {1}!".format(cast_to, cast_from))
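# A minimal sketch of the weak dtype/type ordering helpers above (illustrative
# only; the helper below is not referenced elsewhere in this module).
def _example_dtype_ordering() -> None:
    assert get_higher_dtype(torch.int32, torch.int64) is torch.int64
    assert get_higher_dtype(torch.float16, torch.int64) is torch.float16
    # float16 and bfloat16 are unordered relative to each other, so the next
    # dtype in the partial order (float32) is returned.
    assert get_higher_dtype(torch.float16, torch.bfloat16) is torch.float32
    assert is_weakly_lesser_type(int, float) and not is_weakly_lesser_type(complex, bool)
    assert can_safe_cast_to(cast_to=torch.float64, cast_from=torch.int64)
    assert not can_safe_cast_to(cast_to=torch.int64, cast_from=torch.float64)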
def check_same_dtype(*args):
    """
    Checks that all Tensors in args have the same device and that all Numbers have the
    same corresponding Python type.

    Raises a RuntimeError when:
      - args contains an object whose type is not Tensor or Number
      - two Tensor objects in args have different dtypes
      - two Number objects in args have different types
      - there are Tensors and Numbers in args, and one of those Tensors' corresponding
          Python types is different from the type of one of those Numbers
    """
    full_dtype = None
    scalar_type = None

    for arg in args:
        if isinstance(arg, Number):
            # Numbers are currently skipped by this check
            continue
        elif isinstance(arg, TensorLike):
            if full_dtype is None:
                full_dtype = arg.dtype
            if scalar_type is None:
                scalar_type = dtype_to_type(arg.dtype)

            if full_dtype is not arg.dtype:
                msg = (
                    "Tensor with dtype "
                    + str(arg.dtype)
                    + " is not the expected dtype of "
                    + str(full_dtype)
                    + "!"
                )
                raise RuntimeError(msg)

            arg_type = dtype_to_type(arg.dtype)
            if arg_type is not scalar_type:
                msg = (
                    "Tensor with corresponding Python type "
                    + str(arg_type)
                    + " is not the expected type of "
                    + str(scalar_type)
                    + "!"
                )
                raise RuntimeError(msg)
        else:
            msg = (
                "Unexpected type when checking for same dtype, " + str(type(arg)) + "!"
            )
            raise RuntimeError(msg)


_computation_dtype_map = {
    torch.bfloat16: torch.float32,
    torch.float16: torch.float32,
    torch.complex32: torch.complex64,
}


def _get_computation_dtype(dtype: torch.dtype) -> torch.dtype:
    return _computation_dtype_map.get(dtype, dtype)


class ELEMENTWISE_TYPE_PROMOTION_KIND(Enum):
    DEFAULT = (0,)
    NO_OPMATH = (1,)
    INT_TO_FLOAT = (2,)
    ALWAYS_BOOL = (3,)
    COMPLEX_TO_FLOAT = (4,)
    BOOL_TO_LONG = (5,)
def elementwise_dtypes(
    *_args,
    type_promotion_kind: ELEMENTWISE_TYPE_PROMOTION_KIND,
) -> Tuple[torch.dtype, torch.dtype]:
    """
    Computes the computation and result dtypes for elementwise type promotion
    on the given arguments and with the given elementwise type promotion kind.

    Note that not all inputs to an elementwise operation necessarily participate in type promotion.
    For example, the "alpha" parameter of torch.add does not participate in type promotion,
    although it may be cast to the Python type corresponding to the computation dtype that
    the type promotion algorithm determines.

    Default elementwise type promotion, which all other type promotion kinds tweak (see below),
    first decides which of four ordered types to use:

    bool -> integer -> floating point -> complex

    The selected type is the "lowest" type in the above list such that all number arguments
    have a weakly "lower" type and all tensor arguments have a weakly lower corresponding
    type for their dtype.

    Once the type is determined, the particular result dtype is found. The dtypes are
    partially ordered as follows:

    bool -> uint8, int8 -> int16 -> int32 -> int64 ->
      float16, bfloat16 -> float32 -> float64 -> complex32 -> complex64 -> complex128

    The result dtype is selected by:
      - if no tensor's dtype has the same corresponding type as the one selected,
          then the result dtype is the (default) dtype corresponding to the selected type
          (for example, 1.5 + an integer tensor has a result dtype of the default floating point dtype)
      - if the result type is complex then the dtype is:
        -  the default complex dtype if there are no floating point or complex tensors
        -  if there are floating point or complex tensors with one or more dimensions, then
            the complex dtype corresponding to the highest corresponding complex dtype among those tensors
            (for example, double + cfloat -> cdouble)
        -  if there are only floating point or complex tensors with zero dimensions, then
            the complex dtype corresponding to the highest corresponding complex dtype among those tensors
      - if the first two cases do not apply, the result dtype is the highest dtype among
          all tensors with one or more dimensions of the output type, and if there are no such
          tensors then it's the highest dtype among all tensors with zero dimensions of the output type
          (for example, long + half -> half, even if the half tensor has zero dimensions)

    The "corresponding complex dtypes" are:
      float16    -> complex32
      bfloat16   -> complex64
      float32    -> complex64
      float64    -> complex128
      complex32  -> complex32
      complex64  -> complex64
      complex128 -> complex128

    The DEFAULT type promotion kind computes per above, and then uses the result dtype to pick a computation
    dtype by mapping low precision floating point and complex dtypes as follows:

      float16   -> float32
      bfloat16  -> float32
      complex32 -> complex64

    This is referred to as "op math", and the NO_OPMATH type promotion kind disables this mapping, making the
    computation dtype the same as the result dtype when it's selected. NO_OPMATH is appropriate for kernels
    which perform no mathematical operations on their tensors (see below for examples).

    The INT_TO_FLOAT type promotion kind maps boolean and integer result dtypes to the default floating point dtype,
    and computation dtypes to the appropriate op math dtype.

    The COMPLEX_TO_FLOAT type promotion kind maps complex result dtypes to the corresponding float dtype, following this
    mapping:

        complex32  -> float16
        complex64  -> float32
        complex128 -> float64

    Note that COMPLEX_TO_FLOAT derives the computation dtype as the DEFAULT setting does.

    The BOOL_TO_LONG type promotion kind maps boolean computation and result dtypes to long.

    The ALWAYS_BOOL type promotion kind always sets the result dtype to bool.

    Example operators for each type promotion option:
      DEFAULT                 : add
      NO_OPMATH               : where, nextafter, cat
      INT_TO_FLOAT            : sin
      COMPLEX_TO_FLOAT        : abs
      BOOL_TO_LONG            : pow
      ALWAYS_BOOL             : eq

    """
    args = tuple(x for x in _args if x is not None)

    highest_type: type = bool
    for x in args:
        if not isinstance(x, (Number, TensorLike)):
            msg = "Unexpected type {0} when computing elementwise type promotion!".format(
                str(type(x))
            )
            raise ValueError(msg)

        if isinstance(x, Number):
            highest_type = get_higher_type(highest_type, type(x))
        else:
            # x is a TensorLike
            highest_type = get_higher_type(highest_type, dtype_to_type(x.dtype))

    result_dtype = None

    def _find_highest_dtype_filtered(
        args, filter, *, float_as_complex=False
    ) -> Optional[torch.dtype]:
        zero_dim_tensor_dtype = None
        one_plus_dim_tensor_dtype = None
        for x in args:
            if isinstance(x, TensorLike) and filter(x.dtype):
                _dtype = x.dtype
                if float_as_complex and is_float_dtype(_dtype):
                    _dtype = corresponding_complex_dtype(_dtype)
                if x.ndim == 0:
                    zero_dim_tensor_dtype = get_higher_dtype(
                        zero_dim_tensor_dtype, _dtype
                    )
                else:
                    one_plus_dim_tensor_dtype = get_higher_dtype(
                        one_plus_dim_tensor_dtype, _dtype
                    )

        # Prefers dtypes of tensors with one or more dimensions
        if one_plus_dim_tensor_dtype is not None:
            return one_plus_dim_tensor_dtype

        return zero_dim_tensor_dtype

    if highest_type is float:
        result_dtype = _find_highest_dtype_filtered(args, is_float_dtype)
        result_dtype = (
            torch.get_default_dtype() if result_dtype is None else result_dtype
        )
    elif highest_type is complex:
        result_dtype = _find_highest_dtype_filtered(
            args,
            lambda x: is_float_dtype(x) or is_complex_dtype(x),
            float_as_complex=True,
        )
        if result_dtype is None:
            result_dtype = corresponding_complex_dtype(torch.get_default_dtype())
    elif highest_type is int:
        result_dtype = _find_highest_dtype_filtered(args, is_integer_dtype)
        result_dtype = torch.long if result_dtype is None else result_dtype
    else:
        # highest_type is bool
        result_dtype = torch.bool

    if type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT:
        return _get_computation_dtype(result_dtype), result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.NO_OPMATH:
        return result_dtype, result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT:
        if is_integer_dtype(result_dtype) or is_boolean_dtype(result_dtype):
            result_dtype = torch.get_default_dtype()
        return _get_computation_dtype(result_dtype), result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.COMPLEX_TO_FLOAT:
        # Note: computation may still occur in a complex dtype
        computation_dtype = _get_computation_dtype(result_dtype)
        if is_complex_dtype(result_dtype):
            result_dtype = corresponding_real_dtype(result_dtype)
        return computation_dtype, result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.BOOL_TO_LONG:
        if is_boolean_dtype(result_dtype):
            return torch.long, torch.long
        return _get_computation_dtype(result_dtype), result_dtype
    elif type_promotion_kind is ELEMENTWISE_TYPE_PROMOTION_KIND.ALWAYS_BOOL:
        return _get_computation_dtype(result_dtype), torch.bool
    else:
        raise ValueError(
            "Unknown type promotion kind {0}".format(str(type_promotion_kind))
        )
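# A minimal usage sketch of elementwise_dtypes (illustrative only; the helper
# below is not called anywhere in this module).
def _example_elementwise_dtypes() -> None:
    i = torch.empty(3, dtype=torch.int64)
    h = torch.empty(3, dtype=torch.float16)

    # An integer tensor combined with a float scalar promotes to the default float dtype.
    _, result = elementwise_dtypes(
        i, 2.0, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )
    assert result is torch.get_default_dtype()

    # Low precision dtypes compute in float32 ("op math") but keep their result dtype.
    computation, result = elementwise_dtypes(
        h, h, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT
    )
    assert (computation, result) == (torch.float32, torch.float16)

    # INT_TO_FLOAT (used by ops like sin) floats integer inputs.
    _, result = elementwise_dtypes(
        i, i, type_promotion_kind=ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT
    )
    assert result is torch.get_default_dtype()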
def wrap_device(d: Union[str, torch.device]) -> torch.device:
    """
    Wraps strings into torch.device objects.

    Given torch.device objects are returned unmodified.
    """
    assert isinstance(d, (str, torch.device))
    if isinstance(d, str):
        return torch.device(d)

    return d


def make_contiguous_strides_for(shape: ShapeType) -> Tuple[int, ...]:
    validate_shape(shape)
    if not shape:
        return ()

    multiplier = 1
    strides = []
    for l in reversed(shape):
        if l != 0:
            strides.append(multiplier)
            multiplier = l * multiplier
        else:
            strides.append(multiplier)

    result = tuple(reversed(strides))
    return result


def compute_reduction_output_shape(
    shape: ShapeType, dimensions: Sequence
) -> Tuple[int, ...]:
    for idx in dimensions:
        validate_idx(len(shape), idx)

    new_shape = []
    for idx in range(len(shape)):
        if idx in dimensions:
            continue

        new_shape.append(shape[idx])

    return tuple(new_shape)


def validate_no_repeating_dims(dims: Sequence):
    if len(dims) != len(set(dims)):
        raise RuntimeError("duplicate value in the list of dims")


def reduction_dims(shape: ShapeType, dims: Optional[Sequence]) -> Tuple[int, ...]:
    if dims is None:
        return tuple(range(len(shape)))
    dims = tuple(canonicalize_dim(len(shape), idx) for idx in dims)
    validate_no_repeating_dims(dims)
    return dims
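# A minimal sketch of the stride and reduction helpers above (illustrative only;
# the helper below is not referenced elsewhere in this module).
def _example_strides_and_reduction_dims() -> None:
    # Row-major ("contiguous") strides for a shape.
    assert make_contiguous_strides_for((2, 3, 4)) == (12, 4, 1)

    # Elementwise output strides follow the (non-scalar) inputs' layouts.
    a = torch.empty(2, 3, 4)                   # contiguous
    b = torch.empty(4, 3, 2).permute(2, 1, 0)  # same shape, permuted layout
    assert compute_elementwise_output_strides(a, b) == (12, 4, 1)

    # reduction_dims canonicalizes a reduction's dim argument; None means all dims.
    assert reduction_dims((2, 3, 4), None) == (0, 1, 2)
    assert reduction_dims((2, 3, 4), [-1]) == (2,)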
def check_in_bounds_for_storage(
    a: torch._TypedStorage, shape: ShapeType, strides: StrideType, storage_offset: int
):
    """
    Determines if the given shape, strides, and offset are valid for the given storage.
    """

    # Short-circuits if the shape has no elements
    if reduce(operator.mul, shape) == 0:
        return

    length = a.size() - storage_offset
    max_offset = 0
    for x, y in zip(shape, strides):
        max_offset = max_offset + (x - 1) * y

    if max_offset >= length:
        required_length = max_offset + storage_offset
        msg = (
            "Can't view a storage of size {0} with an offset of {1}, shape of {2}, and "
            "strides of {3}, which requires a storage of size {4}"
        ).format(a.size(), storage_offset, str(shape), str(strides), required_length)
        raise ValueError(msg)