a
    7Sico•  ã                   @   s€  U d Z ddlZddlZddlZddlZddlZddlZddlZejdk rPe	dƒ‚ddl
mZmZ ddlmZmZmZmZ ejdkrˆd	Zndd
lmZ ddlmZ ddlmZmZmZmZmZ ddlZg d¢Zejdkrþe  dd¡Z!ej" #ej$dd¡Z%ej" #ej" &e'¡d¡Z(ej$ej)kr.ej" #ej)dd¡Z*ndZ*e+e,ej"j-e(e%e*gƒƒZ.e/dd„ e.D ƒƒr„ej" #e  dej" #e!dd¡¡dd¡Z0ndZ0ddl1m2Z3 ddl4Z4e3røe/dd„ e.D ƒƒrøe3 5dd¡Z6de6 Z7ej" #e!d d!d"e3 ¡Z8ej" #e  e7e8¡d¡Z9ndZ9e. :e,ej"j-e0e9gƒ¡ ej;d#d$d%Z<e=e<d&ƒZ>e< ?d¡Z@ejAe<jB_Ce>rZejAe<jD_CejAe<jE_Ce.D ]`ZFejd'krze GeF¡ nBe>r^e< DeF¡ZHeHdu r^e Ie J¡ ¡ZKeK jLd(eF› d)7  _LeK‚q^z"e Md*¡ e Md+¡ e Md,¡ W n eNyþ   eOd-ƒ Y n0 e4 4ej" #e(d.¡¡ZPd/ZQePD ]ÖZRd/ZSe>rˆe< EeRdd0¡ZHe J¡ ZTeHdu rzeTd1krze IeT¡ZKeK jLd2eR› d37  _LeK‚neHdurˆd$ZSeSseQs´d4 #e.ejUd5 g ¡ejUd5< d$ZQe< BeR¡ZHeHdu re Ie J¡ ¡ZKeK jLd2eR› d37  _LeK‚qe< ?e@¡ d6d7„ ZVese  d8¡r¦e W¡ d9kr¦ddlZXe=eXd:ƒrFe=eXd;ƒstzddlYZXW n  eZyr   ddl[m\ZX Y n0 e ]¡ Z^e _eXj`eXjaB ¡ dd<lbT e _e^¡ [^[Xner²eVƒ  dd<lbT erÌddlbmcZc zdd=lbmdZd W nN eZy*   ddlbmcZe ejd>kr$eej'du r$eZe fd?¡ g¡ ƒd‚‚ Y n0 ehecƒD ]hZieid dkr4ei jd@¡s4e kei¡ eleceiƒZmenemeƒs~e oem¡r4emjpdAkr4eidBvr4dAem_pq4esòehecƒD ]DZqeleceqƒZreserƒesecƒu r¬dCeq› ejtvr¬erejtdCeq› < q¬dDdE„ ZudFdG„ ZvdHdI„ ZwdJdK„ ZxdLdM„ Zyd/dNœdOdP„ZzdQdR„ Z{dSdT„ Z|eej}e~f ddUœdVdW„Zej}dXœdYdZ„Z€ej~dXœd[d\„Zd]d^„ Z‚d_d`„ Zƒdadb„ Z„ddcl…m†Z†m‡Z‡mˆZˆm‰Z‰ e :g dd¢¡ ddelŠm‹Z‹ ddflŒmZmŽZŽmZmZ G dgdh„ dheƒZ‘G didj„ djeƒZ’G dkdl„ dleƒZ“G dmdn„ dneƒZ”G dodp„ dpeƒZ•G dqdr„ dreƒZ–G dsdt„ dteƒZ—G dudv„ dveƒZ˜G dwdx„ dxeƒZ™G dydz„ dzeƒZšG d{d|„ d|eƒZ›G d}d~„ d~eƒZœG dd€„ d€eƒZG dd‚„ d‚eƒZžG dƒd„„ d„eƒZŸG d…d†„ d†eƒZ G d‡dˆ„ dˆeƒZ¡ee’e“e•e–e—e˜e‘e”e™eežeŸešeœe›e e¡eŽhZ¢e£ƒ Z¤ee e¥d‰< ddŠl¦m§Z§m¨Z¨m©Z©mªZªm«Z« dd‹l¬m­Z­m®Z® ddŒl¯m°Z° ddŽ„ Z±ddl²m³Z³ ec de±ƒ ¡ [±erˆdd<l´T dZµehecj¶ƒD ]RZiei ·d‘¡s–eieµv r´q–elecj¶eiƒZmdAem_peme¸ƒ ei< ei ·d¡s–e kei¡ q–dd<l¹T [[d’d“„ Zºddl»m2Z2 dd”l»m¼Z¼ dd•l»m½Z½ dd–l¾m¿Z¿mÀZÀmÁZÁmÂZÂ dd—l»mÃZÃ dd˜l»mÄZÄ dd™l»mÅZÅ ddšl»mÆZÆ ddlÇZ»dd›l»mÈZÈ ddœl»mÉZÉ ddl»mÊZÊ ddlËZ»ddžl»mÌZÌ ddŸl»mÍZÍ dd l»mÎZÎ dd¡l»mÏZÏ dd¢l»m¦Z¦ dd£l»mÐZÐ dd¤l»mÑZÑ ddlÒZ»ddlÓZ»ddlÔZ»ddlÕZ»ddlÖZ»ddl×Z»ddlØZ»ddlÙZ»dd¥l»mÚZÚ dd¦l»mÛZÛ dd§l»mÜZÜ ddlÝZ»ddlÞZ»ddlßZ»dd¨l»màZà ec áe+e»j¢ƒ¡ dd©lâmãZãmäZämåZå [ã[ä[ådªd«„ Zædd¬lçmèZè dd­lémêZê dd®l»mëZë dd¯l»mìZì eíZîdd°lïmðZð eðe»jñƒ [ðdd±lòmóZó e»jèjôjõZõe»jèjôjöZödd²l÷møZømùZù dd³lâmúZú dd´lûmüZü dµd¶„ Zýdd·lâmþZþ ejdkr|dd¸lâmÿZÿ dd¹lâm Z  dS )ºa  
The torch package contains data structures for multi-dimensional
tensors and defines mathematical operations over these tensors.
Additionally, it provides many utilities for efficient serializing of
Tensors and arbitrary types, and other useful utilities.

It has a CUDA counterpart, that enables you to run your tensor computations
on an NVIDIA GPU with compute capability >= 3.0.
é    N)é   zGPython 2 has reached end-of-life and is no longer supported by PyTorch.é   )Ú_import_dotted_nameÚclassproperty)Úget_file_pathÚ#prepare_multiprocessing_environmentÚUSE_RTLD_GLOBAL_WITH_LIBTORCHÚUSE_GLOBAL_DEPSÚtorch_deployztorch-deploy-1.8)Ú__version__)Ústring_classes)ÚSetÚTypeÚTYPE_CHECKINGÚUnionÚCallable)1ÚtypenameÚ	is_tensorÚ
is_storageÚset_default_tensor_typeÚset_rng_stateÚget_rng_stateÚmanual_seedÚinitial_seedÚseedÚsaveÚloadÚset_printoptionsÚchunkÚsplitÚstackÚmatmulÚno_gradÚenable_gradZrandZrandnÚinference_modeÚDoubleStorageÚFloatStorageÚLongStorageÚ
IntStorageÚShortStorageÚCharStorageÚByteStorageÚBoolStorageÚ_TypedStorageZDoubleTensorZFloatTensorZ
LongTensorZ	IntTensorZShortTensorZ
CharTensorZ
ByteTensorZ
BoolTensorÚTensorÚlobpcgÚuse_deterministic_algorithmsÚ$are_deterministic_algorithms_enabledÚ-is_deterministic_algorithms_warn_only_enabledÚset_deterministic_debug_modeÚget_deterministic_debug_modeÚset_float32_matmul_precisionÚget_float32_matmul_precisionÚset_warn_alwaysÚis_warn_always_enabledÚwin32ZProgramFileszC:\Program FilesÚLibraryÚbinÚlibÚ c                 C   s$   g | ]}t j t j |d ¡¡ ‘qS )znvToolsExt64_1.dll)ÚosÚpathÚexistsÚjoin©Ú.0Úp© rE   úJ/var/www/html/django/DPS/env/lib/python3.9/site-packages/torch/__init__.pyÚ
<listcomp>J   ó    rG   ZNVTOOLSEXT_PATHzNVIDIA CorporationZ
NvToolsExtZx64)Úcudac                 C   s"   g | ]}t   tj |d ¡¡ ‘qS )zcudart64*.dll)Úglobr>   r?   rA   rB   rE   rE   rF   rG   R   rH   Ú.Ú_ZCUDA_PATH_VzNVIDIA GPU Computing ToolkitÚCUDAÚvzkernel32.dllT)Úuse_last_errorÚAddDllDirectory)r   é   z Error adding "z" to the DLL directories.zvcruntime140.dllzmsvcp140.dllzvcruntime140_1.dllzµMicrosoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure.
                 It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exez*.dllFi   é~   z Error loading "z" or one of its dependencies.ú;ÚPATHc                  C   sj   t  ¡ dkstjdkrd S dt  ¡ dkr,dnd } tj t¡}tj tj 	|¡d| ¡}t
j|t
jd d S )	NÚWindowsr
   Zlibtorch_global_depsÚDarwinz.dylibz.sor<   )Úmode)ÚplatformÚsystemÚsysÚ
executabler>   r?   ÚabspathÚ__file__rA   ÚdirnameÚctypesÚCDLLÚRTLD_GLOBAL)Zlib_nameÚhereZlib_pathrE   rE   rF   Ú_load_global_deps’   s    rc   ZTORCH_USE_RTLD_GLOBALrU   ra   Ú	RTLD_LAZY)Ú*)Ú_initExtension)r   é   a’  
            Failed to load PyTorch C extensions:
                It appears that PyTorch has loaded the `torch/_C` folder
                of the PyTorch repository rather than the C extensions which
                are expected in the `torch._C` namespace. This can occur when
                using the `install` workflow. e.g.
                    $ python setup.py install && python -c "import torch"

                This error can generally be solved using the `develop` workflow
                    $ python setup.py develop && python -c "import torch"  # This should succeed
                or by running Python from a different directory.
            ZBaseÚtorch)ZDisableTorchFunctionÚ	Generatorz	torch._C.c                 C   s‚   t | tjƒr|  ¡ S d}d}t| dƒrN| jdkrN| jdkrN| jd urN| jd }t| dƒr`| j}nt| dƒrr| j}n| jj}|| S )Nr=   Ú
__module__ÚbuiltinsÚ__builtin__rK   Ú__qualname__Ú__name__)	Ú
isinstancerh   r.   ÚtypeÚhasattrrj   rm   rn   Ú	__class__)ÚoÚmoduleÚ
class_namerE   rE   rF   r     s     ÿÿ


r   c                 C   s   t | tjƒS )a©  Returns True if `obj` is a PyTorch tensor.

    Note that this function is simply doing ``isinstance(obj, Tensor)``.
    Using that ``isinstance`` check is better for typechecking with mypy,
    and more explicit - so it's recommended to use that instead of
    ``is_tensor``.

    Args:
        obj (Object): Object to test
    Example::

        >>> x=torch.tensor([1,2,3])
        >>> torch.is_tensor(x)
        True

    )ro   rh   r.   ©ÚobjrE   rE   rF   r     s    r   c                 C   s   t | ƒtv S )zgReturns True if `obj` is a PyTorch storage object.

    Args:
        obj (Object): Object to test
    )rp   Ú_storage_classesrv   rE   rE   rF   r   -  s    r   c                 C   s    t | tƒrt| ƒ} t | ¡ dS )aˆ  Sets the default ``torch.Tensor`` type to floating point tensor type
    ``t``. This type will also be used as default floating point type for
    type inference in :func:`torch.tensor`.

    The default floating point tensor type is initially ``torch.FloatTensor``.

    Args:
        t (type or string): the floating point tensor type or its name

    Example::

        >>> torch.tensor([1.2, 3]).dtype    # initial default for floating point is torch.float32
        torch.float32
        >>> torch.set_default_tensor_type(torch.DoubleTensor)
        >>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
        torch.float64

    N)ro   Ú_string_classesr   Ú_CZ_set_default_tensor_type)ÚtrE   rE   rF   r   6  s    
r   c                 C   s   t  | ¡ dS )aj  

    Sets the default floating point dtype to :attr:`d`. Supports torch.float32
    and torch.float64 as inputs. Other dtypes may be accepted without complaint
    but are not supported and are unlikely to work as expected.

    When PyTorch is initialized its default floating point dtype is torch.float32,
    and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
    type inference. The default floating point dtype is used to:

    1. Implicitly determine the default complex dtype. When the default floating point
       type is float32 the default complex dtype is complex64, and when the default
       floating point type is float64 the default complex type is complex128.
    2. Infer the dtype for tensors constructed using Python floats or complex Python
       numbers. See examples below.
    3. Determine the result of type promotion between bool and integer tensors and
       Python floats and complex Python numbers.

    Args:
        d (:class:`torch.dtype`): the floating point dtype to make the default.
                                  Either torch.float32 or torch.float64.

    Example:
        >>> # initial default for floating point is torch.float32
        >>> # Python floats are interpreted as float32
        >>> torch.tensor([1.2, 3]).dtype
        torch.float32
        >>> # initial default for floating point is torch.complex64
        >>> # Complex Python numbers are interpreted as complex64
        >>> torch.tensor([1.2, 3j]).dtype
        torch.complex64

        >>> torch.set_default_dtype(torch.float64)

        >>> # Python floats are now interpreted as float64
        >>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
        torch.float64
        >>> # Complex Python numbers are now interpreted as complex128
        >>> torch.tensor([1.2, 3j]).dtype   # a new complex tensor
        torch.complex128

    N)rz   Z_set_default_dtype)ÚdrE   rE   rF   Úset_default_dtypeN  s    +r}   ©Ú	warn_onlyc                C   s   t j| |d dS )aR   Sets whether PyTorch operations must use "deterministic"
    algorithms. That is, algorithms which, given the same input, and when
    run on the same software and hardware, always produce the same output.
    When enabled, operations will use deterministic algorithms when available,
    and if only nondeterministic algorithms are available they will throw a
    :class:`RuntimeError` when called.

    .. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative
        interface for this feature.

    The following normally-nondeterministic operations will act
    deterministically when ``mode=True``:

        * :class:`torch.nn.Conv1d` when called on CUDA tensor
        * :class:`torch.nn.Conv2d` when called on CUDA tensor
        * :class:`torch.nn.Conv3d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor
        * :func:`torch.bmm` when called on sparse-dense CUDA tensors
        * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
          and the index is a list of tensors
        * :func:`torch.Tensor.index_put` with ``accumulate=False``
        * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
          tensor
        * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
          tensor
        * :func:`torch.Tensor.scatter_add_` when ``input`` dimension is one and called
          on a CUDA tensor
        * :func:`torch.gather` when ``input`` dimension is one and called
          on a CUDA tensor that requires grad
        * :func:`torch.index_add` when called on CUDA tensor
        * :func:`torch.index_select` when attempting to differentiate a CUDA tensor
        * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
        * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor

    The following normally-nondeterministic operations will throw a
    :class:`RuntimeError` when ``mode=True``:

        * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
        * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
          and one of the following modes is used:

          - ``linear``
          - ``bilinear``
          - ``bicubic``
          - ``trilinear``

        * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.NLLLoss` when called on a CUDA tensor
        * :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when
          ``mode='max'``
        * :func:`torch.Tensor.scatter_add_` when ``input`` dimension is larger than one
          and called on a CUDA tensor
        * :func:`torch.gather` when ``input`` dimension is larger than one
          and called on a CUDA tensor that requires grad
        * :func:`torch.Tensor.put_` when ``accumulate=False``
        * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
        * :func:`torch.histc` when called on a CUDA tensor
        * :func:`torch.bincount` when called on a CUDA tensor
        * :func:`torch.kthvalue` with called on a CUDA tensor
        * :func:`torch.median` with indices output when called on a CUDA tensor
        * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor

    A handful of CUDA operations are nondeterministic if the CUDA version is
    10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
    or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
    details: `<https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility>`_
    If one of these environment variable configurations is not set, a :class:`RuntimeError`
    will be raised from these operations when called with CUDA tensors:

        * :func:`torch.mm`
        * :func:`torch.mv`
        * :func:`torch.bmm`

    Note that deterministic operations tend to have worse performance than
    nondeterministic operations.

    .. note::

        This flag does not detect or prevent nondeterministic behavior caused
        by calling an inplace operation on a tensor with an internal memory
        overlap or by giving such a tensor as the :attr:`out` argument for an
        operation. In these cases, multiple writes of different data may target
        a single memory location, and the order of writes is not guaranteed.

    Args:
        mode (:class:`bool`): If True, makes potentially nondeterministic
            operations switch to a deterministic algorithm or throw a runtime
            error. If False, allows nondeterministic operations.

    Keyword args:
        warn_only (:class:`bool`, optional): If True, operations that do not
            have a deterministic implementation will throw a warning instead of
            an error. Default: ``False``

    Example::

        >>> torch.use_deterministic_algorithms(True)

        # Forward mode nondeterministic error
        >>> torch.randn(10).index_copy(0, torch.tensor([0]), torch.randn(1))
        ...
        RuntimeError: index_copy does not have a deterministic implementation...

        # Backward mode nondeterministic error
        >>> torch.randn(10, requires_grad=True, device='cuda').index_select(0, torch.tensor([0], device='cuda')).backward()
        ...
        RuntimeError: index_add_cuda_ does not have a deterministic implementation...
    r~   N)rz   Ú_set_deterministic_algorithms)rW   r   rE   rE   rF   r0   {  s    {r0   c                   C   s   t  ¡ S )z˜Returns True if the global deterministic flag is turned on. Refer to
    :func:`torch.use_deterministic_algorithms` documentation for more details.
    )rz   Ú_get_deterministic_algorithmsrE   rE   rE   rF   r1   ø  s    r1   c                   C   s   t  ¡ S )z£Returns True if the global deterministic flag is set to warn only.
    Refer to :func:`torch.use_deterministic_algorithms` documentation for more
    details.
    )rz   Ú'_get_deterministic_algorithms_warn_onlyrE   rE   rE   rF   r2   þ  s    r2   )Ú
debug_modeÚreturnc                 C   s¶   t | tjtfƒs"tdt| ƒ› ƒ‚t | tƒrd| dkr:d} n*| dkrHd} n| dkrVd} ntd| › ƒ‚| dkrxt d	¡ n:| dkrtjd
d
d n"| dkr¤t d
¡ ntd| › ƒ‚dS )aö  Sets the debug mode for deterministic operations.

    .. note:: This is an alternative interface for
        :func:`torch.use_deterministic_algorithms`. Refer to that function's
        documentation for details about affected operations.

    Args:
        debug_mode(str or int): If "default" or 0, don't error or warn on
            nondeterministic operations. If "warn" or 1, warn on
            nondeterministic operations. If "error" or 2, error on
            nondeterministic operations.
    z'debug_mode must be str or int, but got Údefaultr   Úwarnr   Úerroré   zQinvalid value of debug_mode, expected one of `default`, `warn`, `error`, but got FTr~   z:invalid value of debug_mode, expected 0, 1, or 2, but got N)	ro   rk   ÚintÚstrÚ	TypeErrorrp   ÚRuntimeErrorrz   r€   )rƒ   rE   rE   rF   r3     s2    
ÿÿÿÿr3   )r„   c                   C   s"   t  ¡ rt  ¡ rdS dS ndS dS )zªReturns the current value of the debug mode for deterministic
    operations. Refer to :func:`torch.set_deterministic_debug_mode`
    documentation for more details.
    r   rˆ   r   N)rz   r   r‚   rE   rE   rE   rF   r4   /  s
    r4   c                   C   s   t  ¡ S )z¢Returns the current value of float32 matrix multiplication precision. Refer to
    :func:`torch.set_float32_matmul_precision` documentation for more details.
    )rz   Z_get_float32_matmul_precisionrE   rE   rE   rF   r6   =  s    r6   c                 C   s   t  | ¡ dS )aÛ  Sets the internal precision of float32 matrix multiplications.

    Running float32 matrix multiplications in lower precision may significantly increase
    performance, and in some programs the loss of precision has a negligible impact.

    Supports three settings:

        * "highest", float32 matrix multiplications use the float32 datatype for
          internal computations.
        * "high", float32 matrix multiplications use the TensorFloat32 or bfloat16_3x
          datatypes for internal computations, if fast matrix multiplication algorithms
          using those datatypes internally are available. Otherwise float32
          matrix multiplications are computed as if the precision is "highest".
        * "medium", float32 matrix multiplications use the bfloat16 datatype for
          internal computations, if a fast matrix multiplication algorithm
          using that datatype internally is available. Otherwise float32
          matrix multiplications are computed as if the precision is "high".

    .. note::

        This does not change the output dtype of float32 matrix multiplications,
        it controls how the internal computation of the matrix multiplication is performed.

    .. note::

        This does not change the precision of convolution operations. Other flags,
        like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution
        operations.

    .. note::

        This flag currently only affects one native device type: CUDA.
        If "high" or "medium" are set then the TensorFloat32 datatype will be used
        when computing float32 matrix multiplications, equivalent to setting
        `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default)
        is set then the float32 datatype is used for internal computations, equivalent
        to setting `torch.backends.cuda.matmul.allow_tf32 = False`.

    Args:
        precision(str): can be set to "highest" (default), "high", or "medium" (see above).

    N)rz   Z_set_float32_matmul_precision)Ú	precisionrE   rE   rF   r5   C  s    +r5   c                 C   s   t  | ¡ dS )a”  When this flag is False (default) then some PyTorch warnings may only
    appear once per process. This helps avoid excessive warning information.
    Setting it to True causes these warnings to always appear, which may be
    helpful when debugging.

    Args:
        b (:class:`bool`): If True, force warnings to always be emitted
                           If False, set to the default behaviour
    N)rz   Z_set_warnAlways)ÚbrE   rE   rF   r7   p  s    
r7   c                   C   s   t  ¡ S )z‰Returns True if the global warn_always flag is turned on. Refer to
    :func:`torch.set_warn_always` documentation for more details.
    )rz   Z_get_warnAlwaysrE   rE   rE   rF   r8   |  s    r8   )ÚeÚnanÚinfÚpi)r   r’   r   r‘   )r.   )Ú_StorageBaser-   Ú_LegacyStorageÚ_UntypedStoragec                   @   s   e Zd Zedd„ ƒZdS )r+   c                 C   s   t jS ©N)rh   Zuint8©ÚselfrE   rE   rF   Údtype–  s    zByteStorage.dtypeN©rn   rj   rm   r   r™   rE   rE   rE   rF   r+   •  s   r+   c                   @   s   e Zd Zedd„ ƒZdS )r%   c                 C   s   t jS r–   )rh   Údoubler—   rE   rE   rF   r™   ›  s    zDoubleStorage.dtypeNrš   rE   rE   rE   rF   r%   š  s   r%   c                   @   s   e Zd Zedd„ ƒZdS )r&   c                 C   s   t jS r–   )rh   Úfloatr—   rE   rE   rF   r™      s    zFloatStorage.dtypeNrš   rE   rE   rE   rF   r&   Ÿ  s   r&   c                   @   s   e Zd Zedd„ ƒZdS )ÚHalfStoragec                 C   s   t jS r–   )rh   Zhalfr—   rE   rE   rF   r™   ¥  s    zHalfStorage.dtypeNrš   rE   rE   rE   rF   r   ¤  s   r   c                   @   s   e Zd Zedd„ ƒZdS )r'   c                 C   s   t jS r–   )rh   Úlongr—   rE   rE   rF   r™   ª  s    zLongStorage.dtypeNrš   rE   rE   rE   rF   r'   ©  s   r'   c                   @   s   e Zd Zedd„ ƒZdS )r(   c                 C   s   t jS r–   )rh   r‰   r—   rE   rE   rF   r™   ¯  s    zIntStorage.dtypeNrš   rE   rE   rE   rF   r(   ®  s   r(   c                   @   s   e Zd Zedd„ ƒZdS )r)   c                 C   s   t jS r–   )rh   Úshortr—   rE   rE   rF   r™   ´  s    zShortStorage.dtypeNrš   rE   rE   rE   rF   r)   ³  s   r)   c                   @   s   e Zd Zedd„ ƒZdS )r*   c                 C   s   t jS r–   )rh   Zint8r—   rE   rE   rF   r™   ¹  s    zCharStorage.dtypeNrš   rE   rE   rE   rF   r*   ¸  s   r*   c                   @   s   e Zd Zedd„ ƒZdS )r,   c                 C   s   t jS r–   )rh   Úboolr—   rE   rE   rF   r™   ¾  s    zBoolStorage.dtypeNrš   rE   rE   rE   rF   r,   ½  s   r,   c                   @   s   e Zd Zedd„ ƒZdS )ÚBFloat16Storagec                 C   s   t jS r–   )rh   Zbfloat16r—   rE   rE   rF   r™   Ã  s    zBFloat16Storage.dtypeNrš   rE   rE   rE   rF   r¡   Â  s   r¡   c                   @   s   e Zd Zedd„ ƒZdS )ÚComplexDoubleStoragec                 C   s   t jS r–   )rh   Zcdoubler—   rE   rE   rF   r™   È  s    zComplexDoubleStorage.dtypeNrš   rE   rE   rE   rF   r¢   Ç  s   r¢   c                   @   s   e Zd Zedd„ ƒZdS )ÚComplexFloatStoragec                 C   s   t jS r–   )rh   Zcfloatr—   rE   rE   rF   r™   Í  s    zComplexFloatStorage.dtypeNrš   rE   rE   rE   rF   r£   Ì  s   r£   c                   @   s   e Zd Zedd„ ƒZdS )ÚQUInt8Storagec                 C   s   t jS r–   )rh   Zquint8r—   rE   rE   rF   r™   Ò  s    zQUInt8Storage.dtypeNrš   rE   rE   rE   rF   r¤   Ñ  s   r¤   c                   @   s   e Zd Zedd„ ƒZdS )ÚQInt8Storagec                 C   s   t jS r–   )rh   Zqint8r—   rE   rE   rF   r™   ×  s    zQInt8Storage.dtypeNrš   rE   rE   rE   rF   r¥   Ö  s   r¥   c                   @   s   e Zd Zedd„ ƒZdS )ÚQInt32Storagec                 C   s   t jS r–   )rh   Zqint32r—   rE   rE   rF   r™   Ü  s    zQInt32Storage.dtypeNrš   rE   rE   rE   rF   r¦   Û  s   r¦   c                   @   s   e Zd Zedd„ ƒZdS )ÚQUInt4x2Storagec                 C   s   t jS r–   )rh   Zquint4x2r—   rE   rE   rF   r™   á  s    zQUInt4x2Storage.dtypeNrš   rE   rE   rE   rF   r§   à  s   r§   c                   @   s   e Zd Zedd„ ƒZdS )ÚQUInt2x4Storagec                 C   s   t jS r–   )rh   Zquint2x4r—   rE   rE   rF   r™   æ  s    zQUInt2x4Storage.dtypeNrš   rE   rE   rE   rF   r¨   å  s   r¨   Ú_tensor_classes)r   r   r   r   r   )r   r   )r   c                  C   sT   t  ¡ dkstjdkrdS tdddƒ} ttdƒƒ tj | ¡sJt	d|  ƒ‚|  
d¡S )	NrU   r
   rH   rh   r;   Ztorch_shm_managerz$Unable to find torch_shm_manager at zutf-8)rX   rY   rZ   r[   r   r   r>   r?   r@   rŒ   Úencode)r?   rE   rE   rF   Úmanager_pathþ  s    r«   )Úautocast)Z
unique_dimÚ__c                 C   sH   ddl m}m} t| ƒtjur8|| fƒr8|t| f| |ƒS | sDJ |ƒ‚dS )zFA wrapper around Python's assert which is symbolically traceable.
    r   )Úhas_torch_functionÚhandle_torch_functionN)Ú	overridesr®   r¯   rp   rh   r.   Ú_assert)Ú	conditionÚmessager®   r¯   rE   rE   rF   r±   :  s    r±   )Úcpu)Úautograd)r"   r#   Úset_grad_enabledr$   )Úfft)Úfutures)Únn)Úoptim)Úmultiprocessing)Úsparse)Úspecial)Úonnx)Újit)Úlinalg)Úhub)Úrandom)Údistributions)Útesting)Ú
__config__)Ú
__future__)Úprofiler)Úao)Ú_torch_docsÚ_tensor_docsÚ_storage_docsc                   C   s   t jS )z?Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1)rz   Z_GLIBCXX_USE_CXX11_ABIrE   rE   rE   rF   Úcompiled_with_cxx11_abi~  s    rÌ   )Úops)Úclasses)Úquantization)Úquasirandom)Úregister_after_fork)r/   )Úfrom_dlpackÚ	to_dlpack)Ú_masked)Úsolvec                 C   sF   t  | ¡j} tjt }t|| ƒr6td | t	|| ƒ¡ƒ‚t
|| |ƒ dS )zùRegister an external runtime module of the specific :attr:`device_type`
    supported by torch.

    After the :attr:`module` is registered correctly, the user can refer
    the external runtime module as part of torch with attribute torch.xxx.
    z@The runtime module of '{}' has already been registered with '{}'N)rh   Údevicerp   rZ   Úmodulesrn   rq   rŒ   ÚformatÚgetattrÚsetattr)Zdevice_typert   ÚmrE   rE   rF   Ú_register_device_module­  s    


ÿrÜ   )Úreturn_types)Úlibrary)Ú_meta_registrations(  Ú__doc__r>   rZ   rX   Útextwrapr_   ÚwarningsÚinspectÚversion_infoÚ	ExceptionZ_utilsr   r   Z_utils_internalr   r   r   r	   r[   r   Ztorch_versionZ_sixr   ry   Útypingr   r   r   r   r   rk   Ú__all__ÚgetenvZpfiles_pathr?   rA   Úexec_prefixZpy_dll_pathr^   r]   Zth_dll_pathÚbase_exec_prefixZbase_py_dll_pathÚlistÚfilterr@   Z	dll_pathsÚallZnvtoolsext_dll_pathÚversionrI   Zcuda_versionrJ   ÚreplaceZcuda_version_1Zcuda_path_varZdefault_pathZ	cuda_pathÚextendZWinDLLÚkernel32rq   Zwith_load_library_flagsZSetErrorModeZprev_error_modeÚc_void_pZLoadLibraryWÚrestyperP   ZLoadLibraryExWZdll_pathZadd_dll_directoryÚresZWinErrorZget_last_errorÚerrÚstrerrorr`   ÚOSErrorÚprintZdllsZpath_patchedÚdllZ	is_loadedZ
last_errorÚenvironrc   rY   Z	_dl_flagsZDLFCNÚImportErrorZ	torch._dlZ_dlÚgetdlopenflagsZ	old_flagsÚsetdlopenflagsra   rd   Ztorch._Crz   rf   Z_C_for_compiled_checkÚdedentÚstripÚdirÚnameÚendswithÚappendrÙ   rw   ro   Úisclassrj   ÚattrÚ	candidaterp   r×   r   r   r   r   r}   r0   r1   r2   r‰   rŠ   r3   r4   r6   r5   r7   r8   Úmathr   r   r‘   r’   Z_tensorr.   Ústorager“   r-   r”   r•   r+   r%   r&   r   r'   r(   r)   r*   r,   r¡   r¢   r£   r¤   r¥   r¦   r§   r¨   rx   Úsetr©   Ú__annotations__rÂ   r   r   r   r   r   Zserializationr   r   Z_tensor_strr   r«   Z	torch.ampr¬   Ztorch._C._VariableFunctionsZPRIVATE_OPSZ_VariableFunctionsÚ
startswithÚglobalsÚ
functionalr±   rh   r´   rµ   Ztorch.autogradr"   r#   r¶   r$   r·   r¸   r¹   rº   Ztorch.optim._multi_tensorr»   r¼   r½   Ztorch.utils.backcompatr¾   r¿   rÀ   rÁ   rÃ   rÄ   Ztorch.backends.cudaZtorch.backends.mpsZtorch.backends.cudnnZtorch.backends.mklZtorch.backends.mkldnnZtorch.backends.openmpZtorch.backends.quantizedZtorch.utils.datarÅ   rÆ   rÇ   Ztorch.nn.intrinsicZtorch.nn.quantizableZtorch.nn.quantizedrÈ   Z_init_namesr=   rÉ   rÊ   rË   rÌ   Z
torch._opsrÍ   Ztorch._classesrÎ   rÏ   rÐ   Zcontiguous_formatZlegacy_contiguous_formatZtorch.multiprocessing._atforkrÑ   Zget_num_threadsZ_lobpcgr/   ZatenZquantized_lstmZquantized_gruZtorch.utils.dlpackrÒ   rÓ   rÔ   Z_linalg_utilsrÕ   rÜ   rÝ   rÞ   rß   rE   rE   rE   rF   Ú<module>   sö  


ÿ














ÿ
õ




	-}*-

û		



