import functools
import warnings
from typing import Any, Callable, List, Optional, Tuple, Union

import torch
from torch import Tensor
from torch.utils._pytree import _broadcast_to_and_flatten, tree_flatten, tree_unflatten

in_dims_t = Union[int, Tuple]
out_dims_t = Union[int, Tuple[int, ...]]


# Checks that all args-to-be-batched have the same batch dim size.
def _validate_and_get_batch_size(
        flat_in_dims: List[Optional[int]],
        flat_args: List) -> int:
    batch_sizes = [arg.size(in_dim) for in_dim, arg in zip(flat_in_dims, flat_args)
                   if in_dim is not None]
    if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes):
        raise ValueError(
            f'vmap: Expected all tensors to have the same size in the mapped '
            f'dimension, got sizes {batch_sizes} for the mapped dimension')
    return batch_sizes[0]


def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int:
    if isinstance(batched_outputs, tuple):
        return len(batched_outputs)
    return 1


# If `value` is a tuple, check that it has `num_elements` elements; otherwise
# repeat `value` into a tuple of that length.
def _as_tuple(value: Any, num_elements: int,
              error_message_lambda: Callable[[], str]) -> Tuple:
    if not isinstance(value, tuple):
        return (value,) * num_elements
    if len(value) != num_elements:
        raise ValueError(error_message_lambda())
    return value


# Wraps every Tensor in `args` that should be batched via torch._add_batch_dim.
# Returns the (potentially) batched arguments and the batch size.
def _create_batched_inputs(
        in_dims: in_dims_t, args: Tuple, vmap_level: int, func: Callable) -> Tuple[Tuple, int]:
    if not isinstance(in_dims, int) and not isinstance(in_dims, tuple):
        raise ValueError(
            f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): expected `in_dims` to be '
            f'int or a (potentially nested) tuple matching the structure of inputs, '
            f'got: {type(in_dims)}.')
    if len(args) == 0:
        raise ValueError(
            f'vmap({_get_name(func)})(<inputs>): got no inputs. Maybe you forgot to add inputs, '
            f'or you are trying to vmap over a function with no inputs. The latter is unsupported.')

    flat_args, args_spec = tree_flatten(args)
    flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec)
    if flat_in_dims is None:
        raise ValueError(
            f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): in_dims is not compatible '
            f'with the structure of `inputs`. in_dims has structure {tree_flatten(in_dims)[1]} '
            f'but inputs has structure {args_spec}.')

    for arg, in_dim in zip(flat_args, flat_in_dims):
        if not isinstance(in_dim, int) and in_dim is not None:
            raise ValueError(
                f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): Got in_dim={in_dim} '
                f'for an input but in_dim must be either an integer dimension or None.')
        if isinstance(in_dim, int) and not isinstance(arg, Tensor):
            raise ValueError(
                f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): Got in_dim={in_dim} '
                f'for an input but the input is of type {type(arg)}. We cannot vmap over '
                f'non-Tensor arguments, please use None as the respective in_dim')
        if in_dim is not None and (in_dim < 0 or in_dim >= arg.dim()):
            raise ValueError(
                f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): Got in_dim={in_dim} '
                f'for some input, but that input is a Tensor of dimensionality {arg.dim()} '
                f'so expected in_dim to satisfy 0 <= in_dim < {arg.dim()}.')

    batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)
    batched_inputs = [arg if in_dim is None else torch._add_batch_dim(arg, in_dim, vmap_level)
                      for in_dim, arg in zip(flat_in_dims, flat_args)]
    return tree_unflatten(batched_inputs, args_spec), batch_size


# Undoes the batching (and any batch dimensions) associated with `vmap_level`.
def _unwrap_batched(
        batched_outputs: Union[Tensor, Tuple[Tensor, ...]],
        out_dims: out_dims_t,
        vmap_level: int, batch_size: int, func: Callable,
        allow_none_pass_through: bool = False) -> Tuple:
    num_outputs = _num_outputs(batched_outputs)
    out_dims_as_tuple = _as_tuple(
        out_dims, num_outputs,
        lambda: f'vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must have one dim '
                f'per output (got {num_outputs} outputs) of {_get_name(func)}.')

    if isinstance(batched_outputs, Tensor):
        out_dim = out_dims_as_tuple[0]
        return torch._remove_batch_dim(batched_outputs, vmap_level, batch_size, out_dim)
    if allow_none_pass_through:
        return tuple((torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
                      if out is not None else None)
                     for out, out_dim in zip(batched_outputs, out_dims_as_tuple))
    else:
        return tuple(torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
                     for out, out_dim in zip(batched_outputs, out_dims_as_tuple))


# Checks that `func` returned one or more Tensors and nothing else.
# NB: A Python function that returns multiple values returns a single tuple, so we
# are effectively checking that `outputs` is a single Tensor or a tuple of Tensors.
def _validate_outputs(outputs: Any, func: Callable) -> None:
    if isinstance(outputs, Tensor):
        return
    if not isinstance(outputs, tuple):
        raise ValueError(
            f'vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return Tensors, '
            f'got type {type(outputs)} as the return.')
    for idx, output in enumerate(outputs):
        if isinstance(output, Tensor):
            continue
        raise ValueError(
            f'vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return Tensors, '
            f'got type {type(output)} for return {idx}.')


def _check_out_dims_is_int_or_int_tuple(out_dims: out_dims_t, func: Callable) -> None:
    if isinstance(out_dims, int):
        return
    if not isinstance(out_dims, tuple) or \
            not all(isinstance(out_dim, int) for out_dim in out_dims):
        raise ValueError(
            f'vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be an int or '
            f'a tuple of int representing where in the outputs the vmapped dimension '
            f'should appear.')


def _get_name(func: Callable):
    if hasattr(func, '__name__'):
        return func.__name__
    # Not all callables have a __name__; functools.partial objects and nn.Modules,
    # for example, do not, so fall back to repr().
    return repr(func)


# vmap(func)(inputs) wraps all Tensor inputs to be batched in batched tensors,
# sends those into func, and then unwraps the output batched tensors. Operations
# on batched tensors perform the batched operations that the user is asking for.
def vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable:
    """
    vmap is the vectorizing map. Returns a new function that maps `func` over some
    dimension of the inputs. Semantically, vmap pushes the map into PyTorch
    operations called by `func`, effectively vectorizing those operations.

    vmap is useful for handling batch dimensions: one can write a function `func`
    that runs on examples and then lift it to a function that can take batches of
    examples with `vmap(func)`. vmap can also be used to compute batched
    gradients when composed with autograd.

    .. note::
        We have moved development of vmap to
        `functorch <https://github.com/pytorch/functorch>`_. functorch's
        vmap is able to arbitrarily compose with gradient computation
        and contains significant performance improvements.
        Please give that a try if that is what you're looking for.

        Furthermore, if you're interested in using vmap for your use case,
        please `contact us! <https://github.com/pytorch/pytorch/issues/42368>`_
        We're interested in gathering feedback from early adopters to inform
        the design.

    .. warning::
        torch.vmap is an experimental prototype that is subject to
        change and/or deletion. Please use at your own risk.

    Args:
        func (function): A Python function that takes one or more arguments.
            Must return one or more Tensors.
        in_dims (int or nested structure): Specifies which dimension of the
            inputs should be mapped over. `in_dims` should have a structure
            like the inputs. If the `in_dim` for a particular input is None,
            then that indicates there is no map dimension. Default: 0.
        out_dims (int or Tuple[int]): Specifies where the mapped dimension
            should appear in the outputs. If `out_dims` is a Tuple, then it should
            have one element per output. Default: 0.

    Returns:
        Returns a new "batched" function. It takes the same inputs as `func`,
        except each input has an extra dimension at the index specified by `in_dims`.
        It returns the same outputs as `func`, except each output has
        an extra dimension at the index specified by `out_dims`.

    .. warning::
        vmap works best with functional-style code. Please do not perform any
        side-effects in `func`, with the exception of in-place PyTorch operations.
        Examples of side-effects include mutating Python data structures and
        assigning values to variables not captured in `func`.

    One example of using `vmap` is to compute batched dot products. PyTorch
    doesn't provide a batched `torch.dot` API; instead of unsuccessfully
    rummaging through docs, use `vmap` to construct a new function.

        >>> torch.dot                            # [D], [D] -> []
        >>> batched_dot = torch.vmap(torch.dot)  # [N, D], [N, D] -> [N]
        >>> x, y = torch.randn(2, 5), torch.randn(2, 5)
        >>> batched_dot(x, y)

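    `in_dims` can also be given per argument; passing `None` for an argument leaves
    it unbatched. A small sketch building on the example above (the shapes in the
    comment are illustrative assumptions):

        >>> # [N, D], [D] -> [N]: batch only the first argument
        >>> batched_dot_single = torch.vmap(torch.dot, in_dims=(0, None))
        >>> xs, y = torch.randn(2, 5), torch.randn(5)
        >>> batched_dot_single(xs, y)
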
    `vmap` can be helpful in hiding batch dimensions, leading to a simpler
    model authoring experience.

        >>> batch_size, feature_size = 3, 5
        >>> weights = torch.randn(feature_size, requires_grad=True)
        >>>
        >>> def model(feature_vec):
        >>>     # Very simple linear model with activation
        >>>     return feature_vec.dot(weights).relu()
        >>>
        >>> examples = torch.randn(batch_size, feature_size)
        >>> result = torch.vmap(model)(examples)

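    `out_dims` plays the same role for outputs: it selects where the mapped dimension
    appears in each result. A minimal sketch (`torch.relu` is chosen here simply as a
    convenient elementwise function):

        >>> examples = torch.randn(3, 5)
        >>> torch.vmap(torch.relu, out_dims=1)(examples).shape
        torch.Size([5, 3])
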
    `vmap` can also help vectorize computations that were previously difficult
    or impossible to batch. One example is higher-order gradient computation.
    The PyTorch autograd engine computes vjps (vector-Jacobian products).
    Computing a full Jacobian matrix for some function f: R^N -> R^N usually
    requires N calls to `autograd.grad`, one per Jacobian row. Using `vmap`,
    we can vectorize the whole computation, computing the Jacobian in a single
    call to `autograd.grad`.

        >>> # Setup
        >>> N = 5
        >>> f = lambda x: x ** 2
        >>> x = torch.randn(N, requires_grad=True)
        >>> y = f(x)
        >>> I_N = torch.eye(N)
        >>>
        >>> # Sequential approach
        >>> jacobian_rows = [torch.autograd.grad(y, x, v, retain_graph=True)[0]
        >>>                  for v in I_N.unbind()]
        >>> jacobian = torch.stack(jacobian_rows)
        >>>
        >>> # vectorized gradient computation
        >>> def get_vjp(v):
        >>>     return torch.autograd.grad(y, x, v)
        >>> jacobian = torch.vmap(get_vjp)(I_N)

    .. note::
        vmap does not provide general autobatching or handle variable-length
        sequences out of the box.
    """
    warnings.warn(
        "Please use functorch.vmap instead of torch.vmap "
        "(https://github.com/pytorch/functorch). We've moved development on "
        "torch.vmap over to functorch; functorch's vmap has a multitude of "
        "significant performance and functionality improvements.",
        stacklevel=2)
    return _vmap(func, in_dims, out_dims)


# Like vmap, but without the warning that redirects users to functorch.
# `allow_none_pass_through` lets the wrapped function return None for some outputs
# (useful when the wrapped call is autograd.grad, which returns None for inputs
# that do not receive a gradient).
def _vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0,
          allow_none_pass_through: bool = False) -> Callable:
    @functools.wraps(func)
    def wrapped(*args):
        _check_out_dims_is_int_or_int_tuple(out_dims, func)
        vmap_level = torch._C._vmapmode_increment_nesting()
        try:
            batched_inputs, batch_size = _create_batched_inputs(in_dims, args, vmap_level, func)
            batched_outputs = func(*batched_inputs)
            if not allow_none_pass_through:
                _validate_outputs(batched_outputs, func)
            return _unwrap_batched(batched_outputs, out_dims, vmap_level, batch_size, func,
                                   allow_none_pass_through=allow_none_pass_through)
        finally:
            torch._C._vmapmode_decrement_nesting()
    return wrapped
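# A minimal usage sketch of `_vmap` with `allow_none_pass_through=True` (illustrative
# only; the names below are arbitrary): it is intended for wrapping calls such as
# `autograd.grad`, which may return None for inputs that do not affect the output,
# so the per-output Tensor check is skipped and None results pass through unchanged.
#
#   x = torch.randn(3, requires_grad=True)
#   y = x ** 2
#   get_vjp = lambda v: torch.autograd.grad(y, x, v, retain_graph=True)
#   jacobian = _vmap(get_vjp, allow_none_pass_through=True)(torch.eye(3))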