a
    8Sic0                     @   s   d dl Z d dlZd dlmZ ddlmZ d dlmZmZ G dd deZdee ee ee ee ee e	e
e
e
e
e
dd	d
Zee ee ee ee ee e
e
e
e
e
d
ddZee ee ee ee ee e
e
e
e
e
d
ddZdS )    N)Tensor   )	Optimizer)ListOptionalc                       sJ   e Zd ZdZdee d fdd	Z fd
dZe	 dddZ
  ZS )RAdama  Implements RAdam algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \beta_1, \beta_2
                \text{ (betas)}, \: \theta_0 \text{ (params)}, \:f(\theta) \text{ (objective)}, \:
                \lambda \text{ (weightdecay)},                                                   \\
            &\hspace{13mm} \epsilon \text{ (epsilon)}                                            \\
            &\textbf{initialize} :  m_0 \leftarrow 0 \text{ ( first moment)},
                v_0 \leftarrow 0 \text{ ( second moment)},                                       \\
            &\hspace{18mm} \rho_{\infty} \leftarrow 2/(1-\beta_2) -1                      \\[-1.ex]
            &\rule{110mm}{0.4pt}  \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{6mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
            &\hspace{6mm}m_t           \leftarrow   \beta_1 m_{t-1} + (1 - \beta_1) g_t          \\
            &\hspace{6mm}v_t           \leftarrow   \beta_2 v_{t-1} + (1-\beta_2) g^2_t          \\
            &\hspace{6mm}\widehat{m_t} \leftarrow   m_t/\big(1-\beta_1^t \big)                   \\
            &\hspace{6mm}\rho_t \leftarrow \rho_{\infty} -
                2 t \beta^t_2 /\big(1-\beta_2^t \big)                                    \\[0.1.ex]
            &\hspace{6mm}\textbf{if} \: \rho_t > 5                                               \\
            &\hspace{12mm} l_t \leftarrow \sqrt{ (1-\beta^t_2) / \big( v_t +\epsilon \big) }     \\
            &\hspace{12mm} r_t \leftarrow
      \sqrt{\frac{(\rho_t-4)(\rho_t-2)\rho_{\infty}}{(\rho_{\infty}-4)(\rho_{\infty}-2) \rho_t}} \\
            &\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t} r_t l_t        \\
            &\hspace{6mm}\textbf{else}                                                           \\
            &\hspace{12mm}\theta_t \leftarrow \theta_{t-1} - \gamma \widehat{m_t}                \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to `On the variance of the adaptive learning rate and beyond`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        foreach (bool, optional): whether foreach implementation of optimizer
            is used (default: None)

    .. _On the variance of the adaptive learning rate and beyond:
        https://arxiv.org/abs/1908.03265
    MbP?g?g+?:0yE>r   N)foreachc                    s   d|kst d|d|ks,t d|d|d   krDdk sXn t d|d d|d   krpdk sn t d|d d|kst d	|t|||||d
}tt| || d S )N        zInvalid learning rate: {}zInvalid epsilon value: {}r         ?z%Invalid beta parameter at index 0: {}r   z%Invalid beta parameter at index 1: {}zInvalid weight_decay value: {})lrbetasepsweight_decayr   )
ValueErrorformatdictsuperr   __init__)selfparamsr   r   r   r   r   defaults	__class__ M/var/www/html/django/DPS/env/lib/python3.9/site-packages/torch/optim/radam.pyr   >   s    
zRAdam.__init__c                    sx   t  | | jD ]}|dd  qt| j }t|dkoNt	|d d }|st|D ]}t
t|d |d< qXd S )Nr   r   step)r   __setstate__param_groups
setdefaultliststatevalueslentorch	is_tensortensorfloat)r   r#   groupstate_valuesstep_is_tensorsr   r   r   r   N   s    
zRAdam.__setstate__c                 C   sN  d}|dur:t   | }W d   n1 s00    Y  | jD ]}g }g }g }g }g }|d \}	}
|d D ]}|jdurn|| |jjrtd||j | j| }t|dkrt 	d|d< t j
|t jd|d	< t j
|t jd|d
< ||d	  ||d
  ||d  qnt||||||	|
|d |d |d |d d q@|S )zPerforms a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        Nr   r   z'RAdam does not support sparse gradientsr   r   r   memory_formatexp_avg
exp_avg_sqr   r   r   r   )beta1beta2r   r   r   r   )r&   enable_gradr    gradappend	is_sparseRuntimeErrorr#   r%   r(   
zeros_likepreserve_formatradam)r   closurelossr*   params_with_gradgradsexp_avgsexp_avg_sqsstate_stepsr2   r3   pr#   r   r   r   r   X   sL    
$


z
RAdam.step)r   r	   r
   r   N)N)__name__
__module____qualname____doc__r   boolr   r   r&   no_gradr   __classcell__r   r   r   r   r   	   s   4  
r   )r   r?   r@   rA   rB   r   r2   r3   r   r   r   c                C   st   t dd |D std|du r&d}|r<tj r<td|rPtj sPt}nt}|| ||||||||	|
d
 dS )zpFunctional API that performs RAdam algorithm computation.

    See :class:`~torch.optim.RAdam` for details.
    c                 S   s   g | ]}t |tjqS r   )
isinstancer&   r   ).0tr   r   r   
<listcomp>       zradam.<locals>.<listcomp>zPAPI has changed, `state_steps` argument must contain a list of singleton tensorsNFz6torch.jit.script not supported with foreach optimizers)r2   r3   r   r   r   )allr8   r&   jitis_scripting_multi_tensor_radam_single_tensor_radam)r   r?   r@   rA   rB   r   r2   r3   r   r   r   funcr   r   r   r;      s(    r;   )
r   r?   r@   rA   rB   r2   r3   r   r   r   c                C   sV  t | D ]F\}
}||
 }||
 }||
 }||
 }|d7 }| }d||  }d||  }|dkrp|j||d}||j|d| d ||j||d| d || }dd|  d }|d| ||  |  }|dkr>t|d |d  | |d |d  |  }t|| |	 }|j|| | | dd q|j|| dd qd S )	Nr   r   alpha)value   g      @   g      )	enumerateitemaddmul_add_addcmul_mathsqrt)r   r?   r@   rA   rB   r2   r3   r   r   r   iparamr5   r0   r1   step_tr   bias_correction1bias_correction2Zbias_corrected_exp_avgrho_infrho_trectZadaptive_lrr   r   r   rT      s*    
.rT   c                   sb  t | dkrd S t|d dd  d fdd|D }
 fdd|D }fdd|D }|dkr|tj|| |d t|  tj||d  d t| t|||d  fd	d|
D }d
d |D }t|}dd |D }t||}fddt||D }t| ||| dd |D }fddt||D }t| ||| d S )Nr   r   rY   c                    s8   g | ]0}d |    |    d |      qS )rY   r   r\   rL   r   )r3   rh   r   r   rN     rO   z'_multi_tensor_radam.<locals>.<listcomp>c                    s   g | ]}d  |    qS r   rk   rl   )r2   r   r   rN     rO   c                    s   g | ]}d  |    qS rm   rk   rl   )r3   r   r   rN     rO   rV   c                    sF   g | ]>}|d kr>t |d |d     d  d  |  ndqS )   rZ   rY   r   ra   rb   )rL   ri   )rh   r   r   rN     s   0c                 S   s   g | ]}|d krd ndqS )r   r   r   )rL   rj   r   r   r   rN     rO   c                 S   s   g | ]}t |qS r   ro   )rL   bcr   r   r   rN     rO   c                    s    g | ]\}} | | d  qS r   rL   rj   rp   r   r   r   rN     rO   c                 S   s   g | ]}t j|t jd qS )r.   )r&   	ones_liker:   )rL   Zexp_avr   r   r   rN     rO   c                    s    g | ]\}} | | d  qS rq   r   rs   rt   r   r   rN     rO   )	r%   r&   _foreach_add__foreach_mul__foreach_addcmul__foreach_sqrt_foreach_divzip_foreach_addcdiv_)r   r?   r@   rA   rB   r2   r3   r   r   r   Z
rho_t_listrf   rg   rj   Zunrectifiedexp_avg_sq_sqrtbias_correction_sqrtdenom	step_sizer   )r2   r3   r   rh   r   rS      s2    

rS   )N)ra   r&   r   	optimizerr   typingr   r   r   rH   r)   r;   rT   rS   r   r   r   r   <module>   sR     .1