import torch
from torch import Tensor

from .optimizer import Optimizer
from typing import List, Optional


class Adagrad(Optimizer):
    r"""Implements Adagrad algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta)
                \text{ (objective)}, \: \lambda \text{ (weight decay)},                          \\
            &\hspace{12mm}    \tau \text{ (initial accumulator value)}, \: \eta\text{ (lr decay)}\\
            &\textbf{initialize} :  state\_sum_0 \leftarrow \tau                                  \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \tilde{\gamma}    \leftarrow \gamma / (1 +(t-1) \eta)                  \\
            &\hspace{5mm} \textbf{if} \: \lambda \neq 0                                          \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda \theta_{t-1}                             \\
            &\hspace{5mm}state\_sum_t  \leftarrow  state\_sum_{t-1} + g^2_t                      \\
            &\hspace{5mm}\theta_t \leftarrow
                \theta_{t-1}- \tilde{\gamma} \frac{g_t}{\sqrt{state\_sum_t}+\epsilon}            \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}
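
    As a concrete instance of the update above (with illustrative values, not the
    defaults): for :math:`\gamma = 0.1`, :math:`\eta = 0`, :math:`\lambda = 0`,
    :math:`\epsilon = 0`, :math:`\tau = 0` and a first gradient :math:`g_1 = 2`,
    the accumulator becomes :math:`state\_sum_1 = 4` and the parameter update is
    :math:`\theta_1 = \theta_0 - 0.1 \cdot 2 / \sqrt{4} = \theta_0 - 0.1`.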

    For further details regarding the algorithm we refer to `Adaptive Subgradient Methods for Online Learning
    and Stochastic Optimization`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-2)
        lr_decay (float, optional): learning rate decay (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        initial_accumulator_value (float, optional): value with which the
            ``sum`` accumulator state is initialized for each parameter (default: 0)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-10)
        foreach (bool, optional): whether a foreach implementation of the optimizer
            is used (default: None)
        maximize (bool, optional): maximize the params based on the objective, instead of
            minimizing (default: False)

    .. _Adaptive Subgradient Methods for Online Learning and Stochastic
        Optimization: http://jmlr.org/papers/v12/duchi11a.html
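
    Example (a minimal usage sketch; ``model``, ``loss_fn``, ``input`` and
    ``target`` are assumed to be defined by the caller and are not part of
    this module)::

        >>> optimizer = torch.optim.Adagrad(model.parameters(), lr=1e-2)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()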
    """

    def __init__(self, params, lr=1e-2, lr_decay=0, weight_decay=0,
                 initial_accumulator_value=0, eps=1e-10,
                 foreach: Optional[bool] = None, *, maximize: bool = False):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= lr_decay:
            raise ValueError("Invalid lr_decay value: {}".format(lr_decay))
        if not 0.0 <= weight_decay:
            raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
        if not 0.0 <= initial_accumulator_value:
            raise ValueError("Invalid initial_accumulator_value value: {}".format(initial_accumulator_value))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))

        defaults = dict(lr=lr, lr_decay=lr_decay, eps=eps, weight_decay=weight_decay,
                        initial_accumulator_value=initial_accumulator_value,
                        foreach=foreach, maximize=maximize)
        super(Adagrad, self).__init__(params, defaults)

        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['step'] = torch.tensor(0.)
                # Complex parameters get a complex accumulator with the same initial value.
                init_value = (complex(initial_accumulator_value, initial_accumulator_value)
                              if torch.is_complex(p) else initial_accumulator_value)
                state['sum'] = torch.full_like(p, init_value, memory_format=torch.preserve_format)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault('foreach', None)
            group.setdefault('maximize', False)
        # Older checkpoints may store `step` as a plain number; convert it to a tensor.
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(state_values[0]['step'])
        if not step_is_tensor:
            for s in state_values:
                s['step'] = torch.tensor(float(s['step']))

    def share_memory(self):
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                state['sum'].share_memory_()
zAdagrad.share_memoryc                 C   s   d}|dur:t   | }W d   n1 s00    Y  | jD ]}g }g }g }g }d}|d D ]V}	|	jdur`|	jjrzd}||	 ||	j | j|	 }
||
d  ||
d  q`t|||||d |d |d	 |d
 ||d |d d q@|S )zPerforms a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        NFr   Tr   r   r   r   r   r   r   r
   )r   r   r   r   has_sparse_gradr   r
   )r   enable_gradr   grad	is_sparseappendr   adagrad)r"   closurelossr$   params_with_gradgrads
state_sumsstate_stepsr4   r%   r   r(   r(   r)   r   z   sB    
$



zAdagrad.step)r   r   r   r   r	   N)N)__name__
__module____qualname____doc__r   boolr   r*   r3   r   no_gradr   __classcell__r(   r(   r&   r)   r      s"   -      
3r   )r   r=   r>   r?   r4   r   r   r   r   r   r
   c                C   st   t dd |D std|du r&d}|r<tj r<td|rPtj sPt}nt}|| |||||||	||
d
 dS )ztFunctional API that performs Adagrad algorithm computation.

    See :class:`~torch.optim.Adagrad` for details.
    c                 S   s   g | ]}t |tjqS r(   )
isinstancer   r   ).0tr(   r(   r)   
<listcomp>       zadagrad.<locals>.<listcomp>zPAPI has changed, `state_steps` argument must contain a list of singleton tensorsNFz6torch.jit.script not supported with foreach optimizersr   r   r   r   r4   r
   )allRuntimeErrorr   jitis_scripting_multi_tensor_adagrad_single_tensor_adagrad)r   r=   r>   r?   r4   r   r   r   r   r   r
   funcr(   r(   r)   r9      s.    r9   c                 C   s8   |   }| dks | dkr*t| S t|||S )Nr   )sizenumelr   
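

# A hedged usage sketch of the functional entry point above; the tensors are
# illustrative placeholders, not part of this module:
#
#   param, grad = torch.zeros(3), torch.ones(3)
#   state_sum, step = torch.zeros(3), torch.tensor(0.)
#   adagrad([param], [grad], [state_sum], [step],
#           has_sparse_grad=False, foreach=False,
#           lr=1e-2, weight_decay=0.0, lr_decay=0.0, eps=1e-10, maximize=False)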


def _make_sparse(grad, grad_indices, values):
    size = grad.size()
    if grad_indices.numel() == 0 or values.numel() == 0:
        return torch.empty_like(grad)
    return torch.sparse_coo_tensor(grad_indices, values, size)


def _single_tensor_adagrad(params: List[Tensor], grads: List[Tensor],
                           state_sums: List[Tensor], state_steps: List[Tensor],
                           *, lr: float, weight_decay: float, lr_decay: float,
                           eps: float, has_sparse_grad: bool, maximize: bool):

    for (param, grad, state_sum, step_t) in zip(params, grads, state_sums, state_steps):
        # update step
        step_t += 1
        step = step_t.item()
        grad = grad if not maximize else -grad

        if weight_decay != 0:
            if grad.is_sparse:
                raise RuntimeError("weight_decay option is not compatible with sparse gradients")
            grad = grad.add(param, alpha=weight_decay)

        clr = lr / (1 + (step - 1) * lr_decay)

        if grad.is_sparse:
            grad = grad.coalesce()  # the update is non-linear so indices must be unique
            grad_indices = grad._indices()
            grad_values = grad._values()
            size = grad.size()

            state_sum.add_(_make_sparse(grad, grad_indices, grad_values.pow(2)))
            std = state_sum.sparse_mask(grad)
            std_values = std._values().sqrt_().add_(eps)
            param.add_(_make_sparse(grad, grad_indices, grad_values / std_values), alpha=-clr)
        else:
            is_complex = torch.is_complex(param)
            if is_complex:
                grad = torch.view_as_real(grad)
                state_sum = torch.view_as_real(state_sum)
                param = torch.view_as_real(param)
            state_sum.addcmul_(grad, grad, value=1)
            std = state_sum.sqrt().add_(eps)
            param.addcdiv_(grad, std, value=-clr)
            if is_complex:
                param = torch.view_as_complex(param)
                state_sum = torch.view_as_complex(state_sum)


def _multi_tensor_adagrad(params: List[Tensor], grads: List[Tensor],
                          state_sums: List[Tensor], state_steps: List[Tensor],
                          *, lr: float, weight_decay: float, lr_decay: float,
                          eps: float, has_sparse_grad: bool, maximize: bool):

    # Foreach functions will throw errors if given empty lists
    if len(params) == 0:
        return

    if maximize:
        grads = torch._foreach_neg(grads)

    if has_sparse_grad is None:
        has_sparse_grad = any([grad.is_sparse for grad in grads])

    if has_sparse_grad:
        # Foreach kernels cannot handle sparse gradients; fall back to the
        # single-tensor path. maximize is already applied above, so pass False.
        return _single_tensor_adagrad(params, grads, state_sums, state_steps,
                                      lr=lr, weight_decay=weight_decay, lr_decay=lr_decay,
                                      eps=eps, has_sparse_grad=has_sparse_grad, maximize=False)

    # Update steps
    torch._foreach_add_(state_steps, 1)

    if weight_decay != 0:
        torch._foreach_add_(grads, params, alpha=weight_decay)

    minus_clr = [-lr / (1 + (step.item() - 1) * lr_decay) for step in state_steps]

    grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads]
    state_sums = [torch.view_as_real(x) if torch.is_complex(x) else x for x in state_sums]
    torch._foreach_addcmul_(state_sums, grads, grads, value=1)
    std = torch._foreach_add(torch._foreach_sqrt(state_sums), eps)
    toAdd = torch._foreach_div(torch._foreach_mul(grads, minus_clr), std)
    toAdd = [torch.view_as_complex(x) if torch.is_complex(params[i]) else x
             for i, x in enumerate(toAdd)]
    torch._foreach_add_(params, toAdd)
    state_sums = [torch.view_as_complex(x) if torch.is_complex(params[i]) else x
                  for i, x in enumerate(state_sums)]