"""RMSprop optimizer implementation."""

import tensorflow.compat.v2 as tf

from keras.optimizers.optimizer_experimental import optimizer
from keras.utils import generic_utils
from tensorflow.python.util.tf_export import keras_export


@generic_utils.register_keras_serializable()
@keras_export("keras.optimizers.experimental.RMSprop", v1=[])
class RMSprop(optimizer.Optimizer):
    r"""Optimizer that implements the RMSprop algorithm.

    The gist of RMSprop is to:

    - Maintain a moving (discounted) average of the square of gradients
    - Divide the gradient by the root of this average

    This implementation of RMSprop uses plain momentum, not Nesterov momentum.

    The centered version additionally maintains a moving average of the
    gradients, and uses that average to estimate the variance.
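
    In pseudo-code, the dense update performed each step is roughly the
    following (a sketch for intuition only; `velocity`, `average_grad` and
    `mom` name the optimizer's internal slot variables, and the sparse
    `tf.IndexedSlices` path differs slightly):

        velocity = rho * velocity + (1 - rho) * gradient ** 2
        if centered:
            average_grad = rho * average_grad + (1 - rho) * gradient
            velocity -= average_grad ** 2
        increment = gradient / (sqrt(velocity) + epsilon)
        if momentum > 0:
            mom = momentum * mom + increment
            variable -= learning_rate * mom
        else:
            variable -= learning_rate * increment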

    Args:
      learning_rate: Initial value for the learning rate:
        either a floating point value,
        or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
        Defaults to 0.001.
      rho: float, defaults to 0.9. Discounting factor for the old gradients.
      momentum: float, defaults to 0.0. If not 0.0, the optimizer tracks the
        momentum value, with a decay rate equal to `1 - momentum`.
      epsilon: A small constant for numerical stability. It is added to the
        denominator outside of the square root, i.e. the update divides by
        `sqrt(mean_square) + epsilon` rather than by
        `sqrt(mean_square + epsilon)`. Defaults to 1e-7.
      centered: Boolean. If `True`, gradients are normalized by the estimated
        variance of the gradient; if `False`, by the uncentered second moment.
        Setting this to `True` may help with training, but is slightly more
        expensive in terms of computation and memory. Defaults to `False`.
      {{base_optimizer_keyword_args}}

    Usage:

    >>> opt = tf.keras.optimizers.RMSprop(learning_rate=0.1)
    >>> var1 = tf.Variable(10.0)
    >>> loss = lambda: (var1 ** 2) / 2.0    # d(loss) / d(var1) = var1
    >>> step_count = opt.minimize(loss, [var1]).numpy()
    >>> var1.numpy()
    9.683772
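
    The momentum and centered variants can be combined; for example (an
    illustrative construction only, with arbitrary hyperparameter values):

    >>> opt = tf.keras.optimizers.experimental.RMSprop(
    ...     learning_rate=0.1, momentum=0.9, centered=True)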

    Reference:
      - [Hinton, 2012](
        http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)

    """

    def __init__(
        self,
        learning_rate=0.001,
        rho=0.9,
        momentum=0.0,
        epsilon=1e-7,
        centered=False,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=100,
        jit_compile=True,
        name="RMSprop",
        **kwargs,
    ):
        super().__init__(
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            jit_compile=jit_compile,
            name=name,
            **kwargs,
        )
        self._learning_rate = self._build_learning_rate(learning_rate)
        self.rho = rho
        self.momentum = momentum
        self.epsilon = epsilon
        self.centered = centered

    def build(self, var_list):
        super().build(var_list)
        if hasattr(self, "_built") and self._built:
            return
        self._built = True
        # One velocity (mean-square) slot per variable, always.
        self._velocities = []
        for var in var_list:
            self._velocities.append(
                self.add_variable_from_reference(var, "velocity")
            )
        # Momentum and average-gradient slots are only created when needed.
        self._momentums = []
        if self.momentum > 0:
            for var in var_list:
                self._momentums.append(
                    self.add_variable_from_reference(var, "momentum")
                )
        self._average_gradients = []
        if self.centered:
            for var in var_list:
                self._average_gradients.append(
                    self.add_variable_from_reference(var, "average_gradient")
                )

zRMSprop.buildc                 C   s*  t | j|j}| |}| j| j|  }d}| jdkrH| j| j|  }d}| j	rb| j
| j|  }| j}t|t jrv|||  |t t |jd|  |j | j	r|||  |t t |jd|  |j |t |  t ||j}	t |jt |	| j  |j}
| jdkrX|| j|  ||
 || |  n|t | |
j |
j n||| d| t |   | j	r||| d| t |   |t |  |t || j  }
| jdkr|| j| |
  || |  n|| |
  dS )z=Update step given gradient and the associated model variable.Nr      )tfcastr   dtype_var_keyr(   _index_dictr   r+   r   r,   r   
isinstanceIndexedSlicesassignscatter_addsquarevaluesindices
assign_addgathersqrtr   )r   gradientvariablelrvar_keyr%   r   Zaverage_gradr   Zvelocity_valueZtransformed_gradr"   r"   r#   update_step   sd    


 zRMSprop.update_stepc                    s4   t   }|| | j| j| j| j| jd |S )N)r   r   r   r   r   )	r   
get_configupdate_serialize_hyperparameterr   r   r   r   r   )r   configr    r"   r#   rD      s    
zRMSprop.get_config)r   r   r	   r
   FNNNFr   r   Tr   )	__name__
__module____qualname____doc__r   r&   rC   rD   __classcell__r"   r"   r    r#   r      s$   /             ">r   z{{base_optimizer_keyword_args}})rK   tensorflow.compat.v2compatv2r0   'keras.optimizers.optimizer_experimentalr   keras.utilsr    tensorflow.python.util.tf_exportr   register_keras_serializable	Optimizerr   replacebase_optimizer_keyword_argsr"   r"   r"   r#   <module>   s   
 :