"""Adagrad optimizer implementation."""

import numpy as np
import tensorflow.compat.v2 as tf

from keras import backend_config
from keras.optimizers.optimizer_v2 import optimizer_v2
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.optimizers.Adagrad")
class Adagrad(optimizer_v2.OptimizerV2):
    r"""Optimizer that implements the Adagrad algorithm.

    Adagrad is an optimizer with parameter-specific learning rates,
    which are adapted relative to how frequently a parameter gets
    updated during training. The more updates a parameter receives,
    the smaller the updates.

    Args:
      learning_rate: Initial value for the learning rate:
        either a floating point value,
        or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
        Defaults to 0.001.
        Note that `Adagrad` tends to benefit from higher initial learning rate
        values compared to other optimizers.
        To match the exact form in the original paper, use 1.0.
      initial_accumulator_value: Floating point value.
        Starting value for the accumulators (per-parameter momentum values).
        Must be non-negative.
      epsilon: Small floating point value used to maintain numerical stability.
      name: Optional name prefix for the operations created when applying
        gradients.  Defaults to `"Adagrad"`.
      **kwargs: keyword arguments. Allowed arguments are `clipvalue`,
        `clipnorm`, `global_clipnorm`.
        If `clipvalue` (float) is set, the gradient of each weight
        is clipped to be no higher than this value.
        If `clipnorm` (float) is set, the gradient of each weight
        is individually clipped so that its norm is no higher than this value.
        If `global_clipnorm` (float) is set, the gradient of all weights is
        clipped so that their global norm is no higher than this value.

    Reference:
      - [Duchi et al., 2011](
        http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
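
    Usage (an illustrative sketch; the single-variable quadratic loss below is
    only for demonstration, and the printed value is rounded):

    >>> opt = tf.keras.optimizers.Adagrad(learning_rate=0.1)
    >>> var = tf.Variable(1.0)
    >>> loss = lambda: (var ** 2) / 2.0  # gradient of loss w.r.t. var is var
    >>> _ = opt.minimize(loss, [var])
    >>> round(float(var.numpy()), 2)  # var - lr * grad / sqrt(accumulator)
    0.9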
    """

    _HAS_AGGREGATE_GRAD = True

    def __init__(
        self,
        learning_rate=0.001,
        initial_accumulator_value=0.1,
        epsilon=1e-7,
        name="Adagrad",
        **kwargs
    ):
        if initial_accumulator_value < 0.0:
            raise ValueError(
                "initial_accumulator_value must be non-negative: %s"
                % initial_accumulator_value
            )
        if epsilon is None:
            epsilon = backend_config.epsilon()
        super().__init__(name, **kwargs)
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("decay", self._initial_decay)
        self._initial_accumulator_value = initial_accumulator_value
        self.epsilon = epsilon or backend_config.epsilon()

    def _create_slots(self, var_list):
        # One "accumulator" slot per variable, initialized to
        # `initial_accumulator_value`.
        for var in var_list:
            dtype = var.dtype.base_dtype
            init = tf.compat.v1.constant_initializer(
                self._initial_accumulator_value, dtype=dtype
            )
            self.add_slot(var, "accumulator", init)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super()._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)].update(
            dict(
                epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
                neg_lr_t=-apply_state[(var_device, var_dtype)]["lr_t"],
                zero=tf.zeros((), dtype=tf.int64),
            )
        )

    def set_weights(self, weights):
        params = self.weights
        # Keras V1 optimizer weight lists do not include the iteration count
        # at the head of the list; prepend a zero iteration in that case.
        if len(params) == len(weights) + 1:
            weights = [np.array(0)] + weights
        super().set_weights(weights)

    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Creates an optimizer from its config.

        This method is the reverse of `get_config`,
        capable of instantiating the same optimizer from the config
        dictionary.

        Args:
            config: A Python dictionary, typically the output of get_config.
            custom_objects: A Python dictionary mapping names to additional
              Python objects used to create this optimizer, such as a function
              used for a hyperparameter.

        Returns:
            An optimizer instance.
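
        Example (an illustrative config round-trip; the value printed assumes
        the default `initial_accumulator_value`):

        >>> opt = tf.keras.optimizers.Adagrad(learning_rate=0.01)
        >>> config = opt.get_config()
        >>> restored = tf.keras.optimizers.Adagrad.from_config(config)
        >>> restored.get_config()["initial_accumulator_value"]
        0.1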
        """
        if "initial_accumulator_value" not in config:
            config["initial_accumulator_value"] = 0.1
        if "lr" in config:
            config["learning_rate"] = config.pop("lr")
        return cls(**config)

    def _resource_apply_dense(self, grad, var, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        acc = self.get_slot(var, "accumulator")
        return tf.raw_ops.ResourceApplyAdagradV2(
            var=var.handle,
            accum=acc.handle,
            lr=coefficients["lr_t"],
            epsilon=coefficients["epsilon"],
            grad=grad,
            use_locking=self._use_locking,
        )

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        acc = self.get_slot(var, "accumulator")
        return tf.raw_ops.ResourceSparseApplyAdagradV2(
            var=var.handle,
            accum=acc.handle,
            lr=coefficients["lr_t"],
            epsilon=coefficients["epsilon"],
            grad=grad,
            indices=indices,
            use_locking=self._use_locking,
        )

    def get_config(self):
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    "learning_rate"
                ),
                "decay": self._initial_decay,
                "initial_accumulator_value": self._initial_accumulator_value,
                "epsilon": self.epsilon,
            }
        )
        return config
__module____qualname____doc___HAS_AGGREGATE_GRADr   r(   r,   r:   classmethodr@   rM   rP   rQ   __classcell__r   r   r   r   r      s   #    
	

r   )rV   numpyr8   tensorflow.compat.v2r!   v2r    kerasr   keras.optimizers.optimizer_v2r    tensorflow.python.util.tf_exportr   OptimizerV2r   r   r   r   r   <module>   s   