a
    Sic<                     @   s2  d Z ddlZddlm  mZ ddlmZ ddlm	Z	 ddlm
Z
 ddlmZ dd Zd	d
 ZedG dd dZedG dd deZeddG dd deZeddG dd deZeddg dG dd deZedd+d d!ZeZeZeZed"d#d$ Zed%d,d&d'Zed(d)d* ZdS )-zBuilt-in regularizers.    N)backend)deserialize_keras_objectserialize_keras_object)keras_exportc                 C   sF   t | ttfstd|  dt| s2t| rBtd|  ddS )z>check penalty number availability, raise ValueError if failed.zValue zN is not a valid regularization penalty number, expected an int or float value.z^ is not a valid regularization penalty number, an infinite number or NaN are not valid values.N)
isinstancefloatint
ValueErrormathisinfisnan)x r   N/var/www/html/django/DPS/env/lib/python3.9/site-packages/keras/regularizers.py_check_penalty_number   s    

r   c                 C   s   | d u r|S |S Nr   )inputsdefaultr   r   r   _none_to_default-   s    r   zkeras.regularizers.Regularizerc                   @   s,   e Zd ZdZdd Zedd Zdd ZdS )	Regularizera  Regularizer base class.

    Regularizers allow you to apply penalties on layer parameters or layer
    activity during optimization. These penalties are summed into the loss
    function that the network optimizes.

    Regularization penalties are applied on a per-layer basis. The exact API
    will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D`
    and `Conv3D`) have a unified API.

    These layers expose 3 keyword arguments:

    - `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
    - `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
    - `activity_regularizer`: Regularizer to apply a penalty on the layer's
    output

    All layers (including custom layers) expose `activity_regularizer` as a
    settable property, whether or not it is in the constructor arguments.

    The value returned by the `activity_regularizer` is divided by the input
    batch size so that the relative weighting between the weight regularizers
    and the activity regularizers does not change with the batch size.

    You can access a layer's regularization penalties by calling `layer.losses`
    after calling the layer on inputs.

    ## Example

    >>> layer = tf.keras.layers.Dense(
    ...     5, input_dim=5,
    ...     kernel_initializer='ones',
    ...     kernel_regularizer=tf.keras.regularizers.L1(0.01),
    ...     activity_regularizer=tf.keras.regularizers.L2(0.01))
    >>> tensor = tf.ones(shape=(5, 5)) * 2.0
    >>> out = layer(tensor)

    >>> # The kernel regularization term is 0.25
    >>> # The activity regularization term (after dividing by the batch size)
    >>> # is 5
    >>> tf.math.reduce_sum(layer.losses)
    <tf.Tensor: shape=(), dtype=float32, numpy=5.25>

    ## Available penalties

    ```python
    tf.keras.regularizers.L1(0.3)  # L1 Regularization Penalty
    tf.keras.regularizers.L2(0.1)  # L2 Regularization Penalty
    tf.keras.regularizers.L1L2(l1=0.01, l2=0.01)  # L1 + L2 penalties
    ```

    ## Directly calling a regularizer

    Compute a regularization loss on a tensor by directly calling a regularizer
    as if it is a one-argument function.

    E.g.
    >>> regularizer = tf.keras.regularizers.L2(2.)
    >>> tensor = tf.ones(shape=(5, 5))
    >>> regularizer(tensor)
    <tf.Tensor: shape=(), dtype=float32, numpy=50.0>


    ## Developing new regularizers

    Any function that takes in a weight matrix and returns a scalar
    tensor can be used as a regularizer, e.g.:

    >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l1')
    ... def l1_reg(weight_matrix):
    ...    return 0.01 * tf.math.reduce_sum(tf.math.abs(weight_matrix))
    ...
    >>> layer = tf.keras.layers.Dense(5, input_dim=5,
    ...     kernel_initializer='ones', kernel_regularizer=l1_reg)
    >>> tensor = tf.ones(shape=(5, 5))
    >>> out = layer(tensor)
    >>> layer.losses
    [<tf.Tensor: shape=(), dtype=float32, numpy=0.25>]

    Alternatively, you can write your custom regularizers in an
    object-oriented way by extending this regularizer base class, e.g.:

    >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2')
    ... class L2Regularizer(tf.keras.regularizers.Regularizer):
    ...   def __init__(self, l2=0.):
    ...     self.l2 = l2
    ...
    ...   def __call__(self, x):
    ...     return self.l2 * tf.math.reduce_sum(tf.math.square(x))
    ...
    ...   def get_config(self):
    ...     return {'l2': float(self.l2)}
    ...
    >>> layer = tf.keras.layers.Dense(
    ...   5, input_dim=5, kernel_initializer='ones',
    ...   kernel_regularizer=L2Regularizer(l2=0.5))

    >>> tensor = tf.ones(shape=(5, 5))
    >>> out = layer(tensor)
    >>> layer.losses
    [<tf.Tensor: shape=(), dtype=float32, numpy=12.5>]

    ### A note on serialization and deserialization:

    Registering the regularizers as serializable is optional if you are just
    training and executing models, exporting to and from SavedModels, or saving
    and loading weight checkpoints.

    Registration is required for saving and
    loading models to HDF5 format, Keras model cloning, some visualization
    utilities, and exporting models to and from JSON. If using this
    functionality, you must make sure any python process running your model has
    also defined and registered your custom regularizer.
    c                 C   s   dS )z6Compute a regularization penalty from an input tensor.        r   selfr   r   r   r   __call__   s    zRegularizer.__call__c                 C   s   | f i |S )a!  Creates a regularizer from its config.

        This method is the reverse of `get_config`,
        capable of instantiating the same regularizer from the config
        dictionary.

        This method is used by Keras `model_to_estimator`, saving and
        loading models to HDF5 formats, Keras model cloning, some visualization
        utilities, and exporting models to and from JSON.

        Args:
            config: A Python dictionary, typically the output of get_config.

        Returns:
            A regularizer instance.
        r   )clsconfigr   r   r   from_config   s    zRegularizer.from_configc                 C   s   t |  ddS )a  Returns the config of the regularizer.

        An regularizer config is a Python dictionary (serializable)
        containing all configuration parameters of the regularizer.
        The same regularizer can be reinstantiated later
        (without any saved state) from this configuration.

        This method is optional if you are just training and executing models,
        exporting to and from SavedModels, or using weight checkpoints.

        This method is required for Keras `model_to_estimator`, saving and
        loading models to HDF5 formats, Keras model cloning, some visualization
        utilities, and exporting models to and from JSON.

        Returns:
            Python dictionary.
        z  does not implement get_config()N)NotImplementedErrorr   r   r   r   
get_config   s    zRegularizer.get_configN)__name__
__module____qualname____doc__r   classmethodr   r    r   r   r   r   r   1   s
   s
r   zkeras.regularizers.L1L2c                   @   s*   e Zd ZdZd
ddZdd Zdd Zd	S )L1L2a-  A regularizer that applies both L1 and L2 regularization penalties.

    The L1 regularization penalty is computed as:
    `loss = l1 * reduce_sum(abs(x))`

    The L2 regularization penalty is computed as
    `loss = l2 * reduce_sum(square(x))`

    L1L2 may be passed to a layer as a string identifier:

    >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1_l2')

    In this case, the default values used are `l1=0.01` and `l2=0.01`.

    Arguments:
        l1: Float; L1 regularization factor.
        l2: Float; L2 regularization factor.
    r   c                 C   sL   |d u rdn|}|d u rdn|}t | t | t|| _t|| _d S )Nr   )r   r   cast_to_floatxl1l2)r   r(   r)   r   r   r   __init__   s    zL1L2.__init__c                 C   sT   t jd|jd}| jr0|| jtt| 7 }| jrP|d| j tj	| 7 }|S )Nr   dtype       @)
r   constantr,   r(   tf
reduce_sumabsr)   nnl2_loss)r   r   regularizationr   r   r   r      s    zL1L2.__call__c                 C   s   t | jt | jdS )Nr(   r)   )r   r(   r)   r   r   r   r   r       s    zL1L2.get_configN)r   r   r!   r"   r#   r$   r*   r   r    r   r   r   r   r&      s   
	r&   zkeras.regularizers.L1zkeras.regularizers.l1c                   @   s*   e Zd ZdZd
ddZdd Zdd Zd	S )L1a  A regularizer that applies a L1 regularization penalty.

    The L1 regularization penalty is computed as:
    `loss = l1 * reduce_sum(abs(x))`

    L1 may be passed to a layer as a string identifier:

    >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1')

    In this case, the default value used is `l1=0.01`.

    Arguments:
        l1: Float; L1 regularization factor.
    {Gz?c                 K   sF   | d|}|rtd| |d u r*dn|}t| t|| _d S NlzArgument(s) not recognized: r8   )pop	TypeErrorr   r   r'   r(   )r   r(   kwargsr   r   r   r*     s    zL1.__init__c                 C   s   | j tt| S r   )r(   r/   r0   r1   r   r   r   r   r     s    zL1.__call__c                 C   s   dt | jiS )Nr(   )r   r(   r   r   r   r   r      s    zL1.get_configN)r8   r6   r   r   r   r   r7     s   

r7   zkeras.regularizers.L2zkeras.regularizers.l2c                   @   s*   e Zd ZdZd
ddZdd Zdd Zd	S )L2a  A regularizer that applies a L2 regularization penalty.

    The L2 regularization penalty is computed as:
    `loss = l2 * reduce_sum(square(x))`

    L2 may be passed to a layer as a string identifier:

    >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l2')

    In this case, the default value used is `l2=0.01`.

    Arguments:
        l2: Float; L2 regularization factor.
    r8   c                 K   sF   | d|}|rtd| |d u r*dn|}t| t|| _d S r9   )r;   r<   r   r   r'   r)   )r   r)   r=   r   r   r   r*   4  s    zL2.__init__c                 C   s   d| j  tj| S )Nr-   )r)   r/   r2   r3   r   r   r   r   r   >  s    zL2.__call__c                 C   s   dt | jiS )Nr)   )r   r)   r   r   r   r   r    B  s    zL2.get_configN)r8   r6   r   r   r   r   r>   #  s   

r>   z(keras.regularizers.OrthogonalRegularizerz)keras.regularizers.orthogonal_regularizer)v1c                   @   s*   e Zd ZdZdddZdd Zdd	 Zd
S )OrthogonalRegularizera  A regularizer that encourages input vectors to be orthogonal to each other.

    It can be applied to either the rows of a matrix (`mode="rows"`) or its
    columns (`mode="columns"`). When applied to a `Dense` kernel of shape
    `(input_dim, units)`, rows mode will seek to make the feature vectors
    (i.e. the basis of the output space) orthogonal to each other.

    Arguments:
      factor: Float. The regularization factor. The regularization penalty will
        be proportional to `factor` times the mean of the dot products between
        the L2-normalized rows (if `mode="rows"`, or columns if
        `mode="columns"`) of the inputs, excluding the product of each
        row/column with itself.  Defaults to 0.01.
      mode: String, one of `{"rows", "columns"}`. Defaults to `"rows"`. In rows
        mode, the regularization effect seeks to make the rows of the input
        orthogonal to each other. In columns mode, it seeks to make the columns
        of the input orthogonal to each other.

    Example:

    >>> regularizer = tf.keras.regularizers.OrthogonalRegularizer(factor=0.01)
    >>> layer = tf.keras.layers.Dense(units=4, kernel_regularizer=regularizer)
    r8   rowsc                 C   s4   t | t|| _|dvr*td| || _d S )N>   rA   columnszWInvalid value for argument `mode`. Expected one of {"rows", "columns"}. Received: mode=)r   r   r'   factorr
   mode)r   rC   rD   r   r   r   r*   d  s    zOrthogonalRegularizer.__init__c                 C   s   |j jdkrtd|j  | jdkrTtjj|dd}t|t|}|j d }n,tjj|dd}tt||}|j d }|dtj	||j
d  }||d  d	 }| jd
 tt| | S )N   zLInputs to OrthogonalRegularizer must have rank 2. Received: inputs.shape == rA      )axisr   g      ?r+   r-   g      ?)shaperankr
   rD   r/   r   l2_normalizematmul	transposeeyer,   rC   r0   r1   )r   r   productsizeZproduct_no_diagonalZ	num_pairsr   r   r   r   n  s.    

zOrthogonalRegularizer.__call__c                 C   s   t | j| jdS )N)rC   rD   )r   rC   rD   r   r   r   r   r      s    z OrthogonalRegularizer.get_configN)r8   rA   r6   r   r   r   r   r@   F  s   

r@   zkeras.regularizers.l1_l2r8   c                 C   s   t | |dS )a  Create a regularizer that applies both L1 and L2 penalties.

    The L1 regularization penalty is computed as:
    `loss = l1 * reduce_sum(abs(x))`

    The L2 regularization penalty is computed as:
    `loss = l2 * reduce_sum(square(x))`

    Args:
        l1: Float; L1 regularization factor.
        l2: Float; L2 regularization factor.

    Returns:
      An L1L2 Regularizer with the given regularization factors.
    r5   )r&   r5   r   r   r   l1_l2  s    rP   zkeras.regularizers.serializec                 C   s   t | S r   r   )regularizerr   r   r   	serialize  s    rR   zkeras.regularizers.deserializec                 C   s&   | dkrt dddS t| t |ddS )NrP   r8   r5   rQ   )module_objectscustom_objectsprintable_module_name)r&   r   globals)r   rT   r   r   r   deserialize  s    rW   zkeras.regularizers.getc                 C   sR   | du rdS t | trt| S t | tr4tt| S t| r@| S td|  dS )z<Retrieve a regularizer instance from a config or identifier.Nz,Could not interpret regularizer identifier: )r   dictrW   strcallabler
   )
identifierr   r   r   get  s    

r\   )r8   r8   )N)r$   r   tensorflow.compat.v2compatv2r/   kerasr   keras.utils.generic_utilsr   r    tensorflow.python.util.tf_exportr   r   r   r   r&   r7   r>   r@   rP   r(   r)   Zorthogonal_regularizerrR   rW   r\   r   r   r   r   <module>   sD    "-!">
