"""Base Metric classes."""

import abc
import copy
import types
import warnings

import numpy as np
import tensorflow.compat.v2 as tf

from keras import backend
from keras.dtensor import dtensor_api as dtensor
from keras.dtensor import utils as dtensor_utils
from keras.engine import base_layer
from keras.engine import base_layer_utils
from keras.engine import keras_tensor
from keras.saving.saved_model import metric_serialization
from keras.utils import generic_utils
from keras.utils import losses_utils
from keras.utils import metrics_utils
from keras.utils import tf_utils

from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls


@keras_export("keras.metrics.Metric")
class Metric(base_layer.Layer, metaclass=abc.ABCMeta):
    """Encapsulates metric logic and state.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      **kwargs: Additional layer keyword arguments.

    Standalone usage:

    ```python
    m = SomeMetric(...)
    for input in ...:
      m.update_state(input)
    print('Final result: ', m.result().numpy())
    ```

    Usage with `compile()` API:

    ```python
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.Dense(64, activation='relu'))
    model.add(tf.keras.layers.Dense(64, activation='relu'))
    model.add(tf.keras.layers.Dense(10, activation='softmax'))

    model.compile(optimizer=tf.keras.optimizers.RMSprop(0.01),
                  loss=tf.keras.losses.CategoricalCrossentropy(),
                  metrics=[tf.keras.metrics.CategoricalAccuracy()])

    data = np.random.random((1000, 32))
    labels = np.random.random((1000, 10))

    dataset = tf.data.Dataset.from_tensor_slices((data, labels))
    dataset = dataset.batch(32)

    model.fit(dataset, epochs=10)
    ```

    To be implemented by subclasses:
    * `__init__()`: All state variables should be created in this method by
      calling `self.add_weight()` like: `self.var = self.add_weight(...)`
    * `update_state()`: Has all updates to the state variables like:
      self.var.assign_add(...).
    * `result()`: Computes and returns a scalar value or a dict of scalar values
      for the metric from the state variables.

    Example subclass implementation:

    ```python
    class BinaryTruePositives(tf.keras.metrics.Metric):

      def __init__(self, name='binary_true_positives', **kwargs):
        super(BinaryTruePositives, self).__init__(name=name, **kwargs)
        self.true_positives = self.add_weight(name='tp', initializer='zeros')

      def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.cast(y_true, tf.bool)
        y_pred = tf.cast(y_pred, tf.bool)

        values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True))
        values = tf.cast(values, self.dtype)
        if sample_weight is not None:
          sample_weight = tf.cast(sample_weight, self.dtype)
          sample_weight = tf.broadcast_to(sample_weight, values.shape)
          values = tf.multiply(values, sample_weight)
        self.true_positives.assign_add(tf.reduce_sum(values))

      def result(self):
        return self.true_positives
    ```
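
    For instance, with eager execution the subclass above behaves as follows
    (an illustrative run, not an exhaustive test):

    >>> m = BinaryTruePositives()
    >>> m.update_state([0, 1, 1, 1], [0, 1, 0, 0])
    >>> m.result().numpy()
    1.0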
    """

    def __init__(self, name=None, dtype=None, **kwargs):
        super().__init__(name=name, dtype=dtype, **kwargs)
        self.stateful = True  # All metric layers are stateful.
        self.built = True
        if not base_layer_utils.v2_dtype_behavior_enabled():
            # Defer to floatx unless an explicit dtype was requested.
            self._dtype = (
                backend.floatx()
                if dtype is None
                else tf.as_dtype(dtype).name
            )

    def __new__(cls, *args, **kwargs):
        obj = super(Metric, cls).__new__(cls)

        # If `update_state` is not in eager/tf.function and it is not from a
        # built-in metric, wrap it in `tf.function`. This is so that users
        # writing custom metrics in v1 need not worry about control
        # dependencies and returning ops.
        if base_layer_utils.is_in_eager_or_tf_function() or is_built_in(cls):
            obj_update_state = obj.update_state

            def update_state_fn(*args, **kwargs):
                control_status = tf.__internal__.autograph.control_status_ctx()
                ag_update_state = tf.__internal__.autograph.tf_convert(
                    obj_update_state, control_status
                )
                return ag_update_state(*args, **kwargs)

        else:
            if isinstance(obj.update_state, tf.__internal__.function.Function):
                update_state_fn = obj.update_state
            else:
                update_state_fn = tf.function(obj.update_state)

        obj.update_state = types.MethodType(
            metrics_utils.update_state_wrapper(update_state_fn), obj
        )

        obj_result = obj.result

        def result_fn(*args, **kwargs):
            control_status = tf.__internal__.autograph.control_status_ctx()
            ag_result = tf.__internal__.autograph.tf_convert(
                obj_result, control_status
            )
            return ag_result(*args, **kwargs)

        obj.result = types.MethodType(
            metrics_utils.result_wrapper(result_fn), obj
        )

        return obj

    def __call__(self, *args, **kwargs):
        """Accumulates statistics and then computes metric result value.

        Args:
          *args:
          **kwargs: A mini-batch of inputs to the Metric,
            passed on to `update_state()`.

        Returns:
          The metric value tensor.
        """

        def replica_local_fn(*args, **kwargs):
            """Updates the state of the metric in a replica-local context."""
            if any(
                isinstance(arg, keras_tensor.KerasTensor)
                for arg in tf.nest.flatten((args, kwargs))
            ):
                update_op = None
            else:
                update_op = self.update_state(*args, **kwargs)
            update_ops = []
            if update_op is not None:
                update_ops.append(update_op)
            with tf.control_dependencies(update_ops):
                result_t = self.result()
                # The metric instance is stashed on the result tensor so the
                # metric can be reset between epochs when it is used via
                # `add_metric` in graph mode.
                result_t._metric_obj = self
                return result_t

        from keras.distribute import distributed_training_utils

        return distributed_training_utils.call_replica_local_fn(
            replica_local_fn, *args, **kwargs
        )

    def __str__(self):
        args = ",".join(f"{k}={v}" for k, v in self.get_config().items())
        return f"{self.__class__.__name__}({args})"

    def __deepcopy__(self, memo):
        result = type(self)(name=self.name, dtype=self.dtype)
        memo[id(self)] = result
        for key, value in self.__dict__.items():
            if key in ("update_state", "result"):
                # The wrapped bound methods are recreated by `__new__`.
                continue
            elif key in ("_obj_reference_counts_dict",):
                tf.Module.__setattr__(result, key, copy.deepcopy(value, memo))
            elif key in ("_thread_local", "_metrics_lock"):
                setattr(result, key, value)
            else:
                setattr(result, key, copy.deepcopy(value, memo))
        return result

    @property
    def dtype(self):
        return self._dtype

    def get_config(self):
        """Returns the serializable config of the metric."""
        return {"name": self.name, "dtype": self.dtype}

    def reset_state(self):
        """Resets all of the metric state variables.

        This function is called between epochs/steps,
        when a metric is evaluated during training.
        """
        if not generic_utils.is_default(self.reset_states):
            warnings.warn(
                "Metric %s implements a `reset_states()` method; rename it "
                'to `reset_state()` (without the final "s"). The name '
                "`reset_states()` has been deprecated to improve API "
                "consistency." % (self.__class__.__name__,),
                stacklevel=2,
            )
            return self.reset_states()
        else:
            backend.batch_set_value([(v, 0) for v in self.variables])

    @abc.abstractmethod
    def update_state(self, *args, **kwargs):
        """Accumulates statistics for the metric.

        Note: This function is executed as a graph function in graph mode.
        This means:
          a) Operations on the same resource are executed in textual order.
             This should make it easier to do things like add the updated
             value of a variable to another, for example.
          b) You don't need to worry about collecting the update ops to execute.
             All update ops added to the graph by this function will be
             executed.
          As a result, code should generally work the same way with graph or
          eager execution.

        Args:
          *args:
          **kwargs: A mini-batch of inputs to the Metric.
        """
        raise NotImplementedError("Must be implemented in subclasses.")

    def merge_state(self, metrics):
        """Merges the state from one or more metrics.

        This method can be used by distributed systems to merge the state
        computed by different metric instances. Typically the state will be
        stored in the form of the metric's weights. For example, a
        tf.keras.metrics.Mean metric contains a list of two weight values: a
        total and a count. If there were two instances of a
        tf.keras.metrics.Accuracy that each independently aggregated partial
        state for an overall accuracy calculation, these two metric's states
        could be combined as follows:

        >>> m1 = tf.keras.metrics.Accuracy()
        >>> _ = m1.update_state([[1], [2]], [[0], [2]])

        >>> m2 = tf.keras.metrics.Accuracy()
        >>> _ = m2.update_state([[3], [4]], [[3], [4]])

        >>> m2.merge_state([m1])
        >>> m2.result().numpy()
        0.75

        Args:
          metrics: an iterable of metrics. The metrics must have compatible
            state.

        Raises:
          ValueError: If the provided iterable does not contain metrics matching
            the metric's required specifications.
        zMetric z is not compatible with )lenweights
ValueErrorziprE   
assign_add)r   metricsZassign_add_opsmetricweightZweight_to_addr    r    r!   merge_state#  s    zMetric.merge_statec                 C   s   t ddS )a,  Computes and returns the scalar metric value tensor or a dict of
        scalars.

        Result computation is an idempotent operation that simply calculates the
        metric value using the state variables.

        Returns:
          A scalar tensor, or a dictionary of scalar tensors.
        """
        raise NotImplementedError("Must be implemented in subclasses.")

    @doc_controls.for_subclass_implementers
    def add_weight(
        self,
        name,
        shape=(),
        aggregation=tf.VariableAggregation.SUM,
        synchronization=tf.VariableSynchronization.ON_READ,
        initializer=None,
        dtype=None,
    ):
        """Adds state variable. Only for use by subclasses."""
        if tf.distribute.has_strategy():
            strategy = tf.distribute.get_strategy()
        else:
            strategy = None

        # TPU Strategy doesn't support `ON_READ` sync with `SUM` aggregation.
        if backend.is_tpu_strategy(strategy):
            synchronization = tf.VariableSynchronization.ON_WRITE

        if getattr(self, "_mesh", None) is not None:
            # DTensor metrics keep a replicated copy of every state variable.
            additional_kwargs = {
                "layout": dtensor.Layout.replicated(
                    self._mesh, tf.TensorShape(shape).rank
                )
            }
        else:
            additional_kwargs = {}

        with tf_utils.maybe_init_scope(layer=self):
            return super().add_weight(
                name=name,
                shape=shape,
                dtype=self._dtype if dtype is None else dtype,
                trainable=False,
                initializer=initializer,
                collections=[],
                synchronization=synchronization,
                aggregation=aggregation,
                **additional_kwargs,
            )

    @property
    def trainable_weights(self):
        # Overridden from the Layer class to also track submetric weights.
        if self.trainable:
            trainable_weights = self._trainable_weights
            for m in self._metrics:
                trainable_weights += m.trainable_weights
            return self._dedup_weights(trainable_weights)
        else:
            return []

    @property
    def non_trainable_weights(self):
        # Overridden from the Layer class to also track submetric weights.
        if self.trainable:
            non_trainable_weights = self._non_trainable_weights
            for m in self._metrics:
                non_trainable_weights += m.non_trainable_weights
        else:
            non_trainable_weights = (
                self._non_trainable_weights + self._trainable_weights
            )
            for m in self._metrics:
                non_trainable_weights += m.weights
        return self._dedup_weights(non_trainable_weights)

    @property
    def _trackable_saved_model_saver(self):
        return metric_serialization.MetricSavedModelSaver(self)

    @generic_utils.default
    @doc_controls.do_not_generate_docs
    def reset_states(self):
        # Backwards-compatible alias of `reset_state()`; new subclasses
        # should only implement `reset_state()`.
        return self.reset_state()


class Reduce(Metric):
    """Encapsulates metrics that perform a reduce operation on the values.

    Args:
      reduction: a `tf.keras.metrics.Reduction` enum value.
      name: string name of the metric instance.
      dtype: (Optional) data type of the metric result.
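
    Standalone usage (illustrative; `Reduce` is an internal base class, so the
    sketch below goes through its public subclass `tf.keras.metrics.Sum`):

    >>> m = tf.keras.metrics.Sum()
    >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
    >>> m.result().numpy()
    4.0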
    """

    def __init__(self, reduction, name, dtype=None):
        super().__init__(name=name, dtype=dtype)
        self.reduction = reduction
        self.total = self.add_weight("total", initializer="zeros")
        if reduction in [
            metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
            metrics_utils.Reduction.WEIGHTED_MEAN,
        ]:
            self.count = self.add_weight("count", initializer="zeros")

    def update_state(self, values, sample_weight=None):
        """Accumulates statistics for computing the metric.

        Args:
          values: Per-example value.
          sample_weight: Optional weighting of each example. Defaults to 1.

        Returns:
          Update op.
        """
        [
            values
        ], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [values], sample_weight
        )
        try:
            values = tf.cast(values, self._dtype)
        except (ValueError, TypeError):
            msg = (
                "The output of a metric function can only be a single Tensor. "
                f"Received: {values}. "
            )
            if isinstance(values, dict):
                msg += (
                    "To return a dict of values, implement a custom Metric "
                    "subclass."
                )
            raise RuntimeError(msg)
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, self._dtype)
            # Update dimensions of weights to match with values if possible.
            (
                values,
                _,
                sample_weight,
            ) = losses_utils.squeeze_or_expand_dimensions(
                values, sample_weight=sample_weight
            )
            try:
                # Broadcast weights if possible.
                sample_weight = tf.__internal__.ops.broadcast_weights(
                    sample_weight, values
                )
            except ValueError:
                # Reduce values to same ndim as weight array.
                ndim = backend.ndim(values)
                weight_ndim = backend.ndim(sample_weight)
                if self.reduction == metrics_utils.Reduction.SUM:
                    values = tf.reduce_sum(
                        values, axis=list(range(weight_ndim, ndim))
                    )
                else:
                    values = tf.reduce_mean(
                        values, axis=list(range(weight_ndim, ndim))
                    )
            values = tf.multiply(values, sample_weight)

        value_sum = tf.reduce_sum(values)
        with tf.control_dependencies([value_sum]):
            update_total_op = self.total.assign_add(value_sum)

        # Exit early if the reduction doesn't track a `count`.
        if self.reduction == metrics_utils.Reduction.SUM:
            return update_total_op

        # Update `count` for reductions that take a mean.
        if self.reduction == metrics_utils.Reduction.SUM_OVER_BATCH_SIZE:
            num_values = tf.cast(tf.size(values), self._dtype)
        elif self.reduction == metrics_utils.Reduction.WEIGHTED_MEAN:
            if sample_weight is None:
                num_values = tf.cast(tf.size(values), self._dtype)
            else:
                num_values = tf.reduce_sum(sample_weight)
        else:
            raise NotImplementedError(
                f'Reduction "{self.reduction}" not implemented. Expected '
                '"sum", "weighted_mean", or "sum_over_batch_size".'
            )

        with tf.control_dependencies([update_total_op]):
            return self.count.assign_add(num_values)

    def result(self):
        if self.reduction == metrics_utils.Reduction.SUM:
            return tf.identity(self.total)
        elif self.reduction in [
            metrics_utils.Reduction.WEIGHTED_MEAN,
            metrics_utils.Reduction.SUM_OVER_BATCH_SIZE,
        ]:
            return tf.math.divide_no_nan(self.total, self.count)
        else:
            raise NotImplementedError(
                f'Reduction "{self.reduction}" not implemented. Expected '
                '"sum", "weighted_mean", or "sum_over_batch_size".'
            )


@keras_export("keras.metrics.Sum")
class Sum(Reduce):
    """Computes the (weighted) sum of the given values.

    For example, if values is [1, 3, 5, 7] then the sum is 16.
    If the weights were specified as [1, 1, 0, 0] then the sum would be 4.

    This metric creates one variable, `total`, that is used to compute the sum
    of `values`. This is ultimately returned as `sum`.

    If `sample_weight` is `None`, weights default to 1.  Use `sample_weight` of
    0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.Sum()
    >>> m.update_state([1, 3, 5, 7])
    >>> m.result().numpy()
    16.0

    Usage with `compile()` API:

    ```python
    model.add_metric(tf.keras.metrics.Sum(name='sum_1')(outputs))
    model.compile(optimizer='sgd', loss='mse')
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="sum", dtype=None):
        super().__init__(
            reduction=metrics_utils.Reduction.SUM, name=name, dtype=dtype
        )


@keras_export("keras.metrics.Mean")
class Mean(Reduce):
    """Computes the (weighted) mean of the given values.

    For example, if values is [1, 3, 5, 7] then the mean is 4.
    If the weights were specified as [1, 1, 0, 0] then the mean would be 2.

    This metric creates two variables, `total` and `count` that are used to
    compute the average of `values`. This average is ultimately returned as
    `mean` which is an idempotent operation that simply divides `total` by
    `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.Mean()
    >>> m.update_state([1, 3, 5, 7])
    >>> m.result().numpy()
    4.0
    >>> m.reset_state()
    >>> m.update_state([1, 3, 5, 7], sample_weight=[1, 1, 0, 0])
    >>> m.result().numpy()
    2.0

    Usage with `compile()` API:

    ```python
    model.add_metric(tf.keras.metrics.Mean(name='mean_1')(outputs))
    model.compile(optimizer='sgd', loss='mse')
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean", dtype=None):
        super().__init__(
            reduction=metrics_utils.Reduction.WEIGHTED_MEAN,
            name=name,
            dtype=dtype,
        )


@keras_export("keras.metrics.MeanMetricWrapper")
class MeanMetricWrapper(Mean):
    """Wraps a stateless metric function with the Mean metric.

    You could use this class to quickly build a mean metric from a function. The
    function needs to have the signature `fn(y_true, y_pred)` and return a
    per-sample loss array. `MeanMetricWrapper.result()` will return
    the average metric value across all samples seen so far.

    For example:

    ```python
    def accuracy(y_true, y_pred):
      return tf.cast(tf.math.equal(y_true, y_pred), tf.float32)

    accuracy_metric = tf.keras.metrics.MeanMetricWrapper(fn=accuracy)

    keras_model.compile(..., metrics=accuracy_metric)
    ```

    Args:
      fn: The metric function to wrap, with signature `fn(y_true, y_pred,
        **kwargs)`.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      **kwargs: Keyword arguments to pass on to `fn`.
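
    Standalone usage (an illustrative sketch using the `accuracy` function
    defined in the example above):

    >>> acc = tf.keras.metrics.MeanMetricWrapper(fn=accuracy)
    >>> acc.update_state([1, 2, 3, 4], [1, 2, 0, 4])
    >>> acc.result().numpy()
    0.75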
    """

    @dtensor_utils.inject_mesh
    def __init__(self, fn, name=None, dtype=None, **kwargs):
        super().__init__(name=name, dtype=dtype)
        self._fn = fn
        self._fn_kwargs = kwargs

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates metric statistics.

        `y_true` and `y_pred` should have the same shape.

        Args:
          y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
          y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
          sample_weight: Optional `sample_weight` acts as a
            coefficient for the metric. If a scalar is provided, then the metric
            is simply scaled by the given value. If `sample_weight` is a tensor
            of size `[batch_size]`, then the metric for each sample of the batch
            is rescaled by the corresponding element in the `sample_weight`
            vector. If the shape of `sample_weight` is `[batch_size, d0, ..
            dN-1]` (or can be broadcasted to this shape), then each metric
            element of `y_pred` is scaled by the corresponding value of
            `sample_weight`. (Note on `dN-1`: all metric functions reduce by 1
            dimension, usually the last axis (-1)).

        Returns:
          Update op.
        """
        y_true = tf.cast(y_true, self._dtype)
        y_pred = tf.cast(y_pred, self._dtype)
        [
            y_true,
            y_pred,
        ], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(
            [y_true, y_pred], sample_weight
        )
        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
            y_pred, y_true
        )

        ag_fn = tf.__internal__.autograph.tf_convert(
            self._fn, tf.__internal__.autograph.control_status_ctx()
        )
        matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
        mask = losses_utils.get_mask(matches)
        sample_weight = losses_utils.apply_valid_mask(
            matches, sample_weight, mask, self.reduction
        )
        return super().update_state(matches, sample_weight=sample_weight)

    def get_config(self):
        config = {
            k: backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
            for k, v in self._fn_kwargs.items()
        }

        if type(self) is MeanMetricWrapper:
            # Only include the wrapped function when the object is a plain
            # MeanMetricWrapper and not a subclass.
            config["fn"] = self._fn

        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))

    @classmethod
    def from_config(cls, config):
        from keras.metrics import get

        fn = config.pop("fn", None)
        if cls is MeanMetricWrapper:
            return cls(get(fn), **config)
        return super(MeanMetricWrapper, cls).from_config(config)


@keras_export("keras.metrics.MeanTensor")
class MeanTensor(Metric):
    """Computes the element-wise (weighted) mean of the given tensors.

    `MeanTensor` returns a tensor with the same shape of the input tensors. The
    mean value is updated by keeping local variables `total` and `count`. The
    `total` tracks the sum of the weighted values, and `count` stores the sum of
    the weighted counts.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      shape: (Optional) A list of integers, a tuple of integers, or a 1-D Tensor
        of type int32. If not specified, the shape is inferred from the values
        at the first call of update_state.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanTensor()
    >>> m.update_state([0, 1, 2, 3])
    >>> m.update_state([4, 5, 6, 7])
    >>> m.result().numpy()
    array([2., 3., 4., 5.], dtype=float32)

    >>> m.update_state([12, 10, 8, 6], sample_weight= [0, 0.2, 0.5, 1])
    >>> m.result().numpy()
    array([2.       , 3.6363635, 4.8      , 5.3333335], dtype=float32)

    >>> m = tf.keras.metrics.MeanTensor(dtype=tf.float64, shape=(1, 4))
    >>> m.result().numpy()
    array([[0., 0., 0., 0.]])
    >>> m.update_state([[0, 1, 2, 3]])
    >>> m.update_state([[4, 5, 6, 7]])
    >>> m.result().numpy()
    array([[2., 3., 4., 5.]])
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean_tensor", dtype=None, shape=None):
        super().__init__(name=name, dtype=dtype)
        self._shape = None
        self._total = None
        self._count = None
        self._built = False
        if shape is not None:
            self._build(shape)

    def _build(self, shape):
        self._shape = tf.TensorShape(shape)
        self._build_input_shape = self._shape
        # Create new state variables.
        self._total = self.add_weight(
            "total", shape=shape, initializer="zeros"
        )
        self._count = self.add_weight(
            "count", shape=shape, initializer="zeros"
        )
        with tf.init_scope():
            if not tf.executing_eagerly():
                backend._initialize_variables(backend._get_session())
        self._built = True

    @property
    def total(self):
        return self._total if self._built else None

    @property
    def count(self):
        return self._count if self._built else None

    def update_state(self, values, sample_weight=None):
        """Accumulates statistics for computing the element-wise mean.

        Args:
          values: Per-example value.
          sample_weight: Optional weighting of each example. Defaults to 1.

        Returns:
          Update op.
        """
        values = tf.cast(values, self._dtype)
        if not self._built:
            self._build(values.shape)
        elif values.shape != self._shape:
            raise ValueError(
                "MeanTensor input values must always have the same "
                "shape. Expected shape (set during the first call): "
                f"{self._shape}. Got: {values.shape}."
            )

        num_values = tf.ones_like(values)
        if sample_weight is not None:
            sample_weight = tf.cast(sample_weight, self._dtype)

            # Update dimensions of weights to match with values if possible.
            (
                values,
                _,
                sample_weight,
            ) = losses_utils.squeeze_or_expand_dimensions(
                values, sample_weight=sample_weight
            )
            try:
                # Broadcast weights if possible.
                sample_weight = tf.__internal__.ops.broadcast_weights(
                    sample_weight, values
                )
            except ValueError:
                # Reduce values to same ndim as weight array.
                ndim = backend.ndim(values)
                weight_ndim = backend.ndim(sample_weight)
                values = tf.reduce_mean(
                    values, axis=list(range(weight_ndim, ndim))
                )

            num_values = tf.multiply(num_values, sample_weight)
            values = tf.multiply(values, sample_weight)

        update_total_op = self._total.assign_add(values)
        with tf.control_dependencies([update_total_op]):
            return self._count.assign_add(num_values)

    def result(self):
        if not self._built:
            raise ValueError(
                "MeanTensor does not have any value yet. Please call the "
                "MeanTensor instance or use `.update_state(value)` before "
                "retrieving the result."
            )
        return tf.math.divide_no_nan(self.total, self.count)

    def reset_state(self):
        if self._built:
            backend.batch_set_value(
                [(v, np.zeros(v.shape.as_list())) for v in self.variables]
            )


class SumOverBatchSize(Reduce):
    """Computes the weighted sum over batch size of the given values.
    For example, if values is [1, 3, 5, 7] then the metric value is 4.
    If the weights were specified as [1, 1, 0, 0] then the value would be 1.

    This metric creates two variables, `total` and `count` that are used to
    compute the average of `values`. This average is ultimately returned as sum
    over batch size which is an idempotent operation that simply divides `total`
    by `count`.

    If `sample_weight` is `None`, weights default to 1.  Use `sample_weight` of
    0 to mask values.
    sum_over_batch_sizeNc                    s   t  jtjj||d d S r   )r   r   r   r   r   r   r   r    r!   r   w  s
    zSumOverBatchSize.__init__)r
  N)r[   r   r   r   r   r   r    r    r   r!   r	  h  s   r	  c                       s<   e Zd ZdZd	 fdd	Zd
 fdd	Z fddZ  ZS )SumOverBatchSizeMetricWrapperzAWraps a function with the `SumOverBatchSizeMetricWrapper` metric.Nc                    s    t  j||d || _|| _dS )ar  Creates a `SumOverBatchSizeMetricWrapper` instance.

        Args:
          fn: The metric function to wrap, with signature `fn(y_true, y_pred,
            **kwargs)`.
          name: (Optional) string name of the metric instance.
          dtype: (Optional) data type of the metric result.
          **kwargs: The keyword arguments that are passed on to `fn`.
        """
        super().__init__(name=name, dtype=dtype)
        self._fn = fn
        self._fn_kwargs = kwargs

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.cast(y_true, self._dtype)
        y_pred = tf.cast(y_pred, self._dtype)
        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
            y_pred, y_true
        )

        ag_fn = tf.__internal__.autograph.tf_convert(
            self._fn, tf.__internal__.autograph.control_status_ctx()
        )
        matches = ag_fn(y_true, y_pred, **self._fn_kwargs)
        mask = losses_utils.get_mask(matches)
        sample_weight = losses_utils.apply_valid_mask(
            matches, sample_weight, mask, self.reduction
        )
        return super().update_state(matches, sample_weight=sample_weight)

    def get_config(self):
        config = {
            k: backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
            for k, v in self._fn_kwargs.items()
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))


def clone_metric(metric):
    """Returns a clone of the metric if stateful, otherwise returns it as is."""
    if isinstance(metric, Metric):
        # Stateful metrics carry variables, so a fresh copy is rebuilt from
        # the config instead of sharing state between models.
        with tf.init_scope():
            return metric.__class__.from_config(metric.get_config())
    return metric


def clone_metrics(metrics):
    """Clones the given metric list/dict."""
    return tf.nest.map_structure(clone_metric, metrics)


def is_built_in(cls):
    return cls.__module__.startswith(
        ".".join(Metric.__module__.split(".")[:-1])
    )
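

# A minimal, self-contained sketch of how the pieces above fit together:
# `MeanMetricWrapper` turns a stateless `fn(y_true, y_pred)` into a stateful
# metric, and `clone_metrics` rebuilds fresh copies of stateful metrics from
# their configs. The `_accuracy` function, the sample data, and the metric
# name below are illustrative assumptions, not part of the library API.
if __name__ == "__main__":

    def _accuracy(y_true, y_pred):
        # Per-sample match indicator; the wrapper averages it over samples.
        return tf.cast(tf.math.equal(y_true, y_pred), tf.float32)

    metric = MeanMetricWrapper(fn=_accuracy, name="demo_accuracy")
    metric.update_state([1, 2, 3, 4], [1, 2, 0, 4])
    print("accuracy:", metric.result().numpy())  # 0.75

    # Cloning produces an independent metric with freshly initialized state.
    clones = clone_metrics([metric])
    print("cloned state:", clones[0].result().numpy())  # 0.0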