a
    SicR                     @   s   d Z ddlZddlm  mZ ddlmZ ddlm	Z	 ddl
mZ ddlmZ ddlmZ edg d	G d
d dZdaedg d	dd Zdd Zedg d	dd Zejdd Zdd Zdd Zdd ZdddZdS )z7Contains the Policy class for mixed precision training.    N)backend)base_layer_utils)device_compatibility_check)generic_utils)keras_exportzkeras.mixed_precision.Policy)v1c                   @   sb   e Zd ZdZdd Zdd Zedd Zedd	 Zed
d Z	dd Z
dd ZedddZdS )Policya  A dtype policy for a Keras layer.

    A dtype policy determines a layer's computation and variable dtypes. Each
    layer has a policy. Policies can be passed to the `dtype` argument of layer
    constructors, or a global policy can be set with
    `tf.keras.mixed_precision.set_global_policy`.

    Args:
      name: The policy name, which determines the compute and variable dtypes.
        Can be any dtype name, such as `'float32'` or `'float64'`, which causes
        both the compute and variable dtypes will be that dtype. Can also be the
        string `'mixed_float16'` or `'mixed_bfloat16'`, which causes the compute
        dtype to be float16 or bfloat16 and the variable dtype to be float32.

    Typically you only need to interact with dtype policies when using mixed
    precision, which is the use of float16 or bfloat16 for computations and
    float32 for variables. This is why the term `mixed_precision` appears in the
    API name. Mixed precision can be enabled by passing `'mixed_float16'` or
    `'mixed_bfloat16'` to `tf.keras.mixed_precision.set_global_policy`. See [the
    mixed precision
    guide](https://www.tensorflow.org/guide/keras/mixed_precision) for more
    information on how to use mixed precision.

    >>> tf.keras.mixed_precision.set_global_policy('mixed_float16')
    >>> layer1 = tf.keras.layers.Dense(10)
    >>> layer1.dtype_policy  # `layer1` will automatically use mixed precision
    <Policy "mixed_float16">
    >>> # Can optionally override layer to use float32
    >>> # instead of mixed precision.
    >>> layer2 = tf.keras.layers.Dense(10, dtype='float32')
    >>> layer2.dtype_policy
    <Policy "float32">
    >>> # Set policy back to initial float32 for future examples.
    >>> tf.keras.mixed_precision.set_global_policy('float32')

    In the example above, passing `dtype='float32'` to the layer is equivalent
    to passing `dtype=tf.keras.mixed_precision.Policy('float32')`. In general,
    passing a dtype policy name to a layer is equivalent to passing the
    corresponding policy, so it is never necessary to explicitly construct a
    `Policy` object.

    Note: `Model.compile` will automatically wrap an optimizer with a
    `tf.keras.mixed_precision.LossScaleOptimizer` if you use the
    `'mixed_float16'` policy. If you use a custom training loop instead of
    calling `Model.compile`, you should explicitly use a
    `tf.keras.mixed_precision.LossScaleOptimizer` to avoid numeric underflow
    with float16.

    ### How a layer uses its policy's compute dtype

    A layer casts its inputs to its compute dtype. This causes the layer's
    computations and output to also be in the compute dtype. For example:

    >>> x = tf.ones((4, 4, 4, 4), dtype='float64')
    >>> # `layer`'s policy defaults to float32.
    >>> layer = tf.keras.layers.Conv2D(filters=4, kernel_size=2)
    >>> layer.compute_dtype  # Equivalent to layer.dtype_policy.compute_dtype
    'float32'
    >>> # `layer` casts its inputs to its compute dtype and does computations in
    >>> # that dtype.
    >>> y = layer(x)
    >>> y.dtype
    tf.float32

    Note that the base `tf.keras.layers.Layer` class inserts the casts. If
    subclassing your own layer, you do not have to insert any casts.

    Currently, only tensors in the first argument to the layer's `call` method
    are casted (although this will likely be changed in a future minor release).
    For example:

    >>> class MyLayer(tf.keras.layers.Layer):
    ...   # Bug! `b` will not be casted.
    ...   def call(self, a, b):
    ...     return a + 1., b + 1.
    >>> a = tf.constant(1., dtype="float32")
    >>> b = tf.constant(1., dtype="float32")
    >>> layer = MyLayer(dtype="float64")
    >>> x, y = layer(a, b)
    >>> x.dtype
    tf.float64
    >>> y.dtype
    tf.float32

    If writing your own layer with multiple inputs, you should either explicitly
    cast other tensors to `self.compute_dtype` in `call` or accept all tensors
    in the first argument as a list.

    The casting only occurs in TensorFlow 2. If
    `tf.compat.v1.disable_v2_behavior()` has been called, you can enable the
    casting behavior with
    `tf.compat.v1.keras.layers.enable_v2_dtype_behavior()`.

    ### How a layer uses its policy's variable dtype

    The default dtype of variables created by `tf.keras.layers.Layer.add_weight`
    is the layer's policy's variable dtype.

    If a layer's compute and variable dtypes differ, `add_weight` will wrap
    floating-point variables with a special wrapper called an
    `AutoCastVariable`.  `AutoCastVariable` is identical to the original
    variable except it casts itself to the layer's compute dtype when used
    within `Layer.call`. This means if you are writing a layer, you do not have
    to explicitly cast the variables to the layer's compute dtype. For example:

    >>> class SimpleDense(tf.keras.layers.Layer):
    ...
    ...   def build(self, input_shape):
    ...     # With mixed precision, self.kernel is a float32 AutoCastVariable
    ...     self.kernel = self.add_weight('kernel', (input_shape[-1], 10))
    ...
    ...   def call(self, inputs):
    ...     # With mixed precision, self.kernel will be casted to float16
    ...     return tf.linalg.matmul(inputs, self.kernel)
    ...
    >>> layer = SimpleDense(dtype='mixed_float16')
    >>> y = layer(tf.ones((10, 10)))
    >>> y.dtype
    tf.float16
    >>> layer.kernel.dtype
    tf.float32

    A layer author can prevent a variable from being wrapped with an
    `AutoCastVariable` by passing `experimental_autocast=False` to `add_weight`,
    which is useful if the float32 value of the variable must be accessed within
    the layer.

    ### How to write a layer that supports mixed precision and float64.

    For the most part, layers will automatically support mixed precision and
    float64 without any additional work, due to the fact the base layer
    automatically casts inputs, creates variables of the correct type, and in
    the case of mixed precision, wraps variables with `AutoCastVariables`.

    The primary case where you need extra work to support mixed precision or
    float64 is when you create a new tensor, such as with `tf.ones` or
    `tf.random.normal`, In such cases, you must create the tensor of the correct
    dtype. For example, if you call `tf.random.normal`, you must pass the
    compute dtype, which is the dtype the inputs have been casted to:

    >>> class AddRandom(tf.keras.layers.Layer):
    ...
    ...   def call(self, inputs):
    ...     # We must pass `dtype=inputs.dtype`, otherwise a TypeError may
    ...     # occur when adding `inputs` to `rand`.
    ...     rand = tf.random.normal(shape=inputs.shape, dtype=inputs.dtype)
    ...     return inputs + rand
    >>> layer = AddRandom(dtype='mixed_float16')
    >>> y = layer(x)
    >>> y.dtype
    tf.float16

    If you did not pass `dtype=inputs.dtype` to `tf.random.normal`, a
    `TypeError` would have occurred. This is because the `tf.random.normal`'s
    dtype defaults to `"float32"`, but the input dtype is float16. You cannot
    add a float32 tensor with a float16 tensor.
    c                 C   sd   t |tjrtd|jf nt |ts6td|f || _| |\| _| _	|dv r`t
| d S )NzG'name' must be a string, not a DType. Instead, pass DType.name. Got: %sz$'name' must be a string, but got: %s)mixed_float16mixed_bloat16)
isinstancetfDType	TypeErrornamestr_name_parse_name_compute_dtype_variable_dtyper   log_device_compatibility_check)selfr    r   X/var/www/html/django/DPS/env/lib/python3.9/site-packages/keras/mixed_precision/policy.py__init__   s    
zPolicy.__init__c                 C   s   | drVd}|dv r |d7 }n"|dkr2|d7 }n|dkrB|d7 }|d	| 7 }t||d
krbdS |dkrndS |dkrzdS zt|j}W n$ ty   d|f }t|Y n0 ||fS )zParses a Policy name into a compute and variable dtype.

        Args:
          name: The name of the policy:

        Returns:
          The (compute_dtype, variable_dtype) pair.
        _float32_varszEPolicies ending in '_float32_vars' have been removed from TensorFlow.)infer_float32_varsinfer_with_float32_varszC Please use the 'mixed_float16' or 'mixed_bfloat16' policy instead.float16_with_float32_varsz/ Please use the 'mixed_float16' policy instead.bfloat16_with_float32_varsz0 Please use the 'mixed_bfloat16' policy instead.z Got policy name: '%s'r	   )float16float32mixed_bfloat16)bfloat16r    _infer)NNzCannot convert value %s to a mixed precision Policy. Valid policies include 'mixed_float16', 'mixed_bfloat16', and the name of any dtype such as 'float32'.)endswith
ValueErrorr   as_dtyper   r   )r   r   	error_msgdtypeerrorr   r   r   r      s8    	

zPolicy._parse_namec                 C   s   | j S )a  The variable dtype of this policy.

        This is the dtype layers will create their variables in, unless a layer
        explicitly chooses a different dtype. If this is different than
        `Policy.compute_dtype`, Layers will cast variables to the compute dtype
        to avoid type errors.

        Variable regularizers are run in the variable dtype, not the compute
        dtype.

        Returns:
          The variable dtype of this policy, as a string.
        )r   r   r   r   r   variable_dtype  s    zPolicy.variable_dtypec                 C   s   | j S )aY  The compute dtype of this policy.

        This is the dtype layers will do their computations in. Typically layers
        output tensors with the compute dtype as well.

        Note that even if the compute dtype is float16 or bfloat16, hardware
        devices may not do individual adds, multiplies, and other fundamental
        operations in float16 or bfloat16, but instead may do some of them in
        float32 for numeric stability. The compute dtype is the dtype of the
        inputs and outputs of the TensorFlow ops that the layer executes.
        Internally, many TensorFlow ops will do certain internal calculations in
        float32 or some other device-internal intermediate format with higher
        precision than float16/bfloat16, to increase numeric stability.

        For example, a `tf.keras.layers.Dense` layer, when run on a GPU with a
        float16 compute dtype, will pass float16 inputs to `tf.linalg.matmul`.
        But, `tf.linalg.matmul` will do use float32 intermediate math. The
        performance benefit of float16 is still apparent, due to increased
        memory bandwidth and the fact modern GPUs have specialized hardware for
        computing matmuls on float16 inputs while still keeping intermediate
        computations in float32.

        Returns:
          The compute dtype of this policy, as a string.
        )r   r*   r   r   r   compute_dtype  s    zPolicy.compute_dtypec                 C   s   | j S )z Returns the name of this policy.r   r*   r   r   r   r   /  s    zPolicy.namec                 C   s
   d| j  S )Nz<Policy "%s">r-   r*   r   r   r   __repr__4  s    zPolicy.__repr__c                 C   s
   d| j iS )Nr   )r   r*   r   r   r   
get_config7  s    zPolicy.get_configNc                 C   s&   ~d|v r|  }|d= | f i |S )N
loss_scale)copy)clsconfigcustom_objectsr   r   r   from_config:  s
    zPolicy.from_config)N)__name__
__module____qualname____doc__r   r   propertyr+   r,   r   r.   r/   classmethodr5   r   r   r   r   r      s    6


r   z#keras.mixed_precision.global_policyc                   C   s(   t du r$t rtt S tdS t S )a  Returns the global dtype policy.

    The global policy is the default `tf.keras.mixed_precision.Policy` used for
    layers, if no policy is passed to the layer constructor. If no policy has
    been set with `keras.mixed_precision.set_global_policy`, this will return a
    policy constructed from `tf.keras.backend.floatx()` (floatx defaults to
    float32).

    >>> tf.keras.mixed_precision.global_policy()
    <Policy "float32">
    >>> tf.keras.layers.Dense(10).dtype_policy  # Defaults to the global policy
    <Policy "float32">

    If TensorFlow 2 behavior has been disabled with
    `tf.compat.v1.disable_v2_behavior()`, this will instead return a special
    "_infer" policy which infers the dtype from the dtype of the first input the
    first time the layer is called. This behavior matches the behavior that
    existed in TensorFlow 1.

    See `tf.keras.mixed_precision.Policy` for more information on policies.

    Returns:
      The global Policy.
    Nr#   )_global_policyr   v2_dtype_behavior_enabledr   r   floatxr   r   r   r   global_policyL  s
    r?   c                 C   s    t jj rtdj| dd S )Na@  The global dtype policy cannot be set to "{policy.name}", because the mixed precision graph rewrite has already been enabled.
At most, one of the following can be called:

  1. tf.compat.v1.train.enable_mixed_precision_graph_rewrite() (You called this first)
  2. tf.keras.mixed_precision.set_global_policy() with a mixed precision policy (You called this second)

You called both functions, which is an error, because both functions enable you to use mixed precision. If in doubt which function to use, use the second, as it supports Eager execution and is more customizable.policy)r   __internal__train(is_mixed_precision_graph_rewrite_enabledr%   formatr@   r   r   r   2_check_if_mixed_precision_graph_rewrite_is_enabledn  s    
rF   z'keras.mixed_precision.set_global_policyc                 C   s   t  std| dur*t| ts*t| } | duo<| j| jk}|rJt|  | durz| jdurzt	| jj
sztd| jf | atjj| dS )a  Sets the global dtype policy.

    The global policy is the default `tf.keras.mixed_precision.Policy` used for
    layers, if no policy is passed to the layer constructor.

    >>> tf.keras.mixed_precision.set_global_policy('mixed_float16')
    >>> tf.keras.mixed_precision.global_policy()
    <Policy "mixed_float16">
    >>> tf.keras.layers.Dense(10).dtype_policy
    <Policy "mixed_float16">
    >>> # Global policy is not used if a policy
    >>> # is directly passed to constructor
    >>> tf.keras.layers.Dense(10, dtype='float64').dtype_policy
    <Policy "float64">
    >>> tf.keras.mixed_precision.set_global_policy('float32')

    If no global policy is set, layers will instead default to a Policy
    constructed from `tf.keras.backend.floatx()`.

    To use mixed precision, the global policy should be set to `'mixed_float16'`
    or `'mixed_bfloat16'`, so that every layer uses a 16-bit compute dtype and
    float32 variable dtype by default.

    Only floating point policies can be set as the global policy, such as
    `'float32'` and `'mixed_float16'`. Non-floating point policies such as
    `'int32'` and `'complex64'` cannot be set as the global policy because most
    layers do not support such policies.

    See `tf.keras.mixed_precision.Policy` for more information.

    Args:
      policy: A Policy, or a string that will be converted to a Policy. Can also
        be None, in which case the global policy will be constructed from
        `tf.keras.backend.floatx()`
    zThe global policy can only be set in TensorFlow 2 or if V2 dtype behavior has been set. To enable V2 dtype behavior, call "tf.compat.v1.keras.layers.enable_v2_dtype_behavior()"Nzset_global_policy can only be used to set the global policy to floating-point policies, such as "float32" and "mixed_float16", but got policy: %s)r   r=   r%   r   r   r,   r+   rF   r   r&   is_floatingr   r<   rB   rC    set_using_mixed_precision_policy)rA   is_mixed_policyr   r   r   set_global_policy  s.    &rJ   c                 c   s.   t }zt|  dV  W t| n
t| 0 dS )zA context manager that sets the global Policy under it.

    Args:
      policy: A Policy, or a string that will be converted to a Policy..

    Yields:
      Nothing.
    N)r<   rJ   )rA   
old_policyr   r   r   policy_scope  s
    
rL   c                 C   s*   zt |  W dS  ty$   Y dS 0 d S )NTF)r   r&   r   )r(   r   r   r   _is_convertible_to_dtype  s
    
rM   c                 C   s    t | tko| jdkpt| jS )a  Returns True if the Policy is equivalent to a single dtype.

    A policy is equivalent to a single dtype if the policy's compute and
    variable dtypes are the same and the policy's type is Policy and not a
    subclass of Policy.

    The "_infer" policy is considered equivalent to a single dtype.

    Args:
      policy: A Policy.

    Returns:
      True, if the policy is equivalent to a single dtype.
    r#   )typer   r   rM   r@   r   r   r   _policy_equivalent_to_dtype  s    rO   c                 C   s&   t | r| jdkrd S | jS t| S )Nr#   )rO   r   r   serialize_keras_objectr@   r   r   r   	serialize  s    rQ   c                 C   sF   t | trt| rt| S | d u r*tdS ttd}tj| ||ddS )Nr#   )r   PolicyV1zdtype policy)module_objectsr4   printable_module_name)r   r   rM   r   r   deserialize_keras_object)r3   r4   rS   r   r   r   deserialize  s    
rV   )N)r9   
contextlibtensorflow.compat.v2compatv2r   kerasr   keras.enginer   keras.mixed_precisionr   keras.utilsr    tensorflow.python.util.tf_exportr   r   r<   r?   rF   rJ   contextmanagerrL   rM   rO   rQ   rV   r   r   r   r   <module>   s.   
  ,

!

B
	