"""Utilities related to distributed training."""

import tensorflow.compat.v2 as tf
from absl import flags

from keras import backend

FLAGS = flags.FLAGS


def global_batch_size_supported(distribution_strategy):
    return distribution_strategy.extended._global_batch_size


def call_replica_local_fn(fn, *args, **kwargs):
    """Call a function that uses replica-local variables.

    This function correctly handles calling `fn` in a cross-replica
    context.

    Args:
      fn: The function to call.
      *args: Positional arguments to the `fn`.
      **kwargs: Keyword argument to `fn`.

    Returns:
      The result of calling `fn`.
    """
    strategy = None
    if "strategy" in kwargs:
        strategy = kwargs.pop("strategy")
    elif tf.distribute.has_strategy():
        strategy = tf.distribute.get_strategy()

    # TPU strategies do not implement replica-local variables, so `fn` is
    # called directly in that case.
    is_tpu = backend.is_tpu_strategy(strategy)
    if (not is_tpu) and strategy and tf.distribute.in_cross_replica_context():
        with strategy.scope():
            return strategy.extended.call_for_each_replica(fn, args, kwargs)
    return fn(*args, **kwargs)


def is_distributed_variable(v):
    """Returns whether `v` is a distributed variable."""
    return isinstance(v, tf.distribute.DistributedValues) and isinstance(
        v, tf.Variable)


def get_strategy():
    """Creates a `tf.distribute.Strategy` object from flags.

    Example usage:

    ```python
    strategy = utils.get_strategy()
    with strategy.scope():
      model = tf.keras.Sequential([tf.keras.layers.Dense(10)])

    model.compile(...)
    train_ds, test_ds = ...
    model.fit(train_ds, validation_data=test_ds, epochs=10)
    ```

    Returns:
      `tf.distribute.Strategy` instance.
    """
    cls = FLAGS.keras_distribute_strategy_class
    accepted_strats = {
        "tpu", "multi_worker_mirrored", "mirrored", "parameter_server",
        "one_device",
    }
    if cls == "tpu":
        tpu_addr = FLAGS.keras_distribute_strategy_tpu_addr
        if not tpu_addr:
            raise ValueError(
                "When using a TPU strategy, you must set the flag "
                "`keras_distribute_strategy_tpu_addr` (TPU address).")
        cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=tpu_addr)
        tf.config.experimental_connect_to_cluster(cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
        strategy = tf.distribute.TPUStrategy(cluster_resolver)
    elif cls == "multi_worker_mirrored":
        strategy = tf.distribute.MultiWorkerMirroredStrategy()
    elif cls == "mirrored":
        strategy = tf.distribute.MirroredStrategy()
    elif cls == "parameter_server":
        cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()
        strategy = tf.distribute.experimental.ParameterServerStrategy(
            cluster_resolver)
    elif cls == "one_device":
        strategy = tf.distribute.OneDeviceStrategy("/gpu:0")
    else:
        raise ValueError(
            "Unknown distribution strategy flag. Received: "
            f"keras_distribute_strategy_class={cls}. "
            f"It should be one of {accepted_strats}")
    return strategy
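

# A minimal usage sketch for `call_replica_local_fn`, assuming a
# `MirroredStrategy` and a SUM-aggregated metric variable; `total` and
# `_update` are hypothetical names used only for illustration:
#
#   strategy = tf.distribute.MirroredStrategy()
#   with strategy.scope():
#       total = tf.Variable(0.0, aggregation=tf.VariableAggregation.SUM)
#
#   def _update(value):
#       total.assign_add(value)
#
#   # Called from a cross-replica context, `_update` is dispatched once per
#   # replica via `strategy.extended.call_for_each_replica`; inside a replica
#   # context it is simply invoked directly.
#   call_replica_local_fn(_update, tf.constant(1.0), strategy=strategy)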