"""Helpers to connect to remote servers."""

import copy

from absl import logging

from tensorflow.core.protobuf.tensorflow_server_pb2 import ServerDef
from tensorflow.python import pywrap_tfe
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute.cluster_resolver import cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.platform import remote_utils
from tensorflow.python.training import server_lib
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export

_GRPC_PREFIX = "grpc://"
_LOCAL_MASTERS = ("", "local")


@tf_export("config.experimental_connect_to_host")
def connect_to_remote_host(remote_host=None, job_name="worker"):
  """Connects to a single machine to enable remote execution on it.

  Will make devices on the remote host available to use. Note that calling this
  more than once will work, but will invalidate any tensor handles on the old
  remote devices.

  Using the default job_name of worker, you can schedule ops to run remotely as
  follows:
  ```python
  # When eager execution is enabled, connect to the remote host.
  tf.config.experimental_connect_to_host("exampleaddr.com:9876")

  with ops.device("job:worker/replica:0/task:1/device:CPU:0"):
    # The following tensors should be resident on the remote device, and the op
    # will also execute remotely.
    x1 = array_ops.ones([2, 2])
    x2 = array_ops.ones([2, 2])
    y = math_ops.matmul(x1, x2)
  ```
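
  A list of addresses can also be passed. As a brief sketch (the host names
  below are hypothetical), this connects to two remote workers at once:
  ```python
  tf.config.experimental_connect_to_host(
      ["exampleaddr0.com:9876", "exampleaddr1.com:9876"])
  ```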

  Args:
    remote_host: a single remote server address, or a list of addresses, in
      host-port format.
    job_name: The job name under which the new server will be accessible.

  Raises:
    ValueError: if remote_host is None.
  """
  if not remote_host:
    raise ValueError("Must provide at least one remote_host")

  remote_hosts = nest.flatten(remote_host)
  cluster_spec = server_lib.ClusterSpec(
      {job_name: [_strip_prefix(host, _GRPC_PREFIX) for host in remote_hosts]})

  connect_to_cluster(cluster_spec)


@tf_export("config.experimental_connect_to_cluster")
def connect_to_cluster(cluster_spec_or_resolver,
                       job_name="localhost",
                       task_index=0,
                       protocol=None,
                       make_master_device_default=True,
                       cluster_device_filters=None):
  """Connects to the given cluster.

  Will make devices on the cluster available to use. Note that calling this more
  than once will work, but will invalidate any tensor handles on the old remote
  devices.

  If the given local job name is not present in the cluster specification, it
  will be automatically added, using an unused port on the localhost.

  Device filters can be specified to isolate groups of remote tasks to avoid
  undesired accesses between workers. A worker that accesses resources or
  launches ops / functions on filtered-out remote devices gets errors (unknown
  devices). For any remote task, if no device filter is present, all cluster
  devices will be visible; if any device filter is specified, it can only
  see devices matching at least one filter. Devices on the task itself are
  always visible. Device filters can be partially specified.

  For example, for a cluster set up for parameter server training, the following
  device filters might be specified:

  ```python
  cdf = tf.config.experimental.ClusterDeviceFilters()
  # For any worker, only the devices on PS nodes and itself are visible
  for i in range(num_workers):
    cdf.set_device_filters('worker', i, ['/job:ps'])
  # Similarly for any ps, only the devices on workers and itself are visible
  for i in range(num_ps):
    cdf.set_device_filters('ps', i, ['/job:worker'])

  tf.config.experimental_connect_to_cluster(cluster_def,
                                            cluster_device_filters=cdf)
  ```
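
  As a minimal usage sketch (the worker addresses below are hypothetical), the
  cluster can also be described directly with a `tf.train.ClusterSpec`:

  ```python
  cluster_spec = tf.train.ClusterSpec(
      {"worker": ["worker0.example.com:8470", "worker1.example.com:8470"]})
  tf.config.experimental_connect_to_cluster(cluster_spec)

  with tf.device("/job:worker/replica:0/task:0/device:CPU:0"):
    y = tf.matmul(tf.ones([2, 2]), tf.ones([2, 2]))
  ```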

  Args:
    cluster_spec_or_resolver: A `ClusterSpec` or `ClusterResolver` describing
      the cluster.
    job_name: The name of the local job.
    task_index: The local task index.
    protocol: The communication protocol, such as `"grpc"`. If unspecified, will
      use the default from `python/platform/remote_utils.py`.
    make_master_device_default: If True and a cluster resolver is passed, will
      automatically enter the master task device scope, which indicates the
      master becomes the default device to run ops. It won't do anything if
      a cluster spec is passed. Will throw an error if the caller is currently
      already in some device scope.
    cluster_device_filters: an instance of
      `tf.train.experimental.ClusterDeviceFilters` that specifies device
      filters for the remote tasks in the cluster.
  """
  if not context.executing_eagerly():
    raise ValueError(
        "`tf.config.experimental_connect_to_cluster` can only be called in "
        "eager mode.")

  protocol = protocol or remote_utils.get_default_communication_protocol()
  if isinstance(cluster_spec_or_resolver, server_lib.ClusterSpec):
    cluster_spec = cluster_spec_or_resolver
  elif isinstance(cluster_spec_or_resolver, cluster_resolver.ClusterResolver):
    if cluster_spec_or_resolver.master() in _LOCAL_MASTERS:
      # The master is local; there is nothing to connect to.
      return
    cluster_spec = cluster_spec_or_resolver.cluster_spec()
  else:
    raise ValueError(
        "`cluster_spec_or_resolver` must be a `ClusterSpec` or a "
        "`ClusterResolver`.")

  cluster_def = copy.deepcopy(cluster_spec.as_cluster_def())
  if cluster_device_filters:
    if isinstance(cluster_device_filters, server_lib.ClusterDeviceFilters):
      cluster_device_filters = copy.deepcopy(
          cluster_device_filters._as_cluster_device_filters())  # pylint: disable=protected-access
    else:
      raise ValueError("`cluster_device_filters` must be an instance of "
                       "`tf.train.experimental.ClusterDeviceFilters`.")

  # Check whether the server def has changed. Do the check before adding the
  # local job so the current local job is not "forgotten" in the comparison.
  is_server_def_changed = False
  current_server_def = context.get_server_def()
  if current_server_def and job_name not in cluster_spec.jobs:
    for i, job in enumerate(current_server_def.cluster.job):
      if job.name == job_name:
        del current_server_def.cluster.job[i]
  if (current_server_def is None or current_server_def.cluster != cluster_def
      or current_server_def.job_name != job_name or
      current_server_def.task_index != task_index):
    is_server_def_changed = True

  # Automatically add the local job, if it is not part of the cluster spec,
  # using an unused port on the localhost.
  if job_name not in cluster_spec.jobs:
    local_port = pywrap_tfe.TF_PickUnusedPortOrDie()
    job_def = cluster_def.job.add()
    job_def.name = job_name
    job_def.tasks[0] = "localhost:{}".format(local_port)

  if context.context().coordination_service is None:
    # Maybe enable coordination service for the chosen communication protocol.
    coordination_service = remote_utils.coordination_service_type(protocol)
    if coordination_service:
      context.context().configure_coordination_service(coordination_service)

  server_def = ServerDef(
      cluster=cluster_def,
      job_name=job_name,
      task_index=task_index,
      protocol=protocol,
      default_session_config=context.context().config,
      cluster_device_filters=cluster_device_filters)

  if is_server_def_changed:
    context.set_server_def(server_def)
  else:
    context.update_server_def(server_def)

  if make_master_device_default and isinstance(
      cluster_spec_or_resolver,
      cluster_resolver.ClusterResolver) and cluster_spec_or_resolver.master():
    master = cluster_spec_or_resolver.master()
    master_job_name = None
    master_task_id = None
    for job_name in cluster_spec.jobs:
      for task_id in cluster_spec.task_indices(job_name):
        task_address = cluster_spec.task_address(job_name, task_id)
        if master in task_address or task_address in master:
          master_job_name = job_name
          master_task_id = task_id
          break

    if not master_job_name:
      raise ValueError(
          "`make_master_device_default` is set to True but cannot find "
          "master %s in the cluster" % master)

    master_device = "/job:{}/replica:0/task:{}".format(master_job_name,
                                                       master_task_id)
    master_device = device_util.canonicalize(master_device)
    current_device = device_util.current()
    if current_device:
      current_device = device_util.canonicalize(current_device)
    if current_device and current_device != master_device:
      raise ValueError("`connect_to_cluster` is called inside existing device "
                       "scope %s, which is different from the master device "
                       "scope %s to enter. This is not allowed." %
                       (current_device, master_device))

    if not current_device:
      logging.info("Entering into master device scope: %s", master_device)
      ops.device(master_device).__enter__()


def _strip_prefix(s, prefix):
  return s[len(prefix):] if s.startswith(prefix) else s