) -> None:
assert self.lock.locked()
self.known_workers.add(worker)
- self.start_per_bundle[uuid] = time.time()
+ self.start_per_bundle[uuid] = None
x = self.in_flight_bundles_by_worker.get(worker, set())
x.add(uuid)
self.in_flight_bundles_by_worker[worker] = x
assert self.lock.locked()
self.bundle_details_by_uuid[details.uuid] = details
+ def record_release_worker(
+ self,
+ worker: RemoteWorkerRecord,
+ uuid: str,
+ was_cancelled: bool,
+ ) -> None:
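+ """Release the worker, taking the status lock first. Use this
+ variant when the caller does not already hold self.lock."""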
+ with self.lock:
+ self.record_release_worker_already_locked(
+ worker, uuid, was_cancelled
+ )
+
def record_release_worker_already_locked(
self,
worker: RemoteWorkerRecord,
self.finished_bundle_timings_per_worker[worker] = x
self.finished_bundle_timings.append(bundle_latency)
+ def record_processing_began(self, uuid: str):
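+ """Record the time at which remote processing of a bundle actually
+ began (i.e. after setup / copying its code to the worker)."""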
+ with self.lock:
+ self.start_per_bundle[uuid] = time.time()
+
def total_in_flight(self) -> int:
assert self.lock.locked()
total_in_flight = 0
None
)
pid = str(details.pid) if details is not None else "TBD"
- sec = ts - self.start_per_bundle[bundle_uuid]
- ret += f' (pid={pid}): {bundle_uuid} for {sec:.1f}s so far '
+ if self.start_per_bundle[bundle_uuid] is not None:
+ sec = ts - self.start_per_bundle[bundle_uuid]
+ ret += f' (pid={pid}): {bundle_uuid} for {sec:.1f}s so far '
+ else:
+ ret += f' {bundle_uuid} setting up / copying data...'
+ sec = 0.0
+
if qworker is not None:
if sec > qworker[1]:
ret += f'{bg("red")}>💻p95{reset()} '
raise Exception(msg)
self.policy.register_worker_pool(self.workers)
self.cv = threading.Condition()
+ logger.debug(f'Creating {self.worker_count} local threads, one per remote worker.')
self._helper_executor = fut.ThreadPoolExecutor(
thread_name_prefix="remote_executor_helper",
max_workers=self.worker_count,
self.status.periodic_dump(self.total_bundles_submitted)
# Look for bundles to reschedule
- if len(self.status.finished_bundle_timings) > 7:
+ num_done = len(self.status.finished_bundle_timings)
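+ # Only consider scheduling backups once there's enough timing
+ # data to judge what "slow" means (or a little sooner if a
+ # worker is sitting idle).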
+ if num_done > 7 or (num_done > 5 and self.is_worker_available()):
for worker, bundle_uuids in self.status.in_flight_bundles_by_worker.items():
for uuid in bundle_uuids:
bundle = self.status.bundle_details_by_uuid.get(uuid, None)
return True
return False
- def launch(self, bundle: BundleDetails) -> Any:
+ def launch(self, bundle: BundleDetails, override_avoid_machine=None) -> Any:
"""Find a worker for bundle or block until one is available."""
self.adjust_task_count(+1)
uuid = bundle.uuid
hostname = bundle.hostname
- avoid_machine = None
+ avoid_machine = override_avoid_machine
+ is_original = bundle.src_bundle is None
# Try not to schedule a backup on the same host as the original.
- if bundle.src_bundle is not None:
+ if avoid_machine is None and bundle.src_bundle is not None:
avoid_machine = bundle.src_bundle.machine
worker = None
while worker is None:
worker = self.find_available_worker_or_block(avoid_machine)
+
+ # Ok, found a worker.
bundle.worker = worker
machine = bundle.machine = worker.machine
username = bundle.username = worker.username
fname = bundle.fname
self.status.record_acquire_worker(worker, uuid)
- logger.debug(f'Running bundle {uuid} on {worker}...')
+ logger.debug(f'{uuid}/{fname}: Running bundle on {worker}...')
# Before we do any work, make sure the bundle is still viable.
if self.check_if_cancelled(bundle):
return self.post_launch_work(bundle)
except Exception as e:
logger.exception(e)
- logger.info(f"{uuid}/{fname}: bundle seems to have failed?!")
- if bundle.failure_count < config.config['executors_max_bundle_failures']:
- return self.launch(bundle)
+ logger.error(
+ f'{uuid}/{fname}: bundle says it\'s cancelled upfront but no results?!'
+ )
+ assert bundle.worker is not None
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
+ self.release_worker(bundle.worker)
+ self.adjust_task_count(-1)
+ if is_original:
+ # Weird. We are the original owner of this
+ # bundle. For it to have been cancelled, a backup
+ # must have already started and completed before
+ # we even got started. Moreover, the backup says
+ # it is done but we can't find the results it
+ # should have copied over. Reschedule the whole
+ # thing.
+ return self.emergency_retry_nasty_bundle(bundle)
else:
- logger.info(f"{uuid}/{fname}: bundle is poison, giving up on it.")
+ # Expected(?). We're a backup and our bundle is
+ # cancelled before we even got started. Something
+ # went bad in post_launch_work (I actually don't
+ # see what?) but probably not worth worrying
+ # about.
return None
- # Send input to machine if it's not local.
+ # Send input code / data to worker machine if it's not local.
if hostname not in machine:
- cmd = f'{RSYNC} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
- logger.info(f"{uuid}/{fname}: Copying work to {worker} via {cmd}")
- run_silently(cmd)
+ try:
+ cmd = f'{RSYNC} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
+ start_ts = time.time()
+ logger.info(f"{uuid}/{fname}: Copying work to {worker} via {cmd}.")
+ run_silently(cmd)
+ xfer_latency = time.time() - start_ts
+ logger.info(f"{uuid}/{fname}: Copying done in {xfer_latency}s.")
+ except Exception as e:
+ logger.exception(e)
+ logger.error(
+ f'{uuid}/{fname}: failed to send instructions to worker machine?!?'
+ )
+ assert bundle.worker is not None
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
+ self.release_worker(bundle.worker)
+ self.adjust_task_count(-1)
+ if is_original:
+ # Weird. We tried to copy the code to the worker and it failed...
+ # And we're the original bundle. We have to retry.
+ return self.emergency_retry_nasty_bundle(bundle)
+ else:
+ # This is actually expected; we're a backup.
+ # There's a race condition where someone else
+ # already finished the work and removed the source
+ # code file before we could copy it. No biggie.
+ return None
- # Do it.
+ # Kick off the work. Note that if this fails we let
+ # wait_for_process deal with it.
+ self.status.record_processing_began(uuid)
cmd = (f'{SSH} {bundle.username}@{bundle.machine} '
f'"source py39-venv/bin/activate &&'
f' /home/scott/lib/python_modules/remote_worker.py'
f' --code_file {bundle.code_file} --result_file {bundle.result_file}"')
+ logger.debug(f'{uuid}/{fname}: Executing {cmd} in the background to kick off work...')
p = cmd_in_background(cmd, silent=True)
bundle.pid = pid = p.pid
- logger.info(f"{uuid}/{fname}: Start training on {worker} via {cmd} (background pid {pid})")
+ logger.debug(f'{uuid}/{fname}: Local ssh process pid={pid}; remote worker is {machine}.')
+ return self.wait_for_process(p, bundle, 0)
+
+ def wait_for_process(self, p: subprocess.Popen, bundle: BundleDetails, depth: int) -> Any:
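+ """Wait on the ssh subprocess that is running a bundle remotely,
+ periodically checking whether some backup worker finished the
+ bundle first. Called again (with depth + 1) if wrapping up fails."""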
+ uuid = bundle.uuid
+ machine = bundle.machine
+ fname = bundle.fname
+ pid = p.pid
+ if depth > 3:
+ logger.error(
+ f"I've gotten repeated errors waiting on this bundle; giving up on pid={pid}."
+ )
+ p.terminate()
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
+ self.release_worker(bundle.worker)
+ self.adjust_task_count(-1)
+ return self.emergency_retry_nasty_bundle(bundle)
+ # Spin until either the ssh job we scheduled finishes the
+ # bundle or some backup worker signals that they finished it
+ # before we could.
while True:
try:
p.wait(timeout=0.25)
except subprocess.TimeoutExpired:
self.heartbeat()
-
- # Both source and backup bundles can be cancelled by
- # the other depending on which finishes first.
if self.check_if_cancelled(bundle):
- p.terminate()
+ logger.info(
+ f'{uuid}/{fname}: another worker finished the bundle, checking it out...'
+ )
break
else:
- logger.debug(
- f"{uuid}/{fname}: pid {pid} has finished its work normally."
+ logger.info(
+ f"{uuid}/{fname}: pid {pid} ({machine}) our ssh finished, checking it out..."
)
+ p = None
break
+ # If we get here we believe the bundle is done; either the ssh
+ # subprocess finished (hopefully successfully) or we noticed
+ # that some other worker seems to have completed the bundle
+ # and we're bailing out.
try:
- return self.post_launch_work(bundle)
+ ret = self.post_launch_work(bundle)
+ if ret is not None and p is not None:
+ p.terminate()
+ return ret
+
+ # Something went wrong; e.g. we could not copy the results
+ # back, clean up after ourselves on the remote machine, or
+ # unpickle the results we got from the remote machine. If we
+ # still have an active ssh subprocess, keep waiting on it.
+ # Otherwise, time for an emergency reschedule.
except Exception as e:
logger.exception(e)
- logger.info(f"{uuid}: Bundle seems to have failed?!")
- if bundle.failure_count < config.config['executors_max_bundle_failures']:
- return self.launch(bundle)
- logger.info(f"{uuid}: Bundle is poison, giving up on it.")
- return None
+ logger.error(f'{uuid}/{fname}: Something unexpected just happened...')
+ if p is not None:
+ logger.warning(
+ f"{uuid}/{fname}: Failed to wrap up \"done\" bundle, re-waiting on active ssh."
+ )
+ return self.wait_for_process(p, bundle, depth + 1)
+ else:
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
+ self.release_worker(bundle.worker)
+ self.adjust_task_count(-1)
+ return self.emergency_retry_nasty_bundle(bundle)
def post_launch_work(self, bundle: BundleDetails) -> Any:
with self.status.lock:
logger.info(
f"{uuid}/{fname}: Fetching results from {username}@{machine} via {cmd}"
)
- try:
- run_silently(cmd)
- except subprocess.CalledProcessError:
- logger.critical(f'Failed to copy {username}@{machine}:{result_file}!')
+
+ # If either of these throws, the exception is handled in
+ # wait_for_process.
+ run_silently(cmd)
run_silently(f'{SSH} {username}@{machine}'
f' "/bin/rm -f {code_file} {result_file}"')
dur = bundle.end_ts - bundle.start_ts
self.histogram.add_item(dur)
- assert bundle.worker is not None
- self.status.record_release_worker_already_locked(
- bundle.worker,
- bundle.uuid,
- was_cancelled
- )
# Only the original worker should unpickle the file contents
# though since it's the only one whose result matters. The
serialized = rb.read()
result = cloudpickle.loads(serialized)
except Exception as e:
- msg = f'Failed to load {result_file}'
+ msg = f'Failed to load {result_file}; this is bad news.'
logger.critical(msg)
- bundle.failure_count += 1
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
self.release_worker(bundle.worker)
+
+ # Re-raise the exception; the code in wait_for_process may
+ # decide to call emergency_retry_nasty_bundle here.
raise Exception(e)
+
+ logger.debug(
+ f'Removing local (master) {code_file} and {result_file}.'
+ )
os.remove(f'{result_file}')
os.remove(f'{code_file}')
)
backup.is_cancelled.set()
- # This is a backup job.
+ # This is a backup job and, by now, we have already fetched
+ # the bundle results.
else:
# Backup results don't matter, they just need to leave the
# result file in the right place for their originals to
# Tell the original to stop if we finished first.
if not was_cancelled:
logger.debug(
- f'{uuid}/{fname}: Notifying original {bundle.src_bundle.uuid} that it\'s cancelled'
+ f'{uuid}/{fname}: Notifying original {bundle.src_bundle.uuid} we beat them to it.'
)
bundle.src_bundle.is_cancelled.set()
assert bundle.worker is not None
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ was_cancelled,
+ )
self.release_worker(bundle.worker)
self.adjust_task_count(-1)
return result
# they will move the result_file to this machine and let
# the original pick them up and unpickle them.
+ def emergency_retry_nasty_bundle(self, bundle: BundleDetails) -> fut.Future:
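+ """A bundle failed in an unexpected way; retry it (avoiding the
+ machine it just ran on) unless it has already failed too many
+ times, in which case give up."""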
+ uuid = bundle.uuid
+ is_original = bundle.src_bundle is None
+ bundle.worker = None
+ avoid_last_machine = bundle.machine
+ bundle.machine = None
+ bundle.username = None
+ bundle.failure_count += 1
+ if is_original:
+ retry_limit = 3
+ else:
+ retry_limit = 2
+
+ if bundle.failure_count > retry_limit:
+ logger.error(
+ f'{uuid}: Tried this bundle too many times already ({retry_limit}x); giving up.'
+ )
+ if is_original:
+ logger.critical(
+ f'{uuid}: This is the original of the bundle; results will be incomplete.'
+ )
+ else:
+ logger.error(f'{uuid}: At least it\'s only a backup; better luck with the others.')
+ return None
+ else:
+ logger.warning(
+ f'>>> Emergency rescheduling {uuid} because of unexpected errors (wtf?!) <<<'
+ )
+ return self.launch(bundle, avoid_last_machine)
+
@overrides
def submit(self,
function: Callable,
return self.process_executor
def remote_pool(self) -> RemoteExecutor:
- logger.info('Looking for some helper machines...')
if self.remote_executor is None:
+ logger.info('Looking for some helper machines...')
pool: List[RemoteWorkerRecord] = []
if self.ping('cheetah.house'):
logger.info('Found cheetah.house')