from typing import Any, Callable, Dict, List, Optional, Set
import cloudpickle # type: ignore
+from overrides import overrides
from ansi import bg, fg, underline, reset
import argparse_utils
import config
-import exec_utils
from decorator_utils import singleton
-import histogram
-import string_utils
+from exec_utils import run_silently, cmd_in_background, cmd_with_timeout
+import histogram as hist
logger = logging.getLogger(__name__)
RSYNC = 'rsync -q --no-motd -W --ignore-existing --timeout=60 --size-only -z'
SSH = 'ssh -oForwardX11=no'
-HIST = histogram.SimpleHistogram(
- histogram.SimpleHistogram.n_evenly_spaced_buckets(
- int(0), int(500), 25
- )
-)
-
-
-def run_local_bundle(fun, *args, **kwargs):
- logger.debug(f"Running local bundle at {fun.__name__}")
- start = time.time()
- result = fun(*args, **kwargs)
- end = time.time()
- duration = end - start
- logger.debug(f"{fun.__name__} finished; used {duration:.1f}s")
- HIST.add_item(duration)
- return result
-
-
-def run_cloud_pickle(pickle):
- fun, args, kwargs = cloudpickle.loads(pickle)
- logger.debug(f"Running pickled bundle at {fun.__name__}")
- start = time.time()
- result = fun(*args, **kwargs)
- end = time.time()
- duration = end - start
- logger.debug(f"{fun.__name__} finished; used {duration:.1f}s")
- return result
def make_cloud_pickle(fun, *args, **kwargs):
- logger.info(f"Making cloudpickled bundle at {fun.__name__}")
+ logger.debug(f"Making cloudpickled bundle at {fun.__name__}")
return cloudpickle.dumps((fun, args, kwargs))
class BaseExecutor(ABC):
- def __init__(self):
- pass
+ def __init__(self, *, title=''):
+ self.title = title
+ self.task_count = 0
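+        # Every executor keeps a histogram of its task runtimes; the
+        # buckets evenly cover 0s to 500s.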
+ self.histogram = hist.SimpleHistogram(
+ hist.SimpleHistogram.n_evenly_spaced_buckets(
+ int(0), int(500), 50
+ )
+ )
@abstractmethod
def submit(self,
wait: bool = True) -> None:
pass
+ def adjust_task_count(self, delta: int) -> None:
+ self.task_count += delta
+ logger.debug(f'Executor current task count is {self.task_count}')
+
class ThreadExecutor(BaseExecutor):
def __init__(self,
max_workers=workers,
thread_name_prefix="thread_executor_helper"
)
- self.job_count = 0
+ def run_local_bundle(self, fun, *args, **kwargs):
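+        # Runs on one of the pool's helper threads: invoke the function,
+        # time it, and update this executor's task count and histogram.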
+ logger.debug(f"Running local bundle at {fun.__name__}")
+ start = time.time()
+ result = fun(*args, **kwargs)
+ end = time.time()
+ self.adjust_task_count(-1)
+ duration = end - start
+ logger.debug(f"{fun.__name__} finished; used {duration:.1f}s")
+ self.histogram.add_item(duration)
+ return result
+
+ @overrides
def submit(self,
function: Callable,
*args,
**kwargs) -> fut.Future:
- self.job_count += 1
- logger.debug(
- f'Submitted work to threadpool; there are now {self.job_count} items.'
- )
+ self.adjust_task_count(+1)
newargs = []
newargs.append(function)
for arg in args:
newargs.append(arg)
return self._thread_pool_executor.submit(
- run_local_bundle,
+ self.run_local_bundle,
*newargs,
**kwargs)
+ @overrides
def shutdown(self,
wait = True) -> None:
- logger.debug("Shutting down threadpool executor.")
- print(HIST)
+ logger.debug(f'Shutting down threadpool executor {self.title}')
+ print(self.histogram)
self._thread_pool_executor.shutdown(wait)
self._process_executor = fut.ProcessPoolExecutor(
max_workers=workers,
)
- self.job_count = 0
+ def run_cloud_pickle(self, pickle):
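+        # Runs in the child process: unpickle the (fun, args, kwargs)
+        # bundle and invoke it.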
+ fun, args, kwargs = cloudpickle.loads(pickle)
+ logger.debug(f"Running pickled bundle at {fun.__name__}")
+ result = fun(*args, **kwargs)
+ self.adjust_task_count(-1)
+ return result
+
+ @overrides
def submit(self,
function: Callable,
*args,
**kwargs) -> fut.Future:
- # Bundle it up before submitting because pickle sucks.
+ start = time.time()
+ self.adjust_task_count(+1)
pickle = make_cloud_pickle(function, *args, **kwargs)
- self.job_count += 1
- logger.debug(
- f'Submitting work to processpool executor; there are now {self.job_count} items.'
+ result = self._process_executor.submit(
+ self.run_cloud_pickle,
+ pickle
+ )
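+        # Once the future resolves, record end-to-end latency (pickling +
+        # queueing + execution) in the histogram.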
+ result.add_done_callback(
+ lambda _: self.histogram.add_item(
+ time.time() - start
+ )
)
- return self._process_executor.submit(run_cloud_pickle, pickle)
+ return result
+ @overrides
def shutdown(self, wait=True) -> None:
- logger.debug('Shutting down processpool executor')
- print(HIST)
+ logger.debug(f'Shutting down processpool executor {self.title}')
self._process_executor.shutdown(wait)
+ print(self.histogram)
+
+ def __getstate__(self):
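+        # ProcessPoolExecutor objects hold locks and queues and can't be
+        # pickled; omit ours so that instances of this class (e.g. its
+        # bound methods) survive cloudpickling.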
+ state = self.__dict__.copy()
+ state['_process_executor'] = None
+ return state
+
+
+class RemoteExecutorException(Exception):
+ """Thrown when a bundle cannot be executed despite several retries."""
+ pass
@dataclass
class BundleDetails:
pickled_code: bytes
uuid: str
+ fname: str
worker: Optional[RemoteWorkerRecord]
username: Optional[str]
machine: Optional[str]
pid: int
start_ts: float
end_ts: float
- too_slow: bool
- super_slow: bool
+ slower_than_local_p95: bool
+ slower_than_global_p95: bool
src_bundle: BundleDetails
is_cancelled: threading.Event
was_cancelled: bool
backup_bundles: Optional[List[BundleDetails]]
failure_count: int
+ def __repr__(self):
+ uuid = self.uuid
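+        # Backup bundle uuids end in '_backup#N'; render those as the last
+        # six digits of the original uuid plus a '_b<N>' tag.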
+ if uuid[-9:-2] == '_backup':
+ uuid = uuid[:-9]
+ suffix = f'{uuid[-6:]}_b{self.uuid[-1:]}'
+ else:
+ suffix = uuid[-6:]
+
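+        # Hash the uuid into a stable color so that all log lines about a
+        # given bundle render consistently.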
+ colorz = [
+ fg('violet red'),
+ fg('red'),
+ fg('orange'),
+ fg('peach orange'),
+ fg('yellow'),
+ fg('marigold yellow'),
+ fg('green yellow'),
+ fg('tea green'),
+ fg('cornflower blue'),
+ fg('turquoise blue'),
+ fg('tropical blue'),
+ fg('lavender purple'),
+ fg('medium purple'),
+ ]
+ c = colorz[int(uuid[-2:], 16) % len(colorz)]
+ fname = self.fname if self.fname is not None else 'nofname'
+ machine = self.machine if self.machine is not None else 'nomachine'
+ return f'{c}{suffix}/{fname}/{machine}{reset()}'
+
class RemoteExecutorStatus:
def __init__(self, total_worker_count: int) -> None:
) -> None:
assert self.lock.locked()
self.known_workers.add(worker)
- self.start_per_bundle[uuid] = time.time()
+ self.start_per_bundle[uuid] = None
x = self.in_flight_bundles_by_worker.get(worker, set())
x.add(uuid)
self.in_flight_bundles_by_worker[worker] = x
assert self.lock.locked()
self.bundle_details_by_uuid[details.uuid] = details
+ def record_release_worker(
+ self,
+ worker: RemoteWorkerRecord,
+ uuid: str,
+ was_cancelled: bool,
+ ) -> None:
+ with self.lock:
+ self.record_release_worker_already_locked(
+ worker, uuid, was_cancelled
+ )
+
def record_release_worker_already_locked(
self,
worker: RemoteWorkerRecord,
self.finished_bundle_timings_per_worker[worker] = x
self.finished_bundle_timings.append(bundle_latency)
+ def record_processing_began(self, uuid: str):
+ with self.lock:
+ self.start_per_bundle[uuid] = time.time()
+
def total_in_flight(self) -> int:
assert self.lock.locked()
total_in_flight = 0
bundle_uuid,
None
)
- pid = str(details.pid) if details is not None else "TBD"
- sec = ts - self.start_per_bundle[bundle_uuid]
- ret += f' (pid={pid}): {bundle_uuid} for {sec:.1f}s so far '
+ pid = str(details.pid) if (details and details.pid != 0) else "TBD"
+ if self.start_per_bundle[bundle_uuid] is not None:
+ sec = ts - self.start_per_bundle[bundle_uuid]
+ ret += f' (pid={pid}): {details} for {sec:.1f}s so far '
+ else:
+ ret += f' {details} setting up / copying data...'
+ sec = 0.0
+
if qworker is not None:
if sec > qworker[1]:
ret += f'{bg("red")}>💻p95{reset()} '
- elif sec > qworker[0]:
- ret += f'{fg("red")}>💻p50{reset()} '
- if qall is not None:
- if sec > qall[1] * 1.5:
- ret += f'{bg("red")}!!!{reset()}'
if details is not None:
- logger.debug(f'Flagging {details.uuid} for another backup')
- details.super_slow = True
- elif sec > qall[1]:
+ details.slower_than_local_p95 = True
+ else:
+ if details is not None:
+ details.slower_than_local_p95 = False
+
+ if qall is not None:
+ if sec > qall[1]:
ret += f'{bg("red")}>∀p95{reset()} '
if details is not None:
- logger.debug(f'Flagging {details.uuid} for a backup')
- details.too_slow = True
- elif sec > qall[0]:
- ret += f'{fg("red")}>∀p50{reset()}'
+ details.slower_than_global_p95 = True
+                    else:
+                        if details is not None:
+                            details.slower_than_global_p95 = False
ret += '\n'
return ret
class RemoteWorkerSelectionPolicy(ABC):
def register_worker_pool(self, workers):
- random.seed()
self.workers = workers
@abstractmethod
class WeightedRandomRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy):
+ @overrides
def is_worker_available(self) -> bool:
for worker in self.workers:
if worker.count > 0:
return True
return False
+ @overrides
def acquire_worker(
self,
machine_to_avoid = None
def __init__(self) -> None:
self.index = 0
+ @overrides
def is_worker_available(self) -> bool:
for worker in self.workers:
if worker.count > 0:
return True
return False
+ @overrides
def acquire_worker(
self,
machine_to_avoid: str = None
policy: RemoteWorkerSelectionPolicy) -> None:
super().__init__()
self.workers = workers
+ self.policy = policy
self.worker_count = 0
for worker in self.workers:
self.worker_count += worker.count
if self.worker_count <= 0:
msg = f"We need somewhere to schedule work; count was {self.worker_count}"
logger.critical(msg)
- raise Exception(msg)
- self.policy = policy
+ raise RemoteExecutorException(msg)
self.policy.register_worker_pool(self.workers)
self.cv = threading.Condition()
+ logger.debug(f'Creating {self.worker_count} local threads, one per remote worker.')
self._helper_executor = fut.ThreadPoolExecutor(
thread_name_prefix="remote_executor_helper",
max_workers=self.worker_count,
)
self.status = RemoteExecutorStatus(self.worker_count)
self.total_bundles_submitted = 0
- logger.debug(
- f'Creating remote processpool with {self.worker_count} remote endpoints.'
- )
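+        # State used by heartbeat() to rate-limit the scheduling of backup
+        # bundles: at most one per second, and never concurrently.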
+ self.backup_lock = threading.Lock()
+ self.last_backup = None
def is_worker_available(self) -> bool:
return self.policy.is_worker_available()
# Regular progress report
self.status.periodic_dump(self.total_bundles_submitted)
- # Look for bundles to reschedule
- if len(self.status.finished_bundle_timings) > 7:
- for worker, bundle_uuids in self.status.in_flight_bundles_by_worker.items():
- for uuid in bundle_uuids:
- bundle = self.status.bundle_details_by_uuid.get(uuid, None)
- if (
- bundle is not None and
- bundle.too_slow and
- bundle.src_bundle is None and
- config.config['executors_schedule_remote_backups']
- ):
- self.consider_backup_for_bundle(bundle)
-
- def consider_backup_for_bundle(self, bundle: BundleDetails) -> None:
- assert self.status.lock.locked()
- if (
- bundle.too_slow
- and len(bundle.backup_bundles) == 0 # one backup per
- ):
- msg = f"*** Rescheduling {bundle.pid}/{bundle.uuid} ***"
- logger.debug(msg)
- self.schedule_backup_for_bundle(bundle)
- return
- elif (
- bundle.super_slow
- and len(bundle.backup_bundles) < 2 # two backups in dire situations
- and self.status.total_idle() > 4
- ):
- msg = f"*** Rescheduling {bundle.pid}/{bundle.uuid} ***"
- logger.debug(msg)
- self.schedule_backup_for_bundle(bundle)
- return
+ # Look for bundles to reschedule.
+ num_done = len(self.status.finished_bundle_timings)
+ num_idle_workers = self.worker_count - self.task_count
+ now = time.time()
+ if (
+ config.config['executors_schedule_remote_backups']
+ and num_done > 2
+ and num_idle_workers > 1
+ and (self.last_backup is None or (now - self.last_backup > 1.0))
+ and self.backup_lock.acquire(blocking=False)
+ ):
+ try:
+ assert self.backup_lock.locked()
+
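+                # Score every in-flight bundle: slower machines (lower
+                # weight) start higher, long or anomalously slow runtimes
+                # add boosts, and existing backups apply a discount; the
+                # highest scorer gets a backup.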
+ bundle_to_backup = None
+ best_score = None
+ for worker, bundle_uuids in self.status.in_flight_bundles_by_worker.items():
+ # Prefer to schedule backups of bundles on slower machines.
+ base_score = 0
+ for record in self.workers:
+ if worker.machine == record.machine:
+ base_score = float(record.weight)
+ base_score = 1.0 / base_score
+ base_score *= 200.0
+ base_score = int(base_score)
+ break
+
+ for uuid in bundle_uuids:
+ bundle = self.status.bundle_details_by_uuid.get(uuid, None)
+ if (
+ bundle is not None
+ and bundle.src_bundle is None
+ and bundle.backup_bundles is not None
+ ):
+ score = base_score
+
+ # Schedule backups of bundles running longer; especially those
+ # that are unexpectedly slow.
+ start_ts = self.status.start_per_bundle[uuid]
+ if start_ts is not None:
+ runtime = now - start_ts
+ score += runtime
+ logger.debug(f'score[{bundle}] => {score} # latency boost')
+
+ if bundle.slower_than_local_p95:
+ score += runtime / 2
+ logger.debug(f'score[{bundle}] => {score} # >worker p95')
+
+ if bundle.slower_than_global_p95:
+ score += runtime / 2
+ logger.debug(f'score[{bundle}] => {score} # >global p95')
+
+ # Prefer backups of bundles that don't have backups already.
+ backup_count = len(bundle.backup_bundles)
+ if backup_count == 0:
+ score *= 2
+ elif backup_count == 1:
+ score /= 2
+ elif backup_count == 2:
+ score /= 8
+ else:
+ score = 0
+ logger.debug(f'score[{bundle}] => {score} # {backup_count} dup backup factor')
+
+ if (
+ score != 0
+ and (best_score is None or score > best_score)
+ ):
+ bundle_to_backup = bundle
+ assert bundle is not None
+ assert bundle.backup_bundles is not None
+ assert bundle.src_bundle is None
+ best_score = score
+
+ if bundle_to_backup is not None:
+ self.last_backup = now
+ logger.info(f'=====> SCHEDULING BACKUP {bundle_to_backup} (score={best_score:.1f}) <=====')
+ self.schedule_backup_for_bundle(bundle_to_backup)
+ finally:
+ self.backup_lock.release()
def check_if_cancelled(self, bundle: BundleDetails) -> bool:
with self.status.lock:
return True
return False
- def launch(self, bundle: BundleDetails) -> Any:
+ def launch(self, bundle: BundleDetails, override_avoid_machine=None) -> Any:
"""Find a worker for bundle or block until one is available."""
+ self.adjust_task_count(+1)
uuid = bundle.uuid
hostname = bundle.hostname
- avoid_machine = None
+ avoid_machine = override_avoid_machine
+ is_original = bundle.src_bundle is None
# Try not to schedule a backup on the same host as the original.
- if bundle.src_bundle is not None:
+ if avoid_machine is None and bundle.src_bundle is not None:
avoid_machine = bundle.src_bundle.machine
worker = None
while worker is None:
worker = self.find_available_worker_or_block(avoid_machine)
+
+ # Ok, found a worker.
bundle.worker = worker
machine = bundle.machine = worker.machine
username = bundle.username = worker.username
+
self.status.record_acquire_worker(worker, uuid)
- logger.debug(f'Running bundle {uuid} on {worker}...')
+ logger.debug(f'{bundle}: Running bundle on {worker}...')
# Before we do any work, make sure the bundle is still viable.
if self.check_if_cancelled(bundle):
return self.post_launch_work(bundle)
except Exception as e:
logger.exception(e)
- logger.info(f"Bundle {uuid} seems to have failed?!")
- if bundle.failure_count < config.config['executors_max_bundle_failures']:
- return self.launch(bundle)
+ logger.error(
+ f'{bundle}: bundle says it\'s cancelled upfront but no results?!'
+ )
+ assert bundle.worker is not None
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
+ self.release_worker(bundle.worker)
+ self.adjust_task_count(-1)
+ if is_original:
+ # Weird. We are the original owner of this
+ # bundle. For it to have been cancelled, a backup
+ # must have already started and completed before
+                # we even got started. Moreover, the backup says
+ # it is done but we can't find the results it
+ # should have copied over. Reschedule the whole
+ # thing.
+ return self.emergency_retry_nasty_bundle(bundle)
else:
- logger.info(f"Bundle {uuid} is poison, giving up on it.")
+ # Expected(?). We're a backup and our bundle is
+ # cancelled before we even got started. Something
+                # went bad in post_launch_work (I actually don't
+ # see what?) but probably not worth worrying
+ # about.
return None
- # Send input to machine if it's not local.
+ # Send input code / data to worker machine if it's not local.
if hostname not in machine:
- cmd = f'{RSYNC} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
- logger.info(f"Copying work to {worker} via {cmd}")
- exec_utils.run_silently(cmd)
+ try:
+ cmd = f'{RSYNC} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
+ start_ts = time.time()
+ logger.info(f"{bundle}: Copying work to {worker} via {cmd}.")
+ run_silently(cmd)
+ xfer_latency = time.time() - start_ts
+ logger.info(f"{bundle}: Copying done to {worker} in {xfer_latency:.1f}s.")
+ except Exception as e:
+ assert bundle.worker is not None
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
+ self.release_worker(bundle.worker)
+ self.adjust_task_count(-1)
+ if is_original:
+ # Weird. We tried to copy the code to the worker and it failed...
+ # And we're the original bundle. We have to retry.
+ logger.exception(e)
+ logger.error(
+ f'{bundle}: Failed to send instructions to the worker machine?! ' +
+ 'This is not expected; we\'re the original bundle so this shouldn\'t ' +
+ 'be a race condition. Attempting an emergency retry...'
+ )
+ return self.emergency_retry_nasty_bundle(bundle)
+ else:
+ # This is actually expected; we're a backup.
+ # There's a race condition where someone else
+ # already finished the work and removed the source
+ # code file before we could copy it. No biggie.
+ logger.warning(
+ f'{bundle}: Failed to send instructions to the worker machine... ' +
+ 'We\'re a backup and this may be caused by the original (or some ' +
+ 'other backup) already finishing this work. Ignoring this.'
+ )
+ return None
- # Do it.
+ # Kick off the work. Note that if this fails we let
+ # wait_for_process deal with it.
+ self.status.record_processing_began(uuid)
cmd = (f'{SSH} {bundle.username}@{bundle.machine} '
- f'"source remote-execution/bin/activate &&'
+ f'"source py38-venv/bin/activate &&'
f' /home/scott/lib/python_modules/remote_worker.py'
f' --code_file {bundle.code_file} --result_file {bundle.result_file}"')
- p = exec_utils.cmd_in_background(cmd, silent=True)
+ logger.debug(f'{bundle}: Executing {cmd} in the background to kick off work...')
+ p = cmd_in_background(cmd, silent=True)
bundle.pid = pid = p.pid
- logger.info(f"Running {cmd} in the background as process {pid}")
+ logger.debug(f'{bundle}: Local ssh process pid={pid}; remote worker is {machine}.')
+ return self.wait_for_process(p, bundle, 0)
+
+ def wait_for_process(self, p: subprocess.Popen, bundle: BundleDetails, depth: int) -> Any:
+ machine = bundle.machine
+ pid = p.pid
+ if depth > 3:
+ logger.error(
+ f"I've gotten repeated errors waiting on this bundle; giving up on pid={pid}."
+ )
+ p.terminate()
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
+ self.release_worker(bundle.worker)
+ self.adjust_task_count(-1)
+ return self.emergency_retry_nasty_bundle(bundle)
+ # Spin until either the ssh job we scheduled finishes the
+ # bundle or some backup worker signals that they finished it
+ # before we could.
while True:
try:
- p.wait(timeout=0.5)
+ p.wait(timeout=0.25)
except subprocess.TimeoutExpired:
self.heartbeat()
-
- # Both source and backup bundles can be cancelled by
- # the other depending on which finishes first.
if self.check_if_cancelled(bundle):
- p.terminate()
+ logger.info(
+                        f'{bundle}: another worker finished the bundle; checking it out...'
+ )
break
else:
- logger.debug(
- f"{pid}/{bundle.uuid} has finished its work normally."
+ logger.info(
+ f"{bundle}: pid {pid} ({machine}) our ssh finished, checking it out..."
)
+ p = None
break
+ # If we get here we believe the bundle is done; either the ssh
+ # subprocess finished (hopefully successfully) or we noticed
+ # that some other worker seems to have completed the bundle
+ # and we're bailing out.
try:
- return self.post_launch_work(bundle)
+ ret = self.post_launch_work(bundle)
+ if ret is not None and p is not None:
+ p.terminate()
+ return ret
+
+ # Something went wrong; e.g. we could not copy the results
+ # back, cleanup after ourselves on the remote machine, or
+    # unpickle the results we got from the remote machine. If we
+ # still have an active ssh subprocess, keep waiting on it.
+ # Otherwise, time for an emergency reschedule.
except Exception as e:
logger.exception(e)
- logger.info(f"Bundle {uuid} seems to have failed?!")
- if bundle.failure_count < config.config['executors_max_bundle_failures']:
- return self.launch(bundle)
- logger.info(f"Bundle {uuid} is poison, giving up on it.")
- return None
+ logger.error(f'{bundle}: Something unexpected just happened...')
+ if p is not None:
+ logger.warning(
+ f"{bundle}: Failed to wrap up \"done\" bundle, re-waiting on active ssh."
+ )
+ return self.wait_for_process(p, bundle, depth + 1)
+ else:
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
+ self.release_worker(bundle.worker)
+ self.adjust_task_count(-1)
+ return self.emergency_retry_nasty_bundle(bundle)
def post_launch_work(self, bundle: BundleDetails) -> Any:
with self.status.lock:
# Whether original or backup, if we finished first we must
# fetch the results if the computation happened on a
# remote machine.
+ bundle.end_ts = time.time()
if not was_cancelled:
assert bundle.machine is not None
if bundle.hostname not in bundle.machine:
cmd = f'{RSYNC} {username}@{machine}:{result_file} {result_file} 2>/dev/null'
logger.info(
- f"Fetching results from {username}@{machine} via {cmd}"
+ f"{bundle}: Fetching results from {username}@{machine} via {cmd}"
)
- try:
- exec_utils.run_silently(cmd)
- except subprocess.CalledProcessError:
- pass
- exec_utils.run_silently(f'{SSH} {username}@{machine}'
- f' "/bin/rm -f {code_file} {result_file}"')
- bundle.end_ts = time.time()
- assert bundle.worker is not None
- self.status.record_release_worker_already_locked(
- bundle.worker,
- bundle.uuid,
- was_cancelled
- )
- if not was_cancelled:
- dur = bundle.end_ts - bundle.start_ts
- HIST.add_item(dur)
- # Original or not, the results should be back on the local
- # machine. Are they?
- if not os.path.exists(result_file):
- msg = f'{result_file} unexpectedly missing, wtf?!'
- logger.critical(msg)
- bundle.failure_count += 1
- self.release_worker(bundle.worker)
- raise Exception(msg)
+ # If either of these throw they are handled in
+ # wait_for_process.
+ run_silently(cmd)
+ run_silently(f'{SSH} {username}@{machine}'
+ f' "/bin/rm -f {code_file} {result_file}"')
+ dur = bundle.end_ts - bundle.start_ts
+ self.histogram.add_item(dur)
# Only the original worker should unpickle the file contents
- # though since it's the only one whose result matters.
+ # though since it's the only one whose result matters. The
+ # original is also the only job that may delete result_file
+ # from disk. Note that the original may have been cancelled
+ # if one of the backups finished first; it still must read the
+ # result from disk.
if is_original:
- logger.debug(f"Unpickling {result_file}.")
+ logger.debug(f"{bundle}: Unpickling {result_file}.")
try:
- with open(f'{result_file}', 'rb') as rb:
+ with open(result_file, 'rb') as rb:
serialized = rb.read()
result = cloudpickle.loads(serialized)
except Exception as e:
- msg = f'Failed to load {result_file}'
+ msg = f'Failed to load {result_file}, this is bad news.'
logger.critical(msg)
- bundle.failure_count += 1
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ True,
+ )
self.release_worker(bundle.worker)
+
+ # Re-raise the exception; the code in wait_for_process may
+ # decide to emergency_retry_nasty_bundle here.
raise Exception(e)
+
+ logger.debug(
+ f'Removing local (master) {code_file} and {result_file}.'
+ )
os.remove(f'{result_file}')
os.remove(f'{code_file}')
if bundle.backup_bundles is not None:
for backup in bundle.backup_bundles:
logger.debug(
- f'Notifying backup {backup.uuid} that it is cancelled'
+ f'{bundle}: Notifying backup {backup.uuid} that it\'s cancelled'
)
backup.is_cancelled.set()
- # This is a backup.
+ # This is a backup job and, by now, we have already fetched
+ # the bundle results.
else:
# Backup results don't matter, they just need to leave the
- # result file in the right place for their original to
- # read later.
+ # result file in the right place for their originals to
+ # read/unpickle later.
result = None
# Tell the original to stop if we finished first.
if not was_cancelled:
logger.debug(
- f'Notifying original {bundle.src_bundle.uuid} that it is cancelled'
+ f'{bundle}: Notifying original {bundle.src_bundle.uuid} we beat them to it.'
)
bundle.src_bundle.is_cancelled.set()
assert bundle.worker is not None
+ self.status.record_release_worker(
+ bundle.worker,
+ bundle.uuid,
+ was_cancelled,
+ )
self.release_worker(bundle.worker)
+ self.adjust_task_count(-1)
return result
- def create_original_bundle(self, pickle):
- uuid = string_utils.generate_uuid(as_hex=True)
+ def create_original_bundle(self, pickle, fname: str):
+ from string_utils import generate_uuid
+ uuid = generate_uuid(as_hex=True)
code_file = f'/tmp/{uuid}.code.bin'
result_file = f'/tmp/{uuid}.result.bin'
bundle = BundleDetails(
pickled_code = pickle,
uuid = uuid,
+ fname = fname,
worker = None,
username = None,
machine = None,
pid = 0,
start_ts = time.time(),
end_ts = 0.0,
- too_slow = False,
- super_slow = False,
+ slower_than_local_p95 = False,
+ slower_than_global_p95 = False,
src_bundle = None,
is_cancelled = threading.Event(),
was_cancelled = False,
failure_count = 0,
)
self.status.record_bundle_details(bundle)
- logger.debug(f'Created original bundle {uuid}')
+ logger.debug(f'{bundle}: Created an original bundle')
return bundle
def create_backup_bundle(self, src_bundle: BundleDetails):
backup_bundle = BundleDetails(
pickled_code = src_bundle.pickled_code,
uuid = uuid,
+ fname = src_bundle.fname,
worker = None,
username = None,
machine = None,
pid = 0,
start_ts = time.time(),
end_ts = 0.0,
- too_slow = False,
- super_slow = False,
+ slower_than_local_p95 = False,
+ slower_than_global_p95 = False,
src_bundle = src_bundle,
is_cancelled = threading.Event(),
was_cancelled = False,
)
src_bundle.backup_bundles.append(backup_bundle)
self.status.record_bundle_details_already_locked(backup_bundle)
- logger.debug(f'Created backup bundle {uuid}')
+ logger.debug(f'{backup_bundle}: Created a backup bundle')
return backup_bundle
def schedule_backup_for_bundle(self,
src_bundle: BundleDetails):
assert self.status.lock.locked()
+ assert src_bundle is not None
backup_bundle = self.create_backup_bundle(src_bundle)
logger.debug(
- f'Scheduling backup bundle {backup_bundle.uuid} for execution'
+ f'{backup_bundle.uuid}/{backup_bundle.fname}: Scheduling backup for execution...'
)
self._helper_executor.submit(self.launch, backup_bundle)
# they will move the result_file to this machine and let
# the original pick them up and unpickle them.
+ def emergency_retry_nasty_bundle(self, bundle: BundleDetails) -> fut.Future:
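+        # Last-ditch path: reset the bundle's worker assignment and
+        # relaunch it, avoiding the machine that just failed, unless the
+        # bundle has already exceeded its retry limit.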
+ is_original = bundle.src_bundle is None
+ bundle.worker = None
+ avoid_last_machine = bundle.machine
+ bundle.machine = None
+ bundle.username = None
+ bundle.failure_count += 1
+ if is_original:
+ retry_limit = 3
+ else:
+ retry_limit = 2
+
+ if bundle.failure_count > retry_limit:
+ logger.error(
+ f'{bundle}: Tried this bundle too many times already ({retry_limit}x); giving up.'
+ )
+ if is_original:
+ raise RemoteExecutorException(
+ f'{bundle}: This bundle can\'t be completed despite several backups and retries'
+ )
+ else:
+ logger.error(f'{bundle}: At least it\'s only a backup; better luck with the others.')
+ return None
+ else:
+ logger.warning(
+                f'>>> Emergency rescheduling {bundle} because of unexpected errors (wtf?!) <<<'
+ )
+ return self.launch(bundle, avoid_last_machine)
+
+ @overrides
def submit(self,
function: Callable,
*args,
**kwargs) -> fut.Future:
pickle = make_cloud_pickle(function, *args, **kwargs)
- bundle = self.create_original_bundle(pickle)
+ bundle = self.create_original_bundle(pickle, function.__name__)
self.total_bundles_submitted += 1
- logger.debug(
- f'Submitted work to remote executor; {self.total_bundles_submitted} items now submitted'
- )
return self._helper_executor.submit(self.launch, bundle)
+ @overrides
def shutdown(self, wait=True) -> None:
self._helper_executor.shutdown(wait)
- print(HIST)
+        logger.debug(f'Shutting down RemoteExecutor {self.title}')
+ print(self.histogram)
@singleton
self.remote_executor: Optional[RemoteExecutor] = None
def ping(self, host) -> bool:
- command = ['ping', '-c', '1', host]
- return subprocess.call(
- command,
- stdout=subprocess.DEVNULL,
- stderr=subprocess.DEVNULL,
- ) == 0
+ logger.debug(f'RUN> ping -c 1 {host}')
+ try:
+ x = cmd_with_timeout(
+ f'ping -c 1 {host} >/dev/null 2>/dev/null',
+ timeout_seconds=1.0
+ )
+ return x == 0
+ except Exception:
+ return False
def thread_pool(self) -> ThreadExecutor:
if self.thread_executor is None:
def remote_pool(self) -> RemoteExecutor:
if self.remote_executor is None:
+ logger.info('Looking for some helper machines...')
pool: List[RemoteWorkerRecord] = []
if self.ping('cheetah.house'):
+ logger.info('Found cheetah.house')
pool.append(
RemoteWorkerRecord(
username = 'scott',
machine = 'cheetah.house',
- weight = 12,
- count = 4,
+ weight = 25,
+ count = 6,
),
)
- if self.ping('video.house'):
+ if self.ping('meerkat.cabin'):
+ logger.info('Found meerkat.cabin')
pool.append(
RemoteWorkerRecord(
username = 'scott',
- machine = 'video.house',
- weight = 1,
- count = 4,
+ machine = 'meerkat.cabin',
+ weight = 5,
+ count = 2,
),
)
- if self.ping('wannabe.house'):
+ # if self.ping('kiosk.house'):
+ # logger.info('Found kiosk.house')
+ # pool.append(
+ # RemoteWorkerRecord(
+ # username = 'pi',
+ # machine = 'kiosk.house',
+ # weight = 1,
+ # count = 2,
+ # ),
+ # )
+ if self.ping('hero.house'):
+ logger.info('Found hero.house')
pool.append(
RemoteWorkerRecord(
username = 'scott',
- machine = 'wannabe.house',
- weight = 2,
- count = 4,
+ machine = 'hero.house',
+ weight = 30,
+ count = 10,
),
)
- if self.ping('meerkat.cabin'):
+ if self.ping('puma.cabin'):
+ logger.info('Found puma.cabin')
pool.append(
RemoteWorkerRecord(
username = 'scott',
- machine = 'meerkat.cabin',
- weight = 7,
- count = 2,
+ machine = 'puma.cabin',
+ weight = 25,
+ count = 6,
),
)
if self.ping('backup.house'):
+ logger.info('Found backup.house')
pool.append(
RemoteWorkerRecord(
username = 'scott',
machine = 'backup.house',
- weight = 1,
- count = 4,
- ),
- )
- if self.ping('puma.cabin'):
- pool.append(
- RemoteWorkerRecord(
- username = 'scott',
- machine = 'puma.cabin',
- weight = 12,
- count = 4,
+ weight = 3,
+ count = 2,
),
)
+
+ # The controller machine has a lot to do; go easy on it.
+ for record in pool:
+ if record.machine == platform.node() and record.count > 1:
+ logger.info(f'Reducing workload for {record.machine}.')
+ record.count = 1
+
policy = WeightedRandomRemoteWorkerSelectionPolicy()
policy.register_worker_pool(pool)
self.remote_executor = RemoteExecutor(pool, policy)