X-Git-Url: https://wannabe.guru.org/gitweb/?a=blobdiff_plain;f=executors.py;h=c11bd546cc3b1b0afcefff8c3215aab2707db147;hb=ed8fa2b10b0177b15b7423263bdd390efde2f0c8;hp=b9c0748391f733e0719a744d9b30280c34bb30ee;hpb=497fb9e21f45ec08e1486abaee6dfa7b20b8a691;p=python_utils.git diff --git a/executors.py b/executors.py index b9c0748..c11bd54 100644 --- a/executors.py +++ b/executors.py @@ -17,14 +17,14 @@ import time from typing import Any, Callable, Dict, List, Optional, Set import cloudpickle # type: ignore +from overrides import overrides from ansi import bg, fg, underline, reset import argparse_utils import config -import exec_utils from decorator_utils import singleton -import histogram -import string_utils +from exec_utils import run_silently, cmd_in_background, cmd_with_timeout +import histogram as hist logger = logging.getLogger(__name__) @@ -52,48 +52,32 @@ parser.add_argument( action=argparse_utils.ActionNoYes, help='Should we schedule duplicative backup work if a remote bundle is slow', ) - -rsync = 'rsync -q --no-motd -W --ignore-existing --timeout=60 --size-only -z' -ssh = 'ssh -oForwardX11=no' - - -hist = histogram.SimpleHistogram( - histogram.SimpleHistogram.n_evenly_spaced_buckets( - int(0), int(500), 25 - ) +parser.add_argument( + '--executors_max_bundle_failures', + type=int, + default=3, + metavar='#FAILURES', + help='Maximum number of failures before giving up on a bundle', ) - -def run_local_bundle(fun, *args, **kwargs): - logger.debug(f"Running local bundle at {fun.__name__}") - start = time.time() - result = fun(*args, **kwargs) - end = time.time() - duration = end - start - logger.debug(f"{fun.__name__} finished; used {duration:.1f}s") - hist.add_item(duration) - return result - - -def run_cloud_pickle(pickle): - fun, args, kwargs = cloudpickle.loads(pickle) - logger.debug(f"Running pickled bundle at {fun.__name__}") - start = time.time() - result = fun(*args, **kwargs) - end = time.time() - duration = end - start - logger.debug(f"{fun.__name__} finished; used {duration:.1f}s") - return result +RSYNC = 'rsync -q --no-motd -W --ignore-existing --timeout=60 --size-only -z' +SSH = 'ssh -oForwardX11=no' def make_cloud_pickle(fun, *args, **kwargs): - logger.info(f"Making cloudpickled bundle at {fun.__name__}") + logger.debug(f"Making cloudpickled bundle at {fun.__name__}") return cloudpickle.dumps((fun, args, kwargs)) class BaseExecutor(ABC): - def __init__(self): - pass + def __init__(self, *, title=''): + self.title = title + self.task_count = 0 + self.histogram = hist.SimpleHistogram( + hist.SimpleHistogram.n_evenly_spaced_buckets( + int(0), int(500), 50 + ) + ) @abstractmethod def submit(self, @@ -107,6 +91,10 @@ class BaseExecutor(ABC): wait: bool = True) -> None: pass + def adjust_task_count(self, delta: int) -> None: + self.task_count += delta + logger.debug(f'Executor current task count is {self.task_count}') + class ThreadExecutor(BaseExecutor): def __init__(self, @@ -122,29 +110,38 @@ class ThreadExecutor(BaseExecutor): max_workers=workers, thread_name_prefix="thread_executor_helper" ) - self.job_count = 0 + def run_local_bundle(self, fun, *args, **kwargs): + logger.debug(f"Running local bundle at {fun.__name__}") + start = time.time() + result = fun(*args, **kwargs) + end = time.time() + self.adjust_task_count(-1) + duration = end - start + logger.debug(f"{fun.__name__} finished; used {duration:.1f}s") + self.histogram.add_item(duration) + return result + + @overrides def submit(self, function: Callable, *args, **kwargs) -> fut.Future: - self.job_count += 1 - 
logger.debug( - f'Submitted work to threadpool; there are now {self.job_count} items.' - ) + self.adjust_task_count(+1) newargs = [] newargs.append(function) for arg in args: newargs.append(arg) return self._thread_pool_executor.submit( - run_local_bundle, + self.run_local_bundle, *newargs, **kwargs) + @overrides def shutdown(self, wait = True) -> None: - logger.debug("Shutting down threadpool executor.") - print(hist) + logger.debug(f'Shutting down threadpool executor {self.title}') + print(self.histogram) self._thread_pool_executor.shutdown(wait) @@ -161,24 +158,48 @@ class ProcessExecutor(BaseExecutor): self._process_executor = fut.ProcessPoolExecutor( max_workers=workers, ) - self.job_count = 0 + def run_cloud_pickle(self, pickle): + fun, args, kwargs = cloudpickle.loads(pickle) + logger.debug(f"Running pickled bundle at {fun.__name__}") + result = fun(*args, **kwargs) + self.adjust_task_count(-1) + return result + + @overrides def submit(self, function: Callable, *args, **kwargs) -> fut.Future: - # Bundle it up before submitting because pickle sucks. + start = time.time() + self.adjust_task_count(+1) pickle = make_cloud_pickle(function, *args, **kwargs) - self.job_count += 1 - logger.debug( - f'Submitting work to processpool executor; there are now {self.job_count} items.' + result = self._process_executor.submit( + self.run_cloud_pickle, + pickle ) - return self._process_executor.submit(run_cloud_pickle, pickle) + result.add_done_callback( + lambda _: self.histogram.add_item( + time.time() - start + ) + ) + return result + @overrides def shutdown(self, wait=True) -> None: - logger.debug('Shutting down processpool executor') - print(hist) + logger.debug(f'Shutting down processpool executor {self.title}') self._process_executor.shutdown(wait) + print(self.histogram) + + def __getstate__(self): + state = self.__dict__.copy() + state['_process_executor'] = None + return state + + +class RemoteExecutorException(Exception): + """Thrown when a bundle cannot be executed despite several retries.""" + pass @dataclass @@ -199,6 +220,7 @@ class RemoteWorkerRecord: class BundleDetails: pickled_code: bytes uuid: str + fname: str worker: Optional[RemoteWorkerRecord] username: Optional[str] machine: Optional[str] @@ -208,12 +230,41 @@ class BundleDetails: pid: int start_ts: float end_ts: float - too_slow: bool - super_slow: bool + slower_than_local_p95: bool + slower_than_global_p95: bool src_bundle: BundleDetails is_cancelled: threading.Event was_cancelled: bool backup_bundles: Optional[List[BundleDetails]] + failure_count: int + + def __repr__(self): + uuid = self.uuid + if uuid[-9:-2] == '_backup': + uuid = uuid[:-9] + suffix = f'{uuid[-6:]}_b{self.uuid[-1:]}' + else: + suffix = uuid[-6:] + + colorz = [ + fg('violet red'), + fg('red'), + fg('orange'), + fg('peach orange'), + fg('yellow'), + fg('marigold yellow'), + fg('green yellow'), + fg('tea green'), + fg('cornflower blue'), + fg('turquoise blue'), + fg('tropical blue'), + fg('lavender purple'), + fg('medium purple'), + ] + c = colorz[int(uuid[-2:], 16) % len(colorz)] + fname = self.fname if self.fname is not None else 'nofname' + machine = self.machine if self.machine is not None else 'nomachine' + return f'{c}{suffix}/{fname}/{machine}{reset()}' class RemoteExecutorStatus: @@ -257,7 +308,7 @@ class RemoteExecutorStatus: ) -> None: assert self.lock.locked() self.known_workers.add(worker) - self.start_per_bundle[uuid] = time.time() + self.start_per_bundle[uuid] = None x = self.in_flight_bundles_by_worker.get(worker, set()) x.add(uuid) 
self.in_flight_bundles_by_worker[worker] = x @@ -274,6 +325,17 @@ class RemoteExecutorStatus: assert self.lock.locked() self.bundle_details_by_uuid[details.uuid] = details + def record_release_worker( + self, + worker: RemoteWorkerRecord, + uuid: str, + was_cancelled: bool, + ) -> None: + with self.lock: + self.record_release_worker_already_locked( + worker, uuid, was_cancelled + ) + def record_release_worker_already_locked( self, worker: RemoteWorkerRecord, @@ -291,6 +353,10 @@ class RemoteExecutorStatus: self.finished_bundle_timings_per_worker[worker] = x self.finished_bundle_timings.append(bundle_latency) + def record_processing_began(self, uuid: str): + with self.lock: + self.start_per_bundle[uuid] = time.time() + def total_in_flight(self) -> int: assert self.lock.locked() total_in_flight = 0 @@ -342,27 +408,30 @@ class RemoteExecutorStatus: bundle_uuid, None ) - pid = str(details.pid) if details is not None else "TBD" - sec = ts - self.start_per_bundle[bundle_uuid] - ret += f' (pid={pid}): {bundle_uuid} for {sec:.1f}s so far ' + pid = str(details.pid) if (details and details.pid != 0) else "TBD" + if self.start_per_bundle[bundle_uuid] is not None: + sec = ts - self.start_per_bundle[bundle_uuid] + ret += f' (pid={pid}): {details} for {sec:.1f}s so far ' + else: + ret += f' {details} setting up / copying data...' + sec = 0.0 + if qworker is not None: if sec > qworker[1]: ret += f'{bg("red")}>💻p95{reset()} ' - elif sec > qworker[0]: - ret += f'{fg("red")}>💻p50{reset()} ' - if qall is not None: - if sec > qall[1] * 1.5: - ret += f'{bg("red")}!!!{reset()}' if details is not None: - logger.debug(f'Flagging {details.uuid} for another backup') - details.super_slow = True - elif sec > qall[1]: + details.slower_than_local_p95 = True + else: + if details is not None: + details.slower_than_local_p95 = False + + if qall is not None: + if sec > qall[1]: ret += f'{bg("red")}>∀p95{reset()} ' if details is not None: - logger.debug(f'Flagging {details.uuid} for a backup') - details.too_slow = True - elif sec > qall[0]: - ret += f'{fg("red")}>∀p50{reset()}' + details.slower_than_global_p95 = True + else: + details.slower_than_global_p95 = False ret += '\n' return ret @@ -380,7 +449,6 @@ class RemoteExecutorStatus: class RemoteWorkerSelectionPolicy(ABC): def register_worker_pool(self, workers): - random.seed() self.workers = workers @abstractmethod @@ -396,12 +464,14 @@ class RemoteWorkerSelectionPolicy(ABC): class WeightedRandomRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy): + @overrides def is_worker_available(self) -> bool: for worker in self.workers: if worker.count > 0: return True return False + @overrides def acquire_worker( self, machine_to_avoid = None @@ -428,12 +498,14 @@ class RoundRobinRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy): def __init__(self) -> None: self.index = 0 + @overrides def is_worker_available(self) -> bool: for worker in self.workers: if worker.count > 0: return True return False + @overrides def acquire_worker( self, machine_to_avoid: str = None @@ -463,25 +535,25 @@ class RemoteExecutor(BaseExecutor): policy: RemoteWorkerSelectionPolicy) -> None: super().__init__() self.workers = workers + self.policy = policy self.worker_count = 0 for worker in self.workers: self.worker_count += worker.count if self.worker_count <= 0: msg = f"We need somewhere to schedule work; count was {self.worker_count}" logger.critical(msg) - raise Exception(msg) - self.policy = policy + raise RemoteExecutorException(msg) self.policy.register_worker_pool(self.workers) self.cv = 
threading.Condition() + logger.debug(f'Creating {self.worker_count} local threads, one per remote worker.') self._helper_executor = fut.ThreadPoolExecutor( thread_name_prefix="remote_executor_helper", max_workers=self.worker_count, ) self.status = RemoteExecutorStatus(self.worker_count) self.total_bundles_submitted = 0 - logger.debug( - f'Creating remote processpool with {self.worker_count} remote endpoints.' - ) + self.backup_lock = threading.Lock() + self.last_backup = None def is_worker_available(self) -> bool: return self.policy.is_worker_available() @@ -517,38 +589,86 @@ class RemoteExecutor(BaseExecutor): # Regular progress report self.status.periodic_dump(self.total_bundles_submitted) - # Look for bundles to reschedule - if len(self.status.finished_bundle_timings) > 7: - for worker, bundle_uuids in self.status.in_flight_bundles_by_worker.items(): - for uuid in bundle_uuids: - bundle = self.status.bundle_details_by_uuid.get(uuid, None) - if ( - bundle is not None and - bundle.too_slow and - bundle.src_bundle is None and - config.config['executors_schedule_remote_backups'] - ): - self.consider_backup_for_bundle(bundle) - - def consider_backup_for_bundle(self, bundle: BundleDetails) -> None: - assert self.status.lock.locked() - if ( - bundle.too_slow - and len(bundle.backup_bundles) == 0 # one backup per - ): - msg = f"*** Rescheduling {bundle.pid}/{bundle.uuid} ***" - logger.debug(msg) - self.schedule_backup_for_bundle(bundle) - return - elif ( - bundle.super_slow - and len(bundle.backup_bundles) < 2 # two backups in dire situations - and self.status.total_idle() > 4 - ): - msg = f"*** Rescheduling {bundle.pid}/{bundle.uuid} ***" - logger.debug(msg) - self.schedule_backup_for_bundle(bundle) - return + # Look for bundles to reschedule. + num_done = len(self.status.finished_bundle_timings) + num_idle_workers = self.worker_count - self.task_count + now = time.time() + if ( + config.config['executors_schedule_remote_backups'] + and num_done > 2 + and num_idle_workers > 1 + and (self.last_backup is None or (now - self.last_backup > 1.0)) + and self.backup_lock.acquire(blocking=False) + ): + try: + assert self.backup_lock.locked() + + bundle_to_backup = None + best_score = None + for worker, bundle_uuids in self.status.in_flight_bundles_by_worker.items(): + # Prefer to schedule backups of bundles on slower machines. + base_score = 0 + for record in self.workers: + if worker.machine == record.machine: + base_score = float(record.weight) + base_score = 1.0 / base_score + base_score *= 200.0 + base_score = int(base_score) + break + + for uuid in bundle_uuids: + bundle = self.status.bundle_details_by_uuid.get(uuid, None) + if ( + bundle is not None + and bundle.src_bundle is None + and bundle.backup_bundles is not None + ): + score = base_score + + # Schedule backups of bundles running longer; especially those + # that are unexpectedly slow. + start_ts = self.status.start_per_bundle[uuid] + if start_ts is not None: + runtime = now - start_ts + score += runtime + logger.debug(f'score[{bundle}] => {score} # latency boost') + + if bundle.slower_than_local_p95: + score += runtime / 2 + logger.debug(f'score[{bundle}] => {score} # >worker p95') + + if bundle.slower_than_global_p95: + score += runtime / 2 + logger.debug(f'score[{bundle}] => {score} # >global p95') + + # Prefer backups of bundles that don't have backups already. 
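+                            # (No backups yet doubles the score; one existing
+                            # backup halves it, two cut it to an eighth, and
+                            # three or more take the bundle out of the running.)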
+                            backup_count = len(bundle.backup_bundles)
+                            if backup_count == 0:
+                                score *= 2
+                            elif backup_count == 1:
+                                score /= 2
+                            elif backup_count == 2:
+                                score /= 8
+                            else:
+                                score = 0
+                            logger.debug(f'score[{bundle}] => {score}  # {backup_count} dup backup factor')
+
+                            if (
+                                score != 0
+                                and (best_score is None or score > best_score)
+                            ):
+                                bundle_to_backup = bundle
+                                assert bundle is not None
+                                assert bundle.backup_bundles is not None
+                                assert bundle.src_bundle is None
+                                best_score = score
+
+                if bundle_to_backup is not None:
+                    self.last_backup = now
+                    logger.info(f'=====> SCHEDULING BACKUP {bundle_to_backup} (score={best_score:.1f}) <=====')
+                    self.schedule_backup_for_bundle(bundle_to_backup)
+            finally:
+                self.backup_lock.release()
 
     def check_if_cancelled(self, bundle: BundleDetails) -> bool:
         with self.status.lock:
@@ -558,63 +678,178 @@ class RemoteExecutor(BaseExecutor):
             return True
         return False
 
-    def launch(self, bundle: BundleDetails) -> Any:
-        # Find a worker for bundle or block until one is available.
+    def launch(self, bundle: BundleDetails, override_avoid_machine=None) -> Any:
+        """Find a worker for bundle or block until one is available."""
+        self.adjust_task_count(+1)
         uuid = bundle.uuid
         hostname = bundle.hostname
-        avoid_machine = None
-        if bundle.src_bundle is not None:
+        avoid_machine = override_avoid_machine
+        is_original = bundle.src_bundle is None
+
+        # Try not to schedule a backup on the same host as the original.
+        if avoid_machine is None and bundle.src_bundle is not None:
             avoid_machine = bundle.src_bundle.machine
         worker = None
         while worker is None:
             worker = self.find_available_worker_or_block(avoid_machine)
+
+        # Ok, found a worker.
         bundle.worker = worker
         machine = bundle.machine = worker.machine
         username = bundle.username = worker.username
+
         self.status.record_acquire_worker(worker, uuid)
-        logger.debug(f'Running bundle {uuid} on {worker}...')
+        logger.debug(f'{bundle}: Running bundle on {worker}...')
 
-        # Before we do work, make sure it's still viable.
+        # Before we do any work, make sure the bundle is still viable.
         if self.check_if_cancelled(bundle):
-            return self.post_launch_work(bundle)
-
-        # Send input to machine if it's not local.
+            try:
+                return self.post_launch_work(bundle)
+            except Exception as e:
+                logger.exception(e)
+                logger.error(
+                    f'{bundle}: bundle says it\'s cancelled upfront but no results?!'
+                )
+                assert bundle.worker is not None
+                self.status.record_release_worker(
+                    bundle.worker,
+                    bundle.uuid,
+                    True,
+                )
+                self.release_worker(bundle.worker)
+                self.adjust_task_count(-1)
+                if is_original:
+                    # Weird.  We are the original owner of this
+                    # bundle.  For it to have been cancelled, a backup
+                    # must have already started and completed before
+                    # we even got started.  Moreover, the backup says
+                    # it is done but we can't find the results it
+                    # should have copied over.  Reschedule the whole
+                    # thing.
+                    return self.emergency_retry_nasty_bundle(bundle)
+                else:
+                    # Expected(?).  We're a backup and our bundle is
+                    # cancelled before we even got started.  Something
+                    # went bad in post_launch_work (I actually don't
+                    # see what?) but probably not worth worrying
+                    # about.
+                    return None
+
+        # Send input code / data to worker machine if it's not local.
         if hostname not in machine:
-            cmd = f'{rsync} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
-            logger.debug(f"Copying work to {worker} via {cmd}")
-            exec_utils.run_silently(cmd)
-
-        # Before we do more work, make sure it's still viable.
- if self.check_if_cancelled(bundle): - return self.post_launch_work(bundle) - - # Fucking Apple has a python3 binary in /usr/sbin that is not - # the one we want and is protected by the OS so make sure that - # /usr/local/bin is early in the path. - cmd = (f'{ssh} {bundle.username}@{bundle.machine} ' - f'"export PATH=/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/home/scott/bin:/home/scott/.local/bin; /home/scott/lib/python_modules/remote_worker.py' + try: + cmd = f'{RSYNC} {bundle.code_file} {username}@{machine}:{bundle.code_file}' + start_ts = time.time() + logger.info(f"{bundle}: Copying work to {worker} via {cmd}.") + run_silently(cmd) + xfer_latency = time.time() - start_ts + logger.info(f"{bundle}: Copying done to {worker} in {xfer_latency:.1f}s.") + except Exception as e: + logger.exception(e) + logger.error( + f'{bundle}: failed to send instructions to worker machine?!?' + ) + assert bundle.worker is not None + self.status.record_release_worker( + bundle.worker, + bundle.uuid, + True, + ) + self.release_worker(bundle.worker) + self.adjust_task_count(-1) + if is_original: + # Weird. We tried to copy the code to the worker and it failed... + # And we're the original bundle. We have to retry. + return self.emergency_retry_nasty_bundle(bundle) + else: + # This is actually expected; we're a backup. + # There's a race condition where someone else + # already finished the work and removed the source + # code file before we could copy it. No biggie. + return None + + # Kick off the work. Note that if this fails we let + # wait_for_process deal with it. + self.status.record_processing_began(uuid) + cmd = (f'{SSH} {bundle.username}@{bundle.machine} ' + f'"source py39-venv/bin/activate &&' + f' /home/scott/lib/python_modules/remote_worker.py' f' --code_file {bundle.code_file} --result_file {bundle.result_file}"') - p = exec_utils.cmd_in_background(cmd, silent=True) + logger.debug(f'{bundle}: Executing {cmd} in the background to kick off work...') + p = cmd_in_background(cmd, silent=True) bundle.pid = pid = p.pid - logger.debug(f"Running {cmd} in the background as process {pid}") + logger.debug(f'{bundle}: Local ssh process pid={pid}; remote worker is {machine}.') + return self.wait_for_process(p, bundle, 0) + + def wait_for_process(self, p: subprocess.Popen, bundle: BundleDetails, depth: int) -> Any: + machine = bundle.machine + pid = p.pid + if depth > 3: + logger.error( + f"I've gotten repeated errors waiting on this bundle; giving up on pid={pid}." + ) + p.terminate() + self.status.record_release_worker( + bundle.worker, + bundle.uuid, + True, + ) + self.release_worker(bundle.worker) + self.adjust_task_count(-1) + return self.emergency_retry_nasty_bundle(bundle) + # Spin until either the ssh job we scheduled finishes the + # bundle or some backup worker signals that they finished it + # before we could. while True: try: - p.wait(timeout=0.5) + p.wait(timeout=0.25) except subprocess.TimeoutExpired: self.heartbeat() - - # Both source and backup bundles can be cancelled by - # the other depending on which finishes first. if self.check_if_cancelled(bundle): - p.terminate() + logger.info( + f'{bundle}: another worker finished bundle, checking it out...' + ) break else: - logger.debug( - f"{pid}/{bundle.uuid} has finished its work normally." + logger.info( + f"{bundle}: pid {pid} ({machine}) our ssh finished, checking it out..." 
)
+                p = None
                 break
-        return self.post_launch_work(bundle)
+
+        # If we get here we believe the bundle is done; either the ssh
+        # subprocess finished (hopefully successfully) or we noticed
+        # that some other worker seems to have completed the bundle
+        # and we're bailing out.
+        try:
+            ret = self.post_launch_work(bundle)
+            if ret is not None and p is not None:
+                p.terminate()
+            return ret
+
+        # Something went wrong; e.g. we could not copy the results
+        # back, clean up after ourselves on the remote machine, or
+        # unpickle the results we got from the remote machine.  If we
+        # still have an active ssh subprocess, keep waiting on it.
+        # Otherwise, time for an emergency reschedule.
+        except Exception as e:
+            logger.exception(e)
+            logger.error(f'{bundle}: Something unexpected just happened...')
+            if p is not None:
+                logger.warning(
+                    f"{bundle}: Failed to wrap up \"done\" bundle, re-waiting on active ssh."
+                )
+                return self.wait_for_process(p, bundle, depth + 1)
+            else:
+                self.status.record_release_worker(
+                    bundle.worker,
+                    bundle.uuid,
+                    True,
+                )
+                self.release_worker(bundle.worker)
+                self.adjust_task_count(-1)
+                return self.emergency_retry_nasty_bundle(bundle)
 
     def post_launch_work(self, bundle: BundleDetails) -> Any:
         with self.status.lock:
@@ -628,37 +863,52 @@ class RemoteExecutor(BaseExecutor):
         # Whether original or backup, if we finished first we must
         # fetch the results if the computation happened on a
         # remote machine.
+        bundle.end_ts = time.time()
         if not was_cancelled:
             assert bundle.machine is not None
             if bundle.hostname not in bundle.machine:
-                cmd = f'{rsync} {username}@{machine}:{result_file} {result_file} 2>/dev/null'
-                logger.debug(
-                    f"Fetching results from {username}@{machine} via {cmd}"
+                cmd = f'{RSYNC} {username}@{machine}:{result_file} {result_file} 2>/dev/null'
+                logger.info(
+                    f"{bundle}: Fetching results from {username}@{machine} via {cmd}"
                 )
-                try:
-                    exec_utils.run_silently(cmd)
-                except subprocess.CalledProcessError:
-                    pass
-                exec_utils.run_silently(f'{ssh} {username}@{machine}'
-                                        f' "/bin/rm -f {code_file} {result_file}"')
-        bundle.end_ts = time.time()
-        assert bundle.worker is not None
-        self.status.record_release_worker_already_locked(
-            bundle.worker,
-            bundle.uuid,
-            was_cancelled
-        )
-        if not was_cancelled:
+
+                # If either of these throw they are handled in
+                # wait_for_process.
+                run_silently(cmd)
+                run_silently(f'{SSH} {username}@{machine}'
+                             f' "/bin/rm -f {code_file} {result_file}"')
             dur = bundle.end_ts - bundle.start_ts
-            hist.add_item(dur)
+            self.histogram.add_item(dur)
 
         # Only the original worker should unpickle the file contents
-        # though since it's the only one whose result matters.
+        # though since it's the only one whose result matters.  The
+        # original is also the only job that may delete result_file
+        # from disk.  Note that the original may have been cancelled
+        # if one of the backups finished first; it still must read the
+        # result from disk.
         if is_original:
-            logger.debug(f"Unpickling {result_file}.")
-            with open(f'{result_file}', 'rb') as rb:
-                serialized = rb.read()
-            result = cloudpickle.loads(serialized)
+            logger.debug(f"{bundle}: Unpickling {result_file}.")
+            try:
+                with open(f'{result_file}', 'rb') as rb:
+                    serialized = rb.read()
+                result = cloudpickle.loads(serialized)
+            except Exception as e:
+                msg = f'Failed to load {result_file}, this is bad news.'
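+                # The worker claimed this bundle was done but its result file
+                # is missing or unreadable; release the worker and surface the
+                # error so wait_for_process can decide whether to retry.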
+ logger.critical(msg) + self.status.record_release_worker( + bundle.worker, + bundle.uuid, + True, + ) + self.release_worker(bundle.worker) + + # Re-raise the exception; the code in wait_for_process may + # decide to emergency_retry_nasty_bundle here. + raise Exception(e) + + logger.debug( + f'Removing local (master) {code_file} and {result_file}.' + ) os.remove(f'{result_file}') os.remove(f'{code_file}') @@ -669,30 +919,38 @@ class RemoteExecutor(BaseExecutor): if bundle.backup_bundles is not None: for backup in bundle.backup_bundles: logger.debug( - f'Notifying backup {backup.uuid} that it is cancelled' + f'{bundle}: Notifying backup {backup.uuid} that it\'s cancelled' ) backup.is_cancelled.set() - # This is a backup. + # This is a backup job and, by now, we have already fetched + # the bundle results. else: # Backup results don't matter, they just need to leave the - # result file in the right place for their original to - # read later. + # result file in the right place for their originals to + # read/unpickle later. result = None # Tell the original to stop if we finished first. if not was_cancelled: logger.debug( - f'Notifying original {bundle.src_bundle.uuid} that it is cancelled' + f'{bundle}: Notifying original {bundle.src_bundle.uuid} we beat them to it.' ) bundle.src_bundle.is_cancelled.set() assert bundle.worker is not None + self.status.record_release_worker( + bundle.worker, + bundle.uuid, + was_cancelled, + ) self.release_worker(bundle.worker) + self.adjust_task_count(-1) return result - def create_original_bundle(self, pickle): - uuid = string_utils.generate_uuid(as_hex=True) + def create_original_bundle(self, pickle, fname: str): + from string_utils import generate_uuid + uuid = generate_uuid(as_hex=True) code_file = f'/tmp/{uuid}.code.bin' result_file = f'/tmp/{uuid}.result.bin' @@ -703,6 +961,7 @@ class RemoteExecutor(BaseExecutor): bundle = BundleDetails( pickled_code = pickle, uuid = uuid, + fname = fname, worker = None, username = None, machine = None, @@ -712,15 +971,16 @@ class RemoteExecutor(BaseExecutor): pid = 0, start_ts = time.time(), end_ts = 0.0, - too_slow = False, - super_slow = False, + slower_than_local_p95 = False, + slower_than_global_p95 = False, src_bundle = None, is_cancelled = threading.Event(), was_cancelled = False, backup_bundles = [], + failure_count = 0, ) self.status.record_bundle_details(bundle) - logger.debug(f'Created original bundle {uuid}') + logger.debug(f'{bundle}: Created an original bundle') return bundle def create_backup_bundle(self, src_bundle: BundleDetails): @@ -731,6 +991,7 @@ class RemoteExecutor(BaseExecutor): backup_bundle = BundleDetails( pickled_code = src_bundle.pickled_code, uuid = uuid, + fname = src_bundle.fname, worker = None, username = None, machine = None, @@ -740,24 +1001,26 @@ class RemoteExecutor(BaseExecutor): pid = 0, start_ts = time.time(), end_ts = 0.0, - too_slow = False, - super_slow = False, + slower_than_local_p95 = False, + slower_than_global_p95 = False, src_bundle = src_bundle, is_cancelled = threading.Event(), was_cancelled = False, backup_bundles = None, # backup backups not allowed + failure_count = 0, ) src_bundle.backup_bundles.append(backup_bundle) self.status.record_bundle_details_already_locked(backup_bundle) - logger.debug(f'Created backup bundle {uuid}') + logger.debug(f'{backup_bundle}: Created a backup bundle') return backup_bundle def schedule_backup_for_bundle(self, src_bundle: BundleDetails): assert self.status.lock.locked() + assert src_bundle is not None backup_bundle = 
self.create_backup_bundle(src_bundle)
         logger.debug(
-            f'Scheduling backup bundle {backup_bundle.uuid} for execution'
+            f'{backup_bundle.uuid}/{backup_bundle.fname}: Scheduling backup for execution...'
         )
         self._helper_executor.submit(self.launch, backup_bundle)
 
@@ -765,21 +1028,50 @@ class RemoteExecutor(BaseExecutor):
         # they will move the result_file to this machine and let
         # the original pick them up and unpickle them.
 
+    def emergency_retry_nasty_bundle(self, bundle: BundleDetails) -> fut.Future:
+        is_original = bundle.src_bundle is None
+        bundle.worker = None
+        avoid_last_machine = bundle.machine
+        bundle.machine = None
+        bundle.username = None
+        bundle.failure_count += 1
+        if is_original:
+            retry_limit = 3
+        else:
+            retry_limit = 2
+
+        if bundle.failure_count > retry_limit:
+            logger.error(
+                f'{bundle}: Tried this bundle too many times already ({retry_limit}x); giving up.'
+            )
+            if is_original:
+                raise RemoteExecutorException(
+                    f'{bundle}: This bundle can\'t be completed despite several backups and retries'
+                )
+            else:
+                logger.error(f'{bundle}: At least it\'s only a backup; better luck with the others.')
+            return None
+        else:
+            logger.warning(
+                f'>>> Emergency rescheduling {bundle} because of unexpected errors (wtf?!) <<<'
+            )
+            return self.launch(bundle, avoid_last_machine)
+
+    @overrides
     def submit(self,
                function: Callable,
                *args,
                **kwargs) -> fut.Future:
         pickle = make_cloud_pickle(function, *args, **kwargs)
-        bundle = self.create_original_bundle(pickle)
+        bundle = self.create_original_bundle(pickle, function.__name__)
         self.total_bundles_submitted += 1
-        logger.debug(
-            f'Submitted work to remote executor; {self.total_bundles_submitted} items now submitted'
-        )
         return self._helper_executor.submit(self.launch, bundle)
 
+    @overrides
     def shutdown(self, wait=True) -> None:
         self._helper_executor.shutdown(wait)
-        print(hist)
+        logger.debug(f'Shutting down RemoteExecutor {self.title}')
+        print(self.histogram)
 
 
 @singleton
@@ -790,12 +1082,15 @@ class DefaultExecutors(object):
         self.remote_executor: Optional[RemoteExecutor] = None
 
     def ping(self, host) -> bool:
-        command = ['ping', '-c', '1', host]
-        return subprocess.call(
-            command,
-            stdout=subprocess.DEVNULL,
-            stderr=subprocess.DEVNULL,
-        ) == 0
+        logger.debug(f'RUN> ping -c 1 {host}')
+        try:
+            x = cmd_with_timeout(
+                f'ping -c 1 {host} >/dev/null 2>/dev/null',
+                timeout_seconds=1.0
+            )
+            return x == 0
+        except Exception:
+            return False
 
     def thread_pool(self) -> ThreadExecutor:
         if self.thread_executor is None:
@@ -809,26 +1104,30 @@ class DefaultExecutors(object):
 
     def remote_pool(self) -> RemoteExecutor:
        if self.remote_executor is None:
+            logger.info('Looking for some helper machines...')
             pool: List[RemoteWorkerRecord] = []
             if self.ping('cheetah.house'):
+                logger.info('Found cheetah.house')
                 pool.append(
                     RemoteWorkerRecord(
                         username = 'scott',
                         machine = 'cheetah.house',
-                        weight = 10,
-                        count = 6,
+                        weight = 14,
+                        count = 4,
                     ),
                 )
             if self.ping('video.house'):
+                logger.info('Found video.house')
                 pool.append(
                     RemoteWorkerRecord(
                         username = 'scott',
                         machine = 'video.house',
-                        weight = 2,
+                        weight = 1,
                         count = 4,
                     ),
                 )
             if self.ping('wannabe.house'):
+                logger.info('Found wannabe.house')
                 pool.append(
                     RemoteWorkerRecord(
                         username = 'scott',
@@ -838,32 +1137,42 @@ class DefaultExecutors(object):
                         machine = 'wannabe.house',
                         weight = 2,
                         count = 2,
                     ),
                 )
             if self.ping('meerkat.cabin'):
+                logger.info('Found meerkat.cabin')
                 pool.append(
                     RemoteWorkerRecord(
                         username = 'scott',
                         machine = 'meerkat.cabin',
-                        weight = 7,
+                        weight = 5,
                         count = 2,
                     ),
                 )
-            if self.ping('backup.house'):
+            if self.ping('kiosk.house'):
+                
logger.info('Found kiosk.house') pool.append( RemoteWorkerRecord( - username = 'scott', - machine = 'backup.house', - weight = 3, + username = 'pi', + machine = 'kiosk.house', + weight = 1, count = 2, ), ) if self.ping('puma.cabin'): + logger.info('Found puma.cabin') pool.append( RemoteWorkerRecord( username = 'scott', machine = 'puma.cabin', - weight = 10, - count = 6, + weight = 12, + count = 4, ), ) + + # The controller machine has a lot to do; go easy on it. + for record in pool: + if record.machine == platform.node() and record.count > 1: + logger.info(f'Reducing workload for {record.machine}.') + record.count = 1 + policy = WeightedRandomRemoteWorkerSelectionPolicy() policy.register_worker_pool(pool) self.remote_executor = RemoteExecutor(pool, policy)