X-Git-Url: https://wannabe.guru.org/gitweb/?a=blobdiff_plain;f=executors.py;h=c91c2a6af535502ad31db6bb233fa9145d43dd4a;hb=eedcbd4f64af13ec2098508c3d839a60f7e9ffce;hp=b446822945a73c7a896755fc4b509d24c3600f84;hpb=3f4818bf9d1f3071c4e2906896d422810b5776bc;p=python_utils.git diff --git a/executors.py b/executors.py index b446822..c91c2a6 100644 --- a/executors.py +++ b/executors.py @@ -15,6 +15,7 @@ import subprocess import threading import time from typing import Any, Callable, Dict, List, Optional, Set +import warnings import cloudpickle # type: ignore from overrides import overrides @@ -22,9 +23,11 @@ from overrides import overrides from ansi import bg, fg, underline, reset import argparse_utils import config -from exec_utils import run_silently, cmd_in_background, cmd_with_timeout from decorator_utils import singleton +from exec_utils import run_silently, cmd_in_background, cmd_with_timeout import histogram as hist +from thread_utils import background_thread + logger = logging.getLogger(__name__) @@ -60,8 +63,8 @@ parser.add_argument( help='Maximum number of failures before giving up on a bundle', ) -RSYNC = 'rsync -q --no-motd -W --ignore-existing --timeout=60 --size-only -z' -SSH = 'ssh -oForwardX11=no' +SSH = '/usr/bin/ssh -oForwardX11=no' +SCP = '/usr/bin/scp' def make_cloud_pickle(fun, *args, **kwargs): @@ -230,8 +233,8 @@ class BundleDetails: pid: int start_ts: float end_ts: float - too_slow: bool - super_slow: bool + slower_than_local_p95: bool + slower_than_global_p95: bool src_bundle: BundleDetails is_cancelled: threading.Event was_cancelled: bool @@ -419,21 +422,19 @@ class RemoteExecutorStatus: if qworker is not None: if sec > qworker[1]: ret += f'{bg("red")}>💻p95{reset()} ' - elif sec > qworker[0]: - ret += f'{fg("red")}>💻p50{reset()} ' - if qall is not None: - if sec > qall[1] * 1.5: - ret += f'{bg("red")}!!!{reset()}' if details is not None: - logger.debug(f'Flagging {details} for another backup') - details.super_slow = True - elif sec > qall[1]: + details.slower_than_local_p95 = True + else: + if details is not None: + details.slower_than_local_p95 = False + + if qall is not None: + if sec > qall[1]: ret += f'{bg("red")}>∀p95{reset()} ' if details is not None: - logger.debug(f'Flagging {details} for a backup') - details.too_slow = True - elif sec > qall[0]: - ret += f'{fg("red")}>∀p50{reset()}' + details.slower_than_global_p95 = True + else: + details.slower_than_global_p95 = False ret += '\n' return ret @@ -451,7 +452,6 @@ class RemoteExecutorStatus: class RemoteWorkerSelectionPolicy(ABC): def register_worker_pool(self, workers): - random.seed() self.workers = workers @abstractmethod @@ -467,12 +467,14 @@ class RemoteWorkerSelectionPolicy(ABC): class WeightedRandomRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy): + @overrides def is_worker_available(self) -> bool: for worker in self.workers: if worker.count > 0: return True return False + @overrides def acquire_worker( self, machine_to_avoid = None @@ -491,7 +493,8 @@ class WeightedRandomRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy): worker.count -= 1 logger.debug(f'Selected worker {worker}') return worker - logger.warning("Couldn't find a worker; go fish.") + msg = 'Unexpectedly could not find a worker, retrying...' 
+ logger.warning(msg) return None @@ -499,12 +502,14 @@ class RoundRobinRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy): def __init__(self) -> None: self.index = 0 + @overrides def is_worker_available(self) -> bool: for worker in self.workers: if worker.count > 0: return True return False + @overrides def acquire_worker( self, machine_to_avoid: str = None @@ -524,7 +529,8 @@ class RoundRobinRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy): if x >= len(self.workers): x = 0 if x == self.index: - logger.warning("Couldn't find a worker; go fish.") + msg = 'Unexpectedly could not find a worker, retrying...' + logger.warning(msg) return None @@ -551,6 +557,121 @@ class RemoteExecutor(BaseExecutor): ) self.status = RemoteExecutorStatus(self.worker_count) self.total_bundles_submitted = 0 + self.backup_lock = threading.Lock() + self.last_backup = None + (self.heartbeat_thread, self.heartbeat_stop_event) = self.run_periodic_heartbeat() + + @background_thread + def run_periodic_heartbeat(self, stop_event) -> None: + while not stop_event.is_set(): + time.sleep(5.0) + logger.debug('Running periodic heartbeat code...') + self.heartbeat() + logger.debug('Periodic heartbeat thread shutting down.') + + def heartbeat(self) -> None: + with self.status.lock: + # Dump regular progress report + self.status.periodic_dump(self.total_bundles_submitted) + + # Look for bundles to reschedule via executor.submit + if config.config['executors_schedule_remote_backups']: + self.maybe_schedule_backup_bundles() + + def maybe_schedule_backup_bundles(self): + assert self.status.lock.locked() + num_done = len(self.status.finished_bundle_timings) + num_idle_workers = self.worker_count - self.task_count + now = time.time() + if ( + num_done > 2 + and num_idle_workers > 1 + and (self.last_backup is None or (now - self.last_backup > 6.0)) + and self.backup_lock.acquire(blocking=False) + ): + try: + assert self.backup_lock.locked() + + bundle_to_backup = None + best_score = None + for worker, bundle_uuids in self.status.in_flight_bundles_by_worker.items(): + + # Prefer to schedule backups of bundles running on + # slower machines. + base_score = 0 + for record in self.workers: + if worker.machine == record.machine: + base_score = float(record.weight) + base_score = 1.0 / base_score + base_score *= 200.0 + base_score = int(base_score) + break + + for uuid in bundle_uuids: + bundle = self.status.bundle_details_by_uuid.get(uuid, None) + if ( + bundle is not None + and bundle.src_bundle is None + and bundle.backup_bundles is not None + ): + score = base_score + + # Schedule backups of bundles running + # longer; especially those that are + # unexpectedly slow. + start_ts = self.status.start_per_bundle[uuid] + if start_ts is not None: + runtime = now - start_ts + score += runtime + logger.debug(f'score[{bundle}] => {score} # latency boost') + + if bundle.slower_than_local_p95: + score += runtime / 2 + logger.debug(f'score[{bundle}] => {score} # >worker p95') + + if bundle.slower_than_global_p95: + score += runtime / 4 + logger.debug(f'score[{bundle}] => {score} # >global p95') + + # Prefer backups of bundles that don't + # have backups already. 
+ backup_count = len(bundle.backup_bundles) + if backup_count == 0: + score *= 2 + elif backup_count == 1: + score /= 2 + elif backup_count == 2: + score /= 8 + else: + score = 0 + logger.debug( + f'score[{bundle}] => {score} # {backup_count} dup backup factor' + ) + + if ( + score != 0 + and (best_score is None or score > best_score) + ): + bundle_to_backup = bundle + assert bundle is not None + assert bundle.backup_bundles is not None + assert bundle.src_bundle is None + best_score = score + + # Note: this is all still happening on the heartbeat + # runner thread. That's ok because + # schedule_backup_for_bundle uses the executor to + # submit the bundle again which will cause it to be + # picked up by a worker thread and allow this thread + # to return to run future heartbeats. + if bundle_to_backup is not None: + self.last_backup = now + logger.info( + f'=====> SCHEDULING BACKUP {bundle_to_backup} (score={best_score:.1f}) <=====' + ) + self.schedule_backup_for_bundle(bundle_to_backup) + finally: + self.backup_lock.release() def is_worker_available(self) -> bool: return self.policy.is_worker_available() @@ -575,50 +696,24 @@ class RemoteExecutor(BaseExecutor): logger.critical(msg) raise Exception(msg) - def release_worker(self, worker: RemoteWorkerRecord) -> None: + def release_worker( + self, + bundle: BundleDetails, + *, + was_cancelled=True + ) -> None: + worker = bundle.worker + assert worker is not None logger.debug(f'Released worker {worker}') + self.status.record_release_worker( + worker, + bundle.uuid, + was_cancelled, + ) with self.cv: worker.count += 1 self.cv.notify() - - def heartbeat(self) -> None: - with self.status.lock: - # Regular progress report - self.status.periodic_dump(self.total_bundles_submitted) - - # Look for bundles to reschedule. - num_done = len(self.status.finished_bundle_timings) - if num_done > 7 or (num_done > 5 and self.is_worker_available()): - for worker, bundle_uuids in self.status.in_flight_bundles_by_worker.items(): - for uuid in bundle_uuids: - bundle = self.status.bundle_details_by_uuid.get(uuid, None) - if ( - bundle is not None and - bundle.too_slow and - bundle.src_bundle is None and - config.config['executors_schedule_remote_backups'] - ): - self.consider_backup_for_bundle(bundle) - - def consider_backup_for_bundle(self, bundle: BundleDetails) -> None: - assert self.status.lock.locked() - if ( - bundle.too_slow - and len(bundle.backup_bundles) == 0 # one backup per - ): - msg = f"*** Rescheduling {bundle} (first backup) ***" - logger.debug(msg) - self.schedule_backup_for_bundle(bundle) - return - elif ( - bundle.super_slow - and len(bundle.backup_bundles) < 2 # two backups in dire situations - and self.status.total_idle() > 4 - ): - msg = f"*** Rescheduling {bundle} (second backup) ***" - logger.debug(msg) - self.schedule_backup_for_bundle(bundle) - return + self.adjust_task_count(-1) def check_if_cancelled(self, bundle: BundleDetails) -> bool: with self.status.lock: @@ -642,32 +737,27 @@ class RemoteExecutor(BaseExecutor): worker = None while worker is None: worker = self.find_available_worker_or_block(avoid_machine) + assert worker # Ok, found a worker. bundle.worker = worker machine = bundle.machine = worker.machine username = bundle.username = worker.username - self.status.record_acquire_worker(worker, uuid) logger.debug(f'{bundle}: Running bundle on {worker}...') # Before we do any work, make sure the bundle is still viable. 
+        # It may have been some time between when it was submitted and
+        # now due to lack of worker availability and someone else may
+        # have already finished it.
         if self.check_if_cancelled(bundle):
             try:
-                return self.post_launch_work(bundle)
+                return self.process_work_result(bundle)
             except Exception as e:
-                logger.exception(e)
-                logger.error(
+                logger.warning(
                     f'{bundle}: bundle says it\'s cancelled upfront but no results?!'
                 )
-                assert bundle.worker is not None
-                self.status.record_release_worker(
-                    bundle.worker,
-                    bundle.uuid,
-                    True,
-                )
-                self.release_worker(bundle.worker)
-                self.adjust_task_count(-1)
+                self.release_worker(bundle)
                 if is_original:
                     # Weird.  We are the original owner of this
                     # bundle.  For it to have been cancelled, a backup
@@ -676,59 +766,65 @@ class RemoteExecutor(BaseExecutor):
                     # it is done but we can't find the results it
                     # should have copied over.  Reschedule the whole
                     # thing.
+                    logger.exception(e)
+                    logger.error(
+                        f'{bundle}: We are the original owner thread and yet there are '
+                        + 'no results for this bundle.  This is unexpected and bad.'
+                    )
                     return self.emergency_retry_nasty_bundle(bundle)
                 else:
                     # Expected(?).  We're a backup and our bundle is
                     # cancelled before we even got started.  Something
-                    # went bad in post_launch_work (I acutually don't
+                    # went bad in process_work_result (I actually don't
                     # see what?) but probably not worth worrying
-                    # about.
+                    # about.  Let the original thread worry about
+                    # either finding the results or complaining about
+                    # it.
                     return None
 
         # Send input code / data to worker machine if it's not local.
         if hostname not in machine:
             try:
-                cmd = f'{RSYNC} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
+                cmd = f'{SCP} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
                 start_ts = time.time()
                 logger.info(f"{bundle}: Copying work to {worker} via {cmd}.")
                 run_silently(cmd)
                 xfer_latency = time.time() - start_ts
-                logger.info(f"{bundle}: Copying done to {worker} in {xfer_latency:.1f}s.")
+                logger.info(f"{bundle}: Copying to {worker} took {xfer_latency:.1f}s.")
             except Exception as e:
-                logger.exception(e)
-                logger.error(
-                    f'{bundle}: failed to send instructions to worker machine?!?'
-                )
-                assert bundle.worker is not None
-                self.status.record_release_worker(
-                    bundle.worker,
-                    bundle.uuid,
-                    True,
-                )
-                self.release_worker(bundle.worker)
-                self.adjust_task_count(-1)
+                self.release_worker(bundle)
                 if is_original:
                     # Weird.  We tried to copy the code to the worker and it failed...
                     # And we're the original bundle.  We have to retry.
+                    logger.exception(e)
+                    logger.error(
+                        f"{bundle}: Failed to send instructions to the worker machine?! "
+                        + "This is not expected; we\'re the original bundle so this shouldn\'t "
+                        + "be a race condition.  Attempting an emergency retry..."
+                    )
                     return self.emergency_retry_nasty_bundle(bundle)
                 else:
                     # This is actually expected; we're a backup.
                     # There's a race condition where someone else
                     # already finished the work and removed the source
                     # code file before we could copy it.  No biggie.
+                    msg = f'{bundle}: Failed to send instructions to the worker machine... '
+                    msg += 'We\'re a backup and this may be caused by the original (or some '
+                    msg += 'other backup) already finishing this work.  Ignoring this.'
+                    logger.warning(msg)
                     return None
 
         # Kick off the work.  Note that if this fails we let
         # wait_for_process deal with it.
self.status.record_processing_began(uuid) cmd = (f'{SSH} {bundle.username}@{bundle.machine} ' - f'"source py39-venv/bin/activate &&' + f'"source py38-venv/bin/activate &&' f' /home/scott/lib/python_modules/remote_worker.py' f' --code_file {bundle.code_file} --result_file {bundle.result_file}"') logger.debug(f'{bundle}: Executing {cmd} in the background to kick off work...') p = cmd_in_background(cmd, silent=True) - bundle.pid = pid = p.pid - logger.debug(f'{bundle}: Local ssh process pid={pid}; remote worker is {machine}.') + bundle.pid = p.pid + logger.debug(f'{bundle}: Local ssh process pid={p.pid}; remote worker is {machine}.') return self.wait_for_process(p, bundle, 0) def wait_for_process(self, p: subprocess.Popen, bundle: BundleDetails, depth: int) -> Any: @@ -739,13 +835,7 @@ class RemoteExecutor(BaseExecutor): f"I've gotten repeated errors waiting on this bundle; giving up on pid={pid}." ) p.terminate() - self.status.record_release_worker( - bundle.worker, - bundle.uuid, - True, - ) - self.release_worker(bundle.worker) - self.adjust_task_count(-1) + self.release_worker(bundle) return self.emergency_retry_nasty_bundle(bundle) # Spin until either the ssh job we scheduled finishes the @@ -755,7 +845,6 @@ class RemoteExecutor(BaseExecutor): try: p.wait(timeout=0.25) except subprocess.TimeoutExpired: - self.heartbeat() if self.check_if_cancelled(bundle): logger.info( f'{bundle}: another worker finished bundle, checking it out...' @@ -763,7 +852,7 @@ class RemoteExecutor(BaseExecutor): break else: logger.info( - f"{bundle}: pid {pid} ({machine}) our ssh finished, checking it out..." + f"{bundle}: pid {pid} ({machine}) is finished!" ) p = None break @@ -773,7 +862,7 @@ class RemoteExecutor(BaseExecutor): # that some other worker seems to have completed the bundle # and we're bailing out. try: - ret = self.post_launch_work(bundle) + ret = self.process_work_result(bundle) if ret is not None and p is not None: p.terminate() return ret @@ -787,21 +876,14 @@ class RemoteExecutor(BaseExecutor): logger.exception(e) logger.error(f'{bundle}: Something unexpected just happened...') if p is not None: - logger.warning( - f"{bundle}: Failed to wrap up \"done\" bundle, re-waiting on active ssh." - ) + msg = f"{bundle}: Failed to wrap up \"done\" bundle, re-waiting on active ssh." + logger.warning(msg) return self.wait_for_process(p, bundle, depth + 1) else: - self.status.record_release_worker( - bundle.worker, - bundle.uuid, - True, - ) - self.release_worker(bundle.worker) - self.adjust_task_count(-1) + self.release_worker(bundle) return self.emergency_retry_nasty_bundle(bundle) - def post_launch_work(self, bundle: BundleDetails) -> Any: + def process_work_result(self, bundle: BundleDetails) -> Any: with self.status.lock: is_original = bundle.src_bundle is None was_cancelled = bundle.was_cancelled @@ -817,16 +899,27 @@ class RemoteExecutor(BaseExecutor): if not was_cancelled: assert bundle.machine is not None if bundle.hostname not in bundle.machine: - cmd = f'{RSYNC} {username}@{machine}:{result_file} {result_file} 2>/dev/null' + cmd = f'{SCP} {username}@{machine}:{result_file} {result_file} 2>/dev/null' logger.info( - f"{bundle}: Fetching results from {username}@{machine} via {cmd}" + f"{bundle}: Fetching results back from {username}@{machine} via {cmd}" ) # If either of these throw they are handled in # wait_for_process. 
-                run_silently(cmd)
+                attempts = 0
+                while True:
+                    try:
+                        run_silently(cmd)
+                    except Exception as e:
+                        attempts += 1
+                        if attempts >= 3:
+                            raise(e)
+                    else:
+                        break
+
                 run_silently(f'{SSH} {username}@{machine}'
                              f' "/bin/rm -f {code_file} {result_file}"')
+                logger.debug(f'Fetching results back took {time.time() - bundle.end_ts:.1f}s.')
         dur = bundle.end_ts - bundle.start_ts
         self.histogram.add_item(dur)
 
@@ -839,23 +932,18 @@ class RemoteExecutor(BaseExecutor):
         if is_original:
             logger.debug(f"{bundle}: Unpickling {result_file}.")
             try:
-                with open(f'{result_file}', 'rb') as rb:
+                with open(result_file, 'rb') as rb:
                     serialized = rb.read()
                 result = cloudpickle.loads(serialized)
             except Exception as e:
-                msg = f'Failed to load {result_file}, this is bad news.'
+                logger.exception(e)
+                msg = f'Failed to load {result_file}... this is bad news.'
                 logger.critical(msg)
-                self.status.record_release_worker(
-                    bundle.worker,
-                    bundle.uuid,
-                    True,
-                )
-                self.release_worker(bundle.worker)
+                self.release_worker(bundle)
 
                 # Re-raise the exception; the code in wait_for_process may
                 # decide to emergency_retry_nasty_bundle here.
                 raise Exception(e)
-
             logger.debug(
                 f'Removing local (master) {code_file} and {result_file}.'
             )
@@ -887,20 +975,12 @@ class RemoteExecutor(BaseExecutor):
                     f'{bundle}: Notifying original {bundle.src_bundle.uuid} we beat them to it.'
                 )
                 bundle.src_bundle.is_cancelled.set()
-
-        assert bundle.worker is not None
-        self.status.record_release_worker(
-            bundle.worker,
-            bundle.uuid,
-            was_cancelled,
-        )
-        self.release_worker(bundle.worker)
-        self.adjust_task_count(-1)
+        self.release_worker(bundle, was_cancelled=was_cancelled)
         return result
 
     def create_original_bundle(self, pickle, fname: str):
         from string_utils import generate_uuid
-        uuid = generate_uuid(as_hex=True)
+        uuid = generate_uuid(omit_dashes=True)
         code_file = f'/tmp/{uuid}.code.bin'
         result_file = f'/tmp/{uuid}.result.bin'
 
@@ -921,8 +1001,8 @@ class RemoteExecutor(BaseExecutor):
             pid = 0,
             start_ts = time.time(),
             end_ts = 0.0,
-            too_slow = False,
-            super_slow = False,
+            slower_than_local_p95 = False,
+            slower_than_global_p95 = False,
             src_bundle = None,
             is_cancelled = threading.Event(),
             was_cancelled = False,
@@ -951,8 +1031,8 @@ class RemoteExecutor(BaseExecutor):
             pid = 0,
             start_ts = time.time(),
             end_ts = 0.0,
-            too_slow = False,
-            super_slow = False,
+            slower_than_local_p95 = False,
+            slower_than_global_p95 = False,
             src_bundle = src_bundle,
            is_cancelled = threading.Event(),
             was_cancelled = False,
@@ -967,6 +1047,7 @@ class RemoteExecutor(BaseExecutor):
 
     def schedule_backup_for_bundle(self, src_bundle: BundleDetails):
         assert self.status.lock.locked()
+        assert src_bundle is not None
         backup_bundle = self.create_backup_bundle(src_bundle)
         logger.debug(
             f'{backup_bundle.uuid}/{backup_bundle.fname}: Scheduling backup for execution...'
@@ -1001,9 +1082,9 @@ class RemoteExecutor(BaseExecutor):
             logger.error(f'{bundle}: At least it\'s only a backup; better luck with the others.')
             return None
         else:
-            logger.warning(
-                f'>>> Emergency rescheduling {bundle} because of unexected errors (wtf?!) <<<'
-            )
+            msg = f'>>> Emergency rescheduling {bundle} because of unexpected errors (wtf?!) <<<'
+            logger.warning(msg)
+            warnings.warn(msg)
             return self.launch(bundle, avoid_last_machine)
 
     @overrides
@@ -1018,8 +1099,10 @@ class RemoteExecutor(BaseExecutor):
 
     @overrides
     def shutdown(self, wait=True) -> None:
-        self._helper_executor.shutdown(wait)
         logging.debug(f'Shutting down RemoteExecutor {self.title}')
+        self.heartbeat_stop_event.set()
+        self.heartbeat_thread.join()
+        self._helper_executor.shutdown(wait)
         print(self.histogram)
 
 
@@ -1061,18 +1144,18 @@ class DefaultExecutors(object):
                 RemoteWorkerRecord(
                     username = 'scott',
                     machine = 'cheetah.house',
-                    weight = 14,
-                    count = 4,
+                    weight = 25,
+                    count = 6,
                 ),
             )
-        if self.ping('video.house'):
-            logger.info('Found video.house')
+        if self.ping('meerkat.cabin'):
+            logger.info('Found meerkat.cabin')
             pool.append(
                 RemoteWorkerRecord(
                     username = 'scott',
-                    machine = 'video.house',
-                    weight = 1,
-                    count = 4,
+                    machine = 'meerkat.cabin',
+                    weight = 12,
+                    count = 2,
                 ),
             )
         if self.ping('wannabe.house'):
             logger.info('Found wannabe.house')
             pool.append(
                 RemoteWorkerRecord(
                     username = 'scott',
                     machine = 'wannabe.house',
-                    weight = 2,
-                    count = 4,
+                    weight = 30,
+                    count = 10,
                 ),
             )
-        if self.ping('meerkat.cabin'):
-            logger.info('Found meerkat.cabin')
+        if self.ping('puma.cabin'):
+            logger.info('Found puma.cabin')
             pool.append(
                 RemoteWorkerRecord(
                     username = 'scott',
-                    machine = 'meerkat.cabin',
-                    weight = 5,
-                    count = 2,
+                    machine = 'puma.cabin',
+                    weight = 25,
+                    count = 6,
                 ),
             )
         if self.ping('backup.house'):
             logger.info('Found backup.house')
             pool.append(
                 RemoteWorkerRecord(
                     username = 'scott',
                     machine = 'backup.house',
-                    weight = 1,
-                    count = 4,
-                ),
-            )
-        if self.ping('kiosk.house'):
-            logger.info('Found kiosk.house')
-            pool.append(
-                RemoteWorkerRecord(
-                    username = 'pi',
-                    machine = 'kiosk.house',
-                    weight = 1,
+                    weight = 7,
                     count = 2,
                 ),
             )
-        if self.ping('puma.cabin'):
-            logger.info('Found puma.cabin')
-            pool.append(
-                RemoteWorkerRecord(
-                    username = 'scott',
-                    machine = 'puma.cabin',
-                    weight = 12,
-                    count = 4,
-                ),
-            )
 
         # The controller machine has a lot to do; go easy on it.
         for record in pool:
@@ -1136,3 +1199,14 @@ class DefaultExecutors(object):
             policy.register_worker_pool(pool)
             self.remote_executor = RemoteExecutor(pool, policy)
         return self.remote_executor
+
+    def shutdown(self) -> None:
+        if self.thread_executor is not None:
+            self.thread_executor.shutdown()
+            self.thread_executor = None
+        if self.process_executor is not None:
+            self.process_executor.shutdown()
+            self.process_executor = None
+        if self.remote_executor is not None:
+            self.remote_executor.shutdown()
+            self.remote_executor = None