Ugh, a bunch of things: @overrides, --lmodule, Chromecasts, etc.
[python_utils.git] / executors.py
index ddd62f1b8a3998fa85a4fb456538eabd54e3d206..d5049a264317c2f764d2068e7108a65d858f7cb2 100644
@@ -17,6 +17,7 @@ import time
 from typing import Any, Callable, Dict, List, Optional, Set
 
 import cloudpickle  # type: ignore
+from overrides import overrides
 
 from ansi import bg, fg, underline, reset
 import argparse_utils
@@ -121,6 +122,7 @@ class ThreadExecutor(BaseExecutor):
         self.histogram.add_item(duration)
         return result
 
+    @overrides
     def submit(self,
                function: Callable,
                *args,
@@ -135,6 +137,7 @@ class ThreadExecutor(BaseExecutor):
             *newargs,
             **kwargs)
 
+    @overrides
     def shutdown(self,
                  wait = True) -> None:
         logger.debug(f'Shutting down threadpool executor {self.title}')
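
The @overrides decorator added throughout this change comes from the PyPI "overrides" package; it fails fast, at class-definition time, if a method marked as an override has no counterpart in any base class. A minimal sketch of the behavior being relied on (Base/Child are illustrative, not classes from this file):

    from overrides import overrides

    class Base:
        def submit(self, function, *args, **kwargs):
            raise NotImplementedError

    class Child(Base):
        @overrides
        def submit(self, function, *args, **kwargs):
            return None        # fine: Base.submit exists

        # Decorating a method with no match in any base class, e.g. a typo like
        # "submitt", raises an error as soon as the class is defined.
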
@@ -163,6 +166,7 @@ class ProcessExecutor(BaseExecutor):
         self.adjust_task_count(-1)
         return result
 
+    @overrides
     def submit(self,
                function: Callable,
                *args,
@@ -181,6 +185,7 @@ class ProcessExecutor(BaseExecutor):
         )
         return result
 
+    @overrides
     def shutdown(self, wait=True) -> None:
         logger.debug(f'Shutting down processpool executor {self.title}')
         self._process_executor.shutdown(wait)
@@ -210,6 +215,7 @@ class RemoteWorkerRecord:
 class BundleDetails:
     pickled_code: bytes
     uuid: str
+    fname: str
     worker: Optional[RemoteWorkerRecord]
     username: Optional[str]
     machine: Optional[str]
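
The new fname field exists purely to make log lines greppable by workload: nearly every message below gains a "{uuid}/{fname}:" prefix. A trimmed illustration of that convention (not the real BundleDetails, which carries many more fields):

    from dataclasses import dataclass

    @dataclass
    class MiniBundle:                  # stand-in for BundleDetails
        uuid: str
        fname: str

    def log_prefix(b: MiniBundle) -> str:
        return f'{b.uuid}/{b.fname}'   # e.g. "c0ffee.../fetch_stats"
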
@@ -475,6 +481,7 @@ class RemoteExecutor(BaseExecutor):
                  policy: RemoteWorkerSelectionPolicy) -> None:
         super().__init__()
         self.workers = workers
+        self.policy = policy
         self.worker_count = 0
         for worker in self.workers:
             self.worker_count += worker.count
@@ -482,7 +489,6 @@ class RemoteExecutor(BaseExecutor):
             msg = f"We need somewhere to schedule work; count was {self.worker_count}"
             logger.critical(msg)
             raise Exception(msg)
-        self.policy = policy
         self.policy.register_worker_pool(self.workers)
         self.cv = threading.Condition()
         self._helper_executor = fut.ThreadPoolExecutor(
@@ -492,7 +498,7 @@ class RemoteExecutor(BaseExecutor):
         self.status = RemoteExecutorStatus(self.worker_count)
         self.total_bundles_submitted = 0
         logger.debug(
-            f'Creating remote processpool with {self.worker_count} remote endpoints.'
+            f'Creating remote processpool with {self.worker_count} remote worker threads.'
         )
 
     def is_worker_available(self) -> bool:
@@ -586,6 +592,7 @@ class RemoteExecutor(BaseExecutor):
         bundle.worker = worker
         machine = bundle.machine = worker.machine
         username = bundle.username = worker.username
+        fname = bundle.fname
         self.status.record_acquire_worker(worker, uuid)
         logger.debug(f'Running bundle {uuid} on {worker}...')
 
@@ -595,17 +602,17 @@ class RemoteExecutor(BaseExecutor):
                 return self.post_launch_work(bundle)
             except Exception as e:
                 logger.exception(e)
-                logger.info(f"Bundle {uuid} seems to have failed?!")
+                logger.info(f"{uuid}/{fname}: bundle seems to have failed?!")
                 if bundle.failure_count < config.config['executors_max_bundle_failures']:
                     return self.launch(bundle)
                 else:
-                    logger.info(f"Bundle {uuid} is poison, giving up on it.")
+                    logger.info(f"{uuid}/{fname}: bundle is poison, giving up on it.")
                     return None
 
         # Send input to machine if it's not local.
         if hostname not in machine:
             cmd = f'{RSYNC} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
-            logger.info(f"Copying work to {worker} via {cmd}")
+            logger.info(f"{uuid}/{fname}: Copying work to {worker} via {cmd}")
             run_silently(cmd)
 
         # Do it.
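
The copy step above shells out to rsync through run_silently. A minimal sketch of the same idea using plain subprocess; the RSYNC flags and the helper name here are assumptions, since neither is shown in this diff:

    import subprocess

    RSYNC = 'rsync -q'    # assumed flags; the real constant lives elsewhere in the file

    def copy_code_to_worker(code_file: str, username: str, machine: str) -> None:
        cmd = f'{RSYNC} {code_file} {username}@{machine}:{code_file}'
        subprocess.run(cmd, shell=True, check=True,
                       stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
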
@@ -615,7 +622,7 @@ class RemoteExecutor(BaseExecutor):
                f' --code_file {bundle.code_file} --result_file {bundle.result_file}"')
         p = cmd_in_background(cmd, silent=True)
         bundle.pid = pid = p.pid
-        logger.info(f"Running {cmd} in the background as process {pid}")
+        logger.info(f"{uuid}/{fname}: Start training on {worker} via {cmd} (background pid {pid})")
 
         while True:
             try:
@@ -630,7 +637,7 @@ class RemoteExecutor(BaseExecutor):
                     break
             else:
                 logger.debug(
-                    f"{pid}/{bundle.uuid} has finished its work normally."
+                    f"{uuid}/{fname}: pid {pid} has finished its work normally."
                 )
                 break
 
@@ -638,10 +645,10 @@ class RemoteExecutor(BaseExecutor):
             return self.post_launch_work(bundle)
         except Exception as e:
             logger.exception(e)
-            logger.info(f"Bundle {uuid} seems to have failed?!")
+            logger.info(f"{uuid}: Bundle seems to have failed?!")
             if bundle.failure_count < config.config['executors_max_bundle_failures']:
                 return self.launch(bundle)
-            logger.info(f"Bundle {uuid} is poison, giving up on it.")
+            logger.info(f"{uuid}: Bundle is poison, giving up on it.")
             return None
 
     def post_launch_work(self, bundle: BundleDetails) -> Any:
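
Both failure branches in launch() implement the same bounded-retry idea: relaunch the bundle until executors_max_bundle_failures is reached, then declare it poison and return None. The real code retries by recursing into self.launch; an equivalent iterative sketch with illustrative names:

    MAX_BUNDLE_FAILURES = 3    # stands in for config['executors_max_bundle_failures']

    def run_until_poison(launch, bundle):
        while True:
            try:
                return launch(bundle)
            except Exception:
                bundle.failure_count += 1
                if bundle.failure_count >= MAX_BUNDLE_FAILURES:
                    return None    # poison bundle; give up on it
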
@@ -652,6 +659,8 @@ class RemoteExecutor(BaseExecutor):
             machine = bundle.machine
             result_file = bundle.result_file
             code_file = bundle.code_file
+            fname = bundle.fname
+            uuid = bundle.uuid
 
             # Whether original or backup, if we finished first we must
             # fetch the results if the computation happened on a
@@ -662,7 +671,7 @@ class RemoteExecutor(BaseExecutor):
                 if bundle.hostname not in bundle.machine:
                     cmd = f'{RSYNC} {username}@{machine}:{result_file} {result_file} 2>/dev/null'
                     logger.info(
-                        f"Fetching results from {username}@{machine} via {cmd}"
+                        f"{uuid}/{fname}: Fetching results from {username}@{machine} via {cmd}"
                     )
                     try:
                         run_silently(cmd)
@@ -686,7 +695,7 @@ class RemoteExecutor(BaseExecutor):
         # if one of the backups finished first; it still must read the
         # result from disk.
         if is_original:
-            logger.debug(f"Unpickling {result_file}.")
+            logger.debug(f"{uuid}/{fname}: Unpickling {result_file}.")
             try:
                 with open(f'{result_file}', 'rb') as rb:
                     serialized = rb.read()
@@ -707,7 +716,7 @@ class RemoteExecutor(BaseExecutor):
             if bundle.backup_bundles is not None:
                 for backup in bundle.backup_bundles:
                     logger.debug(
-                        f'Notifying backup {backup.uuid} that it is cancelled'
+                        f'{uuid}/{fname}: Notifying backup {backup.uuid} that it\'s cancelled'
                     )
                     backup.is_cancelled.set()
 
@@ -721,7 +730,7 @@ class RemoteExecutor(BaseExecutor):
             # Tell the original to stop if we finished first.
             if not was_cancelled:
                 logger.debug(
-                    f'Notifying original {bundle.src_bundle.uuid} that it is cancelled'
+                    f'{uuid}/{fname}: Notifying original {bundle.src_bundle.uuid} that it\'s cancelled'
                 )
                 bundle.src_bundle.is_cancelled.set()
 
@@ -730,7 +739,7 @@ class RemoteExecutor(BaseExecutor):
         self.adjust_task_count(-1)
         return result
 
-    def create_original_bundle(self, pickle):
+    def create_original_bundle(self, pickle, fname: str):
         from string_utils import generate_uuid
         uuid = generate_uuid(as_hex=True)
         code_file = f'/tmp/{uuid}.code.bin'
@@ -743,6 +752,7 @@ class RemoteExecutor(BaseExecutor):
         bundle = BundleDetails(
             pickled_code = pickle,
             uuid = uuid,
+            fname = fname,
             worker = None,
             username = None,
             machine = None,
@@ -761,7 +771,7 @@ class RemoteExecutor(BaseExecutor):
             failure_count = 0,
         )
         self.status.record_bundle_details(bundle)
-        logger.debug(f'Created original bundle {uuid}')
+        logger.debug(f'{uuid}/{fname}: Created original bundle')
         return bundle
 
     def create_backup_bundle(self, src_bundle: BundleDetails):
@@ -772,6 +782,7 @@ class RemoteExecutor(BaseExecutor):
         backup_bundle = BundleDetails(
             pickled_code = src_bundle.pickled_code,
             uuid = uuid,
+            fname = src_bundle.fname,
             worker = None,
             username = None,
             machine = None,
@@ -791,7 +802,7 @@ class RemoteExecutor(BaseExecutor):
         )
         src_bundle.backup_bundles.append(backup_bundle)
         self.status.record_bundle_details_already_locked(backup_bundle)
-        logger.debug(f'Created backup bundle {uuid}')
+        logger.debug(f'{uuid}/{src_bundle.fname}: Created backup bundle')
         return backup_bundle
 
     def schedule_backup_for_bundle(self,
@@ -799,7 +810,7 @@ class RemoteExecutor(BaseExecutor):
         assert self.status.lock.locked()
         backup_bundle = self.create_backup_bundle(src_bundle)
         logger.debug(
-            f'Scheduling backup bundle {backup_bundle.uuid} for execution'
+            f'{backup_bundle.uuid}/{backup_bundle.fname}: Scheduling backup for execution...'
         )
         self._helper_executor.submit(self.launch, backup_bundle)
 
@@ -807,15 +818,17 @@ class RemoteExecutor(BaseExecutor):
         # they will move the result_file to this machine and let
         # the original pick them up and unpickle them.
 
+    @overrides
     def submit(self,
                function: Callable,
                *args,
                **kwargs) -> fut.Future:
         pickle = make_cloud_pickle(function, *args, **kwargs)
-        bundle = self.create_original_bundle(pickle)
+        bundle = self.create_original_bundle(pickle, function.__name__)
         self.total_bundles_submitted += 1
         return self._helper_executor.submit(self.launch, bundle)
 
+    @overrides
     def shutdown(self, wait=True) -> None:
         self._helper_executor.shutdown(wait)
         logging.debug(f'Shutting down RemoteExecutor {self.title}')
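
With fname derived from function.__name__ at submit time, callers get the richer log prefix for free. A hypothetical usage (executor construction as elsewhere in this file):

    def count_lines(path: str) -> int:    # stand-in workload
        with open(path) as f:
            return sum(1 for _ in f)

    # future = remote_executor.submit(count_lines, '/var/log/syslog')
    # log lines for that bundle then read e.g.
    #     "9a1b42.../count_lines: Created original bundle"
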
@@ -882,7 +895,7 @@ class DefaultExecutors(object):
                     RemoteWorkerRecord(
                         username = 'scott',
                         machine = 'meerkat.cabin',
-                        weight = 6,
+                        weight = 5,
                         count = 2,
                     ),
                 )
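
The weight tweak above only matters to the selection policy; the policy classes are not part of this diff, but a weight-proportional pick presumably looks something like the sketch below (helper name made up, RemoteWorkerRecord fields as declared earlier in the file):

    import random

    def pick_weighted(workers):
        # Choose with probability proportional to weight, so dropping
        # meerkat.cabin from 6 to 5 shrinks its share of scheduled bundles.
        return random.choices(workers, weights=[w.weight for w in workers], k=1)[0]
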