Adds doctests.
[python_utils.git] / executors.py
index fe8d9d0d8e749b0aa85609d04c3444e35b6e89d3..6ccd7b675c760315d05158d68dad0768cc0f0871 100644
@@ -15,6 +15,7 @@ import subprocess
 import threading
 import time
 from typing import Any, Callable, Dict, List, Optional, Set
+import warnings
 
 import cloudpickle  # type: ignore
 from overrides import overrides
@@ -22,8 +23,8 @@ from overrides import overrides
 from ansi import bg, fg, underline, reset
 import argparse_utils
 import config
-from exec_utils import run_silently, cmd_in_background
 from decorator_utils import singleton
+from exec_utils import run_silently, cmd_in_background, cmd_with_timeout
 import histogram as hist
 
 logger = logging.getLogger(__name__)
@@ -230,14 +231,42 @@ class BundleDetails:
     pid: int
     start_ts: float
     end_ts: float
-    too_slow: bool
-    super_slow: bool
+    slower_than_local_p95: bool
+    slower_than_global_p95: bool
     src_bundle: BundleDetails
     is_cancelled: threading.Event
     was_cancelled: bool
     backup_bundles: Optional[List[BundleDetails]]
     failure_count: int
 
+    def __repr__(self):
+        uuid = self.uuid
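+        # Backup bundles carry a nine-character uuid suffix ('_backup' plus
+        # two more characters); render them as <last six digits>_b<last digit>.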
+        if uuid[-9:-2] == '_backup':
+            uuid = uuid[:-9]
+            suffix = f'{uuid[-6:]}_b{self.uuid[-1:]}'
+        else:
+            suffix = uuid[-6:]
+
+        colorz = [
+            fg('violet red'),
+            fg('red'),
+            fg('orange'),
+            fg('peach orange'),
+            fg('yellow'),
+            fg('marigold yellow'),
+            fg('green yellow'),
+            fg('tea green'),
+            fg('cornflower blue'),
+            fg('turquoise blue'),
+            fg('tropical blue'),
+            fg('lavender purple'),
+            fg('medium purple'),
+        ]
+        c = colorz[int(uuid[-2:], 16) % len(colorz)]
+        fname = self.fname if self.fname is not None else 'nofname'
+        machine = self.machine if self.machine is not None else 'nomachine'
+        return f'{c}{suffix}/{fname}/{machine}{reset()}'
+
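
The palette lookup above is a stable hash: the last two hex digits of the
(suffix-stripped) uuid select one of the 13 colors, so a bundle keeps the same
color across every log line, and a backup shares its original's color.  A tiny
illustration (these uuids are made up):

    palette_size = 13  # i.e. len(colorz) above
    for uuid in ('deadbeef42', 'cafebabe42'):
        # 0x42 == 66 and 66 % 13 == 1: both land in the same palette slot.
        print(int(uuid[-2:], 16) % palette_size)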
 
 class RemoteExecutorStatus:
     def __init__(self, total_worker_count: int) -> None:
@@ -380,32 +409,30 @@ class RemoteExecutorStatus:
                         bundle_uuid,
                         None
                     )
-                    pid = str(details.pid) if details is not None else "TBD"
+                    pid = str(details.pid) if (details and details.pid != 0) else "TBD"
                     if self.start_per_bundle[bundle_uuid] is not None:
                         sec = ts - self.start_per_bundle[bundle_uuid]
-                        ret += f'       (pid={pid}): {bundle_uuid} for {sec:.1f}s so far '
+                        ret += f'       (pid={pid}): {details} for {sec:.1f}s so far '
                     else:
-                        ret += f'       {bundle_uuid} setting up / copying data...'
+                        ret += f'       {details} setting up / copying data...'
                         sec = 0.0
 
                     if qworker is not None:
                         if sec > qworker[1]:
                             ret += f'{bg("red")}>💻p95{reset()} '
-                        elif sec > qworker[0]:
-                            ret += f'{fg("red")}>💻p50{reset()} '
-                    if qall is not None:
-                        if sec > qall[1] * 1.5:
-                            ret += f'{bg("red")}!!!{reset()}'
                             if details is not None:
-                                logger.debug(f'Flagging {details.uuid} for another backup')
-                                details.super_slow = True
-                        elif sec > qall[1]:
+                                details.slower_than_local_p95 = True
+                        else:
+                            if details is not None:
+                                details.slower_than_local_p95 = False
+
+                    if qall is not None:
+                        if sec > qall[1]:
                             ret += f'{bg("red")}>∀p95{reset()} '
                             if details is not None:
-                                logger.debug(f'Flagging {details.uuid} for a backup')
-                                details.too_slow = True
-                        elif sec > qall[0]:
-                            ret += f'{fg("red")}>∀p50{reset()}'
+                                details.slower_than_global_p95 = True
+                        else:
+                            if details is not None:
+                                details.slower_than_global_p95 = False
                     ret += '\n'
         return ret
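
Only the p95 entries (index 1) are consulted after this change; judging by the
removed p50 branches, qworker and qall are (p50, p95) tuples of bundle
runtimes.  A standalone sketch of the flagging rule under that assumption:

    def flag_slowness(sec, qworker, qall):
        # qworker: (p50, p95) for this worker; qall: (p50, p95) over all bundles.
        slower_than_local_p95 = qworker is not None and sec > qworker[1]
        slower_than_global_p95 = qall is not None and sec > qall[1]
        return slower_than_local_p95, slower_than_global_p95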
 
@@ -423,7 +450,6 @@ class RemoteExecutorStatus:
 
 class RemoteWorkerSelectionPolicy(ABC):
     def register_worker_pool(self, workers):
-        random.seed()
         self.workers = workers
 
     @abstractmethod
@@ -439,12 +465,14 @@ class RemoteWorkerSelectionPolicy(ABC):
 
 
 class WeightedRandomRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy):
+    @overrides
     def is_worker_available(self) -> bool:
         for worker in self.workers:
             if worker.count > 0:
                 return True
         return False
 
+    @overrides
     def acquire_worker(
             self,
             machine_to_avoid = None
@@ -463,7 +491,8 @@ class WeightedRandomRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy):
                     worker.count -= 1
                     logger.debug(f'Selected worker {worker}')
                     return worker
-        logger.warning("Couldn't find a worker; go fish.")
+        msg = 'Unexpectedly could not find a worker, retrying...'
+        logger.warning(msg)
         return None
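
Only the tail of the weighted-random policy is visible in this hunk.  A
self-contained sketch consistent with what is shown (the Worker stand-in and
the one-ticket-per-unit-of-weight scheme are illustrative assumptions, not
necessarily the module's exact weighting):

    import random
    from dataclasses import dataclass

    @dataclass
    class Worker:  # stand-in for RemoteWorkerRecord
        machine: str
        weight: int
        count: int  # free slots

    def acquire_worker_sketch(workers, machine_to_avoid=None):
        # One ticket per unit of weight for every worker with a free slot.
        tickets = [
            w
            for w in workers
            if w.count > 0 and w.machine != machine_to_avoid
            for _ in range(w.weight)
        ]
        if not tickets:
            return None  # caller logs the warning above and tries again
        worker = random.choice(tickets)
        worker.count -= 1
        return worker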
 
 
@@ -471,12 +500,14 @@ class RoundRobinRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy):
     def __init__(self) -> None:
         self.index = 0
 
+    @overrides
     def is_worker_available(self) -> bool:
         for worker in self.workers:
             if worker.count > 0:
                 return True
         return False
 
+    @overrides
     def acquire_worker(
             self,
             machine_to_avoid: str = None
@@ -496,7 +527,8 @@ class RoundRobinRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy):
             if x >= len(self.workers):
                 x = 0
             if x == self.index:
-                logger.warning("Couldn't find a worker; go fish.")
+                msg = 'Unexpectedly could not find a worker, retrying...'
+                logger.warning(msg)
                 return None
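
The cursor scan above wraps around once and gives up after a full lap.
Restated as a standalone function (machine_to_avoid handling omitted for
brevity):

    def acquire_worker_round_robin(workers, index):
        # Returns (worker, new_index), or (None, index) after one full lap.
        x = index
        while True:
            worker = workers[x]
            x = (x + 1) % len(workers)
            if worker.count > 0:
                worker.count -= 1
                return worker, x
            if x == index:
                return None, x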
 
 
@@ -523,6 +555,8 @@ class RemoteExecutor(BaseExecutor):
         )
         self.status = RemoteExecutorStatus(self.worker_count)
         self.total_bundles_submitted = 0
+        self.backup_lock = threading.Lock()
+        self.last_backup = None
 
     def is_worker_available(self) -> bool:
         return self.policy.is_worker_available()
@@ -560,37 +594,84 @@ class RemoteExecutor(BaseExecutor):
 
             # Look for bundles to reschedule.
             num_done = len(self.status.finished_bundle_timings)
-            if num_done > 7 or (num_done > 5 and self.is_worker_available()):
-                for worker, bundle_uuids in self.status.in_flight_bundles_by_worker.items():
-                    for uuid in bundle_uuids:
-                        bundle = self.status.bundle_details_by_uuid.get(uuid, None)
-                        if (
-                                bundle is not None and
-                                bundle.too_slow and
-                                bundle.src_bundle is None and
-                                config.config['executors_schedule_remote_backups']
-                        ):
-                            self.consider_backup_for_bundle(bundle)
-
-    def consider_backup_for_bundle(self, bundle: BundleDetails) -> None:
-        assert self.status.lock.locked()
-        if (
-            bundle.too_slow
-            and len(bundle.backup_bundles) == 0       # one backup per
-        ):
-            msg = f"*** Rescheduling {bundle.pid}/{bundle.uuid} ***"
-            logger.debug(msg)
-            self.schedule_backup_for_bundle(bundle)
-            return
-        elif (
-                bundle.super_slow
-                and len(bundle.backup_bundles) < 2    # two backups in dire situations
-                and self.status.total_idle() > 4
-        ):
-            msg = f"*** Rescheduling {bundle.pid}/{bundle.uuid} ***"
-            logger.debug(msg)
-            self.schedule_backup_for_bundle(bundle)
-            return
+            num_idle_workers = self.worker_count - self.task_count
+            now = time.time()
+            if (
+                    config.config['executors_schedule_remote_backups']
+                    and num_done > 2
+                    and num_idle_workers > 1
+                    and (self.last_backup is None or (now - self.last_backup > 1.0))
+                    and self.backup_lock.acquire(blocking=False)
+            ):
+                try:
+                    assert self.backup_lock.locked()
+
+                    bundle_to_backup = None
+                    best_score = None
+                    for worker, bundle_uuids in self.status.in_flight_bundles_by_worker.items():
+                        # Prefer to schedule backups of bundles on slower machines.
+                        base_score = 0
+                        for record in self.workers:
+                            if worker.machine == record.machine:
+                                base_score = int(200.0 / record.weight)
+                                break
+
+                        for uuid in bundle_uuids:
+                            bundle = self.status.bundle_details_by_uuid.get(uuid, None)
+                            if (
+                                    bundle is not None
+                                    and bundle.src_bundle is None
+                                    and bundle.backup_bundles is not None
+                            ):
+                                score = base_score
+
+                                # Schedule backups of bundles running longer; especially those
+                                # that are unexpectedly slow.
+                                start_ts = self.status.start_per_bundle[uuid]
+                                if start_ts is not None:
+                                    runtime = now - start_ts
+                                    score += runtime
+                                    logger.debug(f'score[{bundle}] => {score}  # latency boost')
+
+                                    if bundle.slower_than_local_p95:
+                                        score += runtime / 2
+                                        logger.debug(f'score[{bundle}] => {score}  # >worker p95')
+
+                                    if bundle.slower_than_global_p95:
+                                        score += runtime / 2
+                                        logger.debug(f'score[{bundle}] => {score}  # >global p95')
+
+                                # Prefer backups of bundles that don't have backups already.
+                                backup_count = len(bundle.backup_bundles)
+                                if backup_count == 0:
+                                    score *= 2
+                                elif backup_count == 1:
+                                    score /= 2
+                                elif backup_count == 2:
+                                    score /= 8
+                                else:
+                                    score = 0
+                                logger.debug(f'score[{bundle}] => {score}  # {backup_count} dup backup factor')
+
+                                if (
+                                        score != 0
+                                        and (best_score is None or score > best_score)
+                                ):
+                                    bundle_to_backup = bundle
+                                    assert bundle is not None
+                                    assert bundle.backup_bundles is not None
+                                    assert bundle.src_bundle is None
+                                    best_score = score
+
+                    if bundle_to_backup is not None:
+                        self.last_backup = now
+                        logger.info(f'=====> SCHEDULING BACKUP {bundle_to_backup} (score={best_score:.1f}) <=====')
+                        self.schedule_backup_for_bundle(bundle_to_backup)
+                finally:
+                    self.backup_lock.release()
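
The scoring in the loop above, distilled into one function (runtime in
seconds, w the worker's weight, n the number of existing backups); this is a
restatement of the arithmetic above, not new behavior:

    def backup_score(w, runtime, n, slower_local_p95, slower_global_p95):
        score = int(200.0 / w)    # lower-weight (slower) machines score higher
        score += runtime          # longer-running bundles score higher
        if slower_local_p95:
            score += runtime / 2  # beyond this worker's p95
        if slower_global_p95:
            score += runtime / 2  # beyond the global p95
        # Strongly prefer bundles that have no backup yet.
        factor = {0: 2.0, 1: 0.5, 2: 0.125}.get(n, 0.0)
        return score * factor

    # e.g. 60s in flight on a weight-3 machine, over both p95s, no backups:
    # (66 + 60 + 30 + 30) * 2 == 372

The highest-scoring bundle gets the backup, at most one backup is scheduled
per second, and only when more than two bundles have finished (so the
percentiles mean something) and at least two workers are idle.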
 
     def check_if_cancelled(self, bundle: BundleDetails) -> bool:
         with self.status.lock:
@@ -619,9 +700,9 @@ class RemoteExecutor(BaseExecutor):
         bundle.worker = worker
         machine = bundle.machine = worker.machine
         username = bundle.username = worker.username
-        fname = bundle.fname
+
         self.status.record_acquire_worker(worker, uuid)
-        logger.debug(f'{uuid}/{fname}: Running bundle on {worker}...')
+        logger.debug(f'{bundle}: Running bundle on {worker}...')
 
         # Before we do any work, make sure the bundle is still viable.
         if self.check_if_cancelled(bundle):
@@ -630,7 +711,7 @@ class RemoteExecutor(BaseExecutor):
             except Exception as e:
                 logger.exception(e)
                 logger.error(
-                    f'{uuid}/{fname}: bundle says it\'s cancelled upfront but no results?!'
+                    f'{bundle}: bundle says it\'s cancelled upfront but no results?!'
                 )
                 assert bundle.worker is not None
                 self.status.record_release_worker(
@@ -662,15 +743,11 @@ class RemoteExecutor(BaseExecutor):
             try:
                 cmd = f'{RSYNC} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
                 start_ts = time.time()
-                logger.info(f"{uuid}/{fname}: Copying work to {worker} via {cmd}.")
+                logger.info(f"{bundle}: Copying work to {worker} via {cmd}.")
                 run_silently(cmd)
                 xfer_latency = time.time() - start_ts
-                logger.info(f"{uuid}/{fname}: Copying done in {xfer_latency:.1f}s.")
+                logger.info(f"{bundle}: Copy to {worker} done in {xfer_latency:.1f}s.")
             except Exception as e:
-                logger.exception(e)
-                logger.error(
-                    f'{uuid}/{fname}: failed to send instructions to worker machine?!?'
-                )
                 assert bundle.worker is not None
                 self.status.record_release_worker(
                     bundle.worker,
@@ -682,31 +759,39 @@ class RemoteExecutor(BaseExecutor):
                 if is_original:
                     # Weird.  We tried to copy the code to the worker and it failed...
                     # And we're the original bundle.  We have to retry.
+                    logger.exception(e)
+                    logger.error(
+                        f'{bundle}: Failed to send instructions to the worker machine?! ' +
+                        'This is not expected; we\'re the original bundle so this shouldn\'t ' +
+                        'be a race condition.  Attempting an emergency retry...'
+                    )
                     return self.emergency_retry_nasty_bundle(bundle)
                 else:
                     # This is actually expected; we're a backup.
                     # There's a race condition where someone else
                     # already finished the work and removed the source
                     # code file before we could copy it.  No biggie.
+                    msg = f'{bundle}: Failed to send instructions to the worker machine... '
+                    msg += 'We\'re a backup and this may be caused by the original (or some '
+                    msg += 'other backup) already finishing this work.  Ignoring this.'
+                    logger.warning(msg)
                     return None
 
         # Kick off the work.  Note that if this fails we let
         # wait_for_process deal with it.
         self.status.record_processing_began(uuid)
         cmd = (f'{SSH} {bundle.username}@{bundle.machine} '
-               f'"source py39-venv/bin/activate &&'
+               f'"source py38-venv/bin/activate &&'
                f' /home/scott/lib/python_modules/remote_worker.py'
                f' --code_file {bundle.code_file} --result_file {bundle.result_file}"')
-        logger.debug(f'{uuid}/{fname}: Executing {cmd} in the background to kick off work...')
+        logger.debug(f'{bundle}: Executing {cmd} in the background to kick off work...')
         p = cmd_in_background(cmd, silent=True)
         bundle.pid = pid = p.pid
-        logger.debug(f'{uuid}/{fname}: Local ssh process pid={pid}; remote worker is {machine}.')
+        logger.debug(f'{bundle}: Local ssh process pid={pid}; remote worker is {machine}.')
         return self.wait_for_process(p, bundle, 0)
 
     def wait_for_process(self, p: subprocess.Popen, bundle: BundleDetails, depth: int) -> Any:
-        uuid = bundle.uuid
         machine = bundle.machine
-        fname = bundle.fname
         pid = p.pid
         if depth > 3:
             logger.error(
@@ -732,12 +817,12 @@ class RemoteExecutor(BaseExecutor):
                 self.heartbeat()
                 if self.check_if_cancelled(bundle):
                     logger.info(
-                        f'{uuid}/{fname}: another worker finished bundle, checking it out...'
+                        f'{bundle}: another worker finished bundle, checking it out...'
                     )
                     break
             else:
                 logger.info(
-                    f"{uuid}/{fname}: pid {pid} ({machine}) our ssh finished, checking it out..."
+                    f"{bundle}: pid {pid} ({machine}) our ssh finished, checking it out..."
                 )
                 p = None
                 break
@@ -759,11 +844,10 @@ class RemoteExecutor(BaseExecutor):
         # Otherwise, time for an emergency reschedule.
         except Exception as e:
             logger.exception(e)
-            logger.error(f'{uuid}/{fname}: Something unexpected just happened...')
+            logger.error(f'{bundle}: Something unexpected just happened...')
             if p is not None:
-                logger.warning(
-                    f"{uuid}/{fname}: Failed to wrap up \"done\" bundle, re-waiting on active ssh."
-                )
+                msg = f"{bundle}: Failed to wrap up \"done\" bundle, re-waiting on active ssh."
+                logger.warning(msg)
                 return self.wait_for_process(p, bundle, depth + 1)
             else:
                 self.status.record_release_worker(
@@ -783,8 +867,6 @@ class RemoteExecutor(BaseExecutor):
             machine = bundle.machine
             result_file = bundle.result_file
             code_file = bundle.code_file
-            fname = bundle.fname
-            uuid = bundle.uuid
 
             # Whether original or backup, if we finished first we must
             # fetch the results if the computation happened on a
@@ -795,7 +877,7 @@ class RemoteExecutor(BaseExecutor):
                 if bundle.hostname not in bundle.machine:
                     cmd = f'{RSYNC} {username}@{machine}:{result_file} {result_file} 2>/dev/null'
                     logger.info(
-                        f"{uuid}/{fname}: Fetching results from {username}@{machine} via {cmd}"
+                        f"{bundle}: Fetching results from {username}@{machine} via {cmd}"
                     )
 
                     # If either of these throw they are handled in
@@ -813,9 +895,9 @@ class RemoteExecutor(BaseExecutor):
         # if one of the backups finished first; it still must read the
         # result from disk.
         if is_original:
-            logger.debug(f"{uuid}/{fname}: Unpickling {result_file}.")
+            logger.debug(f"{bundle}: Unpickling {result_file}.")
             try:
-                with open(f'{result_file}', 'rb') as rb:
+                with open(result_file, 'rb') as rb:
                     serialized = rb.read()
                 result = cloudpickle.loads(serialized)
             except Exception as e:
@@ -845,7 +927,7 @@ class RemoteExecutor(BaseExecutor):
             if bundle.backup_bundles is not None:
                 for backup in bundle.backup_bundles:
                     logger.debug(
-                        f'{uuid}/{fname}: Notifying backup {backup.uuid} that it\'s cancelled'
+                        f'{bundle}: Notifying backup {backup.uuid} that it\'s cancelled'
                     )
                     backup.is_cancelled.set()
 
@@ -860,7 +942,7 @@ class RemoteExecutor(BaseExecutor):
             # Tell the original to stop if we finished first.
             if not was_cancelled:
                 logger.debug(
-                    f'{uuid}/{fname}: Notifying original {bundle.src_bundle.uuid} we beat them to it.'
+                    f'{bundle}: Notifying original {bundle.src_bundle.uuid} we beat them to it.'
                 )
                 bundle.src_bundle.is_cancelled.set()
 
@@ -876,7 +958,7 @@ class RemoteExecutor(BaseExecutor):
 
     def create_original_bundle(self, pickle, fname: str):
         from string_utils import generate_uuid
-        uuid = generate_uuid(as_hex=True)
+        uuid = generate_uuid(omit_dashes=True)
         code_file = f'/tmp/{uuid}.code.bin'
         result_file = f'/tmp/{uuid}.result.bin'
 
@@ -897,8 +979,8 @@ class RemoteExecutor(BaseExecutor):
             pid = 0,
             start_ts = time.time(),
             end_ts = 0.0,
-            too_slow = False,
-            super_slow = False,
+            slower_than_local_p95 = False,
+            slower_than_global_p95 = False,
             src_bundle = None,
             is_cancelled = threading.Event(),
             was_cancelled = False,
@@ -906,7 +988,7 @@ class RemoteExecutor(BaseExecutor):
             failure_count = 0,
         )
         self.status.record_bundle_details(bundle)
-        logger.debug(f'{uuid}/{fname}: Created original bundle')
+        logger.debug(f'{bundle}: Created an original bundle')
         return bundle
 
     def create_backup_bundle(self, src_bundle: BundleDetails):
@@ -927,8 +1009,8 @@ class RemoteExecutor(BaseExecutor):
             pid = 0,
             start_ts = time.time(),
             end_ts = 0.0,
-            too_slow = False,
-            super_slow = False,
+            slower_than_local_p95 = False,
+            slower_than_global_p95 = False,
             src_bundle = src_bundle,
             is_cancelled = threading.Event(),
             was_cancelled = False,
@@ -937,12 +1019,13 @@ class RemoteExecutor(BaseExecutor):
         )
         src_bundle.backup_bundles.append(backup_bundle)
         self.status.record_bundle_details_already_locked(backup_bundle)
-        logger.debug(f'{uuid}/{src_bundle.fname}: Created backup bundle')
+        logger.debug(f'{backup_bundle}: Created a backup bundle')
         return backup_bundle
 
     def schedule_backup_for_bundle(self,
                                    src_bundle: BundleDetails):
         assert self.status.lock.locked()
+        assert src_bundle is not None
         backup_bundle = self.create_backup_bundle(src_bundle)
         logger.debug(
             f'{backup_bundle.uuid}/{backup_bundle.fname}: Scheduling backup for execution...'
@@ -954,7 +1037,6 @@ class RemoteExecutor(BaseExecutor):
         # the original pick them up and unpickle them.
 
     def emergency_retry_nasty_bundle(self, bundle: BundleDetails) -> fut.Future:
-        uuid = bundle.uuid
         is_original = bundle.src_bundle is None
         bundle.worker = None
         avoid_last_machine = bundle.machine
@@ -968,19 +1050,19 @@ class RemoteExecutor(BaseExecutor):
 
         if bundle.failure_count > retry_limit:
             logger.error(
-                f'{uuid}: Tried this bundle too many times already ({retry_limit}x); giving up.'
+                f'{bundle}: Tried this bundle too many times already ({retry_limit}x); giving up.'
             )
             if is_original:
                 raise RemoteExecutorException(
-                    f'{uuid}: This bundle can\'t be completed despite several backups and retries'
+                    f'{bundle}: This bundle can\'t be completed despite several backups and retries'
                 )
             else:
-                logger.error(f'{uuid}: At least it\'s only a backup; better luck with the others.')
+                logger.error(f'{bundle}: At least it\'s only a backup; better luck with the others.')
             return None
         else:
-            logger.warning(
-                f'>>> Emergency rescheduling {uuid} because of unexected errors (wtf?!) <<<'
-            )
+            msg = f'>>> Emergency rescheduling {bundle} because of unexpected errors (wtf?!) <<<'
+            logger.warning(msg)
+            warnings.warn(msg)
             return self.launch(bundle, avoid_last_machine)
 
     @overrides
@@ -1009,12 +1091,14 @@ class DefaultExecutors(object):
 
     def ping(self, host) -> bool:
         logger.debug(f'RUN> ping -c 1 {host}')
-        command = ['ping', '-c', '1', host]
-        return subprocess.call(
-            command,
-            stdout=subprocess.DEVNULL,
-            stderr=subprocess.DEVNULL,
-        ) == 0
+        try:
+            x = cmd_with_timeout(
+                f'ping -c 1 {host} >/dev/null 2>/dev/null',
+                timeout_seconds=1.0
+            )
+            return x == 0
+        except Exception:
+            return False
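
The old subprocess.call probe could block far longer than a second on an
unreachable host; cmd_with_timeout bounds it at 1.0s, and any failure
(non-zero exit, timeout, exception) now reads as "down".  For comparison, a
standard-library-only sketch with the same bounded behavior:

    import subprocess

    def ping(host: str, timeout: float = 1.0) -> bool:
        # One ICMP probe; non-zero exit, timeout, or any error means "down".
        try:
            return subprocess.run(
                ['ping', '-c', '1', host],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
                timeout=timeout,
            ).returncode == 0
        except Exception:
            return False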
 
     def thread_pool(self) -> ThreadExecutor:
         if self.thread_executor is None:
@@ -1036,28 +1120,8 @@ class DefaultExecutors(object):
                     RemoteWorkerRecord(
                         username = 'scott',
                         machine = 'cheetah.house',
-                        weight = 12,
-                        count = 4,
-                    ),
-                )
-            if self.ping('video.house'):
-                logger.info('Found video.house')
-                pool.append(
-                    RemoteWorkerRecord(
-                        username = 'scott',
-                        machine = 'video.house',
-                        weight = 1,
-                        count = 4,
-                    ),
-                )
-            if self.ping('wannabe.house'):
-                logger.info('Found wannabe.house')
-                pool.append(
-                    RemoteWorkerRecord(
-                        username = 'scott',
-                        machine = 'wannabe.house',
-                        weight = 2,
-                        count = 4,
+                        weight = 25,
+                        count = 6,
                     ),
                 )
             if self.ping('meerkat.cabin'):
@@ -1070,34 +1134,44 @@ class DefaultExecutors(object):
                         count = 2,
                     ),
                 )
-            if self.ping('backup.house'):
-                logger.info('Found backup.house')
+            # if self.ping('kiosk.house'):
+            #     logger.info('Found kiosk.house')
+            #     pool.append(
+            #         RemoteWorkerRecord(
+            #             username = 'pi',
+            #             machine = 'kiosk.house',
+            #             weight = 1,
+            #             count = 2,
+            #         ),
+            #     )
+            if self.ping('hero.house'):
+                logger.info('Found hero.house')
                 pool.append(
                     RemoteWorkerRecord(
                         username = 'scott',
-                        machine = 'backup.house',
-                        weight = 1,
-                        count = 4,
+                        machine = 'hero.house',
+                        weight = 30,
+                        count = 10,
                     ),
                 )
-            if self.ping('kiosk.house'):
-                logger.info('Found kiosk.house')
+            if self.ping('puma.cabin'):
+                logger.info('Found puma.cabin')
                 pool.append(
                     RemoteWorkerRecord(
-                        username = 'pi',
-                        machine = 'kiosk.house',
-                        weight = 1,
-                        count = 2,
+                        username = 'scott',
+                        machine = 'puma.cabin',
+                        weight = 25,
+                        count = 6,
                     ),
                 )
-            if self.ping('puma.cabin'):
-                logger.info('Found puma.cabin')
+            if self.ping('backup.house'):
+                logger.info('Found backup.house')
                 pool.append(
                     RemoteWorkerRecord(
                         username = 'scott',
-                        machine = 'puma.cabin',
-                        weight = 12,
-                        count = 4,
+                        machine = 'backup.house',
+                        weight = 3,
+                        count = 2,
                     ),
                 )