From: Scott
Date: Thu, 27 Jan 2022 05:35:20 +0000 (-0800)
Subject: Ahem. Still running black?
X-Git-Url: https://wannabe.guru.org/gitweb/?a=commitdiff_plain;h=e6f32fdd9b373dfcd100c7accb41f57d83c2f0a1;p=python_utils.git

Ahem. Still running black?
---
diff --git a/acl.py b/acl.py
index adec643..0692a04 100644
--- a/acl.py
+++ b/acl.py
@@ -28,9 +28,7 @@ class Order(enum.Enum):
 class SimpleACL(ABC):
     """A simple Access Control List interface."""

-    def __init__(
-        self, *, order_to_check_allow_deny: Order, default_answer: bool
-    ):
+    def __init__(self, *, order_to_check_allow_deny: Order, default_answer: bool):
         if order_to_check_allow_deny not in (
             Order.ALLOW_DENY,
             Order.DENY_ALLOW,
diff --git a/ansi.py b/ansi.py
index 5fde4af..9e31b81 100755
--- a/ansi.py
+++ b/ansi.py
@@ -1773,9 +1773,7 @@ def fg(
         green = 0
     if blue is None:
         blue = 0
-    if (
-        is_16color(red) and is_16color(green) and is_16color(blue)
-    ) or force_16color:
+    if (is_16color(red) and is_16color(green) and is_16color(blue)) or force_16color:
         logger.debug("Using 16-color strategy")
         return fg_16color(red, green, blue)
     if (
@@ -1878,9 +1876,7 @@ def bg(
         green = 0
     if blue is None:
         blue = 0
-    if (
-        is_16color(red) and is_16color(green) and is_16color(blue)
-    ) or force_16color:
+    if (is_16color(red) and is_16color(green) and is_16color(blue)) or force_16color:
         logger.debug("Using 16-color strategy")
         return bg_16color(red, green, blue)
     if (
diff --git a/argparse_utils.py b/argparse_utils.py
index 8c254ae..43536e4 100644
--- a/argparse_utils.py
+++ b/argparse_utils.py
@@ -16,9 +16,7 @@ logger = logging.getLogger(__name__)


 class ActionNoYes(argparse.Action):
-    def __init__(
-        self, option_strings, dest, default=None, required=False, help=None
-    ):
+    def __init__(self, option_strings, dest, default=None, required=False, help=None):
         if default is None:
             msg = 'You must provide a default with Yes/No action'
             logger.critical(msg)
@@ -47,9 +45,7 @@ class ActionNoYes(argparse.Action):

     @overrides
     def __call__(self, parser, namespace, values, option_strings=None):
-        if option_strings.startswith('--no-') or option_strings.startswith(
-            '--no_'
-        ):
+        if option_strings.startswith('--no-') or option_strings.startswith('--no_'):
             setattr(namespace, self.dest, False)
         else:
             setattr(namespace, self.dest, True)
diff --git a/arper.py b/arper.py
index 39aecf9..29a8a12 100644
--- a/arper.py
+++ b/arper.py
@@ -131,10 +131,7 @@ class Arper(persistent.Persistent):
                 mac = mac.lower()
                 ip = ip.strip()
                 cached_state[mac] = ip
-            if (
-                len(cached_state)
-                > config.config['arper_min_entries_to_be_valid']
-            ):
+            if len(cached_state) > config.config['arper_min_entries_to_be_valid']:
                 return cls(cached_state)
             else:
                 msg = f'{cache_file} is invalid: only {len(cached_state)} entries. Deleting it.'
@@ -147,12 +144,8 @@ class Arper(persistent.Persistent):
     @overrides
     def save(self) -> bool:
         if len(self.state) > config.config['arper_min_entries_to_be_valid']:
-            logger.debug(
-                f'Persisting state to {config.config["arper_cache_location"]}'
-            )
-            with file_utils.FileWriter(
-                config.config['arper_cache_location']
-            ) as wf:
+            logger.debug(f'Persisting state to {config.config["arper_cache_location"]}')
+            with file_utils.FileWriter(config.config['arper_cache_location']) as wf:
                 for (mac, ip) in self.state.items():
                     mac = mac.lower()
                     print(f'{mac}, {ip}', file=wf)
diff --git a/base_presence.py b/base_presence.py
index f774dbc..612193e 100755
--- a/base_presence.py
+++ b/base_presence.py
@@ -85,9 +85,7 @@ class PresenceDetection(object):
         delta = now - self.last_update
         if (
             delta.total_seconds()
-            > config.config[
-                'presence_tolerable_staleness_seconds'
-            ].total_seconds()
+            > config.config['presence_tolerable_staleness_seconds'].total_seconds()
         ):
             logger.debug(
                 f"It's been {delta.total_seconds()}s since last update; refreshing now."
@@ -146,9 +144,7 @@ class PresenceDetection(object):
             warnings.warn(msg, stacklevel=2)
             self.dark_locations.add(Location.HOUSE)

-    def read_persisted_macs_file(
-        self, filename: str, location: Location
-    ) -> None:
+    def read_persisted_macs_file(self, filename: str, location: Location) -> None:
         if location is Location.UNKNOWN:
             return
         with open(filename, "r") as rf:
@@ -177,9 +173,9 @@ class PresenceDetection(object):
                 logger.exception(e)
                 continue
             mac = mac.strip()
-            (self.location_ts_by_mac[location])[
-                mac
-            ] = datetime.datetime.fromtimestamp(int(ts.strip()))
+            (self.location_ts_by_mac[location])[mac] = datetime.datetime.fromtimestamp(
+                int(ts.strip())
+            )
             ip_name = ip_name.strip()
             match = re.match(r"(\d+\.\d+\.\d+\.\d+) +\(([^\)]+)\)", ip_name)
             if match is not None:
@@ -192,9 +188,7 @@ class PresenceDetection(object):
     def is_anyone_in_location_now(self, location: Location) -> bool:
         self.maybe_update()
         if location in self.dark_locations:
-            raise Exception(
-                f"Can't see {location} right now; answer undefined."
-            )
+            raise Exception(f"Can't see {location} right now; answer undefined.")
         for person in Person:
             if person is not None:
                 loc = self.where_is_person_now(person)
@@ -207,7 +201,9 @@ class PresenceDetection(object):
     def where_is_person_now(self, name: Person) -> Location:
         self.maybe_update()
         if len(self.dark_locations) > 0:
-            msg = f"Can't see {self.dark_locations} right now; answer confidence impacted"
+            msg = (
+                f"Can't see {self.dark_locations} right now; answer confidence impacted"
+            )
             logger.warning(msg)
             warnings.warn(msg, stacklevel=2)
         logger.debug(f'Looking for {name}...')
@@ -227,15 +223,11 @@ class PresenceDetection(object):
             if mac not in self.names_by_mac:
                 continue
             mac_name = self.names_by_mac[mac]
-            logger.debug(
-                f'Looking for {name}... check for mac {mac} ({mac_name})'
-            )
+            logger.debug(f'Looking for {name}... check for mac {mac} ({mac_name})')
             for location in self.location_ts_by_mac:
                 if mac in self.location_ts_by_mac[location]:
                     ts = (self.location_ts_by_mac[location])[mac]
-                    logger.debug(
-                        f'Seen {mac} ({mac_name}) at {location} since {ts}'
-                    )
+                    logger.debug(f'Seen {mac} ({mac_name}) at {location} since {ts}')
                     tiebreaks[location] = ts
         (
@@ -246,9 +238,7 @@ class PresenceDetection(object):
                 v = votes.get(most_recent_location, 0)
                 votes[most_recent_location] = v + bonus
                 logger.debug(f'{name}: {location} gets {bonus} votes.')
-                credit = int(
-                    credit * 0.2
-                )  # Note: list most important devices first
+                credit = int(credit * 0.2)  # Note: list most important devices first
                 if credit <= 0:
                     credit = 1
         if len(votes) > 0:
diff --git a/bootstrap.py b/bootstrap.py
index c3b70db..44a16fb 100644
--- a/bootstrap.py
+++ b/bootstrap.py
@@ -121,9 +121,7 @@ class ImportInterceptor(object):
             loading_module = self.module_by_filename_cache[filename]
         else:
             self.repopulate_modules_by_filename()
-            loading_module = self.module_by_filename_cache.get(
-                filename, 'unknown'
-            )
+            loading_module = self.module_by_filename_cache.get(filename, 'unknown')
         path = self.tree_node_by_module.get(loading_module, [])
         path.extend([loaded_module])
@@ -264,9 +262,7 @@ def initialize(entry_point):

         with stopwatch.Timer() as t:
             ret = entry_point(*args, **kwargs)
-        logger.debug(
-            f'{entry_point.__name__} (program entry point) returned {ret}.'
-        )
+        logger.debug(f'{entry_point.__name__} (program entry point) returned {ret}.')

         if config.config['dump_all_objects']:
             dump_all_objects()
diff --git a/camera_utils.py b/camera_utils.py
index 03ac621..d2c50dd 100644
--- a/camera_utils.py
+++ b/camera_utils.py
@@ -74,9 +74,7 @@ def fetch_camera_image_from_video_server(
     response = requests.get(url, stream=False, timeout=10.0)
     if response.ok:
         raw = response.content
-        logger.debug(
-            f'Read {len(response.content)} byte image from HTTP server'
-        )
+        logger.debug(f'Read {len(response.content)} byte image from HTTP server')
         tmp = np.frombuffer(raw, dtype="uint8")
         logger.debug(
             f'Translated raw content into {tmp.shape} {type(tmp)} with element type {type(tmp[0])}.'
@@ -172,9 +170,7 @@ def _fetch_camera_image(
             camera_name, width=width, quality=quality
         )
         if raw is None:
-            logger.debug(
-                "Reading from video server failed; trying direct RTSP stream"
-            )
+            logger.debug("Reading from video server failed; trying direct RTSP stream")
             raw = fetch_camera_image_from_rtsp_stream(camera_name, width=width)
         if raw is not None and len(raw) > 0:
             tmp = np.frombuffer(raw, dtype="uint8")
@@ -185,9 +181,7 @@ def _fetch_camera_image(
                 jpg=jpg,
                 hsv=hsv,
             )
-    msg = (
-        "Failed to retieve image from both video server and direct RTSP stream"
-    )
+    msg = "Failed to retieve image from both video server and direct RTSP stream"
     logger.warning(msg)
     warnings.warn(msg, stacklevel=2)
     return RawJpgHsv(None, None, None)
diff --git a/config.py b/config.py
index a608cf5..0edb169 100644
--- a/config.py
+++ b/config.py
@@ -239,9 +239,7 @@ def parse(entry_module: Optional[str]) -> Dict[str, Any]:

     if loadfile is not None:
         if saw_other_args:
-            msg = (
-                f'Augmenting commandline arguments with those from {loadfile}.'
-            )
+            msg = f'Augmenting commandline arguments with those from {loadfile}.'
             print(msg, file=sys.stderr)
             saved_messages.append(msg)
         if not os.path.exists(loadfile):
@@ -252,9 +250,7 @@ def parse(entry_module: Optional[str]) -> Dict[str, Any]:
             sys.exit(-1)
         with open(loadfile, 'r') as rf:
             newargs = rf.readlines()
-        newargs = [
-            arg.strip('\n') for arg in newargs if 'config_savefile' not in arg
-        ]
+        newargs = [arg.strip('\n') for arg in newargs if 'config_savefile' not in arg]
         sys.argv += newargs

     # Parse (possibly augmented, possibly completely overwritten)
diff --git a/conversion_utils.py b/conversion_utils.py
index 4326840..684edc0 100644
--- a/conversion_utils.py
+++ b/conversion_utils.py
@@ -86,9 +86,7 @@ conversion_catalog = {
         lambda c: c * 1.8 + 32.0,
         "°F",
     ),
-    "Celsius": Converter(
-        "Celsius", "temperature", lambda c: c, lambda c: c, "°C"
-    ),
+    "Celsius": Converter("Celsius", "temperature", lambda c: c, lambda c: c, "°C"),
     "Kelvin": Converter(
         "Kelvin",
         "temperature",
@@ -109,9 +107,7 @@ def convert(magnitude: Number, from_thing: str, to_thing: str) -> float:
     return _convert(magnitude, src, dst)


-def _convert(
-    magnitude: Number, from_unit: Converter, to_unit: Converter
-) -> float:
+def _convert(magnitude: Number, from_unit: Converter, to_unit: Converter) -> float:
     canonical = from_unit.to_canonical(magnitude)
     converted = to_unit.from_canonical(canonical)
     return float(converted)
diff --git a/datetime_utils.py b/datetime_utils.py
index 9794720..3565936 100644
--- a/datetime_utils.py
+++ b/datetime_utils.py
@@ -34,9 +34,7 @@ def is_timezone_naive(dt: datetime.datetime) -> bool:
     return not is_timezone_aware(dt)


-def replace_timezone(
-    dt: datetime.datetime, tz: datetime.tzinfo
-) -> datetime.datetime:
+def replace_timezone(dt: datetime.datetime, tz: datetime.tzinfo) -> datetime.datetime:
     """
     Replaces the timezone on a datetime object directly (leaving
     the year, month, day, hour, minute, second, micro, etc... alone).
@@ -66,9 +64,7 @@ def replace_timezone(
     )


-def replace_time_timezone(
-    t: datetime.time, tz: datetime.tzinfo
-) -> datetime.time:
+def replace_time_timezone(t: datetime.time, tz: datetime.tzinfo) -> datetime.time:
     """
     Replaces the timezone on a datetime.time directly without performing
     any translation.
@@ -85,9 +81,7 @@ def replace_time_timezone(
     return t.replace(tzinfo=tz)


-def translate_timezone(
-    dt: datetime.datetime, tz: datetime.tzinfo
-) -> datetime.datetime:
+def translate_timezone(dt: datetime.datetime, tz: datetime.tzinfo) -> datetime.datetime:
     """
     Translates dt into a different timezone by adjusting the year, month,
     day, hour, minute, second, micro, etc... appropriately.  The returned
diff --git a/decorator_utils.py b/decorator_utils.py
index daae64e..1ecbce3 100644
--- a/decorator_utils.py
+++ b/decorator_utils.py
@@ -80,9 +80,7 @@ def invocation_logged(func: Callable) -> Callable:
     return wrapper_invocation_logged


-def rate_limited(
-    n_calls: int, *, per_period_in_seconds: float = 1.0
-) -> Callable:
+def rate_limited(n_calls: int, *, per_period_in_seconds: float = 1.0) -> Callable:
     """Limit invocation of a wrapped function to n calls per period.
     Thread safe.  In testing this was relatively fair with multiple
     threads using it though that hasn't been measured.
@@ -220,9 +218,7 @@ def debug_count_calls(func: Callable) -> Callable:
     @functools.wraps(func)
     def wrapper_debug_count_calls(*args, **kwargs):
         wrapper_debug_count_calls.num_calls += 1
-        msg = (
-            f"Call #{wrapper_debug_count_calls.num_calls} of {func.__name__!r}"
-        )
+        msg = f"Call #{wrapper_debug_count_calls.num_calls} of {func.__name__!r}"
         print(msg)
         logger.info(msg)
         return func(*args, **kwargs)
@@ -266,15 +262,11 @@ def delay(
     @functools.wraps(func)
     def wrapper_delay(*args, **kwargs):
         if when & DelayWhen.BEFORE_CALL:
-            logger.debug(
-                f"@delay for {seconds}s BEFORE_CALL to {func.__name__}"
-            )
+            logger.debug(f"@delay for {seconds}s BEFORE_CALL to {func.__name__}")
             time.sleep(seconds)
         retval = func(*args, **kwargs)
         if when & DelayWhen.AFTER_CALL:
-            logger.debug(
-                f"@delay for {seconds}s AFTER_CALL to {func.__name__}"
-            )
+            logger.debug(f"@delay for {seconds}s AFTER_CALL to {func.__name__}")
             time.sleep(seconds)
         return retval

@@ -368,9 +360,7 @@ def memoized(func: Callable) -> Callable:
         cache_key = args + tuple(kwargs.items())
         if cache_key not in wrapper_memoized.cache:
             value = func(*args, **kwargs)
-            logger.debug(
-                f"Memoizing {cache_key} => {value} for {func.__name__}"
-            )
+            logger.debug(f"Memoizing {cache_key} => {value} for {func.__name__}")
             wrapper_memoized.cache[cache_key] = value
         else:
             logger.debug(f"Returning memoized value for {func.__name__}")
@@ -760,9 +750,7 @@ def call_with_sample_rate(sample_rate: float) -> Callable:
             if random.uniform(0, 1) < sample_rate:
                 return f(*args, **kwargs)
             else:
-                logger.debug(
-                    f"@call_with_sample_rate skipping a call to {f.__name__}"
-                )
+                logger.debug(f"@call_with_sample_rate skipping a call to {f.__name__}")

         return _call_with_sample_rate
diff --git a/deferred_operand.py b/deferred_operand.py
index 22bcb83..75e98d9 100644
--- a/deferred_operand.py
+++ b/deferred_operand.py
@@ -91,9 +91,7 @@ class DeferredOperand(ABC, Generic[T]):
         return DeferredOperand.resolve(self) is DeferredOperand.resolve(other)

     def is_not(self, other):
-        return DeferredOperand.resolve(self) is not DeferredOperand.resolve(
-            other
-        )
+        return DeferredOperand.resolve(self) is not DeferredOperand.resolve(other)

     def __abs__(self):
         return abs(DeferredOperand.resolve(self))
@@ -151,8 +149,6 @@ class DeferredOperand(ABC, Generic[T]):
     def __getattr__(self, method_name):
         def method(*args, **kwargs):
-            return getattr(DeferredOperand.resolve(self), method_name)(
-                *args, **kwargs
-            )
+            return getattr(DeferredOperand.resolve(self), method_name)(*args, **kwargs)

         return method
diff --git a/dict_utils.py b/dict_utils.py
index 79c86ed..b1464c6 100644
--- a/dict_utils.py
+++ b/dict_utils.py
@@ -198,9 +198,7 @@ def min_key(d: Dict[Any, Any]) -> Any:
     return min(d.keys())


-def parallel_lists_to_dict(
-    keys: List[Any], values: List[Any]
-) -> Dict[Any, Any]:
+def parallel_lists_to_dict(keys: List[Any], values: List[Any]) -> Dict[Any, Any]:
     """Given two parallel lists (keys and values), create and return a dict.
@@ -211,9 +209,7 @@ def parallel_lists_to_dict(
     """
     if len(keys) != len(values):
-        raise Exception(
-            "Parallel keys and values lists must have the same length"
-        )
+        raise Exception("Parallel keys and values lists must have the same length")
     return dict(zip(keys, values))

diff --git a/directory_filter.py b/directory_filter.py
index 8d03ff6..508baf3 100644
--- a/directory_filter.py
+++ b/directory_filter.py
@@ -57,9 +57,7 @@ class DirectoryFileFilter(object):
         mtime = file_utils.get_file_raw_mtime(filename)
         if self.mtime_by_filename.get(filename, 0) != mtime:
             md5 = file_utils.get_file_md5(filename)
-            logger.debug(
-                f'Computed/stored {filename}\'s MD5 at ts={mtime} ({md5})'
-            )
+            logger.debug(f'Computed/stored {filename}\'s MD5 at ts={mtime} ({md5})')
             self.mtime_by_filename[filename] = mtime
             self.md5_by_filename[filename] = md5
diff --git a/exec_utils.py b/exec_utils.py
index 0163107..282a325 100644
--- a/exec_utils.py
+++ b/exec_utils.py
@@ -68,9 +68,7 @@ def cmd_with_timeout(command: str, timeout_seconds: Optional[float]) -> int:
     subprocess.TimeoutExpired: Command '['/bin/bash', '-c', '/bin/sleep 2']' timed out after 0.1 seconds

     """
-    return subprocess.check_call(
-        ["/bin/bash", "-c", command], timeout=timeout_seconds
-    )
+    return subprocess.check_call(["/bin/bash", "-c", command], timeout=timeout_seconds)


 def cmd(command: str, timeout_seconds: Optional[float] = None) -> str:
@@ -120,9 +118,7 @@ def run_silently(command: str, timeout_seconds: Optional[float] = None) -> None:
     )


-def cmd_in_background(
-    command: str, *, silent: bool = False
-) -> subprocess.Popen:
+def cmd_in_background(command: str, *, silent: bool = False) -> subprocess.Popen:
     args = shlex.split(command)
     if silent:
         subproc = subprocess.Popen(
@@ -137,9 +133,7 @@ def cmd_in_background(
     def kill_subproc() -> None:
         try:
             if subproc.poll() is None:
-                logger.info(
-                    "At exit handler: killing {}: {}".format(subproc, command)
-                )
+                logger.info("At exit handler: killing {}: {}".format(subproc, command))
                 subproc.terminate()
                 subproc.wait(timeout=10.0)
         except BaseException as be:
diff --git a/executors.py b/executors.py
index 46812c2..453139a 100644
--- a/executors.py
+++ b/executors.py
@@ -160,9 +160,7 @@ class ProcessExecutor(BaseExecutor):
         self.adjust_task_count(+1)
         pickle = make_cloud_pickle(function, *args, **kwargs)
         result = self._process_executor.submit(self.run_cloud_pickle, pickle)
-        result.add_done_callback(
-            lambda _: self.histogram.add_item(time.time() - start)
-        )
+        result.add_done_callback(lambda _: self.histogram.add_item(time.time() - start))
         return result

     @overrides
@@ -258,9 +256,7 @@ class RemoteExecutorStatus:
         self.finished_bundle_timings_per_worker: Dict[
             RemoteWorkerRecord, List[float]
         ] = {}
-        self.in_flight_bundles_by_worker: Dict[
-            RemoteWorkerRecord, Set[str]
-        ] = {}
+        self.in_flight_bundles_by_worker: Dict[RemoteWorkerRecord, Set[str]] = {}
         self.bundle_details_by_uuid: Dict[str, BundleDetails] = {}
         self.finished_bundle_timings: List[float] = []
         self.last_periodic_dump: Optional[float] = None
@@ -270,9 +266,7 @@ class RemoteExecutorStatus:
         # as a memory fence for modifications to bundle.
         self.lock: threading.Lock = threading.Lock()

-    def record_acquire_worker(
-        self, worker: RemoteWorkerRecord, uuid: str
-    ) -> None:
+    def record_acquire_worker(self, worker: RemoteWorkerRecord, uuid: str) -> None:
         with self.lock:
             self.record_acquire_worker_already_locked(worker, uuid)
@@ -290,9 +284,7 @@ class RemoteExecutorStatus:
         with self.lock:
             self.record_bundle_details_already_locked(details)

-    def record_bundle_details_already_locked(
-        self, details: BundleDetails
-    ) -> None:
+    def record_bundle_details_already_locked(self, details: BundleDetails) -> None:
         assert self.lock.locked()
         self.bundle_details_by_uuid[details.uuid] = details
@@ -303,9 +295,7 @@ class RemoteExecutorStatus:
         was_cancelled: bool,
     ) -> None:
         with self.lock:
-            self.record_release_worker_already_locked(
-                worker, uuid, was_cancelled
-            )
+            self.record_release_worker_already_locked(worker, uuid, was_cancelled)

     def record_release_worker_already_locked(
         self,
@@ -377,11 +367,7 @@ class RemoteExecutorStatus:
                 ret += f'  ...{in_flight} bundles currently in flight:\n'
                 for bundle_uuid in self.in_flight_bundles_by_worker[worker]:
                     details = self.bundle_details_by_uuid.get(bundle_uuid, None)
-                    pid = (
-                        str(details.pid)
-                        if (details and details.pid != 0)
-                        else "TBD"
-                    )
+                    pid = str(details.pid) if (details and details.pid != 0) else "TBD"
                     if self.start_per_bundle[bundle_uuid] is not None:
                         sec = ts - self.start_per_bundle[bundle_uuid]
                         ret += f'       (pid={pid}): {details} for {sec:.1f}s so far '
@@ -412,10 +398,7 @@ class RemoteExecutorStatus:
         assert self.lock.locked()
         self.total_bundles_submitted = total_bundles_submitted
         ts = time.time()
-        if (
-            self.last_periodic_dump is None
-            or ts - self.last_periodic_dump > 5.0
-        ):
+        if self.last_periodic_dump is None or ts - self.last_periodic_dump > 5.0:
             print(self)
             self.last_periodic_dump = ts

@@ -429,9 +412,7 @@ class RemoteWorkerSelectionPolicy(ABC):
         pass

     @abstractmethod
-    def acquire_worker(
-        self, machine_to_avoid=None
-    ) -> Optional[RemoteWorkerRecord]:
+    def acquire_worker(self, machine_to_avoid=None) -> Optional[RemoteWorkerRecord]:
         pass


@@ -444,9 +425,7 @@ class WeightedRandomRemoteWorkerSelectionPolicy(RemoteWorkerSelectionPolicy):
         return False

     @overrides
-    def acquire_worker(
-        self, machine_to_avoid=None
-    ) -> Optional[RemoteWorkerRecord]:
+    def acquire_worker(self, machine_to_avoid=None) -> Optional[RemoteWorkerRecord]:
         grabbag = []
         for worker in self.workers:
             for x in range(0, worker.count):
@@ -585,9 +564,7 @@ class RemoteExecutor(BaseExecutor):
                     break

                 for uuid in bundle_uuids:
-                    bundle = self.status.bundle_details_by_uuid.get(
-                        uuid, None
-                    )
+                    bundle = self.status.bundle_details_by_uuid.get(uuid, None)
                     if (
                         bundle is not None
                         and bundle.src_bundle is None
@@ -678,9 +655,7 @@ class RemoteExecutor(BaseExecutor):
             logger.critical(msg)
             raise Exception(msg)

-    def release_worker(
-        self, bundle: BundleDetails, *, was_cancelled=True
-    ) -> None:
+    def release_worker(self, bundle: BundleDetails, *, was_cancelled=True) -> None:
         worker = bundle.worker
         assert worker is not None
         logger.debug(f'Released worker {worker}')
@@ -764,14 +739,14 @@ class RemoteExecutor(BaseExecutor):
         # Send input code / data to worker machine if it's not local.
         if hostname not in machine:
             try:
-                cmd = f'{SCP} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
+                cmd = (
+                    f'{SCP} {bundle.code_file} {username}@{machine}:{bundle.code_file}'
+                )
                 start_ts = time.time()
                 logger.info(f"{bundle}: Copying work to {worker} via {cmd}.")
                 run_silently(cmd)
                 xfer_latency = time.time() - start_ts
-                logger.debug(
-                    f"{bundle}: Copying to {worker} took {xfer_latency:.1f}s."
-                )
+                logger.debug(f"{bundle}: Copying to {worker} took {xfer_latency:.1f}s.")
             except Exception as e:
                 self.release_worker(bundle)
                 if is_original:
@@ -804,9 +779,7 @@ class RemoteExecutor(BaseExecutor):
             f' /home/scott/lib/python_modules/remote_worker.py'
             f' --code_file {bundle.code_file} --result_file {bundle.result_file}"'
         )
-        logger.debug(
-            f'{bundle}: Executing {cmd} in the background to kick off work...'
-        )
+        logger.debug(f'{bundle}: Executing {cmd} in the background to kick off work...')
         p = cmd_in_background(cmd, silent=True)
         bundle.pid = p.pid
         logger.debug(
@@ -935,9 +908,7 @@ class RemoteExecutor(BaseExecutor):
                 # Re-raise the exception; the code in wait_for_process may
                 # decide to emergency_retry_nasty_bundle here.
                 raise Exception(e)
-        logger.debug(
-            f'Removing local (master) {code_file} and {result_file}.'
-        )
+        logger.debug(f'Removing local (master) {code_file} and {result_file}.')
         os.remove(f'{result_file}')
         os.remove(f'{code_file}')
diff --git a/file_utils.py b/file_utils.py
index 12aadca..5d9a0be 100644
--- a/file_utils.py
+++ b/file_utils.py
@@ -366,9 +366,7 @@ def get_file_mtime_timedelta(filename: str) -> Optional[datetime.timedelta]:
     return get_file_timestamp_timedelta(filename, lambda x: x.st_mtime)


-def describe_file_timestamp(
-    filename: str, extractor, *, brief=False
-) -> Optional[str]:
+def describe_file_timestamp(filename: str, extractor, *, brief=False) -> Optional[str]:
     from datetime_utils import describe_duration, describe_duration_briefly

     age = get_file_timestamp_age_seconds(filename, extractor)
diff --git a/google_assistant.py b/google_assistant.py
index 75ca643..b92f443 100644
--- a/google_assistant.py
+++ b/google_assistant.py
@@ -105,9 +105,7 @@ def ask_google(cmd: str, *, recognize_speech=True) -> GoogleResponse:
             audio_transcription=audio_transcription,
         )
     else:
-        message = (
-            f'HTTP request to {url} with {payload} failed; code {r.status_code}'
-        )
+        message = f'HTTP request to {url} with {payload} failed; code {r.status_code}'
         logger.error(message)
         return GoogleResponse(
             success=False,
diff --git a/histogram.py b/histogram.py
index 4aa4749..a899fe9 100644
--- a/histogram.py
+++ b/histogram.py
@@ -94,11 +94,7 @@ class SimpleHistogram(Generic[T]):
                 right_end="",
             )
             label = f'{label_formatter}..{label_formatter}' % (start, end)
-            txt += (
-                f'{label:20}: '
-                + bar
-                + f"({pop/self.count*100.0:5.2f}% n={pop})\n"
-            )
+            txt += f'{label:20}: ' + bar + f"({pop/self.count*100.0:5.2f}% n={pop})\n"
             if start == last_bucket_start:
                 break
         return txt
diff --git a/lockfile.py b/lockfile.py
index 2bbe6f4..4b6aade 100644
--- a/lockfile.py
+++ b/lockfile.py
@@ -15,9 +15,7 @@ import datetime_utils
 import decorator_utils


-cfg = config.add_commandline_args(
-    f'Lockfile ({__file__})', 'Args related to lockfiles'
-)
+cfg = config.add_commandline_args(f'Lockfile ({__file__})', 'Args related to lockfiles')
 cfg.add_argument(
     '--lockfile_held_duration_warning_threshold_sec',
     type=float,
@@ -136,9 +134,7 @@ class LockFile(object):
                 duration
                 >= config.config['lockfile_held_duration_warning_threshold_sec']
             ):
-                str_duration = datetime_utils.describe_duration_briefly(
-                    duration
-                )
+                str_duration = datetime_utils.describe_duration_briefly(duration)
                 msg = f'Held {self.lockfile} for {str_duration}'
                 logger.warning(msg)
                 warnings.warn(msg, stacklevel=2)
diff --git a/logging_utils.py b/logging_utils.py
index bf8d8b0..2d9d63b 100644
--- a/logging_utils.py
+++ b/logging_utils.py
@@ -22,9 +22,7 @@ import pytz
 import argparse_utils
 import config

-cfg = config.add_commandline_args(
-    f'Logging ({__file__})', 'Args related to logging'
-)
+cfg = config.add_commandline_args(f'Logging ({__file__})', 'Args related to logging')
 cfg.add_argument(
     '--logging_config_file',
     type=argparse_utils.valid_filename,
@@ -233,9 +231,7 @@ class SquelchRepeatedMessagesFilter(logging.Filter):
         if id1 not in squelched_logging_counts:
             return True
         threshold = squelched_logging_counts[id1]
-        logsite = (
-            f'{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}'
-        )
+        logsite = f'{record.pathname}+{record.lineno}+{record.levelno}+{record.msg}'
         count = self.counters[logsite]
         self.counters[logsite] += 1
         return count < threshold
@@ -444,12 +440,8 @@ def initialize_logging(logger=None) -> logging.Logger:
     if config.config['logging_syslog']:
         if sys.platform not in ('win32', 'cygwin'):
             if config.config['logging_syslog_facility']:
-                facility_name = (
-                    'LOG_' + config.config['logging_syslog_facility']
-                )
-                facility = SysLogHandler.__dict__.get(
-                    facility_name, SysLogHandler.LOG_USER
-                )
+                facility_name = 'LOG_' + config.config['logging_syslog_facility']
+                facility = SysLogHandler.__dict__.get(facility_name, SysLogHandler.LOG_USER)
             handler = SysLogHandler(facility=facility, address='/dev/log')
             handler.setFormatter(
                 MillisecondAwareFormatter(
@@ -533,9 +525,7 @@ def initialize_logging(logger=None) -> logging.Logger:
     level_name = logging._levelToName.get(
         default_logging_level, str(default_logging_level)
     )
-    logger.debug(
-        f'Initialized global logging; default logging level is {level_name}.'
-    )
+    logger.debug(f'Initialized global logging; default logging level is {level_name}.')
     if (
         config.config['logging_clear_preexisting_handlers']
         and preexisting_handlers_count > 0
@@ -664,23 +654,17 @@ class OutputMultiplexer(object):
         self.logger = logger

         if filenames is not None:
-            self.f = [
-                open(filename, 'wb', buffering=0) for filename in filenames
-            ]
+            self.f = [open(filename, 'wb', buffering=0) for filename in filenames]
         else:
             if destination_bitv & OutputMultiplexer.FILENAMES:
-                raise ValueError(
-                    "Filenames argument is required if bitv & FILENAMES"
-                )
+                raise ValueError("Filenames argument is required if bitv & FILENAMES")
             self.f = None

         if handles is not None:
             self.h = [handle for handle in handles]
         else:
             if destination_bitv & OutputMultiplexer.Destination.FILEHANDLES:
-                raise ValueError(
-                    "Handle argument is required if bitv & FILEHANDLES"
-                )
+                raise ValueError("Handle argument is required if bitv & FILEHANDLES")
             self.h = None

         self.set_destination_bitv(destination_bitv)
@@ -690,13 +674,9 @@ class OutputMultiplexer(object):

     def set_destination_bitv(self, destination_bitv: int):
         if destination_bitv & self.Destination.FILENAMES and self.f is None:
-            raise ValueError(
-                "Filename argument is required if bitv & FILENAMES"
-            )
+            raise ValueError("Filename argument is required if bitv & FILENAMES")
         if destination_bitv & self.Destination.FILEHANDLES and self.h is None:
-            raise ValueError(
-                "Handle argument is required if bitv & FILEHANDLES"
-            )
+            raise ValueError("Handle argument is required if bitv & FILEHANDLES")
         self.destination_bitv = destination_bitv

     def print(self, *args, **kwargs):
@@ -719,18 +699,12 @@ class OutputMultiplexer(object):
             end = "\n"
         if end == '\n':
             buf += '\n'
-        if (
-            self.destination_bitv & self.Destination.FILENAMES
-            and self.f is not None
-        ):
+        if self.destination_bitv & self.Destination.FILENAMES and self.f is not None:
             for _ in self.f:
                 _.write(buf.encode('utf-8'))
                 _.flush()
-        if (
-            self.destination_bitv & self.Destination.FILEHANDLES
-            and self.h is not None
-        ):
+        if self.destination_bitv & self.Destination.FILEHANDLES and self.h is not None:
             for _ in self.h:
                 _.write(buf)
                 _.flush()
diff --git a/logical_search.py b/logical_search.py
index 85f9461..c324ff8 100644
--- a/logical_search.py
+++ b/logical_search.py
@@ -30,9 +30,7 @@ class Document(NamedTuple):
     docid: str  # a unique idenfier for the document
     tags: Set[str]  # an optional set of tags
-    properties: List[
-        Tuple[str, str]
-    ]  # an optional set of key->value properties
+    properties: List[Tuple[str, str]]  # an optional set of key->value properties
     reference: Any  # an optional reference to something else
@@ -102,9 +100,7 @@ class Corpus(object):

     def __init__(self) -> None:
         self.docids_by_tag: Dict[str, Set[str]] = defaultdict(set)
-        self.docids_by_property: Dict[Tuple[str, str], Set[str]] = defaultdict(
-            set
-        )
+        self.docids_by_property: Dict[Tuple[str, str], Set[str]] = defaultdict(set)
         self.docids_with_property: Dict[str, Set[str]] = defaultdict(set)
         self.documents_by_docid: Dict[str, Document] = {}
@@ -183,11 +179,7 @@ class Corpus(object):
         """Invert a set of docids."""

         return set(
-            [
-                docid
-                for docid in self.documents_by_docid.keys()
-                if docid not in original
-            ]
+            [docid for docid in self.documents_by_docid.keys() if docid not in original]
         )

     def get_doc(self, docid: str) -> Optional[Document]:
@@ -297,9 +289,7 @@ class Corpus(object):
                         ok = True
                         break
                 if not ok:
-                    raise ParseError(
-                        "Unbalanced parenthesis in query expression"
-                    )
+                    raise ParseError("Unbalanced parenthesis in query expression")

                 # and, or, not
                 else:
@@ -376,23 +366,17 @@ class Node(object):
                 raise ParseError(f"Unexpected query {tag}")
         elif self.op is Operation.DISJUNCTION:
             if len(evaled_operands) != 2:
-                raise ParseError(
-                    "Operation.DISJUNCTION (or) expects two operands."
-                )
+                raise ParseError("Operation.DISJUNCTION (or) expects two operands.")
             retval.update(evaled_operands[0])
             retval.update(evaled_operands[1])
         elif self.op is Operation.CONJUNCTION:
             if len(evaled_operands) != 2:
-                raise ParseError(
-                    "Operation.CONJUNCTION (and) expects two operands."
-                )
+                raise ParseError("Operation.CONJUNCTION (and) expects two operands.")
             retval.update(evaled_operands[0])
             retval = retval.intersection(evaled_operands[1])
         elif self.op is Operation.INVERSION:
             if len(evaled_operands) != 1:
-                raise ParseError(
-                    "Operation.INVERSION (not) expects one operand."
-                )
+                raise ParseError("Operation.INVERSION (not) expects one operand.")
             _ = evaled_operands[0]
             if isinstance(_, set):
                 retval.update(self.corpus.invert_docid_set(_))
diff --git a/parallelize.py b/parallelize.py
index 98f883c..698a7ec 100644
--- a/parallelize.py
+++ b/parallelize.py
@@ -15,9 +15,7 @@ class Method(Enum):


 def parallelize(
-    _funct: typing.Optional[typing.Callable] = None,
-    *,
-    method: Method = Method.THREAD
+    _funct: typing.Optional[typing.Callable] = None, *, method: Method = Method.THREAD
 ) -> typing.Callable:
     """Usage:
diff --git a/persistent.py b/persistent.py
index 5c2b132..d62dd67 100644
--- a/persistent.py
+++ b/persistent.py
@@ -65,11 +65,7 @@ def was_file_written_today(filename: str) -> bool:
     mtime = file_utils.get_file_mtime_as_datetime(filename)
     now = datetime.datetime.now()
-    return (
-        mtime.month == now.month
-        and mtime.day == now.day
-        and mtime.year == now.year
-    )
+    return mtime.month == now.month and mtime.day == now.day and mtime.year == now.year


 def was_file_written_within_n_seconds(
@@ -144,16 +140,12 @@ class persistent_autoloaded_singleton(object):
             # Otherwise, try to load it from persisted state.
             was_loaded = False
-            logger.debug(
-                f'Attempting to load {cls.__name__} from persisted state.'
-            )
+            logger.debug(f'Attempting to load {cls.__name__} from persisted state.')
             self.instance = cls.load()
             if not self.instance:
                 msg = 'Loading from cache failed.'
                 logger.warning(msg)
-                logger.debug(
-                    f'Attempting to instantiate {cls.__name__} directly.'
-                )
+                logger.debug(f'Attempting to instantiate {cls.__name__} directly.')
                 self.instance = cls(*args, **kwargs)
             else:
                 logger.debug(
diff --git a/profanity_filter.py b/profanity_filter.py
index 95540fa..4723a2d 100755
--- a/profanity_filter.py
+++ b/profanity_filter.py
@@ -494,9 +494,7 @@ class ProfanityFilter(object):
         result = result.replace('3', 'e')
         for x in string.punctuation:
             result = result.replace(x, "")
-        chunks = [
-            self.stemmer.stem(word) for word in nltk.word_tokenize(result)
-        ]
+        chunks = [self.stemmer.stem(word) for word in nltk.word_tokenize(result)]
         return ' '.join(chunks)

     def tokenize(self, text: str):
diff --git a/site_config.py b/site_config.py
index 62c2b98..b09e735 100644
--- a/site_config.py
+++ b/site_config.py
@@ -97,9 +97,7 @@ def get_config():
             network_netmask='255.255.255.0',
             network_router_ip='10.0.0.1',
             presence_location=Location.HOUSE,
-            is_anyone_present=lambda x=Location.HOUSE: is_anyone_present_wrapper(
-                x
-            ),
+            is_anyone_present=lambda x=Location.HOUSE: is_anyone_present_wrapper(x),
             arper_minimum_device_count=50,
         )
     elif location == 'CABIN':
@@ -110,9 +108,7 @@ def get_config():
             network_netmask='255.255.255.0',
             network_router_ip='192.168.0.1',
             presence_location=Location.CABIN,
-            is_anyone_present=lambda x=Location.CABIN: is_anyone_present_wrapper(
-                x
-            ),
+            is_anyone_present=lambda x=Location.CABIN: is_anyone_present_wrapper(x),
             arper_minimum_device_count=15,
         )
     else:
diff --git a/state_tracker.py b/state_tracker.py
index 4836e3e..453faf7 100644
--- a/state_tracker.py
+++ b/state_tracker.py
@@ -73,21 +73,15 @@ class StateTracker(ABC):
         for update_id in sorted(self.last_reminder_ts.keys()):
             if force_all_updates_to_run:
                 logger.debug('Forcing all updates to run')
-                self.update(
-                    update_id, self.now, self.last_reminder_ts[update_id]
-                )
+                self.update(update_id, self.now, self.last_reminder_ts[update_id])
                 self.last_reminder_ts[update_id] = self.now
                 return

             refresh_secs = self.update_ids_to_update_secs[update_id]
             last_run = self.last_reminder_ts[update_id]
             if last_run is None:  # Never run before
-                logger.debug(
-                    f'id {update_id} has never been run; running it now'
-                )
-                self.update(
-                    update_id, self.now, self.last_reminder_ts[update_id]
-                )
+                logger.debug(f'id {update_id} has never been run; running it now')
+                self.update(update_id, self.now, self.last_reminder_ts[update_id])
                 self.last_reminder_ts[update_id] = self.now
             else:
                 delta = self.now - last_run
@@ -148,9 +142,7 @@ class AutomaticStateTracker(StateTracker):
         This may block for as long as self.sleep_delay.

         """
-        logger.debug(
-            'Setting shutdown event and waiting for background thread.'
-        )
+        logger.debug('Setting shutdown event and waiting for background thread.')
         self.should_terminate.set()
         self.updater_thread.join()
         logger.debug('Background thread terminated.')
diff --git a/text_utils.py b/text_utils.py
index 94df3e3..cfed169 100644
--- a/text_utils.py
+++ b/text_utils.py
@@ -136,9 +136,7 @@ def distribute_strings(
     return retval


-def justify_string_by_chunk(
-    string: str, width: int = 80, padding: str = " "
-) -> str:
+def justify_string_by_chunk(string: str, width: int = 80, padding: str = " ") -> str:
     """
     Justifies a string.
diff --git a/thread_utils.py b/thread_utils.py
index ad1f0bf..d8c85f4 100644
--- a/thread_utils.py
+++ b/thread_utils.py
@@ -61,9 +61,7 @@ def background_thread(

     def wrapper(funct: Callable):
         @functools.wraps(funct)
-        def inner_wrapper(
-            *a, **kwa
-        ) -> Tuple[threading.Thread, threading.Event]:
+        def inner_wrapper(*a, **kwa) -> Tuple[threading.Thread, threading.Event]:
             should_terminate = threading.Event()
             should_terminate.clear()
             newargs = (*a, should_terminate)
@@ -130,9 +128,7 @@ def periodically_invoke(
         should_terminate = threading.Event()
         should_terminate.clear()
         newargs = (should_terminate, *args)
-        thread = threading.Thread(
-            target=helper_thread, args=newargs, kwargs=kwargs
-        )
+        thread = threading.Thread(target=helper_thread, args=newargs, kwargs=kwargs)
         thread.start()
         logger.debug(f'Started thread {thread.name} tid={thread.ident}')
         return (thread, should_terminate)
diff --git a/unittest_utils.py b/unittest_utils.py
index 4a9669d..f4fed35 100644
--- a/unittest_utils.py
+++ b/unittest_utils.py
@@ -87,9 +87,7 @@ class PerfRegressionDataPersister(ABC):
         pass

     @abstractmethod
-    def save_performance_data(
-        self, method_id: str, data: Dict[str, List[float]]
-    ):
+    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
         pass

     @abstractmethod
@@ -106,9 +104,7 @@ class FileBasedPerfRegressionDataPersister(PerfRegressionDataPersister):
         with open(self.filename, 'rb') as f:
             return pickle.load(f)

-    def save_performance_data(
-        self, method_id: str, data: Dict[str, List[float]]
-    ):
+    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
         for trace in self.traces_to_delete:
             if trace in data:
                 data[trace] = []
@@ -138,9 +134,7 @@ class DatabasePerfRegressionDataPersister(PerfRegressionDataPersister):
         results.close()
         return ret

-    def save_performance_data(
-        self, method_id: str, data: Dict[str, List[float]]
-    ):
+    def save_performance_data(self, method_id: str, data: Dict[str, List[float]]):
         self.delete_performance_data(method_id)
         for (method_id, perf_data) in data.items():
             sql = 'INSERT INTO runtimes_by_function (function, runtime) VALUES '
@@ -174,9 +168,7 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
         )
         helper = DatabasePerfRegressionDataPersister(dbspec)
     else:
-        raise Exception(
-            'Unknown/unexpected --unittests_persistance_strategy value'
-        )
+        raise Exception('Unknown/unexpected --unittests_persistance_strategy value')

     func_id = function_utils.function_identifier(func)
     func_name = func.__name__
@@ -212,16 +204,10 @@ def check_method_for_perf_regressions(func: Callable) -> Callable:
             stdev = statistics.stdev(hist)
             logger.debug(f'For {func_name}, performance stdev={stdev}')
             slowest = hist[-1]
-            logger.debug(
-                f'For {func_name}, slowest perf on record is {slowest:f}s'
-            )
+            logger.debug(f'For {func_name}, slowest perf on record is {slowest:f}s')
             limit = slowest + stdev * 4
-            logger.debug(
-                f'For {func_name}, max acceptable runtime is {limit:f}s'
-            )
-            logger.debug(
-                f'For {func_name}, actual observed runtime was {run_time:f}s'
-            )
+            logger.debug(f'For {func_name}, max acceptable runtime is {limit:f}s')
+            logger.debug(f'For {func_name}, actual observed runtime was {run_time:f}s')
             if run_time > limit and not config.config['unittests_ignore_perf']:
                 msg = f'''{func_id} performance has regressed unacceptably.
{slowest:f}s is the slowest runtime on record in {len(hist)} perf samples.
diff --git a/unscrambler.py b/unscrambler.py
index 3abb6d8..d3686d6 100644
--- a/unscrambler.py
+++ b/unscrambler.py
@@ -121,13 +121,9 @@ class Unscrambler(object):

     # 52 bits
     @staticmethod
-    def _compute_word_fingerprint(
-        word: str, population: Mapping[str, int]
-    ) -> int:
+    def _compute_word_fingerprint(word: str, population: Mapping[str, int]) -> int:
         fp = 0
-        for pair in sorted(
-            population.items(), key=lambda x: x[1], reverse=True
-        ):
+        for pair in sorted(population.items(), key=lambda x: x[1], reverse=True):
             letter = pair[0]
             if letter in fprint_feature_bit:
                 count = pair[1]
@@ -146,9 +142,7 @@ class Unscrambler(object):
         population: Mapping[str, int],
     ) -> int:
         sig = 0
-        for pair in sorted(
-            population.items(), key=lambda x: x[1], reverse=True
-        ):
+        for pair in sorted(population.items(), key=lambda x: x[1], reverse=True):
             letter = pair[0]
             if letter not in letter_sigs:
                 continue
@@ -189,9 +183,7 @@ class Unscrambler(object):
         """
         population = list_utils.population_counts(word)
         fprint = Unscrambler._compute_word_fingerprint(word, population)
-        letter_sig = Unscrambler._compute_word_letter_sig(
-            letter_sigs, word, population
-        )
+        letter_sig = Unscrambler._compute_word_letter_sig(letter_sigs, word, population)
         assert fprint & letter_sig == 0
         sig = fprint | letter_sig
         return sig
@@ -238,9 +230,7 @@ class Unscrambler(object):
         """
         sig = Unscrambler.compute_word_sig(word)
-        return self.lookup_by_sig(
-            sig, include_fuzzy_matches=include_fuzzy_matches
-        )
+        return self.lookup_by_sig(sig, include_fuzzy_matches=include_fuzzy_matches)

     def lookup_by_sig(
         self, sig: int, *, include_fuzzy_matches: bool = False
diff --git a/waitable_presence.py b/waitable_presence.py
index cd5501d..d54511f 100644
--- a/waitable_presence.py
+++ b/waitable_presence.py
@@ -20,9 +20,7 @@ import state_tracker
 logger = logging.getLogger(__name__)


-class WaitablePresenceDetectorWithMemory(
-    state_tracker.WaitableAutomaticStateTracker
-):
+class WaitablePresenceDetectorWithMemory(state_tracker.WaitableAutomaticStateTracker):
    """
    This is a waitable class that keeps a PresenceDetector internally
    and periodically polls it to detect changes in presence in a
@@ -88,9 +86,7 @@ class WaitablePresenceDetectorWithMemory(

     def check_detector(self) -> None:
         if len(self.detector.dark_locations) > 0:
-            logger.debug(
-                'PresenceDetector is incomplete; trying to reinitialize...'
-            )
+            logger.debug('PresenceDetector is incomplete; trying to reinitialize...')
             self.detector = base_presence.PresenceDetection()

     def is_someone_home(self) -> Tuple[bool, datetime.datetime]:
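
Every hunk above is a pure re-wrap, consistent with black at its default 88-column
line length; single-quoted strings pass through untouched, which suggests string
normalization was disabled. What follows is a minimal sketch of that assumed
invocation via black's Python API, not part of the commit itself; the sample
source and mode settings are illustrative, inferred from the hunks rather than
taken from the repository.

    # Assumed reproduction of this commit's formatting -- not recorded anywhere
    # in the repo. line_length=88 is black's default; string_normalization=False
    # is an inference from the single quotes that survive the rewraps above.
    import black

    # A signature wrapped the old way, as in the acl.py hunk.
    source = (
        "def __init__(\n"
        "    self, *, order_to_check_allow_deny, default_answer\n"
        "):\n"
        "    pass\n"
    )

    mode = black.FileMode(line_length=88, string_normalization=False)
    print(black.format_str(source, mode=mode))
    # Output:
    #   def __init__(self, *, order_to_check_allow_deny, default_answer):
    #       pass

The command-line equivalent would be something like "black -l 88 -S ." run at the
top of python_utils (-S is --skip-string-normalization); note that black only
collapses wrapped calls like these when the argument list lacks a trailing comma,
its "magic trailing comma" rule.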