#!/usr/bin/env python3
import base64
import contextlib
import datetime
import io
import logging
import random
import re
import string
import unicodedata
import warnings
from typing import (
    Any,
    Callable,
    Dict,
    Iterable,
    List,
    Optional,
    Sequence,
    Tuple,
)
from uuid import uuid4

import list_utils
logger = logging.getLogger(__name__)
r"([a-z-]+://)" # scheme
r"([a-z_\d-]+:[a-z_\d-]+@)?" # user:password
r"(www\.)?" # www.
- r"((?<!\.)[a-z\d]+[a-z\d.-]+\.[a-z]{2,6}|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|localhost)" # domain
+ r"((?<!\.)[a-z\d]+[a-z\d.-]+\.[a-z]{2,6}|\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|localhost)" # domain
r"(:\d{2,})?" # port number
r"(/[a-z\d_%+-]*)*" # folders
r"(\.[a-z\d_%+-]+)*" # file extension
False
"""
return (
- (is_number(in_str) and "." not in in_str) or
- is_hexidecimal_integer_number(in_str) or
- is_octal_integer_number(in_str) or
- is_binary_integer_number(in_str)
+ (is_number(in_str) and "." not in in_str)
+ or is_hexidecimal_integer_number(in_str)
+ or is_octal_integer_number(in_str)
+ or is_binary_integer_number(in_str)
)
def add_thousands_separator(
- in_str: str,
- *,
- separator_char = ',',
- places = 3
+ in_str: str, *, separator_char=',', places=3
) -> str:
"""
Add thousands separator to a numeric string. Also handles numbers.
in_str = f'{in_str}'
if is_number(in_str):
return _add_thousands_separator(
- in_str,
- separator_char = separator_char,
- places = places
+ in_str, separator_char=separator_char, places=places
)
raise ValueError(in_str)
-def _add_thousands_separator(in_str: str, *, separator_char = ',', places = 3) -> str:
+def _add_thousands_separator(
+ in_str: str, *, separator_char=',', places=3
+) -> str:
decimal_part = ""
if '.' in in_str:
(in_str, decimal_part) = in_str.split('.')
tmp = [iter(in_str[::-1])] * places
ret = separator_char.join(
- "".join(x) for x in zip_longest(*tmp, fillvalue=""))[::-1]
+ "".join(x) for x in zip_longest(*tmp, fillvalue="")
+ )[::-1]
if len(decimal_part) > 0:
ret += '.'
ret += decimal_part
>>> suffix_string_to_number('13.1Gb')
14066017894
"""
+
def suffix_capitalize(s: str) -> str:
if len(s) == 1:
return s.upper()
>>> extract_mac_address(' MAC Address: 34:29:8F:12:0D:2F')
'34:29:8F:12:0D:2F'
+ >>> extract_mac_address('? (10.0.0.30) at d8:5d:e2:34:54:86 on em0 expires in 1176 seconds [ethernet]')
+ 'd8:5d:e2:34:54:86'
+
"""
if not is_full_string(in_str):
return None
return len(WORDS_COUNT_RE.findall(in_str))
def generate_uuid(omit_dashes: bool = False) -> str:
    """Return a freshly generated version-4 UUID as a string.

    generate_uuid()  # possible output: '97e3a716-6b33-4ab9-9bb1-8128cb24d76b'
    generate_uuid(omit_dashes=True)  # possible output: '97e3a7166b334ab99bb18128cb24d76b'
    """
    fresh = uuid4()
    # .hex is the same 32 hex digits without the dash separators.
    return fresh.hex if omit_dashes else str(fresh)
Parses a date string. See DateParser docs for details.
"""
import dateparse.dateparse_utils as dp
+
try:
d = dp.DateParser()
d.parse(in_str)
return d.get_date()
except dp.ParseException:
- logger.warning(f'Unable to parse date {in_str}.')
+ msg = f'Unable to parse date {in_str}.'
+ logger.warning(msg)
return None
True if the string represents a valid date.
"""
import dateparse.dateparse_utils as dp
+
try:
d = dp.DateParser()
_ = d.parse(in_str)
return True
except dp.ParseException:
- logger.warning(f'Unable to parse date {in_str}.')
+ msg = f'Unable to parse date {in_str}.'
+ logger.warning(msg)
return False
Parses a datetime string. See DateParser docs for more info.
"""
import dateparse.dateparse_utils as dp
+
try:
d = dp.DateParser()
dt = d.parse(in_str)
if type(dt) == datetime.datetime:
return dt
except ValueError:
- logger.warning(f'Unable to parse datetime {in_str}.')
+ msg = f'Unable to parse datetime {in_str}.'
+ logger.warning(msg)
return None
_ = to_datetime(in_str)
if _ is not None:
return True
- logger.warning(f'Unable to parse datetime {in_str}.')
+ msg = f'Unable to parse datetime {in_str}.'
+ logger.warning(msg)
return False
def squeeze(in_str: str, character_to_squeeze: str = ' ') -> str:
    """Collapse every run of two or more adjacent copies of
    character_to_squeeze down to a single copy.

    >>> squeeze('a  b')
    'a b'

    >>> squeeze('one|!||!|two|!||!|three', character_to_squeeze='|!|')
    'one|!|two|!|three'

    """
    # A run is one-or-more repetitions of the (escaped) squeeze string.
    run_pattern = re.compile('(' + re.escape(character_to_squeeze) + ')+')
    return run_pattern.sub(character_to_squeeze, in_str)
+
+
def dedent(in_str: str) -> str:
"""
Removes tab indentation from multi line strings (inspired by analogous Scala function).
'test\n'
"""
+
    def __init__(self) -> None:
        # In-memory buffer that accumulates captured text.
        self.destination = io.StringIO()
        # Starts unset; presumably assigned a redirect context manager
        # later — TODO confirm against the rest of the class.
        self.recorder = None
"""
words = txt.split()
- return ngrams_presplit(words, n)
+ for ngram in ngrams_presplit(words, n):
+ ret = ''
+ for word in ngram:
+ ret += f'{word} '
+ yield ret.strip()
def ngrams_presplit(words: Sequence[str], n: int):
    # Thin wrapper: delegates n-gram generation over an already-tokenized
    # sequence to list_utils.ngrams.  NOTE(review): the return type
    # (iterable of tuples vs. joined strings) is defined by list_utils —
    # verify callers expect tuples.
    return list_utils.ngrams(words, n)
def bigrams(txt: str):
def shuffle_columns_into_list(
- input_lines: Iterable[str],
- column_specs: Iterable[Iterable[int]],
- delim=''
+ input_lines: Iterable[str], column_specs: Iterable[Iterable[int]], delim=''
) -> Iterable[str]:
"""Helper to shuffle / parse columnar data and return the results as a
list. The column_specs argument is an iterable collection of
def shuffle_columns_into_dict(
- input_lines: Iterable[str],
- column_specs: Iterable[Tuple[str, Iterable[int]]],
- delim=''
+ input_lines: Iterable[str],
+ column_specs: Iterable[Tuple[str, Iterable[int]]],
+ delim='',
) -> Dict[str, str]:
"""Helper to shuffle / parse columnar data and return the results
as a dict.
return sprintf(txt.format(**values), end='')
def to_ascii(x: str):
    """Encode a str as an ASCII bytes string; pass bytes through unchanged.

    >>> to_ascii('test')
    b'test'

    >>> to_ascii(b'1, 2, 3')
    b'1, 2, 3'

    Raises:
        TypeError: if x is neither str nor bytes.  (Was a bare Exception;
            TypeError is more precise and still caught by any caller
            catching Exception.)
        UnicodeEncodeError: if x is a str containing non-ASCII characters.
    """
    # isinstance over `type(x) is ...`: idiomatic, and accepts subclasses.
    if isinstance(x, str):
        return x.encode('ascii')
    if isinstance(x, bytes):
        return x
    raise TypeError('to_ascii works with strings and bytes')
+
+
def to_base64(txt: str, *, encoding='utf-8', errors='surrogatepass') -> bytes:
    """Encode txt to bytes, then encode those bytes with a 64-character
    alphabet.  This is compatible with uudecode.

    Return annotation fixed: base64.encodebytes returns bytes, not str
    (see the doctest below).

    >>> to_base64('hello?')
    b'aGVsbG8/\\n'

    """
    return base64.encodebytes(txt.encode(encoding, errors))
+
+
def is_base64(txt: str) -> bool:
    """Determine whether a string is base64 encoded (with Python's standard
    base64 alphabet which is the same as what uuencode uses).

    NOTE(review): the '=' padding character is not in the accepted
    alphabet, so padded base64 (e.g. b'aGVsbG8=') is rejected — confirm
    this is intended.

    >>> is_base64('test')  # all letters in the b64 alphabet
    True

    >>> is_base64('another test, how do you like this one?')
    False

    >>> is_base64(b'aGVsbG8/\\n')  # Ending newline is ok.
    True

    """
    a = string.ascii_uppercase + string.ascii_lowercase + string.digits + '+/'
    permitted = frozenset(a.encode('ascii'))
    # Leading/trailing whitespace (e.g. the trailing newline encodebytes
    # adds) is ignored; every remaining byte must be in the alphabet.
    return all(ch in permitted for ch in to_ascii(txt.strip()))
+
+
def from_base64(b64: bytes, encoding='utf-8', errors='surrogatepass') -> str:
    """Convert a base64 encoded bytes string back to a normal str.

    Parameter annotation fixed: base64.decodebytes requires a bytes-like
    argument (the doctest passes bytes), not str.

    >>> from_base64(b'aGVsbG8/\\n')
    'hello?'

    """
    return base64.decodebytes(b64).decode(encoding, errors)
+
+
def chunk(txt: str, chunk_size):
    """Yield successive chunk_size-sized pieces of txt.

    >>> ' '.join(chunk('010011011100010110101010101010101001111110101000', 8))
    '01001101 11000101 10101010 10101010 10011111 10101000'

    If len(txt) is not an even multiple of chunk_size, the final chunk
    yielded is short; a warning is logged and issued when that happens.
    """
    if len(txt) % chunk_size != 0:
        # Bug fix: the old message was missing the closing paren after
        # the length value.
        msg = f"String to chunk's length ({len(txt)}) is not an even multiple of chunk_size ({chunk_size})"
        logger.warning(msg)
        warnings.warn(msg, stacklevel=2)
    for start in range(0, len(txt), chunk_size):
        yield txt[start : start + chunk_size]
+
+
def to_bitstring(
    txt: str, *, delimiter='', encoding='utf-8', errors='surrogatepass'
) -> str:
    """Render txt as a bitstring: one 8-bit group per byte, joined by
    delimiter.  Note: only bitstrings with delimiter='' are interpretable
    by from_bitstring.

    Bug fix: bits are now emitted per byte, so leading NUL bytes are no
    longer silently dropped (the old int.from_bytes/bin round-trip
    collapsed leading zero bytes).

    NOTE(review): encoding/errors are accepted but unused — the input
    goes through to_ascii(), so non-ASCII str input raises; confirm
    whether txt.encode(encoding, errors) was intended instead.

    >>> to_bitstring('hello?')
    '011010000110010101101100011011000110111100111111'

    >>> to_bitstring('test', delimiter=' ')
    '01110100 01100101 01110011 01110100'

    >>> to_bitstring(b'test')
    '01110100011001010111001101110100'

    """
    etxt = to_ascii(txt)
    # format(b, '08b') zero-pads each byte to exactly eight bits.
    return delimiter.join(format(b, '08b') for b in etxt)
+
+
def is_bitstring(txt: str) -> bool:
    """Return True iff txt is composed solely of binary digits.

    >>> is_bitstring('011010000110010101101100011011000110111100111111')
    True

    >>> is_bitstring('1234')
    False

    """
    # Prefix with '0b' and reuse the module's binary-literal validator.
    return is_binary_integer_number('0b' + txt)
+
+
def from_bitstring(bits: str, encoding='utf-8', errors='surrogatepass') -> str:
    """Convert a bitstring (as produced by to_bitstring with delimiter='')
    back into bytes, then decode those bytes into a str.

    Bug fix: the byte count is now derived from the bitstring's length
    rather than from the decoded integer's bit_length(), so leading
    all-zero bytes (e.g. '0000000001000001' -> '\\x00A') are no longer
    dropped.  This also makes the old "or '\\0'" empty-result fallback
    unnecessary, since at least one byte is always decoded.

    >>> from_bitstring('011010000110010101101100011011000110111100111111')
    'hello?'

    Raises:
        ValueError: if bits is empty or contains non-binary characters.
    """
    # One byte per 8 bits; round up so short/unpadded inputs still decode.
    num_bytes = (len(bits) + 7) // 8
    return int(bits, 2).to_bytes(num_bytes, 'big').decode(encoding, errors)
+
+
def ip_v4_sort_key(txt: str) -> Optional[Tuple[int, ...]]:
    """Turn an IPv4 address into a tuple of its four octets, for sorting.

    Returns None (after logging a warning) when txt is not an IPv4
    address.  Annotation fixed accordingly (was Tuple[int], a 1-tuple).

    >>> ip_v4_sort_key('10.0.0.18')
    (10, 0, 0, 18)

    >>> ips = ['10.0.0.10', '100.0.0.1', '1.2.3.4', '10.0.0.9']
    >>> sorted(ips, key=lambda x: ip_v4_sort_key(x))
    ['1.2.3.4', '10.0.0.9', '10.0.0.10', '100.0.0.1']

    """
    if not is_ip_v4(txt):
        # Report through the module logger rather than print(), matching
        # the error-reporting style used elsewhere in this file.
        msg = f"not IP: {txt}"
        logger.warning(msg)
        return None
    return tuple(int(octet) for octet in txt.split('.'))
+
+
def path_ancestors_before_descendants_sort_key(volume: str) -> Tuple[str, ...]:
    """Chunk up a file path so that parent/ancestor paths sort before
    children/descendant paths.

    Return annotation fixed: the result is a variable-length tuple of
    path components (Tuple[str, ...]), not a 1-tuple (Tuple[str]).

    >>> path_ancestors_before_descendants_sort_key('/usr/local/bin')
    ('usr', 'local', 'bin')

    >>> paths = ['/usr/local', '/usr/local/bin', '/usr']
    >>> sorted(paths, key=lambda x: path_ancestors_before_descendants_sort_key(x))
    ['/usr', '/usr/local', '/usr/local/bin']

    """
    # Empty components (leading/trailing/doubled slashes) are dropped.
    return tuple(x for x in volume.split('/') if len(x) > 0)
+
+
def replace_all(in_str: str, replace_set: str, replacement: str) -> str:
    """Replace every character of replace_set found in in_str with
    replacement, applying the substitutions one character at a time.

    >>> s = 'this_is a-test!'
    >>> replace_all(s, ' _-!', '')
    'thisisatest'

    """
    result = in_str
    for victim in replace_set:
        result = result.replace(victim, replacement)
    return result
+
+
if __name__ == '__main__':
    # Run this module's embedded doctests when executed as a script.
    import doctest

    doctest.testmod()