Add fff.py as an example, but it's really a devtool that I used in a git pre-commit hook
[pyutils.git] / examples / fff / fff.py
#!/usr/bin/env python3

"""
This isn't really an example of pyutils but rather a development
tool I used as part of a git pre-commit hook...

f)ind f)ucked f)-strings

Searches Python files for suspicious-looking strings that seem to
use f-string {interpolations} without actually being f-strings.

Usage: fff.py *.py
"""
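
# Illustration (not part of the original script): fff is meant to flag a plain
# string like
#     greeting = "hello, {name}"
# where {name} will never be interpolated, while leaving a real f-string such
# as f"hello, {name}" alone.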

import re
import sys
import tokenize
from pathlib import Path
from token import STRING
from typing import List, Tuple

from pyutils import ansi

# Matches a {placeholder}-style span: a '{', one or more non-'}' characters, then '}'.
curly = re.compile(r'\{[^\}]+\}')


def looks_suspicious(q: str, previous_tokens: List[Tuple[int, str]]) -> bool:
    """Heuristic: does string token q look like it was meant to be an f-string?"""
    # If one of the last few tokens contained a ':', don't flag this string;
    # braces near a colon are usually intentional (dicts, slices, annotations).
    for pair in previous_tokens:
        if ':' in pair[1]:
            return False
    # Both 'f' and 'F' are valid f-string prefixes.
    return q[0] not in ('f', 'F') and curly.search(q) is not None
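
# Illustrative behavior (assumed, not from the original file):
#   looks_suspicious('"hello, {name}"', [])          -> True   (braces, no f prefix)
#   looks_suspicious('f"hello, {name}"', [])         -> False  (already an f-string)
#   looks_suspicious('"hello, {name}"', [(0, ':')])  -> False  (a recent ':' token suppresses it)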


for filename in sys.argv[1:]:
    path = Path(filename)
    if path.suffix != ".py":
        print(f"{filename} doesn't look like Python; skipping.", file=sys.stderr)
        continue
    with tokenize.open(filename) as f:
        previous_tokens = []
        for token in tokenize.generate_tokens(f.readline):
            (ttype, text, (start_row, _), (end_row, _), _) = token
            if ttype == STRING:
                # Skip raw strings, then apply the heuristic above.
                if (
                    'r"' not in text
                    and "r'" not in text
                    and looks_suspicious(text, previous_tokens)
                ):
                    print(
                        f"{ansi.fg('green')}{filename}:{start_row}-{end_row}>{ansi.reset()}"
                    )
                    for (n, line) in enumerate(text.split('\n')):
                        print(
                            f'{ansi.fg("dark gray")}{start_row+n}:{ansi.reset()} {line}'
                        )
            #                        print('Previous context:')
            #                        for pair in previous_tokens:
            #                            print(f'{pair[0]} ({pair[1]})', end=' + ')
            #                        print()
            # Remember only the last three tokens as context for the heuristic.
            previous_tokens.append((ttype, text))
            previous_tokens = previous_tokens[-3:]
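
# Hypothetical run (colors stripped): if the flagged string from the example
# above sat on line 3 of bad.py, `./fff.py bad.py` would print roughly:
#
#     bad.py:3-3>
#     3: "hello, {name}"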