author     jakobst1n <jakob.stendahl@outlook.com>  2023-10-20 19:35:32 +0200
committer  jakobst1n <jakob.stendahl@outlook.com>  2023-10-20 19:35:32 +0200
commit     81f0f8de331b382caad8e82348a3ccbac5bb150e (patch)
tree       fdaaeab78ba7cc55336fdbdfae258a15cf5b24f1
parent     3bb38fcbbc9703ba22429441604d66f577fc6010 (diff)
download   journal-lib-81f0f8de331b382caad8e82348a3ccbac5bb150e.tar.gz
           journal-lib-81f0f8de331b382caad8e82348a3ccbac5bb150e.zip
Run black, and make some changes to enable better syntax extensibility
-rw-r--r--  src/journal_lib/dataclasses.py             |  15
-rw-r--r--  src/journal_lib/parse/lexers/l_ledger.py   | 136
-rw-r--r--  src/journal_lib/parse/lexwrapper.py        |  47
-rw-r--r--  src/journal_lib/parse/parsers/p_ledger.py  | 112
-rw-r--r--  src/journal_lib/parse/parsewrapper.py      |   7
-rw-r--r--  src/journal_lib/parse/preprocessing.py     |   9
-rw-r--r--  src/journal_lib/utils.py                   |  27
7 files changed, 203 insertions(+), 150 deletions(-)
diff --git a/src/journal_lib/dataclasses.py b/src/journal_lib/dataclasses.py
index 227cabe..73b81e9 100644
--- a/src/journal_lib/dataclasses.py
+++ b/src/journal_lib/dataclasses.py
@@ -3,22 +3,26 @@ from dataclasses import dataclass
FORCE_NOCOLOR = False
+
def set_force_nocolor(value: bool):
global FORCE_NOCOLOR
FORCE_NOCOLOR = value
+
def anesc(code: str):
return "" if FORCE_NOCOLOR or not sys.stdout.isatty() else f"\u001b[{code}"
+
def format_amount(amount: str, currency: str | None = None) -> str:
if currency is None:
return amount
- if currency in ['NOK']:
+ if currency in ["NOK"]:
return f"{amount} {currency}"
- if currency in ['$']:
+ if currency in ["$"]:
return f"{currency}{amount}"
return f"{amount} {currency}"
+
@dataclass
class JournalEntryTransaction:
account: str
@@ -26,6 +30,7 @@ class JournalEntryTransaction:
amount: str | None
comment: str | None
+
@dataclass
class JournalEntry:
date: str
@@ -51,7 +56,7 @@ class JournalEntry:
t = f" {anesc('33m')}{transaction.account:{max_account_len}}{anesc('0m')}"
if transaction.amount is not None:
amount_code = "32m"
- if (transaction.amount[0] == "-"):
+ if transaction.amount[0] == "-":
amount_code = "31m"
t += f" {anesc(amount_code)}{format_amount(transaction.amount, transaction.currency)}{anesc('0m')}"
if transaction.comment is not None:
@@ -62,6 +67,7 @@ class JournalEntry:
s += f" {anesc('38m')}{comment}{anesc('0m')}\n"
return s + "\n"
+
@dataclass
class JournalAccountDef:
account: str
@@ -73,6 +79,7 @@ class JournalAccountDef:
s += f" {anesc('38m')}{self.comment}{anesc('0m')}"
return s + "\n"
+
@dataclass
class JournalCommodityDef:
commodity: str
@@ -93,6 +100,7 @@ class JournalCommodityDef:
s += "\n"
return s
+
@dataclass
class Journal:
entries: list[JournalEntry]
@@ -132,4 +140,3 @@ class Journal:
print(f"INVALID ELEMENT {element}")
return Journal(entries=entries, accounts=accounts, commodities=commodities)
-
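
For reference, format_amount above decides which side of the amount the
commodity symbol goes on: "$" is prefixed, "NOK" (and any unknown commodity)
is suffixed. A minimal standalone sketch of the same logic and its output,
with the function body taken from the diff above:

def format_amount(amount: str, currency: str | None = None) -> str:
    if currency is None:
        return amount                  # bare amount, no commodity
    if currency in ["NOK"]:
        return f"{amount} {currency}"  # suffix style: "120.00 NOK"
    if currency in ["$"]:
        return f"{currency}{amount}"   # prefix style: "$120.00"
    return f"{amount} {currency}"      # unknown commodities default to suffix

print(format_amount("120.00", "NOK"))  # 120.00 NOK
print(format_amount("120.00", "$"))    # $120.00
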
diff --git a/src/journal_lib/parse/lexers/l_ledger.py b/src/journal_lib/parse/lexers/l_ledger.py
index 6f63626..2ab51bd 100644
--- a/src/journal_lib/parse/lexers/l_ledger.py
+++ b/src/journal_lib/parse/lexers/l_ledger.py
@@ -1,10 +1,12 @@
from journal_lib.parse.lexwrapper import LexWrapper
+
class JournalLexer(LexWrapper):
states = (
('sHEADER', 'exclusive'), # Entry header parsing state
('sHEADEREFF', 'exclusive'), # Entry header effective date parsing state
('sENTRY', 'exclusive'), # Entry parsing state
+ ('sENTRYCONTENT', 'exclusive'), # Entry content parsing state
('sBLOCKCOMMENT', 'exclusive'), # Block comment parsing state
('sACCOUNT', 'exclusive'), # Account definition parsing state
('sCOMMODITY', 'exclusive'), # Commodity definition parsing state
@@ -15,10 +17,15 @@ class JournalLexer(LexWrapper):
"commodity": 'KW_COMMODITY'
}
+ kw_state_map = {
+ "KW_ACCOUNT": "sACCOUNT",
+ "KW_COMMODITY": "sCOMMODITY",
+ }
+
tokens = (
'TEXT',
'AMOUNT',
- 'CURRENCY',
+ 'COMMODITY',
'COMMENT',
'INLINE_COMMENT',
'DATE',
@@ -30,46 +37,47 @@ class JournalLexer(LexWrapper):
'COMMODITY_DEFAULT',
) + tuple(reserved.values())
- t_ANY_ignore = ' \t'
+ t_ANY_ignore = " \t"
- literals = '\n'
+ literals = "\n"
# Rules for the 'initial' state
def t_INITIAL_DATE(self, t):
- r'\d{4}(-|\/)\d{2}(-|\/)\d{2}'
- self._state_begin('sHEADER', t)
+ r"\d{4}(-|\/)\d{2}(-|\/)\d{2}"
+ self._state_begin("sHEADER", t)
return t
def t_INITIAL_eof(self, t):
pass
def t_INITIAL_COMMENT(self, t):
- r'(;|\#|\%|\||\*).+\n'
+ r"(;|\#|\%|\||\*).+\n"
+ t.value = t.value.lstrip()
+ if t.value[0] in [";", "#", "%", "|", "*"]:
+ t.value = t.value[1:]
pass
def t_INITIAL_BLOCKCOMMENT(self, t):
- r'comment'
- self._state_begin('sBLOCKCOMMENT', t)
+ r"comment"
+ self._state_begin("sBLOCKCOMMENT", t)
def t_INITIAL_KEYWORD(self, t):
- r'[a-zA-Z_][a-zA-Z_0-9]*'
- t.type = self.reserved.get(t.value,'KW')
- if t.type == "KW_ACCOUNT":
- self._state_begin('sACCOUNT', t)
- if t.type == "KW_COMMODITY":
- self._state_begin('sCOMMODITY', t)
+ r"[a-zA-Z_][a-zA-Z_0-9]*"
+ t.type = self.reserved.get(t.value, "KW")
+ if (new_state := self.kw_state_map.get(t.type)) is not None:
+ self._state_begin(new_state, t)
return t
# Rules for the 'sBLOCKCOMMENT' state
def t_sBLOCKCOMMENT_end(self, t):
- r'end\scomment'
- t.lexer.lineno += t.value.count('\n')
- self._state_begin('INITIAL', t)
+ r"end\scomment"
+ t.lexer.lineno += t.value.count("\n")
+ self._state_begin("INITIAL", t)
def t_sBLOCKCOMMENT_content(self, t):
- r'.+?\n'
+ r".+?\n"
def t_sBLOCKCOMMENT_error(self, t):
t.lexer.skip(1)
@@ -84,17 +92,20 @@ class JournalLexer(LexWrapper):
return t
def t_sACCOUNT_COMMENT(self, t):
- r'(;)[^\n]*'
+ r"(;)[^\n]*"
+ t.value = t.value.lstrip()
+ if t.value[0] == ";":
+ t.value = t.value[1:]
return t
def t_sACCOUNT_newline(self, t):
- r'\n'
- self._state_begin('INITIAL', t)
+ r"\n"
+ self._state_begin("INITIAL", t)
# Rules for the 'sCOMMODITY' state
def t_sCOMMODITY_KW(self, t):
- r'(note|format|nomarket|default)'
+ r"(note|format|nomarket|default)"
if t.value == "note":
t.type = "COMMODITY_NOTE"
elif t.value == "format":
@@ -107,62 +118,69 @@ class JournalLexer(LexWrapper):
return t
def t_sCOMMODITY_TEXT(self, t):
- r'[^\n]+'
+ r"[^\n]+"
return t
def t_sCOMMODITY_newline(self, t):
- r'\n(?=(\s*\n|\s*$|[^\s]))'
- self._state_begin('INITIAL', t)
+ r"\n(?=(\s*\n|\s*$|[^\s]))"
+ self._state_begin("INITIAL", t)
# Rules for the 'sheader' state
def t_sHEADER_ENTRY_STATUS(self, t):
- r'(\*|!)'
+ r"(\*|!)"
return t
def t_sHEADER_ENTRY_EFFECTIVE_DATE_SEPARATOR(self, t):
- r'='
- self._state_begin('sHEADEREFF', t)
+ r"="
+ self._state_begin("sHEADEREFF", t)
return t
def t_sHEADER_TEXT(self, t):
- r'[^\n]+'
- if ((t.value.startswith('"') and t.value.endswith('"'))
- or (t.value.startswith("'") and t.value.endswith("'"))):
- t.value = t.value[1:-1]
+ r"[^\n]+"
+ if (t.value.startswith('"') and t.value.endswith('"')) or (
+ t.value.startswith("'") and t.value.endswith("'")
+ ):
+ t.value = t.value[1:-1]
return t
def t_sHEADER_newline(self, t):
- r'\n'
- self._state_begin('sENTRY', t)
+ r"\n"
+ self._state_begin("sENTRY", t)
# Rules for the 'sheader_effective_date' state
def t_sHEADEREFF_DATE(self, t):
- r'\d{4}(-|\/)\d{2}(-|\/)\d{2}'
- self._state_begin('sHEADER', t)
+ r"\d{4}(-|\/)\d{2}(-|\/)\d{2}"
+ self._state_begin("sHEADER", t)
return t
# Rules for the 'sentry' state
- def t_sENTRY_DATE(self, t):
- r'\d{4}(-|\/)\d{2}(-|\/)\d{2}'
+ def t_sENTRY_TEXT(self, t):
+ r"[^\n;]+?(?=\s{2,}|$|;)"
+ if t.value.startswith('"') and t.value.endswith('"'):
+ t.value = t.value[1:-1]
+ t.value = t.value.rstrip()
+ self._state_begin("sENTRYCONTENT")
return t
- def t_sENTRY_CURRENCY(self, t):
- r'\$|NOK'
+ # Rules for the 'sENTRYCONTENT' state
+
+ def t_sENTRYCONTENT_COMMODITY(self, t):
+ r"\$|NOK"
return t
- def t_sENTRY_AMOUNT(self, t):
- r'(-)?(\d|\,)+(\.\d{2})?'
+ def t_sENTRYCONTENT_AMOUNT(self, t):
+ r"(-)?(\d|\,)+(\.\d{2})?"
return t
- def t_sENTRY_COMMENT(self, t):
- r';[^\n]*'
+ def t_sENTRY_sENTRYCONTENT_COMMENT(self, t):
+ r";[^\n]*"
# Check if the comment is at the start of a line (considering whitespaces)
- line_start = t.lexer.lexdata.rfind('\n', 0, t.lexpos) + 1
- pre_comment = t.lexer.lexdata[line_start:t.lexpos]
-
+ line_start = t.lexer.lexdata.rfind("\n", 0, t.lexpos) + 1
+ pre_comment = t.lexer.lexdata[line_start : t.lexpos]
+
# If the comment is at the start of a line, it's a standalone comment
if pre_comment.isspace() or pre_comment == "":
t.type = "COMMENT"
@@ -170,27 +188,23 @@ class JournalLexer(LexWrapper):
t.type = "INLINE_COMMENT"
return t
- def t_sENTRY_TEXT(self, t):
- r'[^\n;]+?(?=\s{2,}|$|;)'
- if t.value.startswith('"') and t.value.endswith('"'):
- t.value = t.value[1:-1]
- t.value = t.value.rstrip()
- return t
-
- def t_sENTRY_newline(self, t):
- r'\n\n'
- self._state_begin('INITIAL', t)
+ def t_sENTRYCONTENT_newline(self, t):
+ r"\n"
+ self._state_begin("sENTRY", t)
- def t_sENTRY_eof(self, t):
- self._state_begin('INITIAL', t)
+ def t_sENTRY_sENTRYCONTENT_double_newline(self, t):
+ r"\n\n"
+ self._state_begin("INITIAL", t)
# Common rules
+ def t_ANY_eof(self, t):
+ self._state_begin("INITIAL", t)
+
def t_ANY_newline(self, t):
- r'\n+'
+ r"\n+"
t.lexer.lineno += len(t.value)
def t_ANY_error(self, t):
self._hl_token(t)
t.lexer.skip(1)
-
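
The kw_state_map added above is the "syntax extensibility" part of this
commit: t_INITIAL_KEYWORD now looks the target lexer state up in a table
instead of hard-coding one if-branch per keyword, so supporting a new
top-level directive only takes a reserved-word entry and a state mapping.
A minimal sketch of that dispatch pattern outside PLY (begin_state stands
in for the real _state_begin; requires Python 3.8+ for the walrus operator):

reserved = {"account": "KW_ACCOUNT", "commodity": "KW_COMMODITY"}
kw_state_map = {"KW_ACCOUNT": "sACCOUNT", "KW_COMMODITY": "sCOMMODITY"}

def classify(word: str, begin_state) -> str:
    # Map the word to a token type, defaulting to the generic KW.
    token_type = reserved.get(word, "KW")
    # Switch lexer state only for token types that define one.
    if (new_state := kw_state_map.get(token_type)) is not None:
        begin_state(new_state)
    return token_type

classify("account", print)  # prints "sACCOUNT", returns "KW_ACCOUNT"
classify("banana", print)   # no state change, returns "KW"
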
diff --git a/src/journal_lib/parse/lexwrapper.py b/src/journal_lib/parse/lexwrapper.py
index 6a3989e..dcd92cc 100644
--- a/src/journal_lib/parse/lexwrapper.py
+++ b/src/journal_lib/parse/lexwrapper.py
@@ -1,11 +1,12 @@
from .ply import lex
import sys
+
class LexWrapper(object):
state_trail = ["INITIAL"]
- def _state_begin(self, state: str, t = None):
- """ Convenient wrapper for the lexer.begin, which makes it possible to track state changes. """
+ def _state_begin(self, state: str, t=None):
+ """Convenient wrapper for the lexer.begin, which makes it possible to track state changes."""
self.lexer.begin(state)
self.state_trail.append(self.lexer.current_state())
@@ -15,40 +16,44 @@ class LexWrapper(object):
if self.debug:
d = f"{' ':{self.max_token_name_length+2}}{self.state_trail[-2]} -> {self.state_trail[-1]}"
if t is not None:
- d += ", recognized [{}] \"{}\"".format(t.type, t.value.replace("\n", "\\n"))
+ d += ', recognized [{}] "{}"'.format(
+ t.type, t.value.replace("\n", "\\n")
+ )
self.debuglog.info(d)
def __init__(self, debug: bool = False):
- """ Initialize a new JournalLexer """
+ """Initialize a new JournalLexer"""
self.build(debug=debug)
self.debug = debug
if self.debug:
self.debuglog = lex.PlyLogger(sys.stderr)
- self.max_token_name_length = max(len(x)+1 for x in self.tokens)
+ self.max_token_name_length = max(len(x) + 1 for x in self.tokens)
def build(self, **kwargs):
- """ Reinitialize the lexer module (this is called on __init__) """
+ """Reinitialize the lexer module (this is called on __init__)"""
self.lexer = lex.lex(module=self, **kwargs)
def input(self, s: str):
- """ Wrapper for the lex input function """
+ """Wrapper for the lex input function"""
self.lexer.input(s)
def token(self):
- """ Wrapper for the lex token function, can print debug information to stdout if debug is enabled """
+ """Wrapper for the lex token function, can print debug information to stdout if debug is enabled"""
tok = self.lexer.token()
if self.debug and tok:
- self.debuglog.info("[{:<{width}} ({}:{}) \"{}\"".format(
- tok.type + "]",
- tok.lineno,
- tok.lexpos,
- tok.value.replace("\n", "\\n"),
- width=self.max_token_name_length,
- ))
+ self.debuglog.info(
+ '[{:<{width}} ({}:{}) "{}"'.format(
+ tok.type + "]",
+ tok.lineno,
+ tok.lexpos,
+ tok.value.replace("\n", "\\n"),
+ width=self.max_token_name_length,
+ )
+ )
return tok
def print_tokens(self, data):
- """ Simple debugging function which will trigger a tokenization of all the data provided """
+ """Simple debugging function which will trigger a tokenization of all the data provided"""
self.input(data)
_debug = self.debug
self.debug = True
@@ -61,13 +66,17 @@ class LexWrapper(object):
linestart = t.lexer.lexdata.rfind("\n", 0, t.lexpos) + 1
lineend = t.lexer.lexdata.find("\n", t.lexpos)
markpos = t.lexpos - linestart
- lineno = t.lexer.lexdata[0:linestart+1].count("\n")
- print(f"Illegal character at '{t.value[0]}' on line {lineno}, position {markpos}")
+ lineno = t.lexer.lexdata[0 : linestart + 1].count("\n")
+ print(
+ f"Illegal character at '{t.value[0]}' on line {lineno}, position {markpos}"
+ )
print(f" {t.lexer.lexdata[linestart:lineend]}")
print(f" {' ' * markpos}^")
except Exception as e:
print(f"Illegal character '{p.value}'")
- print(f"Additionally a error occuren when showing the position of the illegal character\n{e}")
+ print(
+ f"Additionally a error occuren when showing the position of the illegal character\n{e}"
+ )
@property
def lexdata(self):
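
LexWrapper._state_begin exists so that every state change lands in
state_trail, which lets the debug log print transitions as
"previous -> current". One thing worth knowing when reading it: state_trail
is declared at class level, so the list is shared between LexWrapper
instances. A minimal sketch of the trail idea with an instance-level list
and a stub standing in for PLY's begin/current_state:

class StubLexer:
    def __init__(self):
        self._state = "INITIAL"
    def begin(self, state):
        self._state = state
    def current_state(self):
        return self._state

class TrailDemo:
    def __init__(self):
        self.lexer = StubLexer()
        self.state_trail = ["INITIAL"]  # instance-level, unlike the diff
    def _state_begin(self, state):
        self.lexer.begin(state)
        self.state_trail.append(self.lexer.current_state())
        print(f"{self.state_trail[-2]} -> {self.state_trail[-1]}")

demo = TrailDemo()
demo._state_begin("sHEADER")  # INITIAL -> sHEADER
demo._state_begin("sENTRY")   # sHEADER -> sENTRY
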
diff --git a/src/journal_lib/parse/parsers/p_ledger.py b/src/journal_lib/parse/parsers/p_ledger.py
index ad01a79..6b5d573 100644
--- a/src/journal_lib/parse/parsers/p_ledger.py
+++ b/src/journal_lib/parse/parsers/p_ledger.py
@@ -2,13 +2,20 @@ from datetime import datetime
from journal_lib.parse.parsewrapper import ParseWrapper
from journal_lib.parse.lexers.l_ledger import JournalLexer
-from journal_lib.dataclasses import Journal, JournalEntry, JournalEntryTransaction, JournalAccountDef, JournalCommodityDef
+from journal_lib.dataclasses import (
+ Journal,
+ JournalEntry,
+ JournalEntryTransaction,
+ JournalAccountDef,
+ JournalCommodityDef,
+)
+
class JournalParser(ParseWrapper):
tokens = JournalLexer.tokens
def p_journal(self, p):
- '''journal : elements '''
+ """journal : elements"""
p[0] = Journal.from_elements(p[1])
if self.debug:
@@ -16,41 +23,41 @@ class JournalParser(ParseWrapper):
print(repr(x))
def p_elements(self, p):
- '''elements : elements element
- | element'''
+ """elements : elements element
+ | element"""
if len(p) == 3:
p[0] = p[1] + [p[2]]
else:
p[0] = [p[1]]
def p_element_entry(self, p):
- '''element : DATE effective_date status TEXT transactions'''
+ """element : DATE effective_date status TEXT transactions"""
p[0] = JournalEntry(
- date = p[1],
- effective_date = p[2],
- cleared = p[3]['cleared'],
- pending = p[3]['pending'],
- title = p[4],
- transactions = p[5]['transactions'],
- comments = p[5]['comments']
+ date=p[1],
+ effective_date=p[2],
+ cleared=p[3]["cleared"],
+ pending=p[3]["pending"],
+ title=p[4],
+ transactions=p[5]["transactions"],
+ comments=p[5]["comments"],
)
def p_element_account(self, p):
- '''element : KW_ACCOUNT TEXT COMMENT
- | KW_ACCOUNT TEXT'''
+ """element : KW_ACCOUNT TEXT COMMENT
+ | KW_ACCOUNT TEXT"""
p[0] = JournalAccountDef(account=p[2], comment=p[3] if len(p) > 3 else None)
def p_element_commodity(self, p):
- '''element : KW_COMMODITY TEXT commodity_attributes '''
+ """element : KW_COMMODITY TEXT commodity_attributes"""
p[0] = JournalCommodityDef(commodity=p[2], **p[3])
def p_commodity_attributes(self, p):
- '''commodity_attributes : COMMODITY_FORMAT TEXT commodity_attributes
+ """commodity_attributes : COMMODITY_FORMAT TEXT commodity_attributes
| COMMODITY_NOTE TEXT commodity_attributes
| COMMODITY_NOMARKET commodity_attributes
| COMMODITY_DEFAULT commodity_attributes
| empty
- '''
+ """
p[0] = {}
if p[1] == "format":
p[0]["format"] = p[2]
@@ -64,66 +71,72 @@ class JournalParser(ParseWrapper):
p[0].update(p[3])
def p_effective_date(self, p):
- '''effective_date : ENTRY_EFFECTIVE_DATE_SEPARATOR DATE
- | empty
- '''
+ """effective_date : ENTRY_EFFECTIVE_DATE_SEPARATOR DATE
+ | empty"""
p[0] = p[2] if p[1] else None
def p_status(self, p):
- '''status : ENTRY_STATUS
- | empty
- '''
- p[0] = {'cleared': p[1] == "*", 'pending': p[1] == "!"} if p[1] else {'cleared': False, 'pending': False}
+ """status : ENTRY_STATUS
+ | empty"""
+ p[0] = (
+ {"cleared": p[1] == "*", "pending": p[1] == "!"}
+ if p[1]
+ else {"cleared": False, "pending": False}
+ )
def p_empty(self, p):
- '''empty :'''
+ """empty :"""
pass
def p_transactions(self, p):
- '''transactions : transactions transaction'''
- p[0] = {"comments": p[1]['comments'],
- "transactions": p[1]['transactions'] + [p[2]]}
+ """transactions : transactions transaction"""
+ p[0] = {
+ "comments": p[1]["comments"],
+ "transactions": p[1]["transactions"] + [p[2]],
+ }
def p_comments(self, p):
- '''transactions : transactions COMMENT'''
- p[0] = {"comments": p[1]['comments'] + [p[2]],
- "transactions": p[1]['transactions']}
+ """transactions : transactions COMMENT"""
+ p[0] = {
+ "comments": p[1]["comments"] + [p[2]],
+ "transactions": p[1]["transactions"],
+ }
def p_transactions_single(self, p):
- '''transactions : transaction '''
+ """transactions : transaction"""
p[0] = {"comments": [], "transactions": [p[1]]}
def p_transactions_comment_single(self, p):
- '''transactions : COMMENT '''
+ """transactions : COMMENT"""
p[0] = {"comments": [p[1]], "transactions": []}
def p_amount_prefixed(self, p):
- '''amount : CURRENCY AMOUNT'''
- p[0] = {'currency': p[1], 'amount': p[2]}
+ """amount : COMMODITY AMOUNT"""
+ p[0] = {"currency": p[1], "amount": p[2]}
def p_amount_suffixed(self, p):
- '''amount : AMOUNT CURRENCY'''
- p[0] = {'currency': p[2], 'amount': p[1]}
+ """amount : AMOUNT COMMODITY"""
+ p[0] = {"currency": p[2], "amount": p[1]}
def p_transaction_with_amount(self, p):
- '''transaction : TEXT amount INLINE_COMMENT
- | TEXT amount'''
+ """transaction : TEXT amount INLINE_COMMENT
+ | TEXT amount"""
p[0] = JournalEntryTransaction(
- account = p[1],
- currency = p[2]['currency'],
- amount = p[2]['amount'],
- comment = p[3] if len(p) > 3 else None
+ account=p[1],
+ currency=p[2]["currency"],
+ amount=p[2]["amount"],
+ comment=p[3] if len(p) > 3 else None,
)
def p_transaction_without_amount(self, p):
- '''transaction : TEXT INLINE_COMMENT
- | TEXT'''
+ """transaction : TEXT INLINE_COMMENT
+ | TEXT"""
p[0] = JournalEntryTransaction(
- account = p[1],
- currency = None,
- amount = None,
- comment = p[2] if len(p) > 2 else None
+ account=p[1],
+ currency=None,
+ amount=None,
+ comment=p[2] if len(p) > 2 else None,
)
def p_error(self, p):
@@ -131,4 +144,3 @@ class JournalParser(ParseWrapper):
self._hl_token(p)
else:
print("Syntax error at EOF")
-
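
The transactions productions above all thread one accumulator dict of the
shape {"comments": [...], "transactions": [...]} through the grammar: the
two single-element rules create it, the two recursive rules extend it. A
minimal sketch of that accumulation outside yacc (plain strings stand in
for COMMENT tokens and tuples for parsed transactions):

def start(item):
    # transactions : transaction  |  COMMENT
    if isinstance(item, str):
        return {"comments": [item], "transactions": []}
    return {"comments": [], "transactions": [item]}

def extend(acc, item):
    # transactions : transactions transaction  |  transactions COMMENT
    if isinstance(item, str):
        return {"comments": acc["comments"] + [item],
                "transactions": acc["transactions"]}
    return {"comments": acc["comments"],
            "transactions": acc["transactions"] + [item]}

acc = start(("Expenses:Food", "$", "42.00"))
acc = extend(acc, "paid by card")
print(acc["comments"])      # ['paid by card']
print(acc["transactions"])  # [('Expenses:Food', '$', '42.00')]
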
diff --git a/src/journal_lib/parse/parsewrapper.py b/src/journal_lib/parse/parsewrapper.py
index c75b32c..3946c92 100644
--- a/src/journal_lib/parse/parsewrapper.py
+++ b/src/journal_lib/parse/parsewrapper.py
@@ -1,8 +1,8 @@
from .ply import yacc
-class ParseWrapper(object):
- def __init__(self, tokens = None, debug: bool = False):
+class ParseWrapper(object):
+ def __init__(self, tokens=None, debug: bool = False):
if tokens is not None:
self.tokens = tokens
self.debug = debug
@@ -20,10 +20,9 @@ class ParseWrapper(object):
lineend = p.lexer.lexdata.find("\n", p.lexpos)
markpos = p.lexpos - linestart
marklen = len(str(p.value))
- lineno = p.lexer.lexdata[0:linestart+1].count("\n")
+ lineno = p.lexer.lexdata[0 : linestart + 1].count("\n")
print(f"Syntax error at '{p.value}' on line {lineno}, position {markpos}")
print(f" {p.lexer.lexdata[linestart:lineend]}")
print(f" {' ' * markpos}{'^' * marklen}")
except Exception as e:
print(f"An error occured when showing the position of token {p}\n{e}")
-
diff --git a/src/journal_lib/parse/preprocessing.py b/src/journal_lib/parse/preprocessing.py
index 6d4a0cf..a9ca676 100644
--- a/src/journal_lib/parse/preprocessing.py
+++ b/src/journal_lib/parse/preprocessing.py
@@ -1,6 +1,7 @@
import re
from pathlib import Path
+
def preprocess_includes(filepath: Path):
"""
Reads the file at 'filepath', processing any "include" directives,
@@ -8,10 +9,10 @@ def preprocess_includes(filepath: Path):
This does not detect circular includes, so a circular include becomes an infinite loop
"""
- INCLUDE_RE = re.compile(r'^\s*include\s+([^\n]+)\s*$', re.IGNORECASE)
+ INCLUDE_RE = re.compile(r"^\s*include\s+([^\n]+)\s*$", re.IGNORECASE)
def read_file(file_path):
- with open(file_path, 'r') as file:
+ with open(file_path, "r") as file:
return file.readlines()
lines = read_file(filepath)
@@ -21,8 +22,8 @@ def preprocess_includes(filepath: Path):
if match:
included_file_path = match.group(1)
included_lines = read_file(included_file_path)
- lines[i:i+1] = included_lines
+ lines[i : i + 1] = included_lines
else:
i += 1
- return ''.join(lines)
+ return "".join(lines)
diff --git a/src/journal_lib/utils.py b/src/journal_lib/utils.py
index b8f645d..71d7fe4 100644
--- a/src/journal_lib/utils.py
+++ b/src/journal_lib/utils.py
@@ -2,28 +2,40 @@ from pathlib import Path
from journal_lib.dataclasses import Journal
from journal_lib.parse import JournalParser, JournalLexer, preprocess_includes
+
def journal_from_str(data: str, debug: bool = False) -> Journal:
- """ Read a string of Journal entries into a Journal object """
- if debug: print("= Building lexer ===========")
+ """Read a string of Journal entries into a Journal object"""
+ if debug:
+ print("= Building lexer ===========")
lexer = JournalLexer(debug=debug)
- if debug: print("= Building parser ==========")
+ if debug:
+ print("= Building parser ==========")
parser = JournalParser(debug=debug)
- if debug: print("= PARSE ====================")
+ if debug:
+ print("= PARSE ====================")
journal = parser.parse(data, lexer=lexer)
- if debug: print("= JOURNAL ==================")
+ if debug:
+ print("= JOURNAL ==================")
return journal
+
def journal_from_file(filename: Path, debug: bool = False) -> Journal:
- """ Read a journal file into a Journal object """
+ """Read a journal file into a Journal object"""
journal_raw = preprocess_includes(filename)
return journal_from_str(journal_raw, debug=debug)
def test():
from argparse import ArgumentParser
+
parser = ArgumentParser()
parser.add_argument("-f", "--file", help="The journal file to read")
- parser.add_argument("-d", "--debug", action="store_true", help="Print more debug information from lexing and parsing")
+ parser.add_argument(
+ "-d",
+ "--debug",
+ action="store_true",
+ help="Print more debug information from lexing and parsing",
+ )
args = parser.parse_args()
if args.file is not None:
@@ -61,4 +73,3 @@ end comment
Assets:Cash
"""
print(journal_from_str(data, debug=args.debug))
-
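
For an end-to-end smoke test of the pieces above, a minimal usage sketch
(assuming journal_lib is importable; the journal text is a made-up example
in ledger syntax):

from journal_lib.utils import journal_from_str

data = """
2023-10-20 * Groceries
    Expenses:Food    $42.00
    Assets:Cash
"""
journal = journal_from_str(data)  # lex, parse, and build a Journal
print(journal.entries)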