diff --git a/__pycache__/lineanalyzer.cpython-37.pyc b/__pycache__/lineanalyzer.cpython-37.pyc
new file mode 100644
index 0000000..20e6747
Binary files /dev/null and b/__pycache__/lineanalyzer.cpython-37.pyc differ
diff --git a/lineanalyzer.py b/lineanalyzer.py
index bd603d3..8829136 100644
--- a/lineanalyzer.py
+++ b/lineanalyzer.py
@@ -8,14 +8,23 @@ class ANALYZEOBJECT:
     _passes = count(0)
     _analyzed = []
 
-    def __exit__(self, *exc):
-        return False
-
     def __init__(self):
         self.passes = 0
         self.analyzeline = []
         self.analyzed = []
+        self.comment = False
 
+    def analyze(self,l):
+        if l.startswith('/*') or l.startswith('/**'):
+            self.comment = True
+        if l.endswith('*/') or l.endswith('**/'):
+            self.comment = False
+            return l
+        if self.comment == True:
+            if l.startswith('*') or l.startswith('**'):
+                return l
+            if l.startswith(' *'):
+                return l
 
 class ANALYZER(ANALYZEOBJECT):
     _ids = count(0)
diff --git a/parser.py b/parser.py
index da35c24..b4a2ed4 100644
--- a/parser.py
+++ b/parser.py
@@ -4,6 +4,7 @@ Contains class PARSER
 from itertools import count
 import os
 from tokenizer import TOKENIZER
+from lineanalyzer import ANALYZER
 
 #Element type definitions. Used in the parse process.
 ELEMENT_TYPE_PREPROCESS = 1
@@ -81,7 +82,8 @@ substitute = False
 
 class PARSEOBJECT:
     _passes = count(0)
-
+    _lineanalyzer = ANALYZER()
+
     def __init__(self):
         self.tokenize = TOKENIZER()
         self.parseline = []
@@ -107,11 +109,20 @@ class PARSEOBJECT:
     def parseheader(self, fl, fn):
         tempfile = []
         tempfile1 = []
+        templine = []
         outfile = ''
+        rr = 'next'
+        count = 0
         self.parse_reset()
-        for l in fl:
-            analyzed_line = self.analyzer(l)
-            tempfile.append(analyzed_line)
+        i = iter(fl)
+        while i:
+            rr = self._lineanalyzer.analyze(next(i))
+            if rr == 'next':
+                count += 1
+            else:
+                templine.append(rr)
+                tempfile.append(templine)
+                count += 1
         self.inc_passes()
         for l in tempfile:
             analyzed_line = self.token_analyzer(l)
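
Review note on the added logic, not part of the diff above: analyze() returns the line only on its comment branches and falls off the end with None for every other input, so it never produces the 'next' sentinel that parseheader() tests for; `while i:` never becomes false (an iterator object is always truthy), so the loop only ends when the bare next(i) raises an uncaught StopIteration; the same templine list object is appended to tempfile on every match; and the local `count = 0` shadows the count imported from itertools. A minimal sketch of one way to line the two sides up, assuming the intent is to pass comment lines through and skip everything else; the names LineAnalyzerSketch and collect_comment_lines are placeholders, not identifiers from the repository:

# Review sketch only. Assumes analyze() should hand comment lines back
# unchanged (as the added code does) and answer 'next' for all other lines
# so the caller can skip them.

class LineAnalyzerSketch:
    def __init__(self):
        self.comment = False            # True while inside a /* ... */ block

    def analyze(self, l):
        stripped = l.strip()
        if stripped.startswith('/*'):   # also covers '/**'
            self.comment = True
        if self.comment:
            if stripped.endswith('*/'): # also covers '**/'
                self.comment = False
            return l                    # comment line: keep it
        return 'next'                   # anything else: caller skips it


def collect_comment_lines(fl, analyzer):
    # A plain for loop exhausts fl safely; 'while i:' on an iterator never
    # becomes false, and a bare next(i) raises StopIteration at end of input.
    kept = []
    for l in fl:
        rr = analyzer.analyze(l)
        if rr == 'next':
            continue
        kept.append(rr)                 # append the line itself, not a shared list
    return kept


# Example:
# collect_comment_lines(["int x;", "/* doc", " * body", " */", "int y;"],
#                       LineAnalyzerSketch())
# -> ['/* doc', ' * body', ' */']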