diff --git a/parser/keywords.py b/parser/keywords.py
index 6757f11..4094a14 100644
--- a/parser/keywords.py
+++ b/parser/keywords.py
@@ -105,4 +105,23 @@ def init():
     '\>' : 'RANGLE'
     }
 
+    global nasm_preprocess_directives
+    nasm_preprocess_directives = {
+        '#include' : '$include',
+        '#define' : '$define',
+        '#undef' : '$undef',
+        '#if' : '$if',
+        '#ifdef' : '$ifdef',
+        '#ifndef' : '$ifndef',
+        '#error' : '$error',
+        '__FILE__' : '__FILE__',
+        '__LINE__' : '__LINE__',
+        '__DATE__' : '__DATE__',
+        '__TIME__' : '__TIME__',
+        '__TIMESTAMP__' : '__TIMESTAMP__',
+        'pragma' : 'pragma',
+        '#' : '#',
+        '##' : '##'
+    }
+
    tokens += reserved.values()
\ No newline at end of file
diff --git a/parser/parser.py b/parser/parser.py
index 3ac4420..aa52e02 100644
--- a/parser/parser.py
+++ b/parser/parser.py
@@ -12,11 +12,37 @@ class parser(f):
         self.prep = keywords.preprocessor_directives
         self.reg = keywords.regular
         self.file = f
+        self.blockcomment = False
 
-    def get_token(self, k):
+    # Returns the token name for a keyword, or "" if it is not a keyword.
+    def get_token(self, keyword):
         token = ""
-        if w in self.prep:
-            token = self.prep(w)
-        if w in self.reg:
-            token = self.reg(w)
-        return token
\ No newline at end of file
+        # self.prep / self.reg are dicts: subscript them, don't call them.
+        if keyword in self.prep:
+            token = self.prep[keyword]
+        if keyword in self.reg:
+            token = self.reg[keyword]
+        return token
+
+    # Handles a preprocessor token; not implemented yet.
+    def parse_preprocess(self, token):
+        pass
+
+    # Converts a word into a 'key : value' pair.
+    def tokenize_word(self, word):
+        token = {}
+        # Dicts take item assignment; 'keys'/'values' are methods, not fields.
+        if word in self.prep:
+            token[word] = self.prep[word]
+        if word in self.reg:
+            token[word] = self.reg[word]
+        return token
+
+    # Creates a list of elements per line in file,
+    # then adding this list as a single element in a global tuple
+    def parseline(self, line):
+        tupline = []
+        for w in line.split():
+            token = self.tokenize_word(w)
+            if 'PREPROCESS' in token.values():
+                self.parse_preprocess(token)
+            tupline.append(token)
+        return tupline
\ No newline at end of file