# (c) 2018 Jan Lerking
# Python parser for C header files.
# Used for creating corresponding NASM include files.

import keywords
import os

keywords.init()

class parser:

    def __init__(self, f):
        self.prep = keywords.preprocessor_directives
        self.reg = keywords.regular
        self.file = f
        self.blockcomment = False

    # Looks up the token for a keyword in either keyword table.
    def get_token(self, keyword):
        token = ""
        if keyword in self.prep:
            token = self.prep[keyword]
        elif keyword in self.reg:
            token = self.reg[keyword]
        return token

    # Handles a preprocessor directive token (body not implemented yet).
    def parse_preprocess(self, token):
        pass

    # Converts a word into a 'key : value' pair.
    def tokenize_word(self, word):
        token = {}
        if word in self.prep:
            token[word] = self.prep[word]
        elif word in self.reg:
            token[word] = self.reg[word]
        return token

    # Creates a list of token elements for one line of the file;
    # the caller can add each such list as a single element of a global tuple.
    def parseline(self, line):
        tupline = []
        words = line.split()
        for w in words:
            token = self.tokenize_word(w)
            if token.get(w) == 'PREPROCESS':
                self.parse_preprocess(token)
            tupline.append(token)
        return tupline
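

# A minimal usage sketch (an assumption, not part of the original module):
# it presumes the local keywords module exposes preprocessor_directives and
# regular as dict-like keyword tables, as the parser above expects, and that
# a C header file named 'example.h' exists next to this script.
if __name__ == '__main__':
    with open('example.h') as header:  # hypothetical input file
        p = parser(header)
        # Collect the per-line token lists into one tuple for the whole file.
        parsed = tuple(p.parseline(line) for line in header)
    print(parsed)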