diff --git a/dig-lexer.py b/dig-lexer.py
index 57e553d..4e611fb 100644
--- a/dig-lexer.py
+++ b/dig-lexer.py
@@ -1,28 +1,91 @@
 from pygments.lexer import ExtendedRegexLexer
 from pygments.token import *
 
+REPLY_CODES = {
+    'NOERROR': Name.Function,
+    'NXDOMAIN': String.Escape,
+    'REFUSED': String.Escape,
+    'NOTAUTH': String.Escape,
+    'NOTZONE': String.Escape
+}
+
 class CustomLexer(ExtendedRegexLexer):
     name = 'Dig'
     aliases = ['dig']
     filenames = ['*.zone']
 
-    def parse_record(lexer, match, ctx):
-        yield match.start('zone'), Name.Function, match.group('zone')
-        if match.group('ttl'):
-            yield match.start('ttl'), Number.Integer, match.group('ttl')
-        yield match.start('cls'), Name.Class, match.group('cls')
-        yield match.start('typ'), Name.Tag, match.group('typ')
-        yield match.start('value'), String.Escape, match.group('value')
-        if match.group('extval'):
-            yield match.start('extval'), String.Double, match.group('extval')
-        yield match.start('comment'), Comment.Special, match.group('comment')
-        ctx.pos = match.end()
-
+    def parse_dig_header(lexer, re, ctx):
+        yield re.start('c1'), Comment.Hashbang, re.group('c1')
+        yield re.start('version'), String.Doc, re.group('version')
+        yield re.start('c2'), Comment.Hashbang, re.group('c2')
+        yield re.start('query'), Name.Function, re.group('query')
+        ctx.pos = re.end()
+
+    def parse_response_header(lexer, re, ctx):
+        yield re.start('c1'), Comment.Hashbang, re.group('c1')
+        yield re.start('opcode'), String.Doc, re.group('opcode')
+        yield re.start('c2'), Comment.Hashbang, re.group('c2')
+        yield re.start('rcode'), REPLY_CODES.get(re.group('rcode'), Name.Tag), re.group('rcode')
+        yield re.start('c3'), Comment.Hashbang, re.group('c3')
+        yield re.start('nonce'), String.Doc, re.group('nonce')
+        yield re.start('c4'), Comment.Hashbang, re.group('c4')
+        yield re.start('flags'), String.Doc, re.group('flags')
+        yield re.start('c5'), Comment.Hashbang, re.group('c5')
+        yield re.start('nquery'), String.Doc, re.group('nquery')
+        yield re.start('c6'), Comment.Hashbang, re.group('c6')
+        yield re.start('nanswer'), String.Doc, re.group('nanswer')
+        yield re.start('c7'), Comment.Hashbang, re.group('c7')
+        yield re.start('nauth'), String.Doc, re.group('nauth')
+        yield re.start('c8'), Comment.Hashbang, re.group('c8')
+        yield re.start('nadd'), String.Doc, re.group('nadd')
+        ctx.pos = re.end()
+
     tokens = {
         'root': [
-            (r'; <<>>.*\n', Comment.Hashbang), # dig header
+            (r'(?P<c1>; <<>>)(?P<version>.*)(?P<c2><<>>)(?P<query>.*\n)', parse_dig_header),
+            (r'(?P<c1>;; ->>HEADER<<- opcode: )(?P<opcode>[^\s]+)(?P<c2>, status: )(?P<rcode>[^\s]+)(?P<c3>, id: )(?P<nonce>[^\s]+\n)(?P<c4>;; flags: )(?P<flags>[^;]+)(?P<c5>; QUERY: )(?P<nquery>\d+)(?P<c6>, ANSWER: )(?P<nanswer>\d+)(?P<c7>, AUTHORITY: )(?P<nauth>\d+)(?P<c8>, ADDITIONAL: )(?P<nadd>\d+\n)', parse_response_header),
             (r';;.*', Generic.Subheading), # comment section headers
             (r';.*', Comment.Single), # comment section headers
-            (r'(?P<zone>(.*)?\.\s+)(?P<ttl>[0-9]+\s+)?(?P<cls>[A-Z0-9]+\s+)(?P<typ>[A-Z0-9]+\s+)(?P<value>.+?)(?P<extval>\s+?\(\s*?(\n.*?(;[^\n]*)?)+?\))?(?P<comment>(\s+?;.*)?\n)', parse_record),
+            (r'\n', Generic),
+            (r'^', Generic, 'rname')
+        ],
+        'rname': [
+            (r'\s', Generic, 'wsp1'),
+            (r'\\[0-7]{3}', String.Escape),
+            (r'\\.', String.Escape),
+            (r'.', Name.Function)
+        ],
+        'wsp1': [
+            (r'\s', Generic),
+            (r'[0-9]+', Number.Integer, 'wsp2'),
+        ],
+        'wsp2': [
+            (r'\s', Generic),
+            (r'[^\s]+', Name.Class, 'wsp3'),
+        ],
+        'wsp3': [
+            (r'\s', Generic),
+            (r'[^\s]+', Name.Tag, 'value'),
+        ],
+        'value': [
+            (r';.*$', Comment.Special),
+            (r'\(', Punctuation, 'multiline'),
+            (r'\n', Generic, '#pop:5'),
+            (r'\s', Generic),
+            (r'"', String.Double, 'string'),
+            (r'.', String.Single)
+        ],
+        'string': [
+            (r'\\[0-7]{3}', String.Escape),
+            (r'\\.', String.Escape),
+            (r'"', String.Double, '#pop'),
+            (r'.', String.Double)
+        ],
+        'multiline': [
+            (r';.*$', Comment.Special),
+            (r'\)', Punctuation, '#pop'),
+            (r'\s', Generic),
+            (r'"', String.Double, 'string'),
+            (r'.', String.Single)
+        ]
     }
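Not part of the patch, but a minimal way to smoke-test the new header states (a sketch only: the sample text, the try_dig_lexer.py name, and dig-output.txt are illustrative, not taken from the repo):

    # try_dig_lexer.py -- load CustomLexer from dig-lexer.py and colorize a sample header
    import importlib.util
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    # Import the lexer module from its hyphenated file name.
    spec = importlib.util.spec_from_file_location("dig_lexer", "dig-lexer.py")
    dig_lexer = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(dig_lexer)

    # Typical dig banner and response-header lines, matched by
    # parse_dig_header and parse_response_header above.
    sample = (
        "; <<>> DiG 9.18.1 <<>> example.com\n"
        ";; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 23471\n"
        ";; flags: qr rd ra; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1\n"
    )
    print(highlight(sample, dig_lexer.CustomLexer(), TerminalFormatter()))

Because the class is literally named CustomLexer, recent Pygments releases should also be able to load it straight from the command line, e.g. pygmentize -x -l dig-lexer.py dig-output.txt.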