from pygments.lexer import ExtendedRegexLexer
from pygments.token import *
# Token types used to colour the DNS reply status in the response header
# (consumed by CustomLexer.parse_response_header): a successful lookup is
# rendered as a function name, the listed error statuses as escape sequences.
# Statuses not present here fall back to Name.Tag at the lookup site.
REPLY_CODES = {
    'NOERROR': Name.Function,
    **dict.fromkeys(('NXDOMAIN', 'REFUSED', 'NOTAUTH', 'NOTZONE'), String.Escape),
}
class CustomLexer(ExtendedRegexLexer):
    """Pygments lexer for the output of the ``dig`` DNS lookup utility.

    ``ExtendedRegexLexer`` (rather than plain ``RegexLexer``) is required
    because the two header callbacks below emit several tokens from a single
    match and then advance ``ctx.pos`` past the match themselves.
    """

    name = 'Dig'
    aliases = ['dig']
    # NOTE(review): registered on '*.zone' files even though the grammar
    # targets dig output — confirm this association is intended.
    filenames = ['*.zone']

    def parse_dig_header(lexer, re, ctx):
        """Tokenize the dig banner line (matched by the ``; <<>> ... <<>>``
        rule in ``root``).

        ``re`` is the regex match object (the parameter name shadows the
        ``re`` module); each named group is emitted at its own start offset
        with its own token type, then the context position is moved to the
        end of the match.
        """
        yield re.start('c1'), Comment.Hashbang, re.group('c1')
        yield re.start('version'), String.Doc, re.group('version')
        yield re.start('c2'), Comment.Hashbang, re.group('c2')
        yield re.start('query'), Name.Function, re.group('query')
        ctx.pos = re.end()

    def parse_response_header(lexer, re, ctx):
        """Tokenize the ``;; ->>HEADER<<-`` response summary (two physical
        lines captured by one regex: opcode/status/id, then flags and the
        four section counts).

        Every captured value is emitted as ``String.Doc`` between
        ``Comment.Hashbang`` separators, except the reply status, which is
        coloured via REPLY_CODES.
        """
        yield re.start('c1'), Comment.Hashbang, re.group('c1')
        yield re.start('opcode'), String.Doc, re.group('opcode')
        yield re.start('c2'), Comment.Hashbang, re.group('c2')
        # Known reply codes get their configured token; anything else falls
        # back to Name.Tag.
        yield re.start('rcode'), REPLY_CODES.get(re.group('rcode'), Name.Tag), re.group('rcode')
        yield re.start('c3'), Comment.Hashbang, re.group('c3')
        yield re.start('nonce'), String.Doc, re.group('nonce')
        yield re.start('c4'), Comment.Hashbang, re.group('c4')
        yield re.start('flags'), String.Doc, re.group('flags')
        yield re.start('c5'), Comment.Hashbang, re.group('c5')
        yield re.start('nquery'), String.Doc, re.group('nquery')
        yield re.start('c6'), Comment.Hashbang, re.group('c6')
        yield re.start('nanswer'), String.Doc, re.group('nanswer')
        yield re.start('c7'), Comment.Hashbang, re.group('c7')
        yield re.start('nauth'), String.Doc, re.group('nauth')
        yield re.start('c8'), Comment.Hashbang, re.group('c8')
        yield re.start('nadd'), String.Doc, re.group('nadd')
        ctx.pos = re.end()

    tokens = {
        'root': [
            # dig banner, e.g. "; <<>> DiG 9.x <<>> example.com"
            (r'(?P<c1>; <<>>)(?P<version>.*)(?P<c2><<>>)(?P<query>.*\n)', parse_dig_header),
            # Response summary: opcode, status, id, then flags and the
            # QUERY/ANSWER/AUTHORITY/ADDITIONAL section counts.
            (r'(?P<c1>;; ->>HEADER<<- opcode: )(?P<opcode>[^\s]+)(?P<c2>, status: )(?P<rcode>[^\s]+)(?P<c3>, id: )(?P<nonce>[^\s]+\n)(?P<c4>;; flags: )(?P<flags>[^;]+)(?P<c5>; QUERY: )(?P<nquery>\d+)(?P<c6>, ANSWER: )(?P<nanswer>\d+)(?P<c7>, AUTHORITY: )(?P<nauth>\d+)(?P<c8>, ADDITIONAL: )(?P<nadd>\d+\n)', parse_response_header),
            (r';;.*', Generic.Subheading), # comment section headers
            (r';.*', Comment.Single), # comment section headers
            (r'\n', Generic),
            # Zero-width match at the start of a record line: push into the
            # owner-name state without consuming any input.
            (r'^', Generic, 'rname')
        ],
        # Owner-name column of a resource record.
        'rname': [
            (r'\s', Generic, 'wsp1'),  # whitespace ends the name; move to next column
            (r'\\[0-7]{3}', String.Escape),  # octal character escape in a name
            (r'\\.', String.Escape),
            (r'.', Name.Function)
        ],
        # Second record column: an integer field (the TTL in dig answers —
        # presumed from dig's record layout, confirm).
        'wsp1': [
            (r'\s', Generic),
            (r'[0-9]+', Number.Integer, 'wsp2'),
        ],
        # Third record column (record class such as IN — presumed, confirm).
        'wsp2': [
            (r'\s', Generic),
            (r'[^\s]+', Name.Class, 'wsp3'),
        ],
        # Fourth record column (record type such as A/MX — presumed, confirm).
        'wsp3': [
            (r'\s', Generic),
            (r'[^\s]+', Name.Tag, 'value'),
        ],
        # Remainder of the record (the data portion).
        'value': [
            (r';.*$', Comment.Special),
            (r'\(', Punctuation, 'multiline'),  # parenthesised multi-line data
            # End of record: pop rname/wsp1/wsp2/wsp3/value in one step.
            (r'\n', Generic, '#pop:5'),
            (r'\s', Generic),
            (r'"', String.Double, 'string'),
            (r'.', String.Single)
        ],
        # Double-quoted string inside record data.
        'string': [
            (r'\\[0-7]{3}', String.Escape),
            (r'\\.', String.Escape),
            (r'"', String.Double, '#pop'),
            (r'.', String.Double)
        ],
        # Inside "( ... )": like 'value' but terminated by the closing
        # parenthesis instead of a newline.
        'multiline': [
            (r';.*$', Comment.Special),
            (r'\)', Punctuation, '#pop'),
            (r'\s', Generic),
            (r'"', String.Double, 'string'),
            (r'.', String.Single)
        ]
    }
|