Natural-language-style parsing
For the input '1 - 2*33', the recognizer listed below produces this trace:

      got NUM(1)
Is NUM(1) an expr?
 Is NUM(1) a term?
  Is NUM(1) a number?
     is_term got -(-)
     -(-) was back
     is_expr got -(-)
     is_expr got NUM(2)
 Is NUM(2) an expr?
  Is NUM(2) a term?
   Is NUM(2) a number?
     is_term got *(*)
     is_term got NUM(33)
   Is NUM(33) a term?
    Is NUM(33) a number?
     is_term got None
     is_expr got None

import ply.lex as lex # pip install ply
tokens = ('NUM',); t_NUM = r'\d+'; literals = ['+', '-', '*', '/']
t_ignore = ' \t'
def t_error(t): raise SyntaxError()
lexer = lex.lex()

prev_tk = None  # one-token pushback buffer
def get_tk(who):
    global prev_tk
    if prev_tk is not None: tk = prev_tk; prev_tk = None
    else: tk = lexer.token()
    if tk is None: print('\t', who + ' got None')
    else: print('\t', who + ' got ' + tk.type + '(' + tk.value + ')')
    return tk
def put_token_back(tk):
    global prev_tk
    prev_tk = tk
    print('\t', tk.type + '(' + tk.value + ') was back')
def print_token(tk, what, i): print(i * ' ', 'Is ' + tk.type + '(' + tk.value + ') ' + what + '?', sep='')
def Tk(fn, *args, **kwargs): fn(*args, **kwargs)  # thin wrapper; a single convenient hook point
def is_NUM(tk, i):
    print_token(tk, 'a number', i)
    if tk.type != 'NUM': raise SyntaxError()
def is_term(tk, i):
    '''t : NUM | NUM '*' t | NUM '/' t'''
    if tk is None: raise SyntaxError()  # e.g. input ending in '*' or '/'
    print_token(tk, 'a term', i)
    Tk(is_NUM, tk, i + 1)
    tk = get_tk('is_term')
    if tk is None: return
    if tk.type == '*' or tk.type == '/': Tk(is_term, get_tk('is_term'), i + 1)
    else: put_token_back(tk)
def is_expr(tk, i):
    '''e : t | t '+' e | t '-' e'''
    if tk is None:
        if i == 0: return  # empty input counts as an (empty) expression at the top level
        raise SyntaxError()
    print_token(tk, 'an expr', i)
    Tk(is_term, tk, i + 1)
    tk = get_tk('is_expr')
    if tk is None: return
    t = tk.type
    if t == '+' or t == '-': Tk(is_expr, get_tk('is_expr'), i + 1)
    # note: a trailing token that is not '+'/'-' is silently dropped
lexer.input('1 - 2*33')
try: Tk(is_expr, get_tk(''), 0)
except SyntaxError: print('\nAbout what talking you are?')

A set of productions is a set of rules. When parsing, we do not generate the language from the rules and compare it against the input; we check whether the input conforms to the rules. That is why I find is_expr a more understandable function name than expr. It reads like natural language: Is Tom a subject? Is Tom a noun? Is "it" a pronoun? The opening question, "Is this token an expr?", (a) has looked at only the first token, and (b) is a proposition that may turn out true or false; let's try and see. Note that we never analyzed the input into machine code, intermediate code, or a syntax tree. The recursive calling relationships and their order during parsing live on the call stack, which hides a dynamic, incomplete tree.
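That hidden tree can be made explicit with a small change of return type. Here is a sketch (my own illustration, not part of the recognizer above; it swaps the ply lexer for a whitespace-separated token list) in which each function returns a node instead of merely not raising:

def parse_term(tokens):
    '''t : NUM | NUM '*' t | NUM '/' t'''
    node = ('NUM', tokens.pop(0))
    if tokens and tokens[0] in ('*', '/'):
        op = tokens.pop(0)
        node = (op, node, parse_term(tokens))  # right-recursive, like is_term
    return node
def parse_expr(tokens):
    '''e : t | t '+' e | t '-' e'''
    node = parse_term(tokens)
    if tokens and tokens[0] in ('+', '-'):
        op = tokens.pop(0)
        node = (op, node, parse_expr(tokens))  # right-recursive, like is_expr
    return node
print(parse_expr('1 - 2 * 33'.split()))
# ('-', ('NUM', '1'), ('*', ('NUM', '2'), ('NUM', '33')))

The same questions get asked in the same order; the only difference is that each "yes" now leaves a node behind.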

import ply.lex as lex # pip install ply
import ply.yacc as yacc
from functools import reduce
tokens = ('NUM',); t_NUM = r'\d+'; literals = ['+', '-', '*', '/']
def t_error(t): t.lexer.skip(1)  # skip anything unexpected, including spaces
precedence = (('left', '+', '-'), ('left', '*', '/'))  # resolves the ambiguous e op e rules
s = []  # reductions, logged in the order yacc performs them
def p_1(p): "e : NUM"; s.append(p_1.__doc__); p[0] = int(p[1])
def p_2(p): "e : e '+' e"; s.append(p_2.__doc__); p[0] = p[1] + p[3]
def p_3(p): "e : e '-' e"; s.append(p_3.__doc__); p[0] = p[1] - p[3]
def p_4(p): "e : e '*' e"; s.append(p_4.__doc__); p[0] = p[1] * p[3]
def p_5(p): "e : e '/' e"; s.append(p_5.__doc__); p[0] = p[1] / p[3]
def p_error(p): raise Exception()
lexer = lex.lex()
istr = '3 + 2 * 5'
print(istr, '=', yacc.yacc().parse(istr))
s.reverse(); print(reduce(lambda x,y:x+'\n'+y, s, ''))  # last reduction first
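Reading the log backwards turns the bottom-up record of reductions into something that reads top-down, like a derivation. Hand-tracing the LALR steps for '3 + 2 * 5' (so treat the exact order as my reconstruction, not verified output), the reversed list should come out roughly as:

e : e '+' e
e : e '*' e
e : NUM
e : NUM
e : NUM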

Can a top-down parser be written for a grammar like the one above? What is left recursion? See https://files.cnblogs.com/files/blogs/714801/topdownparsing.zip (search for "top down operator precedence parsing").
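A taste of the answer before chasing the link: e : e '+' e is left-recursive, and naive recursive descent on it never terminates, because is_expr would call is_expr again without having consumed a token. Top-down operator precedence (Pratt) parsing avoids left recursion by giving each operator a binding power and letting a loop build the left side. Below is a minimal sketch of the idea (my own illustration, not the code from the zip), again assuming a whitespace-separated token list:

def pratt(tokens, min_bp=0):
    bp = {'+': 10, '-': 10, '*': 20, '/': 20}  # '*'/'/' bind tighter than '+'/'-'
    left = int(tokens.pop(0))                  # a NUM must come first
    while tokens and bp.get(tokens[0], -1) >= min_bp:
        op = tokens.pop(0)
        right = pratt(tokens, bp[op] + 1)      # +1 makes the operators left-associative
        left = {'+': left + right, '-': left - right,
                '*': left * right, '/': left / right}[op]
    return left
print(pratt('1 + 2 * 3 - 4'.split()))          # -> 3

The while loop, not the grammar, produces the left-leaning structure, so '+' and '-' end up left-associative without any left-recursive rule. The last listing goes back to ply: the right-recursive grammar of the hand-written recognizer, fed to yacc, with no precedence declarations needed.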

import ply.lex as lex # pip install ply
import ply.yacc as yacc
from functools import reduce
tokens = ('NUM',); t_NUM = r'\d+'; literals = ['+', '-', '*', '/']
def t_error(t): t.lexer.skip(1)
s = []  # reduction log, as above
def p_1(p): "e : t"; s.append(p_1.__doc__); p[0] = p[1]
def p_2(p): "e : t '+' e"; s.append(p_2.__doc__); p[0] = p[1] + p[3]
def p_3(p): "e : t '-' e"; s.append(p_3.__doc__); p[0] = p[1] - p[3]
def p_4(p): "t : NUM"; s.append(p_4.__doc__ + ' ' + p[1]); p[0] = int(p[1])
def p_5(p): "t : NUM '*' t"; s.append(p_5.__doc__); p[0] = int(p[1]) * p[3]
def p_6(p): "t : NUM '/' t"; s.append(p_6.__doc__); p[0] = int(p[1]) / p[3]
def p_error(p): raise Exception()
lexer = lex.lex()
istr = '1 + 2 * 3 - 4'
print(istr, '=', yacc.yacc().parse(istr))
s.reverse(); print(reduce(lambda x,y:x+'\n'+y, s, ''))  # last reduction first
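Again reversing the log: hand-tracing the LALR reductions for '1 + 2 * 3 - 4' (my reconstruction, so take the exact order with a grain of salt), the printout should look something like:

e : t '+' e
e : t '-' e
e : t
t : NUM 4
t : NUM '*' t
t : NUM 3
t : NUM 1

which is much like the question sequence the recognizer at the top of this post asks, top-down.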
