Merge pull request #6 from gregglind/negativearray-bug6 · mloudon/python-jsonpath-rw@97c6823

Commit 97c6823

Merge pull request kennknowles#6 from gregglind/negativearray-bug6
Negative array slicing?
2 parents 7d5169c + 3451d2e · commit 97c6823

3 files changed: 12 additions & 8 deletions

jsonpath_rw/lexer.py

Lines changed: 6 additions & 6 deletions
```diff
@@ -11,9 +11,9 @@ class JsonPathLexerError(Exception):
 
 class JsonPathLexer(object):
     '''
-    A Lexical analyzer for JsonPath. 
+    A Lexical analyzer for JsonPath.
     '''
-    
+
     def __init__(self, debug=False):
         self.debug = debug
         if self.__doc__ == None:
@@ -23,7 +23,7 @@ def tokenize(self, string):
         '''
         Maps a string to an iterator over tokens. In other words: [char] -> [token]
         '''
-        
+
         new_lexer = ply.lex.lex(module=self, debug=self.debug, errorlog=logger)
         new_lexer.latest_newline = 0
         new_lexer.input(string)
@@ -43,7 +43,7 @@ def tokenize(self, string):
     # Anyhow, it is pythonic to give some rope to hang oneself with :-)
 
     literals = ['*', '.', '[', ']', '(', ')', '$', ',', ':', '|', '&']
-    
+
     reserved_words = { 'where': 'WHERE' }
 
     tokens = ['DOUBLEDOT', 'NUMBER', 'ID', 'NAMED_OPERATOR'] + list(reserved_words.values())
@@ -62,7 +62,7 @@ def t_ID(self, t):
         return t
 
     def t_NUMBER(self, t):
-        r'\d+'
+        r'-?\d+'
         t.value = int(t.value)
         return t
 
@@ -101,7 +101,7 @@ def t_doublequote_DOUBLEQUOTE(self, t):
     def t_doublequote_error(self, t):
         raise JsonPathLexerError('Error on line %s, col %s while lexing doublequoted field: Unexpected character: %s ' % (t.lexer.lineno, t.lexpos - t.lexer.latest_newline, t.value[0]))
 
-    
+
     # Back-quoted "magic" operators
     t_backquote_ignore = ''
     def t_BACKQUOTE(self, t):
```
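The substantive change here is the `t_NUMBER` rule (the rest is whitespace cleanup): PLY takes a token function's docstring as its regular expression, so widening `r'\d+'` to `r'-?\d+'` makes the lexer emit a single NUMBER token with a negative integer value for input like `-2`, instead of failing on the `-`. A minimal sketch of the effect, assuming the package is importable as `jsonpath_rw`:

```python
from jsonpath_rw.lexer import JsonPathLexer

lexer = JsonPathLexer()

# After this commit, '-2' lexes as one NUMBER token whose value is
# already an int (t_NUMBER calls int(t.value)), so a slice expression
# like '[5:-2]' yields the stream printed below.
for tok in lexer.tokenize('[5:-2]'):
    print(tok.type, tok.value)
# Expected stream, roughly: ('[', '['), ('NUMBER', 5), (':', ':'),
#                           ('NUMBER', -2), (']', ']')
```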

tests/test_lexer.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -15,7 +15,7 @@ def token(self, value, ty=None):
         t.lineno = -1
         t.lexpos = -1
         return t
-    
+
     def assert_lex_equiv(self, s, stream2):
         # NOTE: lexer fails to reset after call?
         l = JsonPathLexer(debug=True)
@@ -38,6 +38,8 @@ def test_simple_inputs(self):
         self.assert_lex_equiv('fuzz', [self.token('fuzz', 'ID')])
         self.assert_lex_equiv('1', [self.token(1, 'NUMBER')])
         self.assert_lex_equiv('45', [self.token(45, 'NUMBER')])
+        self.assert_lex_equiv('-1', [self.token(-1, 'NUMBER')])
+        self.assert_lex_equiv(' -13 ', [self.token(-13, 'NUMBER')])
         self.assert_lex_equiv('"fuzz.bang"', [self.token('fuzz.bang', 'ID')])
         self.assert_lex_equiv('fuzz.bang', [self.token('fuzz', 'ID'), self.token('.', '.'), self.token('bang', 'ID')])
         self.assert_lex_equiv('fuzz.*', [self.token('fuzz', 'ID'), self.token('.', '.'), self.token('*', '*')])
```
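The two new cases pin the behavior down at the token level, including the detail that surrounding whitespace is swallowed by the lexer rather than becoming part of the number. A standalone equivalent of the new assertions, for readers without the test harness:

```python
from jsonpath_rw.lexer import JsonPathLexer

lexer = JsonPathLexer()

# ' -13 ' should lex to exactly one NUMBER token; the leading and
# trailing spaces are consumed by the lexer's ignore rules.
tokens = list(lexer.tokenize(' -13 '))
assert len(tokens) == 1
assert tokens[0].type == 'NUMBER'
assert tokens[0].value == -13
```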

tests/test_parser.py

Lines changed: 3 additions & 1 deletion
```diff
@@ -28,7 +28,9 @@ def test_atomic(self):
                                  ('[:]', Slice()),
                                  ('[*]', Slice()),
                                  ('[:2]', Slice(end=2)),
-                                 ('[1:2]', Slice(start=1, end=2))])
+                                 ('[1:2]', Slice(start=1, end=2)),
+                                 ('[5:-2]', Slice(start=5, end=-2))
+                                 ])
 
     def test_nested(self):
         self.check_parse_cases([('foo.baz', Child(Fields('foo'), Fields('baz'))),
```
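End to end, this is what the fix buys: `[5:-2]` now parses to `Slice(start=5, end=-2)`. A hedged usage sketch, assuming `Slice` follows ordinary Python slice semantics when applied to a list (the diff covers lexing and parsing, not evaluation):

```python
from jsonpath_rw import parse

data = {'items': [0, 1, 2, 3, 4, 5, 6, 7]}

# '[5:-2]' parses to Slice(start=5, end=-2); on an 8-element list,
# Python slice semantics give indices 5..5, i.e. the single value 5.
expr = parse('items[5:-2]')
print([match.value for match in expr.find(data)])  # expected: [5]
```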

0 commit comments
