Python 2 & 3 support · cdent/python-jsonpath-rw@4ae5f3f · GitHub
[go: up one dir, main page]

Skip to content

Commit 4ae5f3f

Browse files
committed
Python 2 & 3 support
1 parent 47855af commit 4ae5f3f

File tree

8 files changed

+30
-16
lines changed

8 files changed

+30
-16
lines changed

.travis.yml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,9 @@
11
language: python
22
python:
3+
- "2.6"
34
- "2.7"
5+
- "3.2"
6+
- "3.3"
47
install:
58
- "pip install . --use-mirrors"
69
- "pip install pytest --use-mirrors"

jsonpath_rw/__init__.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,2 +1,2 @@
1-
from jsonpath import *
2-
from parser import parse
1+
from .jsonpath import *
2+
from .parser import parse

jsonpath_rw/jsonpath.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,7 @@
1+
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
12
import logging
23
import six
4+
from six.moves import xrange
35
from itertools import *
46

57
logger = logging.getLogger(__name__)
@@ -283,7 +285,7 @@ def match_recursively(datum):
283285
# Manually do the * or [*] to avoid coercion and recurse just the right-hand pattern
284286
if isinstance(datum.value, list):
285287
recursive_matches = [submatch
286-
for i in xrange(0, len(datum.value))
288+
for i in range(0, len(datum.value))
287289
for submatch in match_recursively(DatumInContext(datum.value[i], context=datum, path=Index(i)))]
288290

289291
elif isinstance(datum.value, dict):

jsonpath_rw/lexer.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
12
import sys
23
import logging
34

@@ -40,7 +41,7 @@ def tokenize(self, string):
4041

4142
reserved_words = { 'where': 'WHERE' }
4243

43-
tokens = ['DOUBLEDOT', 'NUMBER', 'ID'] + reserved_words.values()
44+
tokens = ['DOUBLEDOT', 'NUMBER', 'ID'] + list(reserved_words.values())
4445

4546
states = [ ('singlequote', 'exclusive'),
4647
('doublequote', 'exclusive') ]
@@ -107,4 +108,4 @@ def t_error(self, t):
107108
logging.basicConfig()
108109
lexer = JsonPathLexer(debug=True)
109110
for token in lexer.tokenize(sys.stdin.read()):
110-
print '%-20s%s' % (token.value, token.type)
111+
print('%-20s%s' % (token.value, token.type))

jsonpath_rw/parser.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from __future__ import print_function, absolute_import, division, generators, nested_scopes
12
import sys
23
import os.path
34
import logging
@@ -163,12 +164,12 @@ def __init__(self, iterator):
163164

164165
def token(self):
165166
try:
166-
return self.iterator.next()
167+
return next(self.iterator)
167168
except StopIteration:
168169
return None
169170

170171

171172
if __name__ == '__main__':
172173
logging.basicConfig()
173174
parser = JsonPathParser(debug=True)
174-
print parser.parse(sys.stdin.read())
175+
print(parser.parse(sys.stdin.read()))

tests/test_jsonpath.py

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
12
import unittest
23

34
from jsonpath_rw import jsonpath # For setting the global auto_id_field flag
@@ -89,21 +90,23 @@ def check_cases(self, test_cases):
8990
# Also, we coerce iterables, etc, into the desired target type
9091

9192
for string, data, target in test_cases:
92-
print 'parse("%s").find(%s) =?= %s' % (string, data, target)
93+
print('parse("%s").find(%s) =?= %s' % (string, data, target))
9394
result = parse(string).find(data)
9495
if isinstance(target, list):
9596
assert [r.value for r in result] == target
97+
elif isinstance(target, set):
98+
assert {r.value for r in result} == target
9699
else:
97100
assert result.value == target
98101

99102
def test_fields_value(self):
100103
jsonpath.auto_id_field = None
101104
self.check_cases([ ('foo', {'foo': 'baz'}, ['baz']),
102105
('foo,baz', {'foo': 1, 'baz': 2}, [1, 2]),
103-
('*', {'foo': 1, 'baz': 2}, [1, 2]) ])
106+
('*', {'foo': 1, 'baz': 2}, {1, 2}) ])
104107

105108
jsonpath.auto_id_field = 'id'
106-
self.check_cases([ ('*', {'foo': 1, 'baz': 2}, [1, 2, '@']) ])
109+
self.check_cases([ ('*', {'foo': 1, 'baz': 2}, {1, 2, '@'}) ])
107110

108111
def test_index_value(self):
109112
self.check_cases([
@@ -146,21 +149,23 @@ def check_paths(self, test_cases):
146149
# Also, we coerce iterables, etc, into the desired target type
147150

148151
for string, data, target in test_cases:
149-
print 'parse("%s").find(%s).paths =?= %s' % (string, data, target)
152+
print('parse("%s").find(%s).paths =?= %s' % (string, data, target))
150153
result = parse(string).find(data)
151154
if isinstance(target, list):
152155
assert [str(r.full_path) for r in result] == target
156+
elif isinstance(target, set):
157+
assert {str(r.full_path) for r in result} == target
153158
else:
154159
assert str(result.path) == target
155160

156161
def test_fields_paths(self):
157162
jsonpath.auto_id_field = None
158163
self.check_paths([ ('foo', {'foo': 'baz'}, ['foo']),
159164
('foo,baz', {'foo': 1, 'baz': 2}, ['foo', 'baz']),
160-
('*', {'foo': 1, 'baz': 2}, ['foo', 'baz']) ])
165+
('*', {'foo': 1, 'baz': 2}, {'foo', 'baz'}) ])
161166

162167
jsonpath.auto_id_field = 'id'
163-
self.check_paths([ ('*', {'foo': 1, 'baz': 2}, ['foo', 'baz', 'id']) ])
168+
self.check_paths([ ('*', {'foo': 1, 'baz': 2}, {'foo', 'baz', 'id'}) ])
164169

165170
def test_index_paths(self):
166171
self.check_paths([('[0]', [42], ['[0]']),
@@ -190,7 +195,7 @@ def test_fields_auto_id(self):
190195
('*.id',
191196
{'foo':{'id': 1},
192197
'baz': 2},
193-
['1', 'baz']) ])
198+
{'1', 'baz'}) ])
194199

195200
def test_index_auto_id(self):
196201
jsonpath.auto_id_field = "id"

tests/test_lexer.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
12
import logging
23
import unittest
34

@@ -22,7 +23,7 @@ def assert_lex_equiv(self, s, stream2):
2223
stream2 = list(stream2)
2324
assert len(stream1) == len(stream2)
2425
for token1, token2 in zip(stream1, stream2):
25-
print token1, token2
26+
print(token1, token2)
2627
assert token1.type == token2.type
2728
assert token1.value == token2.value
2829

tests/test_parser.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from __future__ import unicode_literals, print_function, absolute_import, division, generators, nested_scopes
12
import unittest
23

34
from jsonpath_rw.lexer import JsonPathLexer
@@ -15,7 +16,7 @@ def check_parse_cases(self, test_cases):
1516
parser = JsonPathParser(debug=True, lexer_class=lambda:JsonPathLexer(debug=False)) # Note that just manually passing token streams avoids this dep, but that sucks
1617

1718
for string, parsed in test_cases:
18-
print string, '=?=', parsed # pytest captures this and we see it only on a failure, for debugging
19+
print(string, '=?=', parsed) # pytest captures this and we see it only on a failure, for debugging
1920
assert parser.parse(string) == parsed
2021

2122
def test_atomic(self):

0 commit comments

Comments
 (0)
0