add scripts ; add unicode tables · mdinger/unicode-segmentation@b378be6 · GitHub

Commit b378be6

add scripts ; add unicode tables
1 parent b6fffdc commit b378be6

3 files changed: +1524 -0 lines changed


scripts/unicode.py

Lines changed: 262 additions & 0 deletions
@@ -0,0 +1,262 @@
#!/usr/bin/env python
#
# Copyright 2011-2013 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

# This script uses the following Unicode tables:
# - DerivedCoreProperties.txt
# - DerivedNormalizationProps.txt
# - EastAsianWidth.txt
# - auxiliary/GraphemeBreakProperty.txt
# - auxiliary/WordBreakProperty.txt
# - PropList.txt
# - ReadMe.txt
# - Scripts.txt
# - UnicodeData.txt
#
# Since this should not require frequent updates, we just store this
# out-of-line and check the unicode.rs file into git.

import fileinput, re, os, sys, operator

preamble = '''// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// NOTE: The following code was generated by "src/etc/unicode.py", do not edit directly

#![allow(missing_docs, non_upper_case_globals, non_snake_case)]
'''

# Mapping taken from Table 12 from:
# http://www.unicode.org/reports/tr44/#General_Category_Values
expanded_categories = {
    'Lu': ['LC', 'L'], 'Ll': ['LC', 'L'], 'Lt': ['LC', 'L'],
    'Lm': ['L'], 'Lo': ['L'],
    'Mn': ['M'], 'Mc': ['M'], 'Me': ['M'],
    'Nd': ['N'], 'Nl': ['N'], 'No': ['No'],
    'Pc': ['P'], 'Pd': ['P'], 'Ps': ['P'], 'Pe': ['P'],
    'Pi': ['P'], 'Pf': ['P'], 'Po': ['P'],
    'Sm': ['S'], 'Sc': ['S'], 'Sk': ['S'], 'So': ['S'],
    'Zs': ['Z'], 'Zl': ['Z'], 'Zp': ['Z'],
    'Cc': ['C'], 'Cf': ['C'], 'Cs': ['C'], 'Co': ['C'], 'Cn': ['C'],
}

# these are the surrogate codepoints, which are not valid rust characters
surrogate_codepoints = (0xd800, 0xdfff)

def is_surrogate(n):
    return surrogate_codepoints[0] <= n <= surrogate_codepoints[1]

def fetch(f):
    if not os.path.exists(os.path.basename(f)):
        os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s"
                  % f)

    if not os.path.exists(os.path.basename(f)):
        sys.stderr.write("cannot load %s" % f)
        exit(1)

def group_cat(cat):
    cat_out = []
    letters = sorted(set(cat))
    cur_start = letters.pop(0)
    cur_end = cur_start
    for letter in letters:
        assert letter > cur_end, \
            "cur_end: %s, letter: %s" % (hex(cur_end), hex(letter))
        if letter == cur_end + 1:
            cur_end = letter
        else:
            cat_out.append((cur_start, cur_end))
            cur_start = cur_end = letter
    cat_out.append((cur_start, cur_end))
    return cat_out

def ungroup_cat(cat):
    cat_out = []
    for (lo, hi) in cat:
        while lo <= hi:
            cat_out.append(lo)
            lo += 1
    return cat_out

def format_table_content(f, content, indent):
    line = " "*indent
    first = True
    for chunk in content.split(","):
        if len(line) + len(chunk) < 98:
            if first:
                line += chunk
            else:
                line += ", " + chunk
            first = False
        else:
            f.write(line + ",\n")
            line = " "*indent + chunk
    f.write(line)

def load_properties(f, interestingprops):
    fetch(f)
    props = {}
    re1 = re.compile("^([0-9A-F]+) +; (\w+)")
    re2 = re.compile("^([0-9A-F]+)\.\.([0-9A-F]+) +; (\w+)")

    for line in fileinput.input(os.path.basename(f)):
        prop = None
        d_lo = 0
        d_hi = 0
        m = re1.match(line)
        if m:
            d_lo = m.group(1)
            d_hi = m.group(1)
            prop = m.group(2)
        else:
            m = re2.match(line)
            if m:
                d_lo = m.group(1)
                d_hi = m.group(2)
                prop = m.group(3)
            else:
                continue
        if interestingprops and prop not in interestingprops:
            continue
        d_lo = int(d_lo, 16)
        d_hi = int(d_hi, 16)
        if prop not in props:
            props[prop] = []
        props[prop].append((d_lo, d_hi))
    return props

def escape_char(c):
    return "'\\u{%x}'" % c

def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True,
        pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1])), is_const=True):
    pub_string = "const"
    if not is_const:
        pub_string = "let"
    if is_pub:
        pub_string = "pub " + pub_string
    f.write("    %s %s: %s = &[\n" % (pub_string, name, t_type))
    data = ""
    first = True
    for dat in t_data:
        if not first:
            data += ","
        first = False
        data += pfun(dat)
    format_table_content(f, data, 8)
    f.write("\n    ];\n\n")

def emit_break_module(f, break_table, break_cats, name):
    Name = name.capitalize()
    f.write("""pub mod %s {
    use core::slice::SliceExt;
    pub use self::%sCat::*;
    use core::result::Result::{Ok, Err};

    #[allow(non_camel_case_types)]
    #[derive(Clone, Copy, PartialEq, Eq)]
    pub enum %sCat {
""" % (name, Name, Name))

    break_cats.append("Any")
    break_cats.sort()
    for cat in break_cats:
        f.write(("        %sC_" % Name[0]) + cat + ",\n")
    f.write("""    }

    fn bsearch_range_value_table(c: char, r: &'static [(char, char, %sCat)]) -> %sCat {
        use core::cmp::Ordering::{Equal, Less, Greater};
        match r.binary_search_by(|&(lo, hi, _)| {
            if lo <= c && c <= hi { Equal }
            else if hi < c { Less }
            else { Greater }
        }) {
            Ok(idx) => {
                let (_, _, cat) = r[idx];
                cat
            }
            Err(_) => %sC_Any
        }
    }

    pub fn %s_category(c: char) -> %sCat {
        bsearch_range_value_table(c, %s_cat_table)
    }

""" % (Name, Name, Name[0], name, Name, name))

    emit_table(f, "%s_cat_table" % name, break_table, "&'static [(char, char, %sCat)]" % Name,
        pfun=lambda x: "(%s,%s,%sC_%s)" % (escape_char(x[0]), escape_char(x[1]), Name[0], x[2]),
        is_pub=False, is_const=True)
    f.write("}\n")

if __name__ == "__main__":
    r = "tables.rs"
    if os.path.exists(r):
        os.remove(r)
    with open(r, "w") as rf:
        # write the file's preamble
        rf.write(preamble)

        # download and parse all the data
        fetch("ReadMe.txt")
        with open("ReadMe.txt") as readme:
            pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode"
            unicode_version = re.search(pattern, readme.read()).groups()
        rf.write("""
/// The version of [Unicode](http://www.unicode.org/)
/// that the unicode parts of `CharExt` and `UnicodeStrPrelude` traits are based on.
pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s);

""" % unicode_version)

        ### grapheme cluster module
        # from http://www.unicode.org/reports/tr29/#Grapheme_Cluster_Break_Property_Values
        grapheme_cats = load_properties("auxiliary/GraphemeBreakProperty.txt", [])

        # Control
        # Note 1:
        # This category also includes Cs (surrogate codepoints), but Rust's `char`s are
        # Unicode Scalar Values only, and surrogates are thus invalid `char`s.
        # Thus, we have to remove Cs from the Control category
        # Note 2:
        # 0x0a and 0x0d (CR and LF) are not in the Control category for Graphemes.
        # However, the Graphemes iterator treats these as a special case, so they
        # should be included in grapheme_cats["Control"] for our implementation.
        grapheme_cats["Control"] = group_cat(list(
            (set(ungroup_cat(grapheme_cats["Control"]))
             | set(ungroup_cat(grapheme_cats["CR"]))
             | set(ungroup_cat(grapheme_cats["LF"])))
            - set(ungroup_cat([surrogate_codepoints]))))
        del(grapheme_cats["CR"])
        del(grapheme_cats["LF"])

        grapheme_table = []
        for cat in grapheme_cats:
            grapheme_table.extend([(x, y, cat) for (x, y) in grapheme_cats[cat]])
        grapheme_table.sort(key=lambda w: w[0])
        emit_break_module(rf, grapheme_table, grapheme_cats.keys(), "grapheme")
        rf.write("\n")

        word_cats = load_properties("auxiliary/WordBreakProperty.txt", [])
        word_table = []
        for cat in word_cats:
            word_table.extend([(x, y, cat) for (x, y) in word_cats[cat]])
        word_table.sort(key=lambda w: w[0])
        emit_break_module(rf, word_table, word_cats.keys(), "word")
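
A note on the generated tables, not part of the committed script: each break property ends up as a sorted list of non-overlapping (lo, hi, category) ranges (load_properties collects the ranges, group_cat merges adjacent codepoints), and the emitted Rust bsearch_range_value_table resolves a codepoint by binary-searching that list, falling back to the Any category on a miss. A minimal Python sketch of the same lookup, using a made-up toy table:

from bisect import bisect_right

# Toy table in the shape the script emits for Rust: sorted,
# non-overlapping (lo, hi, category) ranges. The entries are
# illustrative only, not taken from the real Unicode data files.
TOY_TABLE = [
    (0x000A, 0x000A, "LF"),
    (0x000D, 0x000D, "CR"),
    (0x0300, 0x036F, "Extend"),
    (0x1F1E6, 0x1F1FF, "Regional_Indicator"),
]
TOY_LOWS = [lo for lo, _, _ in TOY_TABLE]

def toy_category(cp):
    # Find the last range whose lower bound is <= cp, then check its upper
    # bound; anything not covered falls back to "Any", just as the generated
    # bsearch_range_value_table returns *C_Any on Err(_).
    i = bisect_right(TOY_LOWS, cp) - 1
    if i >= 0 and TOY_TABLE[i][1] >= cp:
        return TOY_TABLE[i][2]
    return "Any"

assert toy_category(0x000D) == "CR"
assert toy_category(0x0301) == "Extend"
assert toy_category(0x0041) == "Any"  # 'A' carries no explicit break category

# For reference, the range compression done by group_cat turns a sorted list
# of codepoints into (lo, hi) pairs, e.g.
#   group_cat([0x41, 0x42, 0x43, 0x61])  ->  [(0x41, 0x43), (0x61, 0x61)]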

0 commit comments
