diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml new file mode 100644 index 0000000..c05faa7 --- /dev/null +++ b/.github/workflows/python-package.yml @@ -0,0 +1,39 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Python package + +on: + push: + branches: [ master ] + pull_request: + branches: [ master ] + +jobs: + build: + + runs-on: ubuntu-latest + strategy: + matrix: + python-version: [3.5, 3.6, 3.7, 3.8, 3.9] + + steps: + - uses: actions/checkout@v2 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python-version }} + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi + - name: Lint with flake8 + run: | + # stop the build if there are Python syntax errors or undefined names + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics + # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --ignore=F401 --statistics + - name: Test with pytest + run: | + pytest diff --git a/.github/workflows/python-publish.yml b/.github/workflows/python-publish.yml new file mode 100644 index 0000000..4e1ef42 --- /dev/null +++ b/.github/workflows/python-publish.yml @@ -0,0 +1,31 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries + +name: Upload Python Package + +on: + release: + types: [created] + +jobs: + deploy: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install setuptools wheel twine + - name: Build and publish + env: + TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }} + TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }} + run: | + python setup.py sdist bdist_wheel + twine upload dist/* diff --git a/README.md b/README.md index 79b0f9b..72c3f6c 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,10 @@ # python-string-similarity -**Python3.5 implementation of [tdebatty/java-string-similarity](https://github.com/tdebatty/java-string-similarity)** + +![Python package](https://github.com/luozhouyang/python-string-similarity/workflows/Python%20package/badge.svg) +[![PyPI version](https://badge.fury.io/py/strsimpy.svg)](https://badge.fury.io/py/strsimpy) +[![Python](https://img.shields.io/pypi/pyversions/strsimpy.svg?style=plastic)](https://badge.fury.io/py/strsimpy) + +Python3.x implementation of [tdebatty/java-string-similarity](https://github.com/tdebatty/java-string-similarity) A library implementing different string similarity and distance measures. A dozen of algorithms (including Levenshtein edit distance and sibblings, Jaro-Winkler, Longest Common Subsequence, cosine similarity etc.) are currently implemented. Check the summary table below for the complete list... @@ -37,7 +42,7 @@ From pypi: ```bash # pip install strsim # deprecated, do not use this!
-pip install strsimpy +pip install -U strsimpy ``` ## Overview @@ -139,15 +144,25 @@ It can also be used for keyboard typing auto-correction. Here the cost of substi ```python from strsimpy.weighted_levenshtein import WeightedLevenshtein -from strsimpy.weighted_levenshtein import CharacterSubstitutionInterface -class CharacterSubstitution(CharacterSubstitutionInterface): - def cost(self, c0, c1): - if c0=='t' and c1=='r': - return 0.5 - return 1.0 -weighted_levenshtein = WeightedLevenshtein(CharacterSubstitution()) +def insertion_cost(char): + return 1.0 + + +def deletion_cost(char): + return 1.0 + + +def substitution_cost(char_a, char_b): + if char_a == 't' and char_b == 'r': + return 0.5 + return 1.0 + +weighted_levenshtein = WeightedLevenshtein( + substitution_cost_fn=substitution_cost, + insertion_cost_fn=insertion_cost, + deletion_cost_fn=deletion_cost) print(weighted_levenshtein.distance('String1', 'String2')) ``` @@ -245,12 +260,15 @@ In "Length of Maximal Common Subsequences", K.S. Larsen proposed an algorithm th ```python from strsimpy.longest_common_subsequence import LongestCommonSubsequence - lcs = LongestCommonSubsequence() -# Will produce 4.0 print(lcs.distance('AGCAT', 'GAC')) -# Will produce 1.0 +4 +print(lcs.length('AGCAT', 'GAC')) +2 print(lcs.distance('AGCAT', 'AGCT')) +1 +print(lcs.length('AGCAT', 'AGCT')) +4 ``` @@ -375,9 +393,16 @@ Distance is computed as 1 - similarity. ### SIFT4 SIFT4 is a general purpose string distance algorithm inspired by JaroWinkler and Longest Common Subsequence. It was developed to produce a distance measure that matches as close as possible to the human perception of string distance. Hence it takes into account elements like character substitution, character distance, longest common subsequence etc. It was developed using experimental testing, and without theoretical background. 
-**Not implemented yet** +```python +from strsimpy import SIFT4 +s = SIFT4() +# result: 11.0 +s.distance('This is the first string', 'And this is another string') # 11.0 +# result: 12.0 +s.distance('Lorem ipsum dolor sit amet, consectetur adipiscing elit.', 'Amet Lorm ispum dolor sit amet, consetetur adixxxpiscing elit.', maxoffset=10) +``` ## Users * [StringSimilarity.NET](https://github.com/feature23/StringSimilarity.NET) a .NET port of java-string-similarity diff --git a/setup.py b/setup.py index b02622f..20da3f8 100644 --- a/setup.py +++ b/setup.py @@ -5,19 +5,24 @@ setuptools.setup( name="strsimpy", - version="0.1.4", + version="0.2.1", description="A library implementing different string similarity and distance measures", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/luozhouyang/python-string-similarity", author="ZhouYang Luo", - author_email="stupidme.me.lzy@gmail.com", + author_email="zhouyang.luo@gmail.com", packages=setuptools.find_packages(), include_package_data=True, install_requires=[], license="MIT License", classifiers=( "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ) diff --git a/strsimpy/__init__.py b/strsimpy/__init__.py index 607631f..1b1792f 100644 --- a/strsimpy/__init__.py +++ b/strsimpy/__init__.py @@ -34,6 +34,7 @@ from .string_distance import StringDistance from .string_similarity import StringSimilarity from .weighted_levenshtein import WeightedLevenshtein +from .sift4 import SIFT4Options, SIFT4 __name__ = 'strsimpy' -__version__ = '0.1.4' +__version__ = '0.2.1' diff --git a/strsimpy/cosine.py b/strsimpy/cosine.py index 799e598..656e487 100644 --- a/strsimpy/cosine.py +++ b/strsimpy/cosine.py @@ -45,12 +45,10 @@ def similarity(self, s0, s1): return 0.0 profile0 = self.get_profile(s0) profile1 = self.get_profile(s1) - return self._dot_product(profile0, profile1) / ( - self._norm(profile0) * self._norm(profile1)) + return self._dot_product(profile0, profile1) / (self._norm(profile0) * self._norm(profile1)) def similarity_profiles(self, profile0, profile1): - return self._dot_product(profile0, profile1) / ( - self._norm(profile0) * self._norm(profile1)) + return self._dot_product(profile0, profile1) / (self._norm(profile0) * self._norm(profile1)) @staticmethod def _dot_product(profile0, profile1): diff --git a/strsimpy/longest_common_subsequence.py b/strsimpy/longest_common_subsequence.py index e447cd0..07ede48 100644 --- a/strsimpy/longest_common_subsequence.py +++ b/strsimpy/longest_common_subsequence.py @@ -39,7 +39,7 @@ def length(s0, s1): raise TypeError("Argument s1 is NoneType.") s0_len, s1_len = len(s0), len(s1) x, y = s0[:], s1[:] - matrix = [[0] * (s1_len+1) for _ in range(s0_len + 1)] + matrix = [[0] * (s1_len + 1) for _ in range(s0_len + 1)] for i in range(1, s0_len + 1): for j in range(1, s1_len + 1): if x[i - 1] == y[j - 1]: diff --git a/strsimpy/ngram.py b/strsimpy/ngram.py index c3860b7..27d4ae1 100644 --- a/strsimpy/ngram.py +++ b/strsimpy/ngram.py @@ -46,7 +46,7 @@ def distance(self, s0, s1): for i in range(min(sl, tl)): if s0[i] == s1[i]: cost += 1 - return 1.0 * cost / max(sl, tl) + return 1.0 - cost / max(sl, tl) sa = [''] * (sl + self.n - 1) diff --git a/strsimpy/optimal_string_alignment.py 
b/strsimpy/optimal_string_alignment.py index cb4fa57..8812048 100644 --- a/strsimpy/optimal_string_alignment.py +++ b/strsimpy/optimal_string_alignment.py @@ -18,7 +18,6 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. -import numpy as np from .string_distance import StringDistance @@ -39,7 +38,7 @@ def distance(self, s0, s1): if m == 0: return 1.0 * m - d = np.zeros((n + 2, m + 2)) + d = [[0] * (m + 2) for _ in range(n + 2)] for i in range(n + 1): d[i][0] = i for j in range(m + 1): diff --git a/strsimpy/overlap_coefficient.py b/strsimpy/overlap_coefficient.py index 96b4ba5..39edc96 100644 --- a/strsimpy/overlap_coefficient.py +++ b/strsimpy/overlap_coefficient.py @@ -25,4 +25,4 @@ def similarity(self, s0, s1): for k in profile1.keys(): union.add(k) inter = int(len(profile0.keys()) + len(profile1.keys()) - len(union)) - return inter / min(len(profile0),len(profile1)) + return inter / min(len(profile0), len(profile1)) diff --git a/strsimpy/overlap_coefficient_test.py b/strsimpy/overlap_coefficient_test.py index 9912d83..a83925b 100644 --- a/strsimpy/overlap_coefficient_test.py +++ b/strsimpy/overlap_coefficient_test.py @@ -2,34 +2,36 @@ from strsimpy.overlap_coefficient import OverlapCoefficient + class TestOverlapCoefficient(unittest.TestCase): def test_overlap_coefficient_onestringissubsetofother_return0(self): sim = OverlapCoefficient(3) - s1,s2 = "eat","eating" - actual = sim.distance(s1,s2) - print("distance: {:.4}\t between '{}' and '{}'".format(str(actual), s1,s2)) - self.assertEqual(0,actual) + s1, s2 = "eat", "eating" + actual = sim.distance(s1, s2) + print("distance: {:.4}\t between '{}' and '{}'".format(str(actual), s1, s2)) + self.assertEqual(0, actual) def test_overlap_coefficient_onestringissubset_return1(self): sim = OverlapCoefficient(3) - s1,s2 = "eat","eating" - actual = sim.similarity(s1,s2) - print("strsim: {:.4}\t between '{}' and '{}'".format(str(actual), s1,s2)) - self.assertEqual(1,actual) + s1, s2 = "eat", "eating" + actual = sim.similarity(s1, s2) + print("strsim: {:.4}\t between '{}' and '{}'".format(str(actual), s1, s2)) + self.assertEqual(1, actual) def test_overlap_coefficient_onestringissubsetofother_return1(self): sim = OverlapCoefficient(3) - s1,s2 = "eat","eating" - actual = sim.similarity(s1,s2) - print("strsim: {:.4}\t between '{}' and '{}'".format(str(actual), s1,s2)) - self.assertEqual(1,actual) + s1, s2 = "eat", "eating" + actual = sim.similarity(s1, s2) + print("strsim: {:.4}\t between '{}' and '{}'".format(str(actual), s1, s2)) + self.assertEqual(1, actual) def test_overlap_coefficient_halfsimilar_return1(self): sim = OverlapCoefficient(2) - s1,s2 = "car","bar" - self.assertEqual(1/2,sim.similarity(s1,s2)) - self.assertEqual(1/2,sim.distance(s1,s2)) + s1, s2 = "car", "bar" + self.assertEqual(1 / 2, sim.similarity(s1, s2)) + self.assertEqual(1 / 2, sim.distance(s1, s2)) + if __name__ == "__main__": unittest.main() diff --git a/strsimpy/sift4.py b/strsimpy/sift4.py new file mode 100644 index 0000000..76adc52 --- /dev/null +++ b/strsimpy/sift4.py @@ -0,0 +1,188 @@ +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: 
+# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +from .string_distance import MetricStringDistance + + +class SIFT4Options(MetricStringDistance): + def __init__(self, options=None): + self.options = { + 'maxdistance': 0, + 'tokenizer': lambda x: [i for i in x], + 'tokenmatcher': lambda t1, t2: t1 == t2, + 'matchingevaluator': lambda t1, t2: 1, + 'locallengthevaluator': lambda x: x, + 'transpositioncostevaluator': lambda c1, c2: 1, + 'transpositionsevaluator': lambda lcss, trans: lcss - trans + } + otheroptions = { + 'tokenizer': { + 'ngram': self.ngramtokenizer, + 'wordsplit': self.wordsplittokenizer, + 'characterfrequency': self.characterfrequencytokenizer + }, + 'tokenmatcher': {'sift4tokenmatcher': self.sift4tokenmatcher}, + 'matchingevaluator': {'sift4matchingevaluator': self.sift4matchingevaluator}, + 'locallengthevaluator': { + 'rewardlengthevaluator': self.rewardlengthevaluator, + 'rewardlengthevaluator2': self.rewardlengthevaluator2 + }, + 'transpositioncostevaluator': {'longertranspositionsaremorecostly': self.longertranspositionsaremorecostly}, + 'transpositionsevaluator': {} + } + if isinstance(options, dict): + for k, v in options.items(): + if k in self.options.keys(): + if k == 'maxdistance': + if isinstance(v, int): + self.options[k] = v + else: + raise ValueError("Option maxdistance should be int") + else: + if callable(v): + self.options[k] = v + else: + if v in otheroptions[k].keys(): + self.options[k] = otheroptions[k][v] + else: + msg = "Option {} should be callable or one of [{}]".format(k, ', '.join(otheroptions[k].keys())) + raise ValueError(msg) + else: + raise ValueError("Option {} not recognized.".format(k)) + elif options is not None: + raise ValueError("options should be a dictionary") + self.maxdistance = self.options['maxdistance'] + self.tokenizer = self.options['tokenizer'] + self.tokenmatcher = self.options['tokenmatcher'] + self.matchingevaluator = self.options['matchingevaluator'] + self.locallengthevaluator = self.options['locallengthevaluator'] + self.transpositioncostevaluator = self.options['transpositioncostevaluator'] + self.transpositionsevaluator = self.options['transpositionsevaluator'] + + # tokenizers: + @staticmethod + def ngramtokenizer(s, n): + result = [] + if not s: + return result + for i in range(len(s) - n + 1): + result.append(s[i:(i + n)]) + return result + + @staticmethod + def wordsplittokenizer(s): + if not s: + return [] + return s.split() + + @staticmethod + def characterfrequencytokenizer(s): + letters = [i for i in 'abcdefghijklmnopqrstuvwxyz'] + return [s.lower().count(x) for x in letters] + + # tokenMatchers: + @staticmethod + def sift4tokenmatcher(t1, t2): + similarity = 1 - SIFT4().distance(t1, t2, 5) / max(len(t1), len(t2)) + return similarity > 0.7 + + # matchingEvaluators: + @staticmethod + def sift4matchingevaluator(t1, t2): + similarity = 1 - SIFT4().distance(t1, t2, 5) / max(len(t1), len(t2)) + return similarity + + # localLengthEvaluators: + @staticmethod +
def rewardlengthevaluator(l): + if l < 1: + return l + return l - 1 / (l + 1) + + @staticmethod + def rewardlengthevaluator2(l): + return pow(l, 1.5) + + # transpositionCostEvaluators: + @staticmethod + def longertranspositionsaremorecostly(c1, c2): + return abs(c2 - c1) / 9 + 1 + + +class SIFT4: + # As described in https://siderite.dev/blog/super-fast-and-accurate-string-distance.html/ + def distance(self, s1, s2, maxoffset=5, options=None): + options = SIFT4Options(options) + t1, t2 = options.tokenizer(s1), options.tokenizer(s2) + l1, l2 = len(t1), len(t2) + if l1 == 0: + return l2 + if l2 == 0: + return l1 + + c1, c2, lcss, local_cs, trans, offset_arr = 0, 0, 0, 0, 0, [] + while (c1 < l1) and (c2 < l2): + if options.tokenmatcher(t1[c1], t2[c2]): + local_cs += options.matchingevaluator(t1[c1], t2[c2]) + isTrans = False + i = 0 + while i < len(offset_arr): + ofs = offset_arr[i] + if (c1 <= ofs['c1']) or (c2 <= ofs['c2']): + isTrans = abs(c2 - c1) >= abs(ofs['c2'] - ofs['c1']) + if isTrans: + trans += options.transpositioncostevaluator(c1, c2) + else: + if not ofs['trans']: + ofs['trans'] = True + trans += options.transpositioncostevaluator(ofs['c1'], ofs['c2']) + break + else: + if (c1 > ofs['c2']) and (c2 > ofs['c1']): + offset_arr.pop(i) + else: + i += 1 + offset_arr.append({'c1': c1, 'c2': c2, 'trans': isTrans}) + else: + lcss += options.locallengthevaluator(local_cs) + local_cs = 0 + if c1 != c2: + c1 = c2 = min(c1, c2) + for i in range(maxoffset): + if (c1 + i < l1) or (c2 + i < l2): + if (c1 + i < l1) and options.tokenmatcher(t1[c1 + i], t2[c2]): + c1 += i - 1 + c2 -= 1 + break + if (c2 + i < l2) and options.tokenmatcher(t1[c1], t2[c2 + i]): + c1 -= 1 + c2 += i - 1 + break + c1 += 1 + c2 += 1 + if options.maxdistance: + temporarydistance = options.locallengthevaluator(max(c1, c2)) - options.transpositionsevaluator(lcss, trans) + if temporarydistance >= options.maxdistance: + return round(temporarydistance) + if (c1 >= l1) or (c2 >= l2): + lcss += options.locallengthevaluator(local_cs) + local_cs = 0 + c1 = c2 = min(c1, c2) + lcss += options.locallengthevaluator(local_cs) + return round(options.locallengthevaluator(max(l1, l2)) - options.transpositionsevaluator(lcss, trans)) + + diff --git a/strsimpy/sift4_test.py b/strsimpy/sift4_test.py new file mode 100644 index 0000000..1960e7b --- /dev/null +++ b/strsimpy/sift4_test.py @@ -0,0 +1,21 @@ +import unittest + +from .sift4 import SIFT4 + + +class SIFT4Test(unittest.TestCase): + + def testSIFT4(self): + s = SIFT4() + + results = [ + ('This is the first string', 'And this is another string', 5, 11.0), + ('Lorem ipsum dolor sit amet, consectetur adipiscing elit.', 'Amet Lorm ispum dolor sit amet, consetetur adixxxpiscing elit.', 10, 12.0) + ] + + for a, b, offset, res in results: + self.assertEqual(res, s.distance(a, b, maxoffset=offset)) + + +if __name__ == "__main__": + unittest.main() diff --git a/strsimpy/weighted_levenshtein.py b/strsimpy/weighted_levenshtein.py index 5f9d80b..a03b460 100644 --- a/strsimpy/weighted_levenshtein.py +++ b/strsimpy/weighted_levenshtein.py @@ -18,31 +18,32 @@ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
+from functools import reduce from .string_distance import StringDistance -class CharacterInsDelInterface: +def default_insertion_cost(char): + return 1.0 - def deletion_cost(self, c): - raise NotImplementedError() - def insertion_cost(self, c): - raise NotImplementedError() +def default_deletion_cost(char): + return 1.0 -class CharacterSubstitutionInterface: - - def cost(self, c0, c1): - raise NotImplementedError() +def default_substitution_cost(char_a, char_b): + return 1.0 class WeightedLevenshtein(StringDistance): - def __init__(self, character_substitution, character_ins_del=None): - self.character_ins_del = character_ins_del - if character_substitution is None: - raise TypeError("Argument character_substitution is NoneType.") - self.character_substitution = character_substitution + def __init__(self, + substitution_cost_fn=default_substitution_cost, + insertion_cost_fn=default_insertion_cost, + deletion_cost_fn=default_deletion_cost, + ): + self.substitution_cost_fn = substitution_cost_fn + self.insertion_cost_fn = insertion_cost_fn + self.deletion_cost_fn = deletion_cost_fn def distance(self, s0, s1): if s0 is None: @@ -52,38 +53,28 @@ def distance(self, s0, s1): if s0 == s1: return 0.0 if len(s0) == 0: - return len(s1) + return reduce(lambda cost, char: cost + self.insertion_cost_fn(char), s1, 0) if len(s1) == 0: - return len(s0) + return reduce(lambda cost, char: cost + self.deletion_cost_fn(char), s0, 0) v0, v1 = [0.0] * (len(s1) + 1), [0.0] * (len(s1) + 1) v0[0] = 0 for i in range(1, len(v0)): - v0[i] = v0[i - 1] + self._insertion_cost(s1[i - 1]) + v0[i] = v0[i - 1] + self.insertion_cost_fn(s1[i - 1]) for i in range(len(s0)): - s1i = s0[i] - deletion_cost = self._deletion_cost(s1i) + s0i = s0[i] + deletion_cost = self.deletion_cost_fn(s0i) v1[0] = v0[0] + deletion_cost for j in range(len(s1)): - s2j = s1[j] + s1j = s1[j] cost = 0 - if s1i != s2j: - cost = self.character_substitution.cost(s1i, s2j) - insertion_cost = self._insertion_cost(s2j) + if s0i != s1j: + cost = self.substitution_cost_fn(s0i, s1j) + insertion_cost = self.insertion_cost_fn(s1j) v1[j + 1] = min(v1[j] + insertion_cost, v0[j + 1] + deletion_cost, v0[j] + cost) v0, v1 = v1, v0 return v0[len(s1)] - - def _insertion_cost(self, c): - if self.character_ins_del is None: - return 1.0 - return self.character_ins_del.insertion_cost(c) - - def _deletion_cost(self, c): - if self.character_ins_del is None: - return 1.0 - return self.character_ins_del.deletion_cost(c) diff --git a/strsimpy/weighted_levenshtein_test.py b/strsimpy/weighted_levenshtein_test.py index 6c0832d..90fb2b1 100644 --- a/strsimpy/weighted_levenshtein_test.py +++ b/strsimpy/weighted_levenshtein_test.py @@ -20,19 +20,13 @@ import unittest -from .weighted_levenshtein import WeightedLevenshtein, CharacterSubstitutionInterface - - -class CharSub(CharacterSubstitutionInterface): - - def cost(self, c0, c1): - return 1.0 +from .weighted_levenshtein import WeightedLevenshtein class TestWeightedLevenshtein(unittest.TestCase): def test_weighted_levenshtein(self): - a = WeightedLevenshtein(character_substitution=CharSub()) + a = WeightedLevenshtein() s0 = "" s1 = "" s2 = "上海" diff --git a/upload.sh b/upload.sh deleted file mode 100644 index e5d2bd3..0000000 --- a/upload.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -conda activate strsim -python3 setup.py sdist bdist_wheel -twine upload dist/* - -rm -rf build -rm -rf dist -rm -rf *.egg-info diff --git a/upload_test.sh b/upload_test.sh deleted file mode 100644 index b91c154..0000000 --- a/upload_test.sh 
+++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env bash - -conda activate strsim -python3 setup.py sdist bdist_wheel -twine upload --repository-url https://test.pypi.org/legacy/ dist/* - -rm -rf build -rm -rf dist -rm -rf *.egg-info