diff --git a/Changelog b/Changelog
index a5967f27..b2c098fd 100644
--- a/Changelog
+++ b/Changelog
@@ -1,3 +1,6 @@
+2019-1-20: version 0.42.1
+1. Fix setup.py not working on Python 2.7 (issue #809)
+
 2019-1-13: version 0.42
 1. Fix coredump on empty strings in paddle mode @JesseyXujin
 2. Fix dropped characters in cut_all mode segmentation @fxsjy
diff --git a/jieba/__init__.py b/jieba/__init__.py
index f7b23e12..90f0bcd5 100644
--- a/jieba/__init__.py
+++ b/jieba/__init__.py
@@ -1,6 +1,6 @@
 from __future__ import absolute_import, unicode_literals
 
-__version__ = '0.42'
+__version__ = '0.42.1'
 __license__ = 'MIT'
 
 import marshal
@@ -300,7 +300,7 @@ def cut(self, sentence, cut_all=False, HMM=True, use_paddle=False):
         sentence = strdecode(sentence)
         if use_paddle and is_paddle_installed:
             # if sentence is null, it will raise core exception in paddle.
-            if sentence is None or sentence == "" or sentence == u"":
+            if sentence is None or len(sentence) == 0:
                 return
             import jieba.lac_small.predict as predict
             results = predict.get_sent(sentence)
diff --git a/setup.py b/setup.py
index b704a52e..f23dd323 100644
--- a/setup.py
+++ b/setup.py
@@ -43,7 +43,7 @@
 """
 
 setup(name='jieba',
-      version='0.42',
+      version='0.42.1',
       description='Chinese Words Segmentation Utilities',
       long_description=LONGDOC,
       author='Sun, Junyi',
@@ -71,5 +71,5 @@
       keywords='NLP,tokenizing,Chinese word segementation',
       packages=['jieba'],
       package_dir={'jieba':'jieba'},
-      package_data={'jieba':['*.*','finalseg/*','analyse/*','posseg/*', 'lac_small/*','lac_small/model_baseline/*']}
+      package_data={'jieba':['*.*','finalseg/*','analyse/*','posseg/*', 'lac_small/*.py','lac_small/*.dic', 'lac_small/model_baseline/*']}
 )
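
For context on the cut() hunk above: len(sentence) == 0 rejects both the byte-string and unicode empty inputs in one comparison (on Python 2, "" == u"" already evaluates to True, so the old three-way test was redundant), keeping an empty sentence from ever reaching paddle. Below is a minimal sketch of the guard's behavior on Python 2 and 3; the is_empty_input helper name is hypothetical and not part of jieba:

    # -*- coding: utf-8 -*-
    # Sketch of the guard added in cut(): reject None and empty input
    # before handing the sentence to paddle. Helper name is hypothetical.
    def is_empty_input(sentence):
        # len() covers both '' and u'' on Python 2 and Python 3, so the
        # previous three-way comparison (None, "", u"") collapses to two.
        return sentence is None or len(sentence) == 0

    assert is_empty_input(None)
    assert is_empty_input('')
    assert is_empty_input(u'')
    assert not is_empty_input(u'结巴分词')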
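
The setup.py hunk narrows the broad lac_small/* glob to explicit *.py and *.dic patterns, presumably the packaging side of the Python 2.7 fix noted in the Changelog. A hedged sketch of how setuptools expands such package_data globs, assuming a jieba checkout as the working directory (paths are illustrative):

    # Illustrative only: setuptools interprets package_data entries as glob
    # patterns relative to each package directory. Listing lac_small/*.py
    # and lac_small/*.dic explicitly keeps the sources and dictionaries
    # while no longer matching everything the old lac_small/* picked up.
    import glob
    import os

    PACKAGE_DIR = 'jieba'  # matches package_dir={'jieba': 'jieba'} above
    PATTERNS = ['lac_small/*.py', 'lac_small/*.dic',
                'lac_small/model_baseline/*']

    for pattern in PATTERNS:
        for path in sorted(glob.glob(os.path.join(PACKAGE_DIR, pattern))):
            print(path)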