Update tests
pecorarista committed Nov 8, 2017
1 parent 442f425 commit dcda036
Showing 6 changed files with 92 additions and 60 deletions.
10 changes: 3 additions & 7 deletions .gitignore
@@ -1,11 +1,7 @@
 __pycache__
+build
 *.pyc
 trf.egg-info
-tests/faster-rnnlm
-tests/uniq.dat
-tests/test.input
-tests/rnnlm.output
-tests/__init__.pyc
-tests/test_acceptability.pyc
 
-build
+tools
+data
32 changes: 15 additions & 17 deletions tests/test_acceptability.py
@@ -1,6 +1,4 @@
 import unittest
-import warnings
-import tempfile
 
 from trf.acceptability import Acceptability
 from trf.util import check_executable
@@ -20,34 +18,34 @@ def setUp(self):
                                          self.delimiter,
                                          self.rnnlm_model_path)
 
-    def test_rnnlm_scores(self):
-        scores = self.acceptability.rnnlm_scores
+    def test_log_prob(self):
+        scores = self.acceptability._calc_log_prob_scores()
         self.assertAlmostEqual(scores[0], -11.571, places=2)
 
     def test_unigram_scores(self):
 
-        scores = self.acceptability.unigram_scores
+        scores = self.acceptability._calc_unigram_scores()
         self.assertAlmostEqual(scores[0], -31.457, places=2)
 
-    # def test_mean_unigram_scores(self):
+    def test_mean_lp_scores(self):
 
-    #     scores = self.acceptability.mean_unigram_scores
-    #     self.assertAlmostEqual(scores[0], -2.12, places=2)
+        score = self.acceptability.mean_lp
+        self.assertAlmostEqual(score, -2.892, places=2)
 
-    # def test_normalized_scores_div(self):
+    def test_norm_lp_div(self):
 
-    #     scores = self.acceptability.normalized_scores_div
-    #     self.assertAlmostEqual(scores[0], -5.446, places=2)
+        score = self.acceptability.norm_lp_div
+        self.assertAlmostEqual(score, -0.3678, places=2)
 
-    # def test_normalized_scores_sub(self):
+    def test_norm_lp_sub(self):
 
-    #     scores = self.acceptability.normalized_scores_sub
-    #     self.assertAlmostEqual(scores[0], -9.447, places=2)
+        score = self.acceptability.norm_lp_sub
+        self.assertAlmostEqual(score, 19.885, places=2)
 
-    # def test_normalized_scores_len(self):
+    def test_slor(self):
 
-    #     scores = self.acceptability.normalized_scores_len
-    #     self.assertAlmostEqual(scores[0], -0.9447, places=2)
+        score = self.acceptability.slor
+        self.assertAlmostEqual(score, 4.9713, places=2)
 
     def tearDown(self):
         pass
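The renamed tests' expected values are mutually consistent with the formulas in trf/acceptability.py. A quick sketch (not part of the commit) that checks this, assuming the fixture sentence is 4 tokens long and that Norm LP (Div) is the negated ratio of the two log probabilities:

import math

log_prob = -11.571   # expected value in test_log_prob
unigram = -31.457    # expected value in test_unigram_scores
length = 4           # assumed token count of the fixture sentence

assert math.isclose(log_prob / length, -2.892, abs_tol=5e-3)              # test_mean_lp_scores
assert math.isclose(-log_prob / unigram, -0.3678, abs_tol=5e-4)           # test_norm_lp_div
assert math.isclose(log_prob - unigram, 19.885, abs_tol=5e-3)             # test_norm_lp_sub
assert math.isclose((log_prob - unigram) / length, 4.9713, abs_tol=5e-3)  # test_slor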
Binary file added trf/__init__.pyc
Binary file not shown.
79 changes: 56 additions & 23 deletions trf/acceptability.py
@@ -16,8 +16,8 @@ class Acceptability:
     def __init__(self, text: str, delimiter: str, rnnlm_model_path: str):
 
         self.text = text
-        self.sentences = split_text(text, delimiter)
-        self.lengths, self.split_texts = tokenize(self.sentences)
+        self.sentences = split_text(text, delimiter)  # type: List[str]
+        lengths, self.tss = tokenize(self.sentences)
 
         if not os.path.isfile(rnnlm_model_path):
             raise FileNotFoundError(errno.ENOENT,
@@ -27,13 +27,27 @@ def __init__(self, text: str, delimiter: str, rnnlm_model_path: str):
 
         self.word_freq, self.n_total_words = self._load_word_freq(threshold=1)
 
-        self.log_prob_scores = self.calc_log_prob_scores()
-        self.unigram_scores = self.calc_unigram_scores()
-        self.mean_lp_scores = self.calc_mean_lp_scores()
-        self.norm_lp_div_scores = self.calc_log_prob_scores()
-        self.norm_lp_sub_scores = self.calc_unigram_scores()
+        log_prob_scores = \
+            self._calc_log_prob_scores()
+        unigram_scores = \
+            self._calc_unigram_scores()
+
+        mean_lp_scores = \
+            calc_mean_lp_scores(log_prob_scores, lengths)
+        norm_lp_div_scores = \
+            calc_norm_lp_div_scores(log_prob_scores, unigram_scores)
+        norm_lp_sub_scores = \
+            calc_norm_lp_sub_scores(log_prob_scores, unigram_scores)
+        slor_scores = \
+            calc_slor_scores(norm_lp_sub_scores, lengths)
 
-    def calc_log_prob_scores(self) -> List[Union[None, float]]:
+        self.log_prob = average(log_prob_scores)
+        self.mean_lp = average(mean_lp_scores)
+        self.norm_lp_div = average(norm_lp_div_scores)
+        self.norm_lp_sub = average(norm_lp_sub_scores)
+        self.slor = average(slor_scores)
+
+    def _calc_log_prob_scores(self) -> List[Union[None, float]]:
         """Get log likelihood scores by calling RNNLM
         """
 
@@ -81,7 +95,7 @@ def _load_word_freq(self, threshold: int) -> Tuple[Dict[str, int], int]:
 
         return (word_freq, n_total_words)
 
-    def calc_unigram_scores(self) -> List[float]:
+    def _calc_unigram_scores(self) -> List[float]:
 
         unigram_scores = []
         for ts in self.tss:
@@ -98,7 +112,7 @@ def calc_unigram_scores(self) -> List[float]:
 
 
 def average(xs: List[Union[None, float]]) -> float:
-    """
+    """Calculate the arithmetic mean of the given values (possibly None)
     >>> '{:.2f}'.format(average([None, 1.0, 2.0]))
     '1.50'
     """
@@ -126,16 +140,17 @@ def calc_mean_lp_scores(log_prob_scores: List[float],
     return mean_lp_scores
 
 
-def calc_norm_lp_div(log_prob_scores: List[float],
-                     unigram_scores: List[float]) -> List[Union[None, float]]:
+def calc_norm_lp_div_scores(
+        log_prob_scores: List[float],
+        unigram_scores: List[float]) -> List[Union[None, float]]:
     r"""
     .. math:
         \frac{%
            \log P_\text{model}\left(\xi\right)
        }{%
           \log P_\text{unigram}\left(\xi\right)
       }
-    >>> '{:.3f}'.format(calc_norm_lp_div([-14.7579], [-35.6325])[0])
+    >>> '{:.3f}'.format(calc_norm_lp_div_scores([-14.7579], [-35.6325])[0])
     '-0.414'
     """
     results = []
@@ -148,17 +163,19 @@ def calc_norm_lp_div(log_prob_scores: List[float],
     return results
 
 
-def calc_norm_lp_sub(log_prob_scores: List[float],
-                     unigram_scores: List[float]) -> List[Union[None, float]]:
+def calc_norm_lp_sub_scores(
+        log_prob_scores: List[float],
+        unigram_scores: List[float]) -> List[Union[None, float]]:
     r"""
     .. math:
         \log P_\text{model}\left(\xi\right)
         - \log P_\text{unigram}\left(\xi\right)
+    >>> '{:.3f}'.format(calc_norm_lp_sub_scores([-14.7579], [-35.6325])[0])
+    '20.875'
     """
 
     results = []
-    for log_prob, unigram_score, length in zip(log_prob_scores,
-                                               unigram_scores):
+    for log_prob, unigram_score in zip(log_prob_scores, unigram_scores):
         if log_prob is None or numpy.isclose(unigram_score, 0.0, rtol=1e-05):
             x = None
         else:
@@ -167,12 +184,28 @@ def calc_norm_lp_sub(log_prob_scores: List[float],
     return results
 
 
-def _f(score: float, unigram_score: float, length: int, method: str) -> float:
+def calc_slor_scores(norm_lp_sub_scores: List[float],
+                     lengths: List[int]) -> List[Union[None, float]]:
+    r"""Calculate SLOR (Syntactic Log-Odds Ratio)
+    .. math:
+        \frac{%
+            \log P_\text{model}\left(\xi\right)
+            - \log P_\text{unigram}\left(\xi\right)
+        }{%
+            \text{length}\left(\xi\right)
+        }
+    >>> '{:.3f}'.format(calc_slor_scores([20.8746], [4])[0])
+    '5.219'
+    """
 
-    if method == 'len':
-        return (float(score) - float(unigram_score)) / length
-    else:
-        raise ValueError
+    results = []
+    for norm_lp_sub_score, length in zip(norm_lp_sub_scores, lengths):
+        if (norm_lp_sub_score is None) or length == 0:
+            x = None
+        else:
+            x = norm_lp_sub_score / length
+        results.append(x)
+    return results
 
 
 def tokenize(sentences: List[str]) -> Tuple[List[int], List[List[str]]]:
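The refactored pipeline computes one score per sentence and averages over the text, and the four measures reduce to simple arithmetic on the RNNLM and unigram log probabilities. A minimal sketch (not part of the commit) on the doctest values above; the negation in Norm LP (Div) is an assumption read off the '-0.414' doctest, and Mean LP's definition is assumed since calc_mean_lp_scores is not shown in this diff:

log_prob = -14.7579  # log P_model(xi), as returned by the RNNLM
unigram = -35.6325   # log P_unigram(xi)
length = 4           # token count of xi

mean_lp = log_prob / length        # Mean LP (assumed: per-token log probability)
norm_lp_div = -log_prob / unigram  # Norm LP (Div): negated ratio of log probs
norm_lp_sub = log_prob - unigram   # Norm LP (Sub)
slor = norm_lp_sub / length        # SLOR: Norm LP (Sub) per token

print('{:.3f} {:.3f} {:.3f} {:.3f}'.format(mean_lp, norm_lp_div, norm_lp_sub, slor))
# -3.689 -0.414 20.875 5.219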
31 changes: 18 additions & 13 deletions trf/cmdline.py
@@ -21,14 +21,16 @@ def translate(en: str):
         return '係り受け木の深さ'
     elif en == 'r_conditional':
         return '仮定節'
-    elif en == 'mean_loglikelihood':
-        return '言語モデルの対数尤度'
-    elif en == 'acceptability_div':
+    elif en == 'log_prob':
+        return '容認度 (LogProb)'
+    elif en == 'mean_lp':
+        return '容認度 (Mean LP)'
+    elif en == 'norm_lp_div':
         return '容認度 (Norm LP (Div))'
-    elif en == 'acceptability_sub':
+    elif en == 'norm_lp_sub':
         return '容認度 (Norm LP (Sub))'
-    elif en == 'acceptability_slor':
-        return '容認度'
+    elif en == 'slor':
+        return '容認度 (SLOR)'
     else:
         return en

@@ -71,6 +73,10 @@ def show(self, lang: str='ja'):
         sys.exit(1)
 
 
+def _f(score: float) -> str:
+    return 'None' if score is None else '{:.2f}'.format(score)
+
+
 def main():
 
     executables = ['juman', 'knp', 'rnnlm']
@@ -135,15 +141,14 @@ def main():
     Section('syntax', metrics).show()
 
     metrics = []
-    acceptability = \
-        Acceptability(text,
+    a = Acceptability(text,
                       args.delimiter,
                       args.rnnlm_model_path)
-    score = acceptability.mean_loglikelihood
-    score = 'None' if score is None else '{:.2f}'.format(score)
-    metrics.append(Metric('mean_loglikelihood', score))
-    normalized_score = acceptability.normalized_scores_len
-    metrics.append(Metric('norm_len', normalized_score))
+    metrics.append(Metric('log_prob', _f(a.log_prob)))
+    metrics.append(Metric('mean_lp', _f(a.mean_lp)))
+    metrics.append(Metric('norm_lp_div', _f(a.norm_lp_div)))
+    metrics.append(Metric('norm_lp_sub', _f(a.norm_lp_sub)))
+    metrics.append(Metric('slor', _f(a.slor)))
     Section('language_model', metrics).show()


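The new _f helper centralizes score formatting for main(), replacing the inline 'None'-or-format logic that previously existed only for mean_loglikelihood. An illustrative check (not part of the commit):

assert _f(None) == 'None'    # missing scores render uniformly
assert _f(4.9713) == '4.97'  # floats render with two decimals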
Binary file added trf/constant.pyc
Binary file not shown.
