diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
index 6b6792096e..e731514f8d 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenCountFilter.cs
@@ -25,12 +25,12 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// a replacement for the maximum field length setting inside .
///
/// By default, this filter ignores any tokens in the wrapped
- /// once the limit has been reached, which can result in being
- /// called prior to returning false. For most
- /// implementations this should be acceptable, and faster
- /// then consuming the full stream. If you are wrapping a
- /// which requires that the full stream of tokens be exhausted in order to
- /// function properly, use the
+ /// once the limit has been reached, which can result in being
+ /// called prior to returning false. For most
+ /// implementations this should be acceptable, and faster
+ /// than consuming the full stream. If you are wrapping a
+ /// which requires that the full stream of tokens be exhausted in order to
+ /// function properly, use the
/// consumeAllTokens
/// option.
///
@@ -91,8 +91,9 @@ public override bool IncrementToken()
}
else
{
- while (consumeAllTokens && m_input.IncrementToken()) // NOOP
+ while (consumeAllTokens && m_input.IncrementToken())
{
+ // NOOP
}
return false;
}
@@ -105,4 +106,4 @@ public override void Reset()
exhausted = false;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
index 6b680643a7..f4942169cd 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/LimitTokenPositionFilter.cs
@@ -26,12 +26,12 @@ namespace Lucene.Net.Analysis.Miscellaneous
/// are not greater than the configured limit.
///
/// By default, this filter ignores any tokens in the wrapped
- /// once the limit has been exceeded, which can result in being
- /// called prior to returning false. For most
- /// implementations this should be acceptable, and faster
+ /// once the limit has been exceeded, which can result in being
+ /// called prior to returning false. For most
+ /// implementations this should be acceptable, and faster
/// then consuming the full stream. If you are wrapping a
- /// which requires that the full stream of tokens be exhausted in order to
- /// function properly, use the
+ /// which requires that the full stream of tokens be exhausted in order to
+ /// function properly, use the
/// consumeAllTokens
/// option.
///
@@ -91,8 +91,9 @@ public override bool IncrementToken()
}
else
{
- while (consumeAllTokens && m_input.IncrementToken()) // NOOP
+ while (consumeAllTokens && m_input.IncrementToken())
{
+ // NOOP
}
exhausted = true;
return false;
@@ -112,4 +113,4 @@ public override void Reset()
exhausted = false;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
index ee0dfdf80a..b90e137f67 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Miscellaneous/TrimFilter.cs
@@ -39,7 +39,7 @@ public sealed class TrimFilter : TokenFilter
/// the Lucene match version
/// the stream to consume
/// whether to update offsets
- /// @deprecated Offset updates are not supported anymore as of Lucene 4.4.
+ /// @deprecated Offset updates are not supported anymore as of Lucene 4.4.
[Obsolete("Offset updates are not supported anymore as of Lucene 4.4.")]
public TrimFilter(LuceneVersion version, TokenStream @in, bool updateOffsets)
: base(@in)
@@ -84,6 +84,7 @@ public override bool IncrementToken()
// eat the first characters
for (start = 0; start < len && char.IsWhiteSpace(termBuffer[start]); start++)
{
+ // LUCENENET: intentionally empty
}
// eat the end characters
for (end = len; end >= start && char.IsWhiteSpace(termBuffer[end - 1]); end--)
@@ -111,4 +112,4 @@ public override bool IncrementToken()
return true;
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
index 03f694210c..14e3e2610f 100644
--- a/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
+++ b/src/Lucene.Net.Analysis.Common/Analysis/Sinks/TeeSinkTokenFilter.cs
@@ -35,16 +35,16 @@ namespace Lucene.Net.Analysis.Sinks
/// TeeSinkTokenFilter source1 = new TeeSinkTokenFilter(new WhitespaceTokenizer(version, reader1));
/// TeeSinkTokenFilter.SinkTokenStream sink1 = source1.NewSinkTokenStream();
/// TeeSinkTokenFilter.SinkTokenStream sink2 = source1.NewSinkTokenStream();
- ///
+ ///
/// TeeSinkTokenFilter source2 = new TeeSinkTokenFilter(new WhitespaceTokenizer(version, reader2));
/// source2.AddSinkTokenStream(sink1);
/// source2.AddSinkTokenStream(sink2);
- ///
+ ///
/// TokenStream final1 = new LowerCaseFilter(version, source1);
/// TokenStream final2 = source2;
/// TokenStream final3 = new EntityDetect(sink1);
/// TokenStream final4 = new URLDetect(sink2);
- ///
+ ///
/// d.Add(new TextField("f1", final1, Field.Store.NO));
/// d.Add(new TextField("f2", final2, Field.Store.NO));
/// d.Add(new TextField("f3", final3, Field.Store.NO));
@@ -131,6 +131,7 @@ public void ConsumeAllTokens()
{
while (IncrementToken())
{
+ // LUCENENET: intentionally empty
}
}
@@ -182,7 +183,7 @@ public abstract class SinkFilter
{
///
/// Returns true, iff the current state of the passed-in shall be stored
- /// in the sink.
+ /// in the sink.
///
public abstract bool Accept(AttributeSource source);
@@ -271,4 +272,4 @@ public override bool Accept(AttributeSource source)
}
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/BasqueStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/BasqueStemmer.cs
index d3a2fba7d8..ced24409d1 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/BasqueStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/BasqueStemmer.cs
@@ -1007,7 +1007,7 @@ public override bool Stem()
lab2:
m_cursor = m_limit - v_2;
goto replab1;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab1:
// repeat, line 143
@@ -1036,7 +1036,7 @@ public override bool Stem()
lab4:
m_cursor = m_limit - v_3;
goto replab3;
- end_of_outer_loop_2: { }
+ end_of_outer_loop_2: { /* LUCENENET: intentionally empty */ }
}
replab3:
// do, line 144
@@ -1066,4 +1066,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/CatalanStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/CatalanStemmer.cs
index bce4bc1fd8..0e868af310 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/CatalanStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/CatalanStemmer.cs
@@ -840,7 +840,7 @@ private bool r_cleaning()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -1162,4 +1162,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/DutchStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/DutchStemmer.cs
index 43942bf3fb..c7425b40f6 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/DutchStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/DutchStemmer.cs
@@ -191,7 +191,7 @@ private bool r_prelude()
lab1:
m_cursor = v_2;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
m_cursor = v_1;
@@ -301,7 +301,7 @@ private bool r_prelude()
lab4:
m_cursor = v_4;
goto replab3;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab3:
return true;
@@ -475,7 +475,7 @@ private bool r_postlude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -1015,4 +1015,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/EnglishStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/EnglishStemmer.cs
index 8497229812..857e02c2f5 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/EnglishStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/EnglishStemmer.cs
@@ -321,7 +321,7 @@ private bool r_prelude()
lab4:
m_cursor = v_4;
goto replab3;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab3: {/* LUCENENET: intentionally blank */}
} while (false);
@@ -1318,7 +1318,7 @@ private bool r_postlude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -1553,4 +1553,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/FrenchStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/FrenchStemmer.cs
index c883fab5d3..b53f1eb819 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/FrenchStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/FrenchStemmer.cs
@@ -395,7 +395,7 @@ private bool r_prelude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -655,7 +655,7 @@ private bool r_postlude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -1522,7 +1522,7 @@ private bool r_un_accent()
} while (false);
lab1:
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
if (v_1 > 0)
@@ -1773,4 +1773,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/German2Stemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/German2Stemmer.cs
index 0337fd7582..2492e943ae 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/German2Stemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/German2Stemmer.cs
@@ -210,7 +210,7 @@ private bool r_prelude()
lab1:
m_cursor = v_2;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
m_cursor = v_1;
@@ -292,7 +292,7 @@ private bool r_prelude()
lab7:
m_cursor = v_5;
goto replab6;
- end_of_outer_loop_2: { }
+ end_of_outer_loop_2: { /* LUCENENET: intentionally empty */ }
}
replab6:
return true;
@@ -497,7 +497,7 @@ private bool r_postlude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -893,4 +893,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/GermanStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/GermanStemmer.cs
index ef20dc014a..9786a358f6 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/GermanStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/GermanStemmer.cs
@@ -112,20 +112,20 @@ private bool r_prelude()
// test, line 30
v_1 = m_cursor;
// repeat, line 30
-
+
while (true)
{
v_2 = m_cursor;
-
+
do
{
// (, line 30
// or, line 33
-
+
do
{
v_3 = m_cursor;
-
+
do
{
// (, line 31
@@ -160,29 +160,29 @@ private bool r_prelude()
//continue replab0;
goto end_of_outer_loop;
-
+
} while (false);
lab1:
m_cursor = v_2;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
m_cursor = v_1;
// repeat, line 36
-
+
while (true)
{
v_4 = m_cursor;
-
+
do
{
// goto, line 36
-
+
while (true)
{
v_5 = m_cursor;
-
+
do
{
// (, line 36
@@ -193,11 +193,11 @@ private bool r_prelude()
// [, line 37
m_bra = m_cursor;
// or, line 37
-
+
do
{
v_6 = m_cursor;
-
+
do
{
// (, line 37
@@ -254,12 +254,12 @@ private bool r_prelude()
//continue replab4;
goto end_of_outer_loop_2;
-
+
} while (false);
lab5:
m_cursor = v_4;
goto replab4;
- end_of_outer_loop_2: { }
+ end_of_outer_loop_2: { /* LUCENENET: intentionally empty */ }
}
replab4:
return true;
@@ -287,10 +287,10 @@ private bool r_mark_regions()
I_x = m_cursor;
m_cursor = v_1;
// gopast, line 49
-
+
while (true)
{
-
+
do
{
if (!(InGrouping(g_v, 97, 252)))
@@ -308,10 +308,10 @@ private bool r_mark_regions()
}
golab0:
// gopast, line 49
-
+
while (true)
{
-
+
do
{
if (!(OutGrouping(g_v, 97, 252)))
@@ -331,7 +331,7 @@ private bool r_mark_regions()
// setmark p1, line 49
I_p1 = m_cursor;
// try, line 50
-
+
do
{
// (, line 50
@@ -343,7 +343,7 @@ private bool r_mark_regions()
} while (false);
lab4:
// gopast, line 51
-
+
while (true)
{
do
@@ -363,10 +363,10 @@ private bool r_mark_regions()
}
golab5:
// gopast, line 51
-
+
while (true)
{
-
+
do
{
if (!(OutGrouping(g_v, 97, 252)))
@@ -393,11 +393,11 @@ private bool r_postlude()
int among_var;
int v_1;
// repeat, line 55
-
+
while (true)
{
v_1 = m_cursor;
-
+
do
{
// (, line 55
@@ -458,12 +458,12 @@ private bool r_postlude()
//continue replab0;
goto end_of_outer_loop;
-
+
} while (false);
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -777,7 +777,7 @@ private bool r_standard_suffix()
return true;
}
-
+
public override bool Stem()
{
int v_1;
@@ -848,4 +848,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/ItalianStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/ItalianStemmer.cs
index 4f1c688193..b30c8ed8bc 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/ItalianStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/ItalianStemmer.cs
@@ -364,7 +364,7 @@ private bool r_prelude()
lab1:
m_cursor = v_2;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
m_cursor = v_1;
@@ -452,7 +452,7 @@ private bool r_prelude()
lab3:
m_cursor = v_3;
goto replab2;
- end_of_outer_loop_2: { }
+ end_of_outer_loop_2: { /* LUCENENET: intentionally empty */ }
}
replab2:
return true;
@@ -757,7 +757,7 @@ private bool r_postlude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -1346,4 +1346,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/KpStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/KpStemmer.cs
index 170c04da52..3db3443e69 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/KpStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/KpStemmer.cs
@@ -1905,7 +1905,7 @@ private bool r_measure()
} while (false);
lab3:
goto replab2;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab2:
// atleast, line 209
@@ -1953,7 +1953,7 @@ private bool r_measure()
lab5:
m_cursor = v_5;
goto replab4;
- end_of_outer_loop_2: { }
+ end_of_outer_loop_2: { /* LUCENENET: intentionally empty */ }
}
replab4:
if (v_4 > 0)
@@ -1988,7 +1988,7 @@ private bool r_measure()
} while (false);
lab9:
goto replab8;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab8:
// atleast, line 210
@@ -2036,7 +2036,7 @@ private bool r_measure()
lab11:
m_cursor = v_9;
goto replab10;
- end_of_outer_loop_2: { }
+ end_of_outer_loop_2: { /* LUCENENET: intentionally empty */ }
}
replab10:
if (v_8 > 0)
@@ -2164,7 +2164,7 @@ public override bool Stem()
lab3:
m_cursor = v_3;
goto replab2;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab2: {/* LUCENENET: intentionally blank */}
} while (false);
@@ -2437,7 +2437,7 @@ public override bool Stem()
lab20:
m_cursor = v_19;
goto replab19;
- end_of_outer_loop_2: { }
+ end_of_outer_loop_2: { /* LUCENENET: intentionally empty */ }
}
replab19: {/* LUCENENET: intentionally blank */}
} while (false);
@@ -2456,4 +2456,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/PorterStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/PorterStemmer.cs
index b0cc8acb09..d2554537de 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/PorterStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/PorterStemmer.cs
@@ -778,7 +778,7 @@ public override bool Stem()
lab3:
m_cursor = v_3;
goto replab2;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab2: {/* LUCENENET: intentionally blank */}
} while (false);
@@ -1033,7 +1033,7 @@ public override bool Stem()
lab25:
m_cursor = v_19;
goto replab24;
- end_of_outer_loop_2: { }
+ end_of_outer_loop_2: { /* LUCENENET: intentionally empty */ }
}
replab24: {/* LUCENENET: intentionally blank */}
} while (false);
@@ -1052,4 +1052,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/PortugueseStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/PortugueseStemmer.cs
index f141f92b1a..35ea03e219 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/PortugueseStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/PortugueseStemmer.cs
@@ -331,7 +331,7 @@ private bool r_prelude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -636,7 +636,7 @@ private bool r_postlude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -1269,4 +1269,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/RomanianStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/RomanianStemmer.cs
index 6715582828..c7ebe4d10f 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/RomanianStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/RomanianStemmer.cs
@@ -390,7 +390,7 @@ private bool r_prelude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -695,7 +695,7 @@ private bool r_postlude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -901,7 +901,7 @@ private bool r_standard_suffix()
lab1:
m_cursor = m_limit - v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
// [, line 132
@@ -1186,4 +1186,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/SpanishStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/SpanishStemmer.cs
index 2392e8faa8..c0fc4f2aa6 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/SpanishStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/SpanishStemmer.cs
@@ -600,7 +600,7 @@ private bool r_postlude()
lab1:
m_cursor = v_1;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
return true;
@@ -1320,4 +1320,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/TurkishStemmer.cs b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/TurkishStemmer.cs
index a94f625034..7100948ad9 100644
--- a/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/TurkishStemmer.cs
+++ b/src/Lucene.Net.Analysis.Common/Tartarus/Snowball/Ext/TurkishStemmer.cs
@@ -3518,7 +3518,7 @@ private bool r_more_than_one_syllable_word()
lab1:
m_cursor = v_3;
goto replab0;
- end_of_outer_loop: { }
+ end_of_outer_loop: { /* LUCENENET: intentionally empty */ }
}
replab0:
if (v_2 > 0)
@@ -3739,4 +3739,4 @@ public override int GetHashCode()
return this.GetType().FullName.GetHashCode();
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Analysis.Kuromoji/Util/ToStringUtil.cs b/src/Lucene.Net.Analysis.Kuromoji/Util/ToStringUtil.cs
index 86de6d408c..e25e6ffe74 100644
--- a/src/Lucene.Net.Analysis.Kuromoji/Util/ToStringUtil.cs
+++ b/src/Lucene.Net.Analysis.Kuromoji/Util/ToStringUtil.cs
@@ -1391,7 +1391,7 @@ public static void GetRomanization(StringBuilder builder, string s)
builder.Append(ch);
break;
}
- break_main: { }
+ break_main: { /* LUCENENET: intentionally empty */ }
}
}
}
diff --git a/src/Lucene.Net.Analysis.Phonetic/Language/Bm/PhoneticEngine.cs b/src/Lucene.Net.Analysis.Phonetic/Language/Bm/PhoneticEngine.cs
index 4db2ee385f..3950e7559f 100644
--- a/src/Lucene.Net.Analysis.Phonetic/Language/Bm/PhoneticEngine.cs
+++ b/src/Lucene.Net.Analysis.Phonetic/Language/Bm/PhoneticEngine.cs
@@ -153,7 +153,7 @@ public void Apply(IPhonemeExpr phonemeExpr, int maxPhonemes)
}
}
}
- EXPR_break: { }
+ EXPR_break: { /* LUCENENET: intentionally empty */ }
this.phonemes.Clear();
this.phonemes.UnionWith(newPhonemes);
diff --git a/src/Lucene.Net.Analysis.Phonetic/Language/Bm/Rule.cs b/src/Lucene.Net.Analysis.Phonetic/Language/Bm/Rule.cs
index fb2b53fbd6..7e30152f76 100644
--- a/src/Lucene.Net.Analysis.Phonetic/Language/Bm/Rule.cs
+++ b/src/Lucene.Net.Analysis.Phonetic/Language/Bm/Rule.cs
@@ -338,6 +338,7 @@ public static IDictionary> GetInstanceMap(NameType nameType,
nameTypes.TryGetValue(rt, out var ruleTypes) && ruleTypes != null &&
ruleTypes.TryGetValue(lang, out var rules) && rules != null)
{
+ // LUCENENET: intentionally empty
}
else
{
diff --git a/src/Lucene.Net.Analysis.Phonetic/PhoneticFilter.cs b/src/Lucene.Net.Analysis.Phonetic/PhoneticFilter.cs
index e8287a742b..c4a27e4200 100644
--- a/src/Lucene.Net.Analysis.Phonetic/PhoneticFilter.cs
+++ b/src/Lucene.Net.Analysis.Phonetic/PhoneticFilter.cs
@@ -76,9 +76,12 @@ public override bool IncrementToken()
phonetic = v;
}
}
- catch (Exception ignored) when (ignored.IsException()) { } // just use the direct text
+ catch (Exception ignored) when (ignored.IsException())
+ {
+ // just use the direct text
+ }
- if (phonetic is null) return true;
+ if (phonetic is null) return true;
if (!inject)
{
diff --git a/src/Lucene.Net.Analysis.SmartCn/Hhmm/BigramDictionary.cs b/src/Lucene.Net.Analysis.SmartCn/Hhmm/BigramDictionary.cs
index 457770bfaf..b9d16273ae 100644
--- a/src/Lucene.Net.Analysis.SmartCn/Hhmm/BigramDictionary.cs
+++ b/src/Lucene.Net.Analysis.SmartCn/Hhmm/BigramDictionary.cs
@@ -43,7 +43,7 @@ private BigramDictionary()
public const int PRIME_BIGRAM_LENGTH = 402137;
///
- /// The word associations are stored as FNV1 hashcodes, which have a small probability of collision, but save memory.
+ /// The word associations are stored as FNV1 hashcodes, which have a small probability of collision, but save memory.
///
private long[] bigramHashTable;
@@ -107,11 +107,11 @@ private bool LoadFromObj(FileInfo serialObj)
// The data in Lucene is stored in a proprietary binary format (similar to
// .NET's BinarySerializer) that cannot be read back in .NET. Therefore, the
// data was extracted using Java's DataOutputStream using the following Java code.
- // It can then be read in using the LoadFromInputStream method below
+ // It can then be read in using the LoadFromInputStream method below
// (using a DataInputStream instead of a BinaryReader), and saved
// in the correct (BinaryWriter) format by calling the SaveToObj method.
// Alternatively, the data can be loaded from disk using the files
- // here(https://issues.apache.org/jira/browse/LUCENE-1629) in the analysis.data.zip file,
+ // here(https://issues.apache.org/jira/browse/LUCENE-1629) in the analysis.data.zip file,
// which will automatically produce the .mem files.
//public void saveToOutputStream(java.io.DataOutputStream stream) throws IOException
@@ -227,7 +227,7 @@ private void Load(string dictRoot)
if (serialObj.Exists && LoadFromObj(serialObj))
{
-
+ // LUCENENET: intentionally empty
}
else
{
@@ -259,7 +259,7 @@ private void Load(string dictRoot)
public virtual void LoadFromFile(string dctFilePath)
{
int i, cnt, length, total = 0;
- // The file only counted 6763 Chinese characters plus 5 reserved slots 3756~3760.
+ // The file only counted 6763 Chinese characters plus 5 reserved slots 3756~3760.
// The 3756th is used (as a header) to store information.
int[]
buffer = new int[3];
diff --git a/src/Lucene.Net.Analysis.SmartCn/Hhmm/WordDictionary.cs b/src/Lucene.Net.Analysis.SmartCn/Hhmm/WordDictionary.cs
index cb609ff1cc..b8cd7cbbfa 100644
--- a/src/Lucene.Net.Analysis.SmartCn/Hhmm/WordDictionary.cs
+++ b/src/Lucene.Net.Analysis.SmartCn/Hhmm/WordDictionary.cs
@@ -45,9 +45,9 @@ private WordDictionary()
public const int PRIME_INDEX_LENGTH = 12071;
///
- /// wordIndexTable guarantees to hash all Chinese characters in Unicode into
- /// PRIME_INDEX_LENGTH array. There will be conflict, but in reality this
- /// program only handles the 6768 characters found in GB2312 plus some
+ /// wordIndexTable guarantees to hash all Chinese characters in Unicode into
+ /// PRIME_INDEX_LENGTH array. There will be conflict, but in reality this
+ /// program only handles the 6768 characters found in GB2312 plus some
/// ASCII characters. Therefore in order to guarantee better precision, it is
/// necessary to retain the original symbol in the charIndexTable.
///
@@ -56,13 +56,13 @@ private WordDictionary()
private char[] charIndexTable;
///
- /// To avoid taking too much space, the data structure needed to store the
+ /// To avoid taking too much space, the data structure needed to store the
/// lexicon requires two multidimensional arrays to store word and frequency.
- /// Each word is placed in a char[]. Each char represents a Chinese char or
- /// other symbol. Each frequency is put into an int. These two arrays
- /// correspond to each other one-to-one. Therefore, one can use
- /// wordItem_charArrayTable[i][j] to look up word from lexicon, and
- /// wordItem_frequencyTable[i][j] to look up the corresponding frequency.
+ /// Each word is placed in a char[]. Each char represents a Chinese char or
+ /// other symbol. Each frequency is put into an int. These two arrays
+ /// correspond to each other one-to-one. Therefore, one can use
+ /// wordItem_charArrayTable[i][j] to look up word from lexicon, and
+ /// wordItem_frequencyTable[i][j] to look up the corresponding frequency.
///
private char[][][] wordItem_charArrayTable;
@@ -119,7 +119,7 @@ public virtual void Load(string dctFileRoot)
if (serialObj.Exists && LoadFromObj(serialObj))
{
-
+ // LUCENENET: intentionally empty
}
else
{
@@ -179,11 +179,11 @@ private bool LoadFromObj(FileInfo serialObj)
// The data in Lucene is stored in a proprietary binary format (similar to
// .NET's BinarySerializer) that cannot be read back in .NET. Therefore, the
// data was extracted using Java's DataOutputStream using the following Java code.
- // It can then be read in using the LoadFromInputStream method below
+ // It can then be read in using the LoadFromInputStream method below
// (using a DataInputStream instead of a BinaryReader), and saved
// in the correct (BinaryWriter) format by calling the SaveToObj method.
// Alternatively, the data can be loaded from disk using the files
- // here(https://issues.apache.org/jira/browse/LUCENE-1629) in the analysis.data.zip file,
+ // here(https://issues.apache.org/jira/browse/LUCENE-1629) in the analysis.data.zip file,
// which will automatically produce the .mem files.
//public void saveToOutputStream(java.io.DataOutputStream stream) throws IOException
@@ -415,7 +415,7 @@ private int LoadMainDataFromFile(string dctFilePath)
}
///
- /// The original lexicon puts all information with punctuation into a
+ /// The original lexicon puts all information with punctuation into a
/// chart (from 1 to 3755). Here it then gets expanded, separately being
/// placed into the chart that has the corresponding symbol.
///
@@ -423,8 +423,8 @@ private void ExpandDelimiterData()
{
int i;
int cnt;
- // Punctuation then treating index 3755 as 1,
- // distribute the original punctuation corresponding dictionary into
+ // Punctuation then treating index 3755 as 1,
+ // distribute the original punctuation corresponding dictionary into
int delimiterIndex = 3755 + GB2312_FIRST_CHAR;
i = 0;
while (i < wordItem_charArrayTable[delimiterIndex].Length)
@@ -546,7 +546,7 @@ private void SortEachItems()
}
///
- /// Calculate character 's position in hash table,
+ /// Calculate character 's position in hash table,
/// then initialize the value of that position in the address table.
///
private bool SetTableIndex(char c, int j)
diff --git a/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/MultiTrie2.cs b/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/MultiTrie2.cs
index d84551ca88..58a0f9271d 100644
--- a/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/MultiTrie2.cs
+++ b/src/Lucene.Net.Analysis.Stempel/Egothor.Stemmer/MultiTrie2.cs
@@ -144,7 +144,8 @@ public override string GetFully(string key)
}
}
}
- catch (Exception x) when (x.IsIndexOutOfBoundsException()) { }
+ catch (Exception x) when (x.IsIndexOutOfBoundsException()) { /* ignored */ }
+
return result.ToString();
}
@@ -202,7 +203,8 @@ public override string GetLastOnPath(string key)
}
}
}
- catch (Exception x) when (x.IsIndexOutOfBoundsException()) { }
+ catch (Exception x) when (x.IsIndexOutOfBoundsException()) { /* ignored */ }
+
return result.ToString();
}
diff --git a/src/Lucene.Net.Benchmark/ByTask/Tasks/AnalyzerFactoryTask.cs b/src/Lucene.Net.Benchmark/ByTask/Tasks/AnalyzerFactoryTask.cs
index 8b4892f7cd..b950a49030 100644
--- a/src/Lucene.Net.Benchmark/ByTask/Tasks/AnalyzerFactoryTask.cs
+++ b/src/Lucene.Net.Benchmark/ByTask/Tasks/AnalyzerFactoryTask.cs
@@ -371,7 +371,7 @@ public override void SetParams(string @params)
///
/// Instantiates the given analysis factory class after pulling params from
/// the given stream tokenizer, then stores the result in the appropriate
- /// pipeline component list.
+ /// pipeline component list.
///
/// Stream tokenizer from which to draw analysis factory params.
/// Analysis factory class to instantiate.
@@ -468,7 +468,7 @@ private void CreateAnalysisPipelineComponent(StreamTokenizer stok, Type clazz)
}
}
}
- WHILE_LOOP_BREAK: { }
+ WHILE_LOOP_BREAK: { /* LUCENENET: intentionally empty */ }
if (!argMap.ContainsKey("luceneMatchVersion"))
{
diff --git a/src/Lucene.Net.Benchmark/ByTask/Utils/Algorithm.cs b/src/Lucene.Net.Benchmark/ByTask/Utils/Algorithm.cs
index 56162161af..fc917c9957 100644
--- a/src/Lucene.Net.Benchmark/ByTask/Utils/Algorithm.cs
+++ b/src/Lucene.Net.Benchmark/ByTask/Utils/Algorithm.cs
@@ -40,8 +40,8 @@ public class Algorithm
///
/// Read algorithm from file.
- /// Property examined: alt.tasks.packages == comma separated list of
- /// alternate Assembly names where tasks would be searched for, when not found
+ /// Property examined: alt.tasks.packages == comma separated list of
+ /// alternate Assembly names where tasks would be searched for, when not found
/// in the default Assembly (that of ).
/// If the same task class appears in more than one Assembly, the Assembly
/// indicated first in this list will be used.
@@ -163,7 +163,7 @@ public Algorithm(PerfRunData runData)
}
stok.NextToken();
}
- BALANCED_PARENS_BREAK: { }
+ BALANCED_PARENS_BREAK: { /* LUCENENET: intentionally empty */ }
}
stok.EndOfLineIsSignificant = false;
string prm = @params.ToString().Trim();
diff --git a/src/Lucene.Net.Benchmark/Support/TagSoup/Parser.cs b/src/Lucene.Net.Benchmark/Support/TagSoup/Parser.cs
index e788e211ed..3498f39200 100644
--- a/src/Lucene.Net.Benchmark/Support/TagSoup/Parser.cs
+++ b/src/Lucene.Net.Benchmark/Support/TagSoup/Parser.cs
@@ -962,6 +962,7 @@ private void Push(Element e)
}
catch (Exception ioe) when (ioe.IsIOException())
{
+ // ignored
} // Can't be thrown for root I believe.
}
if (Foreign(prefix, ns))
@@ -1094,6 +1095,7 @@ public virtual void Decl(char[] buffer, int startIndex, int length)
}
catch (Exception)
{
+ // ignored
}
}
}
diff --git a/src/Lucene.Net.Benchmark/Support/TagSoup/XMLWriter.cs b/src/Lucene.Net.Benchmark/Support/TagSoup/XMLWriter.cs
index 35b30f5877..9e77f92b70 100644
--- a/src/Lucene.Net.Benchmark/Support/TagSoup/XMLWriter.cs
+++ b/src/Lucene.Net.Benchmark/Support/TagSoup/XMLWriter.cs
@@ -1225,6 +1225,7 @@ private string DoPrefix(string uri, string qName, bool isElement)
}
for (; prefix is null || nsSupport.GetUri(prefix) != null; prefix = "__NS" + ++prefixCounter)
{
+ // LUCENENET: intentionally empty
}
nsSupport.DeclarePrefix(prefix, uri);
doneDeclTable[uri] = prefix;
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsWriter.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsWriter.cs
index dde879d23e..61f2eb7855 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsWriter.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextStoredFieldsWriter.cs
@@ -112,7 +112,7 @@ public override void WriteField(FieldInfo info, IIndexableField field)
// LUCENENET specific - To avoid boxing/unboxing, we don't
// call GetNumericValue(). Instead, we check the field.NumericType and then
- // call the appropriate conversion method.
+ // call the appropriate conversion method.
if (field.NumericType != NumericFieldType.NONE)
{
switch (field.NumericType)
@@ -192,6 +192,7 @@ public override void Abort()
}
catch (Exception ignored) when (ignored.IsThrowable())
{
+ // ignored
}
IOUtils.DeleteFilesIgnoringExceptions(_directory,
IndexFileNames.SegmentFileName(_segment, "", FIELDS_EXTENSION));
@@ -238,4 +239,4 @@ private void NewLine()
SimpleTextUtil.WriteNewline(_output);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsWriter.cs b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsWriter.cs
index 8011853173..284fd756ee 100644
--- a/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsWriter.cs
+++ b/src/Lucene.Net.Codecs/SimpleText/SimpleTextTermVectorsWriter.cs
@@ -185,6 +185,7 @@ public override sealed void Abort()
}
catch (Exception t) when (t.IsThrowable())
{
+ // ignored
}
IOUtils.DeleteFilesIgnoringExceptions(_directory,
IndexFileNames.SegmentFileName(_segment, "", VECTORS_EXTENSION));
@@ -234,4 +235,4 @@ private void NewLine()
SimpleTextUtil.WriteNewline(_output);
}
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs b/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
index d73ef5869a..fae80ddc63 100644
--- a/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
+++ b/src/Lucene.Net.Highlighter/Highlight/Highlighter.cs
@@ -256,7 +256,7 @@ public TextFragment[] GetBestTextFragments(
lastEndOffset = Math.Max(lastEndOffset, endOffset);
}
- //Test what remains of the original text beyond the point where we stopped analyzing
+ //Test what remains of the original text beyond the point where we stopped analyzing
if (
// if there is text beyond the last token considered..
(lastEndOffset < text.Length)
@@ -332,6 +332,7 @@ public TextFragment[] GetBestTextFragments(
}
catch (Exception e) when (e.IsException())
{
+ // ignored
}
}
}
@@ -340,7 +341,7 @@ public TextFragment[] GetBestTextFragments(
///
/// Improves readability of a score-sorted list of TextFragments by merging any fragments
/// that were contiguous in the original text into one larger fragment with the correct order.
- /// This will leave a "null" in the array entry for the lesser scored fragment.
+ /// This will leave a "null" in the array entry for the lesser scored fragment.
///
/// An array of document fragments in descending score
private static void MergeContiguousFragments(TextFragment[] frag) // LUCENENET: CA1822: Mark members as static
@@ -357,7 +358,7 @@ private static void MergeContiguousFragments(TextFragment[] frag) // LUCENENET:
{
continue;
}
- //merge any contiguous blocks
+ //merge any contiguous blocks
for (int x = 0; x < frag.Length; x++)
{
if (frag[x] is null)
diff --git a/src/Lucene.Net.Highlighter/VectorHighlight/BaseFragmentsBuilder.cs b/src/Lucene.Net.Highlighter/VectorHighlight/BaseFragmentsBuilder.cs
index 070a97ffcd..6c4f0003c4 100644
--- a/src/Lucene.Net.Highlighter/VectorHighlight/BaseFragmentsBuilder.cs
+++ b/src/Lucene.Net.Highlighter/VectorHighlight/BaseFragmentsBuilder.cs
@@ -339,7 +339,7 @@ protected virtual IList DiscreteMultiValueHighlighting(IList result = new JCG.List();
@@ -351,7 +351,7 @@ protected virtual IList DiscreteMultiValueHighlighting(IList multiValuedSeparator;
diff --git a/src/Lucene.Net.QueryParser/Classic/QueryParser.cs b/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
index c87d0acbc5..adf028bef4 100644
--- a/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Classic/QueryParser.cs
@@ -934,6 +934,7 @@ private void Jj_rescan_token()
}
catch (LookaheadSuccess)
{
+ // ignored
}
}
jj_rescan = false;
diff --git a/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParser.cs b/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParser.cs
index 148b25ebe7..e6ab1fa200 100644
--- a/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParser.cs
+++ b/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParser.cs
@@ -1201,7 +1201,7 @@ private void Jj_add_error_token(int kind, int pos)
jj_expentries.Add(jj_expentry);
goto jj_entries_loop_break;
}
- jj_entries_loop_continue: { }
+ jj_entries_loop_continue: { /* LUCENENET: intentionally empty */ }
}
jj_entries_loop_break:
if (pos != 0) jj_lasttokens[(jj_endpos = pos) - 1] = kind;
@@ -1291,9 +1291,7 @@ private void Jj_rescan_token()
p = p.next;
} while (p != null);
}
-#pragma warning disable 168
- catch (LookaheadSuccess ls) { }
-#pragma warning restore 168
+ catch (LookaheadSuccess) { /* ignored */ }
}
jj_rescan = false;
}
diff --git a/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParserTokenManager.cs b/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParserTokenManager.cs
index 660dad8271..420f7674e0 100644
--- a/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParserTokenManager.cs
+++ b/src/Lucene.Net.QueryParser/Flexible/Standard/Parser/StandardSyntaxParserTokenManager.cs
@@ -943,7 +943,7 @@ public virtual Token GetNextToken()
}
throw new TokenMgrError(EOFSeen, curLexState, error_line, error_column, error_after, m_curChar, TokenMgrError.LEXICAL_ERROR);
- EOFLoop_continue: { }
+ EOFLoop_continue: { /* LUCENENET: intentionally empty */ }
}
}
diff --git a/src/Lucene.Net.QueryParser/Surround/Parser/QueryParser.cs b/src/Lucene.Net.QueryParser/Surround/Parser/QueryParser.cs
index 36c511ee06..35c2a45ef2 100644
--- a/src/Lucene.Net.QueryParser/Surround/Parser/QueryParser.cs
+++ b/src/Lucene.Net.QueryParser/Surround/Parser/QueryParser.cs
@@ -897,7 +897,7 @@ private void Jj_rescan_token()
p = p.next;
} while (p != null);
}
- catch (LookaheadSuccess /*ls*/) { }
+ catch (LookaheadSuccess /*ls*/) { /* ignored */ }
}
jj_rescan = false;
}
diff --git a/src/Lucene.Net.Spatial/Prefix/AbstractVisitingPrefixTreeFilter.cs b/src/Lucene.Net.Spatial/Prefix/AbstractVisitingPrefixTreeFilter.cs
index 534273dffb..5bae4bd0d5 100644
--- a/src/Lucene.Net.Spatial/Prefix/AbstractVisitingPrefixTreeFilter.cs
+++ b/src/Lucene.Net.Spatial/Prefix/AbstractVisitingPrefixTreeFilter.cs
@@ -239,7 +239,7 @@ protected VisitorTemplate(AbstractVisitingPrefixTreeFilter outerInstance, Atomic
}
}
}//main loop
- main_break: { }
+ main_break: { /* LUCENENET: intentionally empty */ }
return Finish();
}
@@ -514,4 +514,4 @@ internal virtual void Reset(Cell cell)
#endregion Nested type: VNode
} //class VisitorTemplate
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Suggest/Spell/SpellChecker.cs b/src/Lucene.Net.Suggest/Spell/SpellChecker.cs
index 0f7b2d42bc..d23a02f0a1 100644
--- a/src/Lucene.Net.Suggest/Spell/SpellChecker.cs
+++ b/src/Lucene.Net.Suggest/Spell/SpellChecker.cs
@@ -458,7 +458,10 @@ public virtual void ClearIndex()
var dir = this.spellIndex;
#pragma warning disable 612, 618
using (var writer = new IndexWriter(dir, new IndexWriterConfig(LuceneVersion.LUCENE_CURRENT, null)
- { OpenMode = OpenMode.CREATE })) { }
+ { OpenMode = OpenMode.CREATE }))
+ {
+ // LUCENENET: intentionally empty, replaces .close()
+ }
#pragma warning restore 612, 618
SwapSearcher(dir);
}
@@ -765,4 +768,4 @@ internal virtual IndexSearcher CreateSearcher(Directory dir)
/// disposed, otherwise false.
internal virtual bool IsDisposed => disposed;
}
-}
\ No newline at end of file
+}
diff --git a/src/Lucene.Net.Suggest/Suggest/Fst/FSTCompletionBuilder.cs b/src/Lucene.Net.Suggest/Suggest/Fst/FSTCompletionBuilder.cs
index 178f07b741..7e1adac673 100644
--- a/src/Lucene.Net.Suggest/Suggest/Fst/FSTCompletionBuilder.cs
+++ b/src/Lucene.Net.Suggest/Suggest/Fst/FSTCompletionBuilder.cs
@@ -24,9 +24,9 @@ namespace Lucene.Net.Search.Suggest.Fst
///
/// Finite state automata based implementation of "autocomplete" functionality.
- ///
+ ///
///
Implementation details
- ///
+ ///
///
/// The construction step in the object finalizer works as follows:
///
@@ -41,10 +41,10 @@ namespace Lucene.Net.Search.Suggest.Fst
/// root node has arcs labeled with all possible weights. We cache all these
/// arcs, highest-weight first.
///
- ///
+ ///
///
///
- /// At runtime, in ,
+ /// At runtime, in ,
/// the automaton is utilized as follows:
///
/// For each possible term weight encoded in the automaton (cached arcs from
@@ -63,43 +63,43 @@ namespace Lucene.Net.Search.Suggest.Fst
/// insufficient, we proceed to the next (smaller) weight leaving the root node
/// and repeat the same algorithm again.
///
- ///
+ ///
///
Runtime behavior and performance characteristic
- ///
+ ///
/// The algorithm described above is optimized for finding suggestions to short
/// prefixes in a top-weights-first order. This is probably the most common use
/// case: it allows presenting suggestions early and sorts them by the global
/// frequency (and then alphabetically).
- ///
+ ///
///
///
/// If there is an exact match in the automaton, it is returned first on the
/// results list (even with by-weight sorting).
- ///
+ ///
///
///
/// Note that the maximum lookup time for any prefix is the time of
/// descending to the subtree, plus traversal of the subtree up to the number of
/// requested suggestions (because they are already presorted by weight on the
/// root level and alphabetically at any node level).
- ///
+ ///
///
///
/// To order alphabetically only (no ordering by priorities), use identical term
/// weights for all terms. Alphabetical suggestions are returned even if
/// non-constant weights are used, but the algorithm for doing this is
/// suboptimal.
- ///
+ ///
///
///
/// "alphabetically" in any of the documentation above indicates UTF-8
/// representation order, nothing else.
- ///
+ ///
///
///
/// NOTE: the FST file format is experimental and subject to suddenly
/// change, requiring you to rebuild the FST suggest index.
- ///
+ ///
///
///
///
@@ -118,7 +118,7 @@ public class FSTCompletionBuilder
/// highly-weighted completions (because these are filled-in first), but will
/// decrease significantly for low-weighted terms (but these should be
/// infrequent, so it is all right).
- ///
+ ///
///
/// The number of buckets must be within [1, 255] range.
///
@@ -173,7 +173,7 @@ public FSTCompletionBuilder()
///
///
/// Max shared suffix sharing length.
- ///
+ ///
/// See the description of this parameter in 's constructor.
/// In general, for very large inputs you'll want to construct a non-minimal
/// automaton which will be larger, but the construction will take far less ram.
@@ -232,9 +232,9 @@ public virtual FSTCompletion Build()
this.automaton = BuildAutomaton(sorter);
// Dispose of it if it is a disposable
- using (sorter as IDisposable)
+ if (sorter is IDisposable disposable)
{
-
+ disposable.Dispose();
}
return new FSTCompletion(automaton);
@@ -269,4 +269,4 @@ private FST