Skip to content

Commit

Permalink
increase hash map size to speed up 10K data set
Browse files Browse the repository at this point in the history
  • Loading branch information
anitasv committed Jan 31, 2024
1 parent 67d1bb1 commit 04e8de2
Showing 1 changed file with 2 additions and 2 deletions.
4 changes: 2 additions & 2 deletions src/main/java/dev/morling/onebrc/CalculateAverage_vaidhy.java
Original file line number Diff line number Diff line change
Expand Up @@ -527,7 +527,7 @@ public long address() {
private static class ChunkProcessorImpl implements MapReduce<PrimitiveHashMap> {

// 1 << 15 > 10,000 so it works (extra headroom reduces collisions)
private final PrimitiveHashMap statistics = new PrimitiveHashMap(14);
private final PrimitiveHashMap statistics = new PrimitiveHashMap(15);

@Override
public void process(long keyStartAddress, long keyEndAddress, long hash, long suffix, int temperature) {
Expand Down Expand Up @@ -573,7 +573,7 @@ private static Map<String, String> toPrintMap(Map<String, IntSummaryStatistics>
private static Map<String, IntSummaryStatistics> combineOutputs(
List<PrimitiveHashMap> list) {

Map<String, IntSummaryStatistics> output = new HashMap<>(10000);
Map<String, IntSummaryStatistics> output = HashMap.newHashMap(10000);
for (PrimitiveHashMap map : list) {
for (HashEntry entry : map.entrySet()) {
if (entry.value != null) {
Expand Down

0 comments on commit 04e8de2

Please sign in to comment.