Skip to content

Commit

Permalink
Call Ample.putGCCandidates on compaction completion (apache#4031)
Browse files Browse the repository at this point in the history
The call to putGCCandidates was lost in apache#3604 with
the removal of ManagerMetadataUtil.replaceDatafiles.


Co-authored-by: Keith Turner <[email protected]>
  • Loading branch information
dlmarion and keith-turner authored Dec 6, 2023
1 parent 7334478 commit 1a40c47
Show file tree
Hide file tree
Showing 7 changed files with 9 additions and 12 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -958,7 +958,14 @@ private TabletMetadata commitCompaction(TCompactionStats stats, ExternalCompacti
// ELASTICITY_TODO check return value and retry, could fail because of race conditions
var result = tabletsMutator.process().get(extent);
if (result.getStatus() == Ample.ConditionalResult.Status.ACCEPTED) {
// compaction was committed
// compaction was committed, mark the compaction input files for deletion
//
        // ELASTICITY_TODO in the case of process death the GC candidates would never be added
        // like #3811. If compaction commit were moved to FATE per #3559 then this would not
        // be an issue. If compaction commit is never moved to FATE, then this addition could
        // be moved to the compaction refresh process. The compaction refresh process will go away
        // if compaction commit is moved to FATE, so should only do this if not moving to FATE.
ctx.getAmple().putGcCandidates(extent.tableId(), ecm.getJobFiles());
break;
} else {
        // compaction failed to commit, maybe something changed on the tablet so let's reread the
Expand Down
2 changes: 1 addition & 1 deletion test/src/main/java/org/apache/accumulo/test/GCRunIT.java
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ public void forceMissingDirTest() throws Exception {
* GCRun need to support injecting synthetic row data, or another solution is required.
*/
@Test
@Disabled("deleting prev row causes scan to fail before row read validation")
@Disabled("disabled since test creation, deleting prev row causes scan to fail before row read validation")
public void forceMissingPrevRowTest() {}

private void scanReferences(GCRun userGC) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,8 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.RemoteIterator;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

@Disabled // ELASTICITY_TODO
public class GarbageCollectWALIT extends ConfigurableMacBase {

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,6 @@
import org.apache.hadoop.io.Text;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Expand Down Expand Up @@ -129,7 +128,6 @@ private void killMacGc() throws ProcessNotFoundException, InterruptedException,
}

@Test
@Disabled // ELASTICITY_TODO
public void gcTest() throws Exception {
killMacGc();
final String table = "test_ingest";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,11 +36,9 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

// verify trash is not used with Hadoop defaults, since Trash is not enabled by default
@Disabled // ELASTICITY_TODO
public class GarbageCollectorTrashDefaultIT extends GarbageCollectorTrashBase {

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,11 +36,9 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FileSystem;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

// verify that trash is used if Hadoop is configured to use it
@Disabled // ELASTICITY_TODO
public class GarbageCollectorTrashEnabledIT extends GarbageCollectorTrashBase {

@Override
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -40,11 +40,9 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.TrashPolicyDefault;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

// verify that trash is used if Hadoop is configured to use it and that using a custom policy works
@Disabled // ELASTICITY_TODO
public class GarbageCollectorTrashEnabledWithCustomPolicyIT extends GarbageCollectorTrashBase {

public static class NoFlushFilesInTrashPolicy extends TrashPolicyDefault {
Expand Down

0 comments on commit 1a40c47

Please sign in to comment.