diff --git a/pom.xml b/pom.xml
index 2499043f0..15ec170f7 100644
--- a/pom.xml
+++ b/pom.xml
@@ -268,7 +268,7 @@
                <groupId>com.github.conveyal</groupId>
                <artifactId>gtfs-lib</artifactId>
-                <version>3d79493</version>
+                <version>a3e5707</version>
diff --git a/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java
index 7b6b94270..bd50dff8f 100644
--- a/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java
+++ b/src/main/java/com/conveyal/datatools/editor/controllers/api/EditorController.java
@@ -29,8 +29,10 @@
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
+import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@@ -41,6 +43,7 @@
import static com.conveyal.datatools.common.utils.SparkUtils.getObjectNode;
import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.conveyal.datatools.manager.controllers.api.UserController.inTestingEnvironment;
+import static org.eclipse.jetty.http.HttpStatus.OK_200;
import static spark.Spark.delete;
import static spark.Spark.options;
import static spark.Spark.patch;
@@ -129,6 +132,10 @@ private void registerRoutes() {
put(ROOT_ROUTE + ID_PARAM + "/stop_times", this::updateStopTimesFromPatternStops, json::write);
delete(ROOT_ROUTE + ID_PARAM + "/trips", this::deleteTripsForPattern, json::write);
}
+
+ if ("stop".equals(classToLowercase)) {
+ delete(ROOT_ROUTE + ID_PARAM + "/cascadeDeleteStop", this::cascadeDeleteStop, json::write);
+ }
}
/**
@@ -258,6 +265,81 @@ private String deleteTripsForPattern(Request req, Response res) {
}
}
+ /**
+ * HTTP endpoint to delete a stop and all references in stop times and pattern stops given a string stop_id (i.e. not
+ * the integer ID field). Then normalize the stop times for all updated patterns (i.e. the ones where the stop has
+ * been deleted).
+ */
+ private String cascadeDeleteStop(Request req, Response res) {
+ // Table writer closes the database connection after use, so a new one is required for each task.
+ JdbcTableWriter tableWriter;
+ long startTime = System.currentTimeMillis();
+ String namespace = getNamespaceAndValidateSession(req);
+ String stopIdColumnName = "stop_id";
+
+ // NOTE: This is a string stop ID, not the integer ID that other HTTP endpoints use.
+ String stopId = req.params("id");
+ if (stopId == null) {
+ logMessageAndHalt(req, 400, "Must provide a valid stopId.");
+ }
+
+ try (
+ Connection connection = datasource.getConnection();
+ PreparedStatement statement = connection.prepareStatement(
+ String.format("select id, stop_sequence from %s.pattern_stops where %s = ?", namespace, stopIdColumnName)
+ )
+ ) {
+ // Get the patterns to be normalized before the related stop is deleted.
+ statement.setString(1, stopId);
+ ResultSet resultSet = statement.executeQuery();
+ Map<Integer, Integer> patternsToBeNormalized = new HashMap<>();
+ while (resultSet.next()) {
+ patternsToBeNormalized.put(
+ resultSet.getInt("id"),
+ resultSet.getInt("stop_sequence")
+ );
+ }
+
+ tableWriter = new JdbcTableWriter(Table.STOP_TIMES, datasource, namespace);
+ int deletedCountStopTimes = tableWriter.deleteWhere(stopIdColumnName, stopId, true);
+
+ int deletedCountPatternStops = 0;
+ if (!patternsToBeNormalized.isEmpty()) {
+ tableWriter = new JdbcTableWriter(Table.PATTERN_STOP, datasource, namespace);
+ deletedCountPatternStops = tableWriter.deleteWhere(stopIdColumnName, stopId, true);
+ if (deletedCountPatternStops > 0) {
+ for (Map.Entry<Integer, Integer> patternStop : patternsToBeNormalized.entrySet()) {
+ tableWriter = new JdbcTableWriter(Table.PATTERN_STOP, datasource, namespace);
+ int stopSequence = patternStop.getValue();
+ // Begin with the stop prior to the one deleted, unless at the beginning.
+ int beginWithSequence = (stopSequence != 0) ? stopSequence - 1 : stopSequence;
+ tableWriter.normalizeStopTimesForPattern(patternStop.getKey(), beginWithSequence);
+ }
+ }
+ }
+
+ tableWriter = new JdbcTableWriter(Table.STOPS, datasource, namespace);
+ int deletedCountStop = tableWriter.deleteWhere(stopIdColumnName, stopId, true);
+
+ return formatJSON(
+ String.format(
+ "Deleted %d stop, %d pattern stops and %d stop times.",
+ deletedCountStop,
+ deletedCountPatternStops,
+ deletedCountStopTimes),
+ OK_200
+ );
+ } catch (InvalidNamespaceException e) {
+ logMessageAndHalt(req, 400, "Invalid namespace.", e);
+ return null;
+ } catch (Exception e) {
+ logMessageAndHalt(req, 500, "Error deleting entity.", e);
+ return null;
+ } finally {
+ LOG.info("Cascade delete of stop operation took {} msec.", System.currentTimeMillis() - startTime);
+ }
+ }
+
/**
* Currently designed to delete multiple trips in a single transaction. Trip IDs should be comma-separated in a query
* parameter. TODO: Implement this for other entity types?
diff --git a/src/test/java/com/conveyal/datatools/editor/controllers/api/EditorControllerTest.java b/src/test/java/com/conveyal/datatools/editor/controllers/api/EditorControllerTest.java
index 8e5cacc19..45a658b3f 100644
--- a/src/test/java/com/conveyal/datatools/editor/controllers/api/EditorControllerTest.java
+++ b/src/test/java/com/conveyal/datatools/editor/controllers/api/EditorControllerTest.java
@@ -25,22 +25,28 @@
import spark.utils.IOUtils;
import java.io.IOException;
+import java.sql.SQLException;
import java.util.Date;
import java.util.stream.Stream;
+import static com.conveyal.datatools.TestUtils.assertThatSqlCountQueryYieldsExpectedCount;
import static com.conveyal.datatools.TestUtils.createFeedVersionFromGtfsZip;
import static com.conveyal.datatools.manager.auth.Auth0Users.USERS_API_PATH;
import static com.conveyal.datatools.manager.controllers.api.UserController.TEST_AUTH0_DOMAIN;
import static io.restassured.RestAssured.given;
+import static org.eclipse.jetty.http.HttpStatus.OK_200;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
+import static org.junit.jupiter.api.Assertions.assertEquals;
public class EditorControllerTest extends UnitTest {
private static final Logger LOG = LoggerFactory.getLogger(EditorControllerTest.class);
private static Project project;
private static FeedSource feedSource;
+ private static FeedSource feedSourceCascadeDelete;
private static FeedVersion feedVersion;
+ private static FeedVersion feedVersionCascadeDelete;
private static final ObjectMapper mapper = new ObjectMapper();
/**
@@ -55,18 +61,23 @@ public static void setUp() throws Exception {
UserController.setBaseUsersUrl("http://" + TEST_AUTH0_DOMAIN + USERS_API_PATH);
// Create a project, feed sources, and feed versions to merge.
project = new Project();
- project.name = String.format("Test %s", new Date().toString());
+ project.name = String.format("Test %s", new Date());
Persistence.projects.create(project);
+
feedSource = new FeedSource("BART");
feedSource.projectId = project.id;
Persistence.feedSources.create(feedSource);
+
+ feedSourceCascadeDelete = new FeedSource("CASCADE_DELETE");
+ feedSourceCascadeDelete.projectId = project.id;
+ Persistence.feedSources.create(feedSourceCascadeDelete);
+
feedVersion = createFeedVersionFromGtfsZip(feedSource, "bart_old.zip");
- // Create and run snapshot job
- Snapshot snapshot = new Snapshot("Snapshot of " + feedVersion.name, feedSource.id, feedVersion.namespace);
- CreateSnapshotJob createSnapshotJob =
- new CreateSnapshotJob(Auth0UserProfile.createTestAdminUser(), snapshot, true, false, false);
- // Run in current thread so tests do not run until this is complete.
- createSnapshotJob.run();
+ feedVersionCascadeDelete = createFeedVersionFromGtfsZip(feedSourceCascadeDelete, "bart_old.zip");
+
+ // Create and run snapshot jobs
+ createAndRunSnapshotJob(feedVersion.name, feedSource.id, feedVersion.namespace);
+ createAndRunSnapshotJob(feedVersionCascadeDelete.name, feedSourceCascadeDelete.id, feedVersionCascadeDelete.namespace);
LOG.info("{} setup completed in {} ms", EditorControllerTest.class.getSimpleName(), System.currentTimeMillis() - startTime);
}
@@ -74,6 +85,17 @@ public static void setUp() throws Exception {
public static void tearDown() {
project.delete();
feedSource.delete();
+ feedSourceCascadeDelete.delete();
+ }
+
+ /**
+ * Create and run a snapshot job in the current thread (so tests do not run until this is complete).
+ */
+ private static void createAndRunSnapshotJob(String feedVersionName, String feedSourceId, String namespace) {
+ Snapshot snapshot = new Snapshot("Snapshot of " + feedVersionName, feedSourceId, namespace);
+ CreateSnapshotJob createSnapshotJob =
+ new CreateSnapshotJob(Auth0UserProfile.createTestAdminUser(), snapshot, true, false, false);
+ createSnapshotJob.run();
}
private static Stream<Arguments> createPatchTableTests() {
@@ -143,6 +165,44 @@ public void canPatchStopsConditionally() throws IOException {
}
}
+ /**
+ * Test the removal of a stop and all references in stop times and pattern stops.
+ */
+ @Test
+ void canCascadeDeleteStop() throws IOException, SQLException {
+ // Get a fresh feed source so that the editor namespace was updated after snapshot.
+ FeedSource freshFeedSource = Persistence.feedSources.getById(feedVersionCascadeDelete.feedSourceId);
+ String stopId = "WARM";
+ String stopCountSql = getCountSql(freshFeedSource.editorNamespace, "stops", stopId);
+ String stopTimesCountSql = getCountSql(freshFeedSource.editorNamespace, "stop_times", stopId);
+ String patternStopsCountSql = getCountSql(freshFeedSource.editorNamespace, "pattern_stops", stopId);
+
+ // Check for presence of stopId in stops, stop times and pattern stops.
+ assertThatSqlCountQueryYieldsExpectedCount(stopCountSql, 1);
+ assertThatSqlCountQueryYieldsExpectedCount(stopTimesCountSql, 522);
+ assertThatSqlCountQueryYieldsExpectedCount(patternStopsCountSql, 4);
+
+ String path = String.format(
+ "/api/editor/secure/stop/%s/cascadeDeleteStop?feedId=%s&sessionId=test",
+ stopId,
+ feedVersionCascadeDelete.feedSourceId
+ );
+ String response = given()
+ .port(DataManager.PORT)
+ .delete(path)
+ .then()
+ .extract()
+ .response()
+ .asString();
+ JsonNode json = mapper.readTree(response);
+ assertEquals(OK_200, json.get("code").asInt());
+
+ // Check for removal of stopId in stops, stop times and pattern stops.
+ assertThatSqlCountQueryYieldsExpectedCount(stopCountSql, 0);
+ assertThatSqlCountQueryYieldsExpectedCount(stopTimesCountSql, 0);
+ assertThatSqlCountQueryYieldsExpectedCount(patternStopsCountSql, 0);
+ }
+
/**
* Perform patch table request on the feed source ID with the requested query and patch JSON. A null query will
* apply the patch JSON to the entire table.
@@ -182,4 +242,16 @@ private static JsonNode graphqlQuery (String namespace, String graphQLQueryFile)
.asString();
return mapper.readTree(graphQLString);
}
+
+ /**
+ * Build a sql statement to provide a count on the number of rows matching the stop id.
+ */
+ private static String getCountSql(String namespace, String tableName, String stopId) {
+ return String.format(
+ "SELECT count(*) FROM %s.%s WHERE stop_id = '%s'",
+ namespace,
+ tableName,
+ stopId
+ );
+ }
}