Skip to content

Commit

Permalink
Merge branch 'ibi-group:dev' into dev
Browse files Browse the repository at this point in the history
  • Loading branch information
mvanlaar authored May 20, 2023
2 parents b629235 + c8e9c77 commit ba6f895
Show file tree
Hide file tree
Showing 3 changed files with 162 additions and 8 deletions.
2 changes: 1 addition & 1 deletion pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -268,7 +268,7 @@
<groupId>com.github.conveyal</groupId>
<artifactId>gtfs-lib</artifactId>
<!-- Latest dev build on jitpack.io -->
<version>3d79493</version>
<version>a3e5707</version>
<!-- Exclusions added in order to silence SLF4J warnings about multiple bindings:
http://www.slf4j.org/codes.html#multiple_bindings
-->
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,8 +29,10 @@
import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
Expand All @@ -41,6 +43,7 @@
import static com.conveyal.datatools.common.utils.SparkUtils.getObjectNode;
import static com.conveyal.datatools.common.utils.SparkUtils.logMessageAndHalt;
import static com.conveyal.datatools.manager.controllers.api.UserController.inTestingEnvironment;
import static org.eclipse.jetty.http.HttpStatus.OK_200;
import static spark.Spark.delete;
import static spark.Spark.options;
import static spark.Spark.patch;
Expand Down Expand Up @@ -129,6 +132,10 @@ private void registerRoutes() {
put(ROOT_ROUTE + ID_PARAM + "/stop_times", this::updateStopTimesFromPatternStops, json::write);
delete(ROOT_ROUTE + ID_PARAM + "/trips", this::deleteTripsForPattern, json::write);
}

if ("stop".equals(classToLowercase)) {
delete(ROOT_ROUTE + ID_PARAM + "/cascadeDeleteStop", this::cascadeDeleteStop, json::write);
}
}

/**
Expand Down Expand Up @@ -258,6 +265,81 @@ private String deleteTripsForPattern(Request req, Response res) {
}
}

/**
 * HTTP endpoint to delete a stop and all references in stop times and pattern stops given a string stop_id (i.e. not
 * the integer ID field). Then normalize the stop times for all updated patterns (i.e. the ones where the stop has
 * been deleted).
 *
 * @param req HTTP request whose ":id" path parameter carries the GTFS string stop_id.
 * @param res HTTP response (populated indirectly via formatJSON/logMessageAndHalt).
 * @return a JSON summary of the number of rows deleted, or null if the request was halted on error.
 */
private String cascadeDeleteStop(Request req, Response res) {
    long startTime = System.currentTimeMillis();
    String namespace = getNamespaceAndValidateSession(req);
    String stopIdColumnName = "stop_id";

    // NOTE: This is a string stop ID, not the integer ID that other HTTP endpoints use.
    String stopId = req.params("id");
    if (stopId == null) {
        logMessageAndHalt(req, 400, "Must provide a valid stopId.");
    }

    try (
        Connection connection = datasource.getConnection();
        PreparedStatement statement = connection.prepareStatement(
            String.format("select id, stop_sequence from %s.pattern_stops where %s = ?", namespace, stopIdColumnName)
        )
    ) {
        // Get the patterns to be normalized before the related stop is deleted.
        statement.setString(1, stopId);
        Map<Integer, Integer> patternsToBeNormalized = new HashMap<>();
        // Close the ResultSet deterministically rather than relying on statement close.
        try (ResultSet resultSet = statement.executeQuery()) {
            while (resultSet.next()) {
                patternsToBeNormalized.put(
                    resultSet.getInt("id"),
                    resultSet.getInt("stop_sequence")
                );
            }
        }

        // Table writer closes the database connection after use, so a new one is required for each task.
        JdbcTableWriter stopTimesWriter = new JdbcTableWriter(Table.STOP_TIMES, datasource, namespace);
        int deletedCountStopTimes = stopTimesWriter.deleteWhere(stopIdColumnName, stopId, true);

        int deletedCountPatternStops = 0;
        if (!patternsToBeNormalized.isEmpty()) {
            JdbcTableWriter patternStopWriter = new JdbcTableWriter(Table.PATTERN_STOP, datasource, namespace);
            deletedCountPatternStops = patternStopWriter.deleteWhere(stopIdColumnName, stopId, true);
            if (deletedCountPatternStops > 0) {
                for (Map.Entry<Integer, Integer> patternStop : patternsToBeNormalized.entrySet()) {
                    // A fresh writer per pattern: each normalize call consumes its own connection.
                    JdbcTableWriter normalizationWriter = new JdbcTableWriter(Table.PATTERN_STOP, datasource, namespace);
                    int stopSequence = patternStop.getValue();
                    // Begin with the stop prior to the one deleted, unless at the beginning.
                    int beginWithSequence = (stopSequence != 0) ? stopSequence - 1 : stopSequence;
                    normalizationWriter.normalizeStopTimesForPattern(patternStop.getKey(), beginWithSequence);
                }
            }
        }

        // Delete the stop itself last, after all referencing rows are gone.
        JdbcTableWriter stopsWriter = new JdbcTableWriter(Table.STOPS, datasource, namespace);
        int deletedCountStop = stopsWriter.deleteWhere(stopIdColumnName, stopId, true);

        return formatJSON(
            String.format(
                "Deleted %d stop, %d pattern stops and %d stop times.",
                deletedCountStop,
                deletedCountPatternStops,
                deletedCountStopTimes),
            OK_200
        );
    } catch (InvalidNamespaceException e) {
        logMessageAndHalt(req, 400, "Invalid namespace.", e);
        return null;
    } catch (Exception e) {
        logMessageAndHalt(req, 500, "Error deleting entity.", e);
        return null;
    } finally {
        LOG.info("Cascade delete of stop operation took {} msec.", System.currentTimeMillis() - startTime);
    }
}

/**
* Currently designed to delete multiple trips in a single transaction. Trip IDs should be comma-separated in a query
* parameter. TODO: Implement this for other entity types?
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,22 +25,28 @@
import spark.utils.IOUtils;

import java.io.IOException;
import java.sql.SQLException;
import java.util.Date;
import java.util.stream.Stream;

import static com.conveyal.datatools.TestUtils.assertThatSqlCountQueryYieldsExpectedCount;
import static com.conveyal.datatools.TestUtils.createFeedVersionFromGtfsZip;
import static com.conveyal.datatools.manager.auth.Auth0Users.USERS_API_PATH;
import static com.conveyal.datatools.manager.controllers.api.UserController.TEST_AUTH0_DOMAIN;
import static io.restassured.RestAssured.given;
import static org.eclipse.jetty.http.HttpStatus.OK_200;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.junit.jupiter.api.Assertions.assertEquals;

public class EditorControllerTest extends UnitTest {
private static final Logger LOG = LoggerFactory.getLogger(EditorControllerTest.class);
private static Project project;
private static FeedSource feedSource;
private static FeedSource feedSourceCascadeDelete;
private static FeedVersion feedVersion;
private static FeedVersion feedVersionCascadeDelete;
private static final ObjectMapper mapper = new ObjectMapper();

/**
Expand All @@ -55,25 +61,41 @@ public static void setUp() throws Exception {
UserController.setBaseUsersUrl("http://" + TEST_AUTH0_DOMAIN + USERS_API_PATH);
// Create a project, feed sources, and feed versions to merge.
project = new Project();
project.name = String.format("Test %s", new Date().toString());
project.name = String.format("Test %s", new Date());
Persistence.projects.create(project);

feedSource = new FeedSource("BART");
feedSource.projectId = project.id;
Persistence.feedSources.create(feedSource);

feedSourceCascadeDelete = new FeedSource("CASCADE_DELETE");
feedSourceCascadeDelete.projectId = project.id;
Persistence.feedSources.create(feedSourceCascadeDelete);

feedVersion = createFeedVersionFromGtfsZip(feedSource, "bart_old.zip");
// Create and run snapshot job
Snapshot snapshot = new Snapshot("Snapshot of " + feedVersion.name, feedSource.id, feedVersion.namespace);
CreateSnapshotJob createSnapshotJob =
new CreateSnapshotJob(Auth0UserProfile.createTestAdminUser(), snapshot, true, false, false);
// Run in current thread so tests do not run until this is complete.
createSnapshotJob.run();
feedVersionCascadeDelete = createFeedVersionFromGtfsZip(feedSourceCascadeDelete, "bart_old.zip");

// Create and run snapshot jobs
crateAndRunSnapshotJob(feedVersion.name, feedSource.id, feedVersion.namespace);
crateAndRunSnapshotJob(feedVersionCascadeDelete.name, feedSourceCascadeDelete.id, feedVersionCascadeDelete.namespace);
LOG.info("{} setup completed in {} ms", EditorControllerTest.class.getSimpleName(), System.currentTimeMillis() - startTime);
}

@AfterAll
public static void tearDown() {
    // Remove all entities created in setUp so tests leave no residue in the database.
    project.delete();
    feedSource.delete();
    feedSourceCascadeDelete.delete();
}

/**
 * Create a snapshot for the given feed version/source and run the snapshot job synchronously
 * in the current thread, so tests do not proceed until the snapshot is complete.
 *
 * NOTE(review): method name contains a typo ("crate" should be "create"); kept unchanged
 * for compatibility with existing callers in this file.
 */
private static void crateAndRunSnapshotJob(String feedVersionName, String feedSourceId, String namespace) {
    Auth0UserProfile adminUser = Auth0UserProfile.createTestAdminUser();
    Snapshot snapshot = new Snapshot("Snapshot of " + feedVersionName, feedSourceId, namespace);
    new CreateSnapshotJob(adminUser, snapshot, true, false, false).run();
}

private static Stream<Arguments> createPatchTableTests() {
Expand Down Expand Up @@ -143,6 +165,44 @@ public void canPatchStopsConditionally() throws IOException {
}
}

/**
 * Test the removal of a stop and all references in stop times and pattern stops.
 */
@Test
void canCascadeDeleteStop() throws IOException, SQLException {
    // Get a fresh feed source so that the editor namespace was updated after snapshot.
    FeedSource freshFeedSource = Persistence.feedSources.getById(feedVersionCascadeDelete.feedSourceId);
    String stopId = "WARM";
    String stopCountSql = getCountSql(freshFeedSource.editorNamespace, "stops", stopId);
    String stopTimesCountSql = getCountSql(freshFeedSource.editorNamespace, "stop_times", stopId);
    String patternStopsCountSql = getCountSql(freshFeedSource.editorNamespace, "pattern_stops", stopId);

    // Check for presence of stopId in stops, stop times and pattern stops.
    // The expected counts (1 stop, 522 stop times, 4 pattern stops) are specific to the
    // bart_old.zip fixture loaded in setUp; they will change if that fixture is replaced.
    assertThatSqlCountQueryYieldsExpectedCount(stopCountSql, 1);
    assertThatSqlCountQueryYieldsExpectedCount(stopTimesCountSql, 522);
    assertThatSqlCountQueryYieldsExpectedCount(patternStopsCountSql, 4);

    // Issue the cascade delete over HTTP against the running test server.
    String path = String.format(
        "/api/editor/secure/stop/%s/cascadeDeleteStop?feedId=%s&sessionId=test",
        stopId,
        feedVersionCascadeDelete.feedSourceId
    );
    String response = given()
        .port(DataManager.PORT)
        .delete(path)
        .then()
        .extract()
        .response()
        .asString();
    JsonNode json = mapper.readTree(response);
    assertEquals(OK_200, json.get("code").asInt());

    // Check for removal of stopId in stops, stop times and pattern stops.
    assertThatSqlCountQueryYieldsExpectedCount(stopCountSql, 0);
    assertThatSqlCountQueryYieldsExpectedCount(stopTimesCountSql, 0);
    assertThatSqlCountQueryYieldsExpectedCount(patternStopsCountSql, 0);
}

/**
* Perform patch table request on the feed source ID with the requested query and patch JSON. A null query will
* apply the patch JSON to the entire table.
Expand Down Expand Up @@ -182,4 +242,16 @@ private static JsonNode graphqlQuery (String namespace, String graphQLQueryFile)
.asString();
return mapper.readTree(graphQLString);
}

/**
 * Build a SQL statement that counts the number of rows in the given table matching the stop id.
 *
 * @param namespace schema namespace of the editor snapshot (trusted, produced internally).
 * @param tableName table to count rows in (trusted, supplied by the test).
 * @param stopId GTFS string stop_id to match; single quotes are doubled so an id containing
 *               a quote cannot break the statement.
 * @return a complete SELECT count(*) statement.
 */
private static String getCountSql(String namespace, String tableName, String stopId) {
    return String.format(
        "SELECT count(*) FROM %s.%s WHERE stop_id = '%s'",
        namespace,
        tableName,
        // Escape embedded single quotes per SQL standard (doubling).
        stopId.replace("'", "''")
    );
}
}

0 comments on commit ba6f895

Please sign in to comment.