diff --git a/doc/sphinx-guides/source/api/native-api.rst b/doc/sphinx-guides/source/api/native-api.rst
index 6d33ce9f968..4af6ea6948f 100644
--- a/doc/sphinx-guides/source/api/native-api.rst
+++ b/doc/sphinx-guides/source/api/native-api.rst
@@ -1861,7 +1861,7 @@ The API call requires a Json body that includes the embargo's end date (dateAvai
 Remove an Embargo on Files in a Dataset
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-/api/datasets/$dataset-id/files/actions/:unset-embargo can be used to remove an embargo on one or more files in a dataset. Embargoes can be removed from files that are only in a draft dataset version (and are not in any previously published version) by anyone who can edit the dataset. The same API call can be used by a superuser to remove embargos from files that have already been released as part of a previously published dataset version.
+``/api/datasets/$dataset-id/files/actions/:unset-embargo`` can be used to remove an embargo on one or more files in a dataset. Embargoes can be removed from files that are only in a draft dataset version (and are not in any previously published version) by anyone who can edit the dataset. The same API call can be used by a superuser to remove embargoes from files that have already been released as part of a previously published dataset version.
 
 The API call requires a Json body that includes the list of the fileIds that the embargo should be removed from. All files listed must be in the specified dataset. For example:
@@ -1873,6 +1873,61 @@ The API call requires a Json body that includes the list of the fileIds that the
 
   export JSON='{"fileIds":[300,301]}'
 
   curl -H "X-Dataverse-key: $API_TOKEN" -H "Content-Type:application/json" "$SERVER_URL/api/datasets/:persistentId/files/actions/:unset-embargo?persistentId=$PERSISTENT_IDENTIFIER" -d "$JSON"
+
+
+Get the Archival Status of a Dataset By Version
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Archiving is an optional feature that may be configured for a Dataverse instance. When it is enabled, this API call can be used to retrieve the archival status of a dataset version. Note that this requires "superuser" credentials.
+
+``GET /api/datasets/$dataset-id/$version/archivalStatus`` returns the archival status of the specified dataset version.
+
+The response is a JSON object containing a "status", which may be "success", "pending", or "failure", and a "message", which is archive-system specific. For "success", the message should provide an identifier or link to the archival copy. For example:
+
+.. code-block:: bash
+
+  export API_TOKEN=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+  export SERVER_URL=https://demo.dataverse.org
+  export PERSISTENT_IDENTIFIER=doi:10.5072/FK2/7U7YBV
+  export VERSION=1.0
+
+  curl -H "X-Dataverse-key: $API_TOKEN" -H "Accept:application/json" "$SERVER_URL/api/datasets/:persistentId/$VERSION/archivalStatus?persistentId=$PERSISTENT_IDENTIFIER"
+
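+A successful call returns the stored status object inside the standard Dataverse JSON envelope. For example (the message value below is illustrative; its exact form depends on the archiver in use):
+
+.. code-block:: json
+
+  {
+    "status": "OK",
+    "data": {
+      "status": "success",
+      "message": "https://archive.example.edu/bags/doi-10-5072-FK2-7U7YBV_v1.0"
+    }
+  }
+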
For "success" the message should provide an identifier or link to the archival copy. For example: + +.. code-block:: bash + + export API_TOKEN=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + export SERVER_URL=https://demo.dataverse.org + export PERSISTENT_IDENTIFIER=doi:10.5072/FK2/7U7YBV + export VERSION=1.0 + export JSON='{"status":"failure","message":"Something went wrong"}' + + curl -H "X-Dataverse-key: $API_TOKEN" -H "Content-Type:application/json" -X PUT "$SERVER_URL/api/datasets/:persistentId/$VERSION/archivalStatus?persistentId=$PERSISTENT_IDENTIFIER" -d "$JSON" + +Delete the Archival Status of a Dataset By Version +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Archiving is an optional feature that may be configured for a Dataverse instance. When that is enabled, this API call be used to delete the status. Note that this is intended to be used by the archival system and requires "superuser" credentials. + +``DELETE /api/datasets/$dataset-id/$version/archivalStatus`` deletes the archival status of the specified dataset version. + +.. code-block:: bash + + export API_TOKEN=xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx + export SERVER_URL=https://demo.dataverse.org + export PERSISTENT_IDENTIFIER=doi:10.5072/FK2/7U7YBV + export VERSION=1.0 + + curl -H "X-Dataverse-key: $API_TOKEN" -X DELETE "$SERVER_URL/api/datasets/:persistentId/$VERSION/archivalStatus?persistentId=$PERSISTENT_IDENTIFIER" + Files ----- diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java b/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java index faa91b87e12..510cb2866e8 100644 --- a/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java +++ b/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java @@ -6,11 +6,11 @@ import edu.harvard.iq.dataverse.branding.BrandingUtil; import edu.harvard.iq.dataverse.dataset.DatasetUtil; import edu.harvard.iq.dataverse.license.License; -import edu.harvard.iq.dataverse.util.BundleUtil; import edu.harvard.iq.dataverse.util.FileUtil; import edu.harvard.iq.dataverse.util.StringUtil; import edu.harvard.iq.dataverse.util.SystemConfig; import edu.harvard.iq.dataverse.util.DateUtil; +import edu.harvard.iq.dataverse.util.json.JsonUtil; import edu.harvard.iq.dataverse.util.json.NullSafeJsonBuilder; import edu.harvard.iq.dataverse.workflows.WorkflowComment; import java.io.Serializable; @@ -27,6 +27,7 @@ import javax.json.Json; import javax.json.JsonArray; import javax.json.JsonArrayBuilder; +import javax.json.JsonObject; import javax.json.JsonObjectBuilder; import javax.persistence.CascadeType; import javax.persistence.Column; @@ -94,6 +95,14 @@ public enum VersionState { public static final int ARCHIVE_NOTE_MAX_LENGTH = 1000; public static final int VERSION_NOTE_MAX_LENGTH = 1000; + //Archival copies: Status message required components + public static final String ARCHIVAL_STATUS = "status"; + public static final String ARCHIVAL_STATUS_MESSAGE = "message"; + //Archival Copies: Allowed Statuses + public static final String ARCHIVAL_STATUS_PENDING = "pending"; + public static final String ARCHIVAL_STATUS_SUCCESS = "success"; + public static final String ARCHIVAL_STATUS_FAILURE = "failure"; + @Id @GeneratedValue(strategy = GenerationType.IDENTITY) private Long id; @@ -152,6 +161,11 @@ public enum VersionState { // removed pending further investigation (v4.13) private String archiveNote; + // Originally a simple string indicating the location of the archival copy. 
+
     @Id
     @GeneratedValue(strategy = GenerationType.IDENTITY)
     private Long id;
@@ -152,6 +161,11 @@ public enum VersionState {
     // removed pending further investigation (v4.13)
     private String archiveNote;
 
+    // Originally a simple string indicating the location of the archival copy. As
+    // of v5.12, repurposed to provide a more general JSON archival status ("failure",
+    // "pending", or "success") plus a message, serialized as a string. The archival
+    // copy location is now expected as the contents of the message when the status
+    // is "success". See the /api/datasets/{id}/{version}/archivalStatus API calls
+    // for more details.
     @Column(nullable=true, columnDefinition = "TEXT")
     private String archivalCopyLocation;
 
@@ -180,6 +194,8 @@ public enum VersionState {
     @Transient
     private DatasetVersionDifference dvd;
 
+    @Transient
+    private JsonObject archivalStatus;
 
     public Long getId() {
         return this.id;
@@ -319,9 +335,39 @@ public void setArchiveNote(String note) {
     public String getArchivalCopyLocation() {
         return archivalCopyLocation;
     }
+
+    public String getArchivalCopyLocationStatus() {
+        populateArchivalStatus(false);
+
+        if (archivalStatus != null) {
+            return archivalStatus.getString(ARCHIVAL_STATUS);
+        }
+        return null;
+    }
+
+    public String getArchivalCopyLocationMessage() {
+        populateArchivalStatus(false);
+        if (archivalStatus != null) {
+            return archivalStatus.getString(ARCHIVAL_STATUS_MESSAGE);
+        }
+        return null;
+    }
+
+    private void populateArchivalStatus(boolean force) {
+        if (archivalStatus == null || force) {
+            if (archivalCopyLocation != null) {
+                try {
+                    archivalStatus = JsonUtil.getJsonObject(archivalCopyLocation);
+                } catch (Exception e) {
+                    logger.warning("DatasetVersion id: " + id + " has a non-JsonObject value, parsing error: " + e.getMessage());
+                    logger.fine(archivalCopyLocation);
+                }
+            }
+        }
+    }
 
     public void setArchivalCopyLocation(String location) {
         this.archivalCopyLocation = location;
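+        // Refresh the cached transient JsonObject eagerly so that the
+        // status/message getters immediately reflect the newly stored value.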
+        populateArchivalStatus(true);
     }
 
     public String getDeaccessionLink() {
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetVersionServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DatasetVersionServiceBean.java
index 580d95b4b1d..df787ae1391 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetVersionServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetVersionServiceBean.java
@@ -1187,4 +1187,12 @@ private DatasetVersion getPreviousVersionWithUnf(DatasetVersion datasetVersion)
         return null;
     }
 
+    /**
+     * Merges the passed DatasetVersion into the persistence context.
+     * @param ver the DatasetVersion whose new state we want to persist.
+     * @return the managed entity representing {@code ver}.
+     */
+    public DatasetVersion merge(DatasetVersion ver) {
+        return em.merge(ver);
+    }
 } // end class
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java b/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java
index 153d3f266b1..04323f5cef8 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/Datasets.java
@@ -87,6 +87,7 @@
 import edu.harvard.iq.dataverse.util.json.JSONLDUtil;
 import edu.harvard.iq.dataverse.util.json.JsonLDTerm;
 import edu.harvard.iq.dataverse.util.json.JsonParseException;
+import edu.harvard.iq.dataverse.util.json.JsonUtil;
 import edu.harvard.iq.dataverse.search.IndexServiceBean;
 import static edu.harvard.iq.dataverse.util.json.JsonPrinter.*;
 import static edu.harvard.iq.dataverse.util.json.NullSafeJsonBuilder.jsonObjectBuilder;
@@ -216,6 +217,9 @@ public class Datasets extends AbstractApiBean {
     @Inject
     DataverseRoleServiceBean dataverseRoleService;
 
+    @EJB
+    DatasetVersionServiceBean datasetversionService;
+
     /**
      * Used to consolidate the way we parse and handle dataset versions.
      * @param
@@ -2259,7 +2263,7 @@ public Response completeMPUpload(String partETagBody, @QueryParam("globalid") St
                 eTagList.add(new PartETag(Integer.parseInt(partNo), object.getString(partNo)));
             }
             for(PartETag et: eTagList) {
-                logger.info("Part: " + et.getPartNumber() + " : " + et.getETag());
+                logger.fine("Part: " + et.getPartNumber() + " : " + et.getETag());
             }
         } catch (JsonException je) {
             logger.info("Unable to parse eTags from: " + partETagBody);
@@ -2524,7 +2528,7 @@ public Command handleLatestPublished() {
             if ( dsv == null || dsv.getId() == null ) {
                 throw new WrappedResponse( notFound("Dataset version " + versionNumber + " of dataset " + ds.getId() + " not found") );
             }
-            if (dsv.isReleased()) {
+            if (dsv.isReleased() && uriInfo != null) {
                 MakeDataCountLoggingServiceBean.MakeDataCountEntry entry = new MakeDataCountEntry(uriInfo, headers, dvRequestService, ds);
                 mdcLogService.logEntry(entry);
             }
@@ -3282,4 +3286,104 @@ public Response getCurationStates() throws WrappedResponse {
         csvSB.append("\n");
         return ok(csvSB.toString(), MediaType.valueOf(FileUtil.MIME_TYPE_CSV), "datasets.status.csv");
     }
+
+    // APIs to manage archival status
+
+    @GET
+    @Produces(MediaType.APPLICATION_JSON)
+    @Path("/{id}/{version}/archivalStatus")
+    public Response getDatasetVersionArchivalStatus(@PathParam("id") String datasetId,
+            @PathParam("version") String versionNumber, @Context UriInfo uriInfo, @Context HttpHeaders headers) {
+
+        try {
+            AuthenticatedUser au = findAuthenticatedUserOrDie();
+            if (!au.isSuperuser()) {
+                return error(Response.Status.FORBIDDEN, "Superusers only.");
+            }
+            DataverseRequest req = createDataverseRequest(au);
+            DatasetVersion dsv = getDatasetVersionOrDie(req, versionNumber, findDatasetOrDie(datasetId), uriInfo,
+                    headers);
+
+            if (dsv.getArchivalCopyLocation() == null) {
+                return error(Status.NO_CONTENT, "This dataset version has not been archived");
+            } else {
+                JsonObject status = JsonUtil.getJsonObject(dsv.getArchivalCopyLocation());
+                return ok(status);
+            }
+        } catch (WrappedResponse wr) {
+            return wr.getResponse();
+        }
+    }
+
+    @PUT
+    @Consumes(MediaType.APPLICATION_JSON)
+    @Path("/{id}/{version}/archivalStatus")
+    public Response setDatasetVersionArchivalStatus(@PathParam("id") String datasetId,
+            @PathParam("version") String versionNumber, JsonObject update, @Context UriInfo uriInfo,
+            @Context HttpHeaders headers) {
+
+        logger.fine(JsonUtil.prettyPrint(update));
+        try {
+            AuthenticatedUser au = findAuthenticatedUserOrDie();
+
+            if (!au.isSuperuser()) {
+                return error(Response.Status.FORBIDDEN, "Superusers only.");
+            }
+
+            if (update.containsKey(DatasetVersion.ARCHIVAL_STATUS) && update.containsKey(DatasetVersion.ARCHIVAL_STATUS_MESSAGE)) {
+                String status = update.getString(DatasetVersion.ARCHIVAL_STATUS);
+                if (status.equals(DatasetVersion.ARCHIVAL_STATUS_PENDING) || status.equals(DatasetVersion.ARCHIVAL_STATUS_FAILURE)
+                        || status.equals(DatasetVersion.ARCHIVAL_STATUS_SUCCESS)) {
+
+                    DataverseRequest req = createDataverseRequest(au);
+                    DatasetVersion dsv = getDatasetVersionOrDie(req, versionNumber, findDatasetOrDie(datasetId),
+                            uriInfo, headers);
+
+                    if (dsv == null) {
+                        return error(Status.NOT_FOUND, "Dataset version not found");
+                    }
+
+                    dsv.setArchivalCopyLocation(JsonUtil.prettyPrint(update));
+                    dsv = datasetversionService.merge(dsv);
+                    logger.fine("location now: " + dsv.getArchivalCopyLocation());
+                    logger.fine("status now: " + dsv.getArchivalCopyLocationStatus());
+                    logger.fine("message now: " + dsv.getArchivalCopyLocationMessage());
+
+                    return ok("Status updated");
+                }
+            }
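+            // Reaching this point means the request body lacked "status"/"message"
+            // or used a status other than pending/success/failure; fall through to
+            // the 400 response below.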
+ } catch (WrappedResponse wr) { + return wr.getResponse(); + } + + return error(Status.BAD_REQUEST, "Unacceptable status format"); + } + + @DELETE + @Produces(MediaType.APPLICATION_JSON) + @Path("/{id}/{version}/archivalStatus") + public Response deleteDatasetVersionArchivalStatus(@PathParam("id") String datasetId, + @PathParam("version") String versionNumber, @Context UriInfo uriInfo, @Context HttpHeaders headers) { + + try { + AuthenticatedUser au = findAuthenticatedUserOrDie(); + if (!au.isSuperuser()) { + return error(Response.Status.FORBIDDEN, "Superusers only."); + } + + DataverseRequest req = createDataverseRequest(au); + DatasetVersion dsv = getDatasetVersionOrDie(req, versionNumber, findDatasetOrDie(datasetId), uriInfo, + headers); + if (dsv == null) { + return error(Status.NOT_FOUND, "Dataset version not found"); + } + dsv.setArchivalCopyLocation(null); + dsv = datasetversionService.merge(dsv); + + return ok("Status deleted"); + + } catch (WrappedResponse wr) { + return wr.getResponse(); + } + } } diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/DuraCloudSubmitToArchiveCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/DuraCloudSubmitToArchiveCommand.java index 50667993bdc..d37d9e655b0 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/DuraCloudSubmitToArchiveCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/DuraCloudSubmitToArchiveCommand.java @@ -23,6 +23,9 @@ import java.util.Map; import java.util.logging.Logger; +import javax.json.Json; +import javax.json.JsonObjectBuilder; + import org.apache.commons.codec.binary.Hex; import org.duracloud.client.ContentStore; import org.duracloud.client.ContentStoreManager; @@ -88,6 +91,11 @@ public WorkflowStepResult performArchiveSubmission(DatasetVersion dv, ApiToken t .replace('.', '-').toLowerCase() + "_v" + dv.getFriendlyVersionNumber(); ContentStore store; + //Set a failure status that will be updated if we succeed + JsonObjectBuilder statusObject = Json.createObjectBuilder(); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS, DatasetVersion.ARCHIVAL_STATUS_FAILURE); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS_MESSAGE, "Bag not transferred"); + try { /* * If there is a failure in creating a space, it is likely that a prior version @@ -194,7 +202,9 @@ public void run() { sb.append("/duradmin/spaces/sm/"); sb.append(store.getStoreId()); sb.append("/" + spaceName + "/" + fileName); - dv.setArchivalCopyLocation(sb.toString()); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS, DatasetVersion.ARCHIVAL_STATUS_SUCCESS); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS_MESSAGE, sb.toString()); + logger.fine("DuraCloud Submission step complete: " + sb.toString()); } catch (ContentStoreException | IOException e) { // TODO Auto-generated catch block @@ -217,6 +227,9 @@ public void run() { } catch (NoSuchAlgorithmException e) { logger.severe("MD5 MessageDigest not available!"); } + finally { + dv.setArchivalCopyLocation(statusObject.build().toString()); + } } else { logger.warning( "DuraCloud Submision Workflow aborted: Dataset locked for finalizePublication, or because file validation failed"); diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/GoogleCloudSubmitToArchiveCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/GoogleCloudSubmitToArchiveCommand.java index 7eb09452abb..d93dcc9156a 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/GoogleCloudSubmitToArchiveCommand.java +++ 
b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/GoogleCloudSubmitToArchiveCommand.java @@ -23,6 +23,9 @@ import java.util.Map; import java.util.logging.Logger; +import javax.json.Json; +import javax.json.JsonObjectBuilder; + import org.apache.commons.codec.binary.Hex; import com.google.auth.oauth2.ServiceAccountCredentials; import com.google.cloud.storage.Blob; @@ -50,6 +53,11 @@ public WorkflowStepResult performArchiveSubmission(DatasetVersion dv, ApiToken t logger.fine("Project: " + projectName + " Bucket: " + bucketName); if (bucketName != null && projectName != null) { Storage storage; + //Set a failure status that will be updated if we succeed + JsonObjectBuilder statusObject = Json.createObjectBuilder(); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS, DatasetVersion.ARCHIVAL_STATUS_FAILURE); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS_MESSAGE, "Bag not transferred"); + try { FileInputStream fis = new FileInputStream(System.getProperty("dataverse.files.directory") + System.getProperty("file.separator") + "googlecloudkey.json"); storage = StorageOptions.newBuilder() @@ -157,7 +165,9 @@ public void run() { StringBuffer sb = new StringBuffer("https://console.cloud.google.com/storage/browser/"); sb.append(bucketName + "/" + spaceName); - dv.setArchivalCopyLocation(sb.toString()); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS, DatasetVersion.ARCHIVAL_STATUS_SUCCESS); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS_MESSAGE, sb.toString()); + } } else { logger.warning("GoogleCloud Submision Workflow aborted: Dataset locked for pidRegister"); @@ -169,6 +179,8 @@ public void run() { return new Failure("GoogleCloud Submission Failure", e.getLocalizedMessage() + ": check log for details"); + } finally { + dv.setArchivalCopyLocation(statusObject.build().toString()); } return WorkflowStepResult.OK; } else { diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/LocalSubmitToArchiveCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/LocalSubmitToArchiveCommand.java index b336d9a77f9..c12bdc63981 100644 --- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/LocalSubmitToArchiveCommand.java +++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/LocalSubmitToArchiveCommand.java @@ -19,6 +19,9 @@ import java.util.Map; import java.util.logging.Logger; +import javax.json.Json; +import javax.json.JsonObjectBuilder; + import java.io.File; import java.io.FileOutputStream; @@ -39,6 +42,12 @@ public WorkflowStepResult performArchiveSubmission(DatasetVersion dv, ApiToken t logger.fine("In LocalCloudSubmitToArchive..."); String localPath = requestedSettings.get(":BagItLocalPath"); String zipName = null; + + //Set a failure status that will be updated if we succeed + JsonObjectBuilder statusObject = Json.createObjectBuilder(); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS, DatasetVersion.ARCHIVAL_STATUS_FAILURE); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS_MESSAGE, "Bag not transferred"); + try { Dataset dataset = dv.getDataset(); @@ -68,7 +77,8 @@ public WorkflowStepResult performArchiveSubmission(DatasetVersion dv, ApiToken t if (srcFile.renameTo(destFile)) { logger.fine("Localhost Submission step: Content Transferred"); - dv.setArchivalCopyLocation("file://" + zipName); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS, DatasetVersion.ARCHIVAL_STATUS_SUCCESS); + statusObject.add(DatasetVersion.ARCHIVAL_STATUS_MESSAGE, "file://" + zipName); } else { logger.warning("Unable to move " + zipName + ".partial to " + 
zipName); } @@ -80,7 +90,10 @@ public WorkflowStepResult performArchiveSubmission(DatasetVersion dv, ApiToken t } catch (Exception e) { logger.warning("Failed to archive " + zipName + " : " + e.getLocalizedMessage()); e.printStackTrace(); + } finally { + dv.setArchivalCopyLocation(statusObject.build().toString()); } + return WorkflowStepResult.OK; } diff --git a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonUtil.java b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonUtil.java index ae6935945e8..f4a3c635f8b 100644 --- a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonUtil.java +++ b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonUtil.java @@ -3,6 +3,8 @@ import com.google.gson.Gson; import com.google.gson.GsonBuilder; import com.google.gson.JsonObject; + +import java.io.StringReader; import java.io.StringWriter; import java.util.HashMap; import java.util.Map; @@ -56,4 +58,9 @@ public static String prettyPrint(javax.json.JsonObject jsonObject) { return stringWriter.toString(); } + public static javax.json.JsonObject getJsonObject(String serializedJson) { + try (StringReader rdr = new StringReader(serializedJson)) { + return Json.createReader(rdr).readObject(); + } + } } diff --git a/src/main/resources/db/migration/V5.11.0.1__8605-support-archival-status.sql b/src/main/resources/db/migration/V5.11.0.1__8605-support-archival-status.sql new file mode 100644 index 00000000000..cf708ad0ea9 --- /dev/null +++ b/src/main/resources/db/migration/V5.11.0.1__8605-support-archival-status.sql @@ -0,0 +1,2 @@ +UPDATE datasetversion SET archivalCopyLocation = CONCAT('{"status":"success", "message":"', archivalCopyLocation,'"}') where archivalCopyLocation is not null and not archivalCopyLocation='Attempted'; +UPDATE datasetversion SET archivalCopyLocation = CONCAT('{"status":"failure", "message":"Attempted"}') where archivalCopyLocation='Attempted'; diff --git a/src/test/java/edu/harvard/iq/dataverse/api/DatasetsIT.java b/src/test/java/edu/harvard/iq/dataverse/api/DatasetsIT.java index 5a2197af001..4921bd882f8 100644 --- a/src/test/java/edu/harvard/iq/dataverse/api/DatasetsIT.java +++ b/src/test/java/edu/harvard/iq/dataverse/api/DatasetsIT.java @@ -57,6 +57,8 @@ import javax.json.Json; import javax.json.JsonArray; import javax.json.JsonObjectBuilder; +import javax.ws.rs.core.Response.Status; + import static javax.ws.rs.core.Response.Status.NO_CONTENT; import static javax.ws.rs.core.Response.Status.OK; import javax.xml.stream.XMLInputFactory; @@ -2809,5 +2811,108 @@ public void testRestrictFileTermsOfUseAndAccess() throws IOException { disallowRequestAccess.then().assertThat().statusCode(BAD_REQUEST.getStatusCode()); } - + + /** + * In this test we do CRUD of archivalStatus (Note this and other archiving + * related tests are part of + * https://github.com/harvard-lts/hdc-integration-tests) + * + * This test requires the root dataverse to be published to pass. 
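+     *
+     * Flow exercised below (for orientation): publish the dataset, GET the
+     * archival status (204 while unset), PUT a "pending" status, GET it back,
+     * DELETE it, and GET again to confirm it is gone.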
+ */ + @Test + public void testArchivalStatusAPI() throws IOException { + + Response createUser = UtilIT.createRandomUser(); + createUser.prettyPrint(); + assertEquals(200, createUser.getStatusCode()); + String username = UtilIT.getUsernameFromResponse(createUser); + String apiToken = UtilIT.getApiTokenFromResponse(createUser); + Response makeSuperUser = UtilIT.makeSuperUser(username); + assertEquals(200, makeSuperUser.getStatusCode()); + + Response createNoAccessUser = UtilIT.createRandomUser(); + createNoAccessUser.prettyPrint(); + String apiTokenNoAccess = UtilIT.getApiTokenFromResponse(createNoAccessUser); + + Response createDataverseResponse = UtilIT.createRandomDataverse(apiToken); + createDataverseResponse.prettyPrint(); + String dataverseAlias = UtilIT.getAliasFromResponse(createDataverseResponse); + + Response createDatasetResponse = UtilIT.createRandomDatasetViaNativeApi(dataverseAlias, apiToken); + createDatasetResponse.prettyPrint(); + Integer datasetId = JsonPath.from(createDatasetResponse.body().asString()).getInt("data.id"); + + Response getDatasetJsonBeforePublishing = UtilIT.nativeGet(datasetId, apiToken); + getDatasetJsonBeforePublishing.prettyPrint(); + String protocol = JsonPath.from(getDatasetJsonBeforePublishing.getBody().asString()).getString("data.protocol"); + String authority = JsonPath.from(getDatasetJsonBeforePublishing.getBody().asString()) + .getString("data.authority"); + String identifier = JsonPath.from(getDatasetJsonBeforePublishing.getBody().asString()) + .getString("data.identifier"); + String datasetPersistentId = protocol + ":" + authority + "/" + identifier; + + Response publishDataverse = UtilIT.publishDataverseViaSword(dataverseAlias, apiToken); + assertEquals(200, publishDataverse.getStatusCode()); + + logger.info("Attempting to publish a major version"); + // Return random sleep 9/13/2019 + // Without it we've seen some DB deadlocks + // 3 second sleep, to allow the indexing to finish: + + try { + Thread.sleep(3000l); + } catch (InterruptedException iex) { + } + + Response publishDataset = UtilIT.publishDatasetViaNativeApi(datasetPersistentId, "major", apiToken); + assertEquals(200, publishDataset.getStatusCode()); + + String pathToJsonFileSingle = "doc/sphinx-guides/source/_static/api/dataset-simple-update-metadata.json"; + Response addSubjectSingleViaNative = UtilIT.updateFieldLevelDatasetMetadataViaNative(datasetPersistentId, pathToJsonFileSingle, apiToken); + addSubjectSingleViaNative.prettyPrint(); + addSubjectSingleViaNative.then().assertThat() + .statusCode(OK.getStatusCode()); + + + // Now change the title +// Response response = UtilIT.updateDatasetJsonLDMetadata(datasetId, apiToken, +// "{\"title\": \"New Title\", \"@context\":{\"title\": \"http://purl.org/dc/terms/title\"}}", true); +// response.then().assertThat().statusCode(OK.getStatusCode()); + + int status = Status.CONFLICT.getStatusCode(); + while (status == Status.CONFLICT.getStatusCode()) { + + Response publishV2 = UtilIT.publishDatasetViaNativeApi(datasetPersistentId, "major", apiToken); + status = publishV2.thenReturn().statusCode(); + } + assertEquals(OK.getStatusCode(), status); + + if (!UtilIT.sleepForReindex(datasetPersistentId, apiToken, 3000)) { + logger.info("Still indexing after 3 seconds"); + } + + //Verify the status is empty + Response nullStatus = UtilIT.getDatasetVersionArchivalStatus(datasetId, "1.0", apiToken); + nullStatus.then().assertThat().statusCode(NO_CONTENT.getStatusCode()); + + //Set it + Response setStatus = 
UtilIT.setDatasetVersionArchivalStatus(datasetId, "1.0", apiToken, "pending", + "almost there"); + setStatus.then().assertThat().statusCode(OK.getStatusCode()); + + //Get it + Response getStatus = UtilIT.getDatasetVersionArchivalStatus(datasetId, "1.0", apiToken); + getStatus.then().assertThat().body("data.status", equalTo("pending")).body("data.message", + equalTo("almost there")); + + //Delete it + Response deleteStatus = UtilIT.deleteDatasetVersionArchivalStatus(datasetId, "1.0", apiToken); + deleteStatus.then().assertThat().statusCode(OK.getStatusCode()); + + //Make sure it's gone + Response nullStatus2 = UtilIT.getDatasetVersionArchivalStatus(datasetId, "1.0", apiToken); + nullStatus2.then().assertThat().statusCode(NO_CONTENT.getStatusCode()); + + } + } diff --git a/src/test/java/edu/harvard/iq/dataverse/api/UtilIT.java b/src/test/java/edu/harvard/iq/dataverse/api/UtilIT.java index 19b94f34db7..c791ce72f41 100644 --- a/src/test/java/edu/harvard/iq/dataverse/api/UtilIT.java +++ b/src/test/java/edu/harvard/iq/dataverse/api/UtilIT.java @@ -1194,12 +1194,20 @@ static Response publishDatasetViaSword(String persistentId, String apiToken) { } static Response publishDatasetViaNativeApi(String idOrPersistentId, String majorOrMinor, String apiToken) { + return publishDatasetViaNativeApi(idOrPersistentId, majorOrMinor, apiToken, false); + } + + static Response publishDatasetViaNativeApi(String idOrPersistentId, String majorOrMinor, String apiToken, boolean mustBeIndexed) { + String idInPath = idOrPersistentId; // Assume it's a number. String optionalQueryParam = ""; // If idOrPersistentId is a number we'll just put it in the path. - if (!NumberUtils.isNumber(idOrPersistentId)) { + if (!NumberUtils.isCreatable(idOrPersistentId)) { idInPath = ":persistentId"; optionalQueryParam = "&persistentId=" + idOrPersistentId; } + if(mustBeIndexed) { + optionalQueryParam = optionalQueryParam+"&assureIsIndexed=true"; + } RequestSpecification requestSpecification = given(); if (apiToken != null) { requestSpecification = given() @@ -2384,6 +2392,28 @@ static Boolean sleepForLock(String idOrPersistentId, String lockType, String api } + static boolean sleepForReindex(String idOrPersistentId, String apiToken, int duration) { + int i = 0; + Response timestampResponse; + do { + timestampResponse = UtilIT.getDatasetTimestamps(idOrPersistentId, apiToken); + + try { + Thread.sleep(200); + i++; + if (i > duration) { + break; + } + } catch (InterruptedException ex) { + Logger.getLogger(UtilIT.class.getName()).log(Level.SEVERE, null, ex); + } + } while (timestampResponse.body().jsonPath().getBoolean("data.hasStaleIndex")); + + return i <= duration; + + } + + //Helper function that returns true if a given search returns a non-zero response within a fixed time limit // a given duration returns false if still zero results after given duration static Boolean sleepForSearch(String searchPart, String apiToken, String subTree, int duration) { @@ -2474,6 +2504,20 @@ static Response unlockDataset(long datasetId, String lockType, String apiToken) return response; } + static Response getDatasetTimestamps(String idOrPersistentId, String apiToken) { + String idInPath = idOrPersistentId; // Assume it's a number. + String queryParams = ""; // If idOrPersistentId is a number we'll just put it in the path. 
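+        // This helper hits GET /api/datasets/{id}/timestamps; sleepForReindex
+        // above polls it and checks data.hasStaleIndex in the response.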
+ if (!NumberUtils.isCreatable(idOrPersistentId)) { + idInPath = ":persistentId"; + queryParams = "?persistentId=" + idOrPersistentId; + } + + Response response = given() + .header(API_TOKEN_HTTP_HEADER, apiToken) + .get("api/datasets/" + idInPath + "/timestamps" + queryParams); + return response; + } + static Response exportOaiSet(String setName) { String apiPath = String.format("/api/admin/metadata/exportOAI/%s", setName); return given().put(apiPath); @@ -2866,6 +2910,25 @@ static Response setDatasetCurationLabel(Integer datasetId, String apiToken, Stri .put("/api/datasets/" + datasetId + "/curationStatus?label=" + label); return response; } + + static Response getDatasetVersionArchivalStatus(Integer datasetId, String version, String apiToken) { + Response response = given() + .header(API_TOKEN_HTTP_HEADER, apiToken) + .get("/api/datasets/" + datasetId + "/" + version + "/archivalStatus"); + return response; + } + static Response setDatasetVersionArchivalStatus(Integer datasetId, String version, String apiToken, String status, String message) { + Response response = given() + .header(API_TOKEN_HTTP_HEADER, apiToken).contentType("application/json; charset=utf-8").body("{\"status\":\"" + status + "\", \"message\":\"" + message + "\"}") + .put("/api/datasets/" + datasetId + "/" + version + "/archivalStatus"); + return response; + } + static Response deleteDatasetVersionArchivalStatus(Integer datasetId, String version, String apiToken) { + Response response = given() + .header(API_TOKEN_HTTP_HEADER, apiToken) + .delete("/api/datasets/" + datasetId + "/" + version + "/archivalStatus"); + return response; + } private static DatasetField constructPrimitive(String fieldName, String value) { DatasetField field = new DatasetField();