diff --git a/conf/solr/4.6.0/schema.xml b/conf/solr/4.6.0/schema.xml
index 5d57509fccb..ee13003283b 100644
--- a/conf/solr/4.6.0/schema.xml
+++ b/conf/solr/4.6.0/schema.xml
@@ -123,7 +123,7 @@
-
+
@@ -167,7 +167,7 @@
-
+
@@ -251,7 +251,7 @@
-
+
@@ -266,117 +266,117 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
+
@@ -407,6 +407,8 @@
+
+
diff --git a/doc/Sphinx/source/Developers/dev-main.rst b/doc/Sphinx/source/Developers/dev-main.rst
index 4e6a6072533..1bd9b47ce3e 100644
--- a/doc/Sphinx/source/Developers/dev-main.rst
+++ b/doc/Sphinx/source/Developers/dev-main.rst
@@ -15,7 +15,7 @@ Download solr-4.6.0.tgz from http://lucene.apache.org/solr/ to any directory you
- ``cd ~/solr``
- ``tar xvfz solr-4.6.0.tgz``
- ``cd solr-4.6.0/example``
-- ``cp ~/NetBeansProjects/dataverse_temp/conf/solr/4.6.0/schema.xml solr/collection1/conf/schema.xml``
+- ``cp ~/NetBeansProjects/dataverse/conf/solr/4.6.0/schema.xml solr/collection1/conf/schema.xml``
- ``java -jar start.jar``
Please note: If you prefer, once the proper ``schema.xml`` file is in place, you can simply double-click "start.jar" rather than running ``java -jar start.jar`` from the command line.
diff --git a/doc/Sphinx/source/User/dataset-management.rst b/doc/Sphinx/source/User/dataset-management.rst
index c3553ec4163..93737478ab6 100644
--- a/doc/Sphinx/source/User/dataset-management.rst
+++ b/doc/Sphinx/source/User/dataset-management.rst
@@ -38,6 +38,6 @@ browse it or search for it. Once your Dataset is ready to go public, go to your
click on the "Unpublished" button on the right hand side of the page which should indicate:
"This Dataset is unpublished. To publish it click 'Publish Dataset' link."
-Important Note: Once a Dataset is published it cannot be unpublished.
+Important Note: Once a Dataset is published it **cannot be unpublished**; it can be archived instead.
.. |image1| image:: ./img/DatasetDiagram.png
diff --git a/doc/Sphinx/source/User/find-use-data.rst b/doc/Sphinx/source/User/find-use-data.rst
index d03a8c2c047..50885016302 100644
--- a/doc/Sphinx/source/User/find-use-data.rst
+++ b/doc/Sphinx/source/User/find-use-data.rst
@@ -1,6 +1,9 @@
Finding and Using Data
+++++++++++++++++++++++
+Finding Data
+=============
+
Without logging in to Dataverse, users can browse
Dataverse, search for dataverses, datasets, and files, view dataset descriptions and files for
public datasets, and subset, analyze and visualize data for public data
@@ -9,7 +12,7 @@ files. To view a restricted dataverse, dataset, or file, a user will need to be
A user can search the dataverses, datasets, and files within a particular dataverse by using the search bar found on a dataverse page. For example, if you are on the Murray Research Archive Dataverse page, you can search that specific dataverse's contents by using the search bar and/or facets displayed on the page.
Basic Search
-===============
+--------------
From the Dataverse homepage, you can begin searching with an exact phrase search or entering a search term or query in the search box that says, "Search this dataverse"
**Search results features**
@@ -20,15 +23,15 @@ From the Dataverse homepage, you can begin searching with an exact phrase search
- Viewing more or less: the top five results show in each facet, to view more, click on "More..." in the bottom right of a facet. Once you've chosen to see more, an option to view less will appear in the bottom left of the facet.
- Result cards: after entering a search term or query, results cards that match appear underneath the search bar and to the right of the facets.
- - Relevancy of results: each results card shows which metadata fields match the search query or term you entered into the search bar. If the search term or query was found in the title or name of the dataverse, dataset, or file, the search term or query will be bolded within it.
+ - Relevancy of results: each results card shows which metadata fields match the search query or term you entered into the search bar with the matching term or query bolded. If the search term or query was found in the title or name of the dataverse, dataset, or file, the search term or query will be bolded within it.
- Other basic search features:
- - Sorting results: search results can be sorted by name (both A-Z and Z-A), publication date, or relevancy of results. The sort button can be found about the search results, in the top right.
- - Bookmarkable URLs: search URLS can be copied and sent to a fellow researcher or can be bookmarked for you to be able to return to at a later time.
+ - Sorting results: search results can be sorted by name (A-Z or Z-A), newest or oldest, or relevancy of results. The sort button can be found above the search results, in the top right.
+ - Bookmarkable URLs: search URLs can be copied and sent to a fellow researcher or can be bookmarked for you to be able to return to at a later time.
Advanced Search
-================
+-----------------
In an advanced search, you can refine your criteria by choosing which
Metadata fields to search. You can perform an advanced search on Citation metadata fields as well as domain specific metadata fields (Social Sciences & Humanities, Astronomy & Astrophysics, and Biomedical). Additionally, you can perform an advanced search for dataverses and files.
@@ -83,11 +86,28 @@ To perform an advanced search, click the Advanced Search link next to the search
Browsing Dataverse
-===================
+--------------------
-In Dataverse, browsing happens when a user hasn't entered a search term or query into the basic search bar. Browsing is the default for a user when they are on the Dataverse homepage or a specific dataverse's page. When browsing, only Dataverses and Datasets appear in the results list and the results can be sorted by Name (both A-Z and Z-A) and Publication Date (Newest and Oldest).
+In Dataverse, browsing happens when a user hasn't entered a search term or query into the basic search bar. Browsing is the default for a user when they are on the Dataverse homepage or a specific dataverse's page. When browsing, only dataverses and datasets appear in the results list and the results can be sorted by Name (A-Z or Z-A) and by Newest or Oldest.
Additionally, a user can browse all dataverses by clicking on the triangle to the right of a dataverse's name in the breadcrumbs. A drop down menu will appear that holds a listing of all the dataverses within that dataverse. You can click the triangles next to a dataverse's name to view more dataverses within that dataverse. As you continue to browse dataverses, they will appear in the breadcrumbs.
The dataverse breadcrumbs appear on each dataverse page and can be used to navigate throughout Dataverse.
+
+Using Data
+===========
+
+View Dataverses & Datasets
+--------------------------
+
+After performing a search and finding the dataverse or dataset you are looking for, click on the name of the dataverse or dataset or on the thumbnail image to be taken to the page for that dataverse or dataset. Once on a dataverse page, you can view the dataverses, datasets, and files within that dataverse.
+
+Once on a dataset page, you will see the title, citation, description, and several other fields as well as a button to email the dataset contact. Below that information, the files, metadata, and version information for the dataset are available.
+
+Download Files
+--------------
+
+Within the Files tab on a dataset page, a user can either Explore the file using SolaFide or Download the file as tab-delimited, RData, Saved Original, or the variable metadata.
+
+
diff --git a/scripts/api/data/metadatablocks/astrophysics.tsv b/scripts/api/data/metadatablocks/astrophysics.tsv
index ab10986f87e..6557aba45ae 100644
--- a/scripts/api/data/metadatablocks/astrophysics.tsv
+++ b/scripts/api/data/metadatablocks/astrophysics.tsv
@@ -1,31 +1,31 @@
#metadataBlock name displayName
astrophysics Astronomy and Astrophysics Metadata
#datasetField name title description watermark fieldType displayOrder advancedSearchField allowControlledVocabulary allowmultiples facetable showabovefold required parent metadatablock_id
- type Type The nature or genre of the content of the files in the dataset. text 0 TRUE TRUE TRUE TRUE FALSE FALSE astrophysics
- facility Facility The observatory or facility where the data was obtained. text 1 TRUE TRUE TRUE TRUE FALSE FALSE astrophysics
- instrument Instrument The instrument used to collect the data. text 2 TRUE TRUE TRUE TRUE FALSE FALSE astrophysics
- resolution Resolution The resolution of the data object contents. float 3 FALSE FALSE TRUE FALSE FALSE FALSE astrophysics
- resolution.Spatial Spatial Resolution The spatial (angular) resolution that is typical of the observations, in decimal degrees. float 4 TRUE FALSE FALSE TRUE FALSE FALSE resolution astrophysics
- resolution.Spectral Spectral Resolution The spectral resolution that is typical of the observations, given as the ratio λ/Δλ. float 5 TRUE FALSE FALSE TRUE FALSE FALSE resolution astrophysics
- resolution.Temporal Time Resolution The temporal resolution that is typical of the observations, given in seconds. float 6 FALSE FALSE FALSE FALSE FALSE FALSE resolution astrophysics
- resolution.Redshift Redshift Resolution The resolution in redshift (unitless) or Doppler velocity (km/s) in the data object. float 7 FALSE FALSE FALSE FALSE FALSE FALSE resolution astrophysics
- coverage Coverage The extent of scope of the content of the data object. text 8 FALSE FALSE TRUE FALSE FALSE FALSE astrophysics
- coverage.Spectral.Bandpass Bandpass Conventional bandpass name text 9 TRUE TRUE FALSE TRUE FALSE FALSE coverage astrophysics
- coverage.Spectral.CentralWavelength Central Wavelength (m) The central wavelength of the spectral bandpass, in meters. float 10 TRUE FALSE FALSE TRUE FALSE FALSE coverage astrophysics
- coverage.Spectral.MinimumWavelength Minimum Wavelength (m) The minimum wavelength of the spectral bandpass, in meters. float 11 TRUE FALSE FALSE TRUE FALSE FALSE coverage astrophysics
- coverage.Spectral.MaximumWavelength Maximum Wavelength (m) The maximum wavelength of the spectral bandpass, in meters. float 12 TRUE FALSE FALSE TRUE FALSE FALSE coverage astrophysics
- coverage.Temporal.StartTime Dataset Start Date Dataset Start Date date 13 TRUE FALSE FALSE TRUE FALSE FALSE coverage astrophysics
- coverage.Temporal.StopTime Dataset End Date Dataset End Date date 14 TRUE FALSE FALSE TRUE FALSE FALSE coverage astrophysics
- coverage.Spectral Spectral Coverage The spectral coverage of the data object. text 15 FALSE FALSE FALSE FALSE FALSE FALSE coverage astrophysics
- coverage.Spatial Sky Coverage The sky coverage of the data object. text 16 FALSE FALSE FALSE FALSE FALSE FALSE coverage astrophysics
- coverage.Depth Depth Coverage The (typical) depth coverage, or sensitivity, of the data object in Jy. float 17 FALSE FALSE FALSE FALSE FALSE FALSE coverage astrophysics
- coverage.ObjectDensity Object Density The (typical) density of objects, catalog entries, telescope pointings, etc., on the sky, in number per square degree. float 18 FALSE FALSE FALSE FALSE FALSE FALSE coverage astrophysics
- coverage.ObjectCount Object Count The total number of objects, catalog entries, etc., in the data object. int 19 FALSE FALSE FALSE FALSE FALSE FALSE coverage astrophysics
- coverage.SkyFraction Fraction of Sky The fraction of the sky represented in the observations, ranging from 0 to 1. float 20 FALSE FALSE FALSE FALSE FALSE FALSE coverage astrophysics
- coverage.Polarization Polarization The polarization coverage text 21 FALSE FALSE FALSE FALSE FALSE FALSE coverage astrophysics
- coverage.Redshift.MinimumValue coverage.Redshift.MinimumValue The minimum value of the redshift (unitless) or Doppler velocity (km/s in the data object. float 22 FALSE FALSE FALSE FALSE FALSE FALSE coverage astrophysics
- coverage.Redshift.MaximumValue coverage.Redshift.MaximumValue The maximum value of the redshift (unitless) or Doppler velocity (km/s in the data object. float 23 FALSE FALSE FALSE FALSE FALSE FALSE coverage astrophysics
- redshiftType RedshiftType RedshiftType string C "Redshift"; or "Optical" or "Radio" definitions of Doppler velocity used in the data object. text 24 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ astroType Type The nature or genre of the content of the files in the dataset. text 0 TRUE TRUE TRUE TRUE FALSE FALSE astrophysics
+ astroFacility Facility The observatory or facility where the data was obtained. text 1 TRUE TRUE TRUE TRUE FALSE FALSE astrophysics
+ astroInstrument Instrument The instrument used to collect the data. text 2 TRUE TRUE TRUE TRUE FALSE FALSE astrophysics
+ resolution.Spatial Spatial Resolution The spatial (angular) resolution that is typical of the observations, in decimal degrees. float 3 TRUE FALSE FALSE TRUE FALSE FALSE astrophysics
+ resolution.Spectral Spectral Resolution The spectral resolution that is typical of the observations, given as the ratio λ/Δλ. float 4 TRUE FALSE FALSE TRUE FALSE FALSE astrophysics
+ resolution.Temporal Time Resolution The temporal resolution that is typical of the observations, given in seconds. float 5 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ coverage.Spectral.Bandpass Bandpass Conventional bandpass name text 6 TRUE TRUE FALSE TRUE FALSE FALSE astrophysics
+ coverage.Spectral.CentralWavelength Central Wavelength (m) The central wavelength of the spectral bandpass, in meters. float 7 TRUE FALSE TRUE TRUE FALSE FALSE astrophysics
+ coverage.Spectral.Wavelength Wavelength Range The minimum and maximum wavelength of the spectral bandpass. 8 FALSE FALSE TRUE FALSE FALSE FALSE astrophysics
+ coverage.Spectral.MinimumWavelength Minimum (m) The minimum wavelength of the spectral bandpass, in meters. float 9 TRUE FALSE FALSE TRUE FALSE FALSE coverage.Spectral.Wavelength astrophysics
+ coverage.Spectral.MaximumWavelength Maximum (m) The maximum wavelength of the spectral bandpass, in meters. float 10 TRUE FALSE FALSE TRUE FALSE FALSE coverage.Spectral.Wavelength astrophysics
+ coverage.Temporal Dataset Date Range Time period covered by the data. 11 TRUE FALSE FALSE FALSE FALSE FALSE astrophysics
+ coverage.Temporal.StartTime Start Dataset Start Date YYYY-MM-DD date 12 FALSE FALSE FALSE TRUE FALSE FALSE coverage.Temporal astrophysics
+ coverage.Temporal.StopTime End Dataset End Date YYYY-MM-DD date 13 FALSE FALSE FALSE TRUE FALSE FALSE coverage.Temporal astrophysics
+ coverage.Spatial Sky Coverage The sky coverage of the data object. text 14 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ coverage.Depth Depth Coverage The (typical) depth coverage, or sensitivity, of the data object in Jy. float 15 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ coverage.ObjectDensity Object Density The (typical) density of objects, catalog entries, telescope pointings, etc., on the sky, in number per square degree. float 16 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ coverage.ObjectCount Object Count The total number of objects, catalog entries, etc., in the data object. int 17 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ coverage.SkyFraction Fraction of Sky The fraction of the sky represented in the observations, ranging from 0 to 1. float 18 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ coverage.Polarization Polarization The polarization coverage text 19 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ redshiftType RedshiftType RedshiftType string C "Redshift"; or "Optical" or "Radio" definitions of Doppler velocity used in the data object. text 20 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ resolution.Redshift Redshift Resolution The resolution in redshift (unitless) or Doppler velocity (km/s) in the data object. float 21 FALSE FALSE FALSE FALSE FALSE FALSE astrophysics
+ coverage.RedshiftValue Redshift Value The value of the redshift (unitless) or Doppler velocity (km/s) in the data object. 22 FALSE FALSE TRUE FALSE FALSE FALSE astrophysics
+ coverage.Redshift.MinimumValue Minimum The minimum value of the redshift (unitless) or Doppler velocity (km/s) in the data object. float 23 FALSE FALSE FALSE FALSE FALSE FALSE coverage.RedshiftValue astrophysics
+ coverage.Redshift.MaximumValue Maximum The maximum value of the redshift (unitless) or Doppler velocity (km/s) in the data object. float 24 FALSE FALSE FALSE FALSE FALSE FALSE coverage.RedshiftValue astrophysics
ucd UCD (Unified Content Descriptors) A list of the UCDs (Unified Content Descriptors) represented in the data object. text 25 FALSE TRUE TRUE FALSE FALSE FALSE astrophysics
#controlledVocabulary DatasetField Value displayOrder
type Image 0
diff --git a/scripts/api/data/metadatablocks/general.tsv b/scripts/api/data/metadatablocks/general.tsv
index 61d2461c7bc..80409aabfb0 100644
--- a/scripts/api/data/metadatablocks/general.tsv
+++ b/scripts/api/data/metadatablocks/general.tsv
@@ -3,54 +3,50 @@
#datasetField name title description watermark fieldType displayOrder advancedSearchField allowControlledVocabulary allowmultiples facetable showabovefold required parent metadatablock_id
title Title Full title by which the Dataset is known. Enter title... text 0 TRUE FALSE FALSE FALSE TRUE TRUE citation
author Author The person(s), corporate body(ies), or agency(ies) responsible for creating the work. text 1 FALSE FALSE TRUE FALSE TRUE FALSE citation
- authorName Name Enter the author's Family Name, Given Name, or the name of the organization responsible for this dataset. FamilyName, GivenName or Organization text 2 TRUE FALSE FALSE TRUE TRUE TRUE author citation
+ authorName Name The author's Family Name, Given Name or the name of the organization responsible for this dataset. FamilyName, GivenName or Organization text 2 TRUE FALSE FALSE TRUE TRUE TRUE author citation
authorAffiliation Affiliation The organization with which the author is affiliated. text 3 TRUE TRUE FALSE TRUE TRUE FALSE author citation
- distributorContact Contact E-mail The e-mail address(es) of the contact(s) for the Dataset. text 4 FALSE FALSE TRUE FALSE TRUE TRUE citation
+ distributorContact Contact E-mail The e-mail address(es) of the contact(s) for the Dataset. This will not be displayed. text 4 FALSE FALSE TRUE FALSE TRUE TRUE citation
description Description A summary describing the purpose, nature, and scope of the Dataset. textbox 5 TRUE FALSE FALSE FALSE TRUE TRUE citation
- keyword Keyword Free-form text that describe important aspects of the Dataset. text 6 TRUE FALSE TRUE TRUE TRUE FALSE citation
- subject Subject Select any domain-specific Subject(s) that are relevant to the Dataset. text 7 TRUE TRUE TRUE TRUE TRUE TRUE citation
+ keyword Keyword Key terms that describe important aspects of the Dataset. text 6 TRUE FALSE TRUE TRUE TRUE FALSE citation
+ subject Subject Domain-specific Subjects that are topically relevant to the Dataset. text 7 TRUE TRUE TRUE TRUE TRUE TRUE citation
notesText Notes Additional important information about the Dataset textbox 8 FALSE FALSE FALSE FALSE TRUE FALSE citation
- otherId Other ID Another unique identifier that identifies this dataset (e.g., producer's or archive's number). 9 FALSE FALSE TRUE FALSE FALSE FALSE citation
+ otherId Other ID Another unique identifier that identifies this dataset (e.g., producer's or another repository's number). 9 FALSE FALSE TRUE FALSE FALSE FALSE citation
otherIdValue Identifier Other identifier that corresponds to this Dataset. text 10 FALSE FALSE FALSE FALSE FALSE FALSE otherId citation
otherIdAgency Agency Name of agency which generated this identifier. text 11 FALSE FALSE FALSE FALSE FALSE FALSE otherId citation
publication Related Publication Publications that use the data from this Dataset. 12 FALSE FALSE TRUE FALSE FALSE FALSE citation
- publicationCitation Publication Citation Provide the full bibliographic citation for this related publication. textbox 13 FALSE FALSE FALSE FALSE FALSE FALSE publication citation
+ publicationCitation Publication Citation The full bibliographic citation for this related publication. textbox 13 FALSE FALSE FALSE FALSE FALSE FALSE publication citation
publicationIDType ID Type The type of digital identifier used for this publication (e.g., Digital Object Identifier (DOI)). text 14 FALSE TRUE FALSE FALSE FALSE FALSE publication citation
- publicationIDNumber ID Number The actual identifier for the selected ID type. text 15 FALSE FALSE FALSE FALSE FALSE FALSE publication citation
- publicationURL URL Link to the publication web page (in the journal site, archive site or other). url 16 FALSE FALSE FALSE FALSE FALSE FALSE publication citation
- contributor Contributor The institution or person responsible for collecting, managing, distributing, or otherwise contributing to the development of the resource. 17 FALSE FALSE TRUE FALSE FALSE FALSE citation
- contributorType Type The type of contributor of the resource. text 18 FALSE TRUE FALSE TRUE FALSE FALSE contributor citation
- contributorName Name The name of the contributor. text 19 TRUE FALSE FALSE FALSE FALSE FALSE contributor citation
- contributorAffiliation Affiliation If applicable, the organization with which the contributor is affiliated. text 20 FALSE FALSE FALSE FALSE FALSE FALSE contributor citation
- contributorAbbreviation Abbreviation If applicable, the abbreviation by which the contributor is commonly known. (ex. IQSS, ICPSR) text 21 FALSE FALSE FALSE FALSE FALSE FALSE contributor citation
- productionDate Production Date Date when the data collection/other material(s) were produced (not distributed, released or archived). date 22 TRUE FALSE FALSE TRUE FALSE FALSE citation
+ publicationIDNumber ID Number The identifier for the selected ID type. text 15 FALSE FALSE FALSE FALSE FALSE FALSE publication citation
+ publicationURL URL Link to the publication web page (e.g., journal article page, archive record page, or other). url 16 FALSE FALSE FALSE FALSE FALSE FALSE publication citation
+ contributor Contributor The organization or person responsible for either collecting, managing, distributing, or otherwise contributing in some form to the development of the resource. 17 FALSE FALSE TRUE FALSE FALSE FALSE citation
+ contributorType Type The type of contributor of the resource. text 18 TRUE TRUE FALSE TRUE FALSE FALSE contributor citation
+ contributorName Name The Family Name, Given Name or organization name of the contributor. FamilyName, GivenName or Organization text 19 TRUE FALSE FALSE TRUE FALSE FALSE contributor citation
+ contributorAffiliation Affiliation The organization which the contributor is affiliated with. text 20 FALSE FALSE FALSE FALSE FALSE FALSE contributor citation
+ contributorAbbreviation Abbreviation The abbreviation by which the contributor's affiliation is commonly known (e.g., IQSS, ICPSR, etc). text 21 FALSE FALSE FALSE FALSE FALSE FALSE contributor citation
+ productionDate Production Date Date when the data collection or other materials were produced (not distributed, released or archived). date 22 TRUE FALSE FALSE TRUE FALSE FALSE citation
productionPlace Production Place The location where the data collection and any other related materials were produced. text 23 FALSE FALSE FALSE FALSE FALSE FALSE citation
grantNumber Grant Information Grant Information 24 FALSE FALSE TRUE FALSE FALSE FALSE citation
grantNumberValue Grant Number The grant or contract number of the project that sponsored the effort. text 25 FALSE FALSE FALSE FALSE FALSE FALSE grantNumber citation
grantNumberAgency Grant Agency Grant Number Agency text 26 FALSE TRUE FALSE FALSE FALSE FALSE grantNumber citation
- depositor Depositor Name of a person (or organization) who provided this Dataset to the archive originally storing the data. text 27 FALSE FALSE FALSE FALSE FALSE FALSE citation
+ depositor Depositor The person (Family Name, Given Name) or the name of the organization that deposited this Dataset to the repository. text 27 FALSE FALSE FALSE FALSE FALSE FALSE citation
dateOfDeposit Deposit Date Date that the Dataset was deposited into the repository. date 28 FALSE FALSE FALSE TRUE FALSE FALSE citation
relatedMaterial Related Material Any material related to this Dataset. textbox 29 FALSE FALSE TRUE FALSE FALSE FALSE citation
- relatedDatasets Related Datasets Any Datasets that are relevant to this one, such as prior research on this subject. textbox 30 FALSE FALSE TRUE FALSE FALSE FALSE citation
- otherReferences Other References Any references that would serve as background or supporting material to this Dataset. text 31 FALSE FALSE TRUE FALSE FALSE FALSE citation
- dataSources Data Sources List of books, articles, serials, or machine-readable data files, if any, that served as the sources of the data collection. text 32 FALSE FALSE TRUE FALSE FALSE FALSE citation
+ relatedDatasets Related Datasets Any Datasets that are related to this Dataset, such as previous research on this subject. textbox 30 FALSE FALSE TRUE FALSE FALSE FALSE citation
+ otherReferences Other References Any references that would serve as background or supporting material to this Dataset. text 31 FALSE FALSE TRUE FALSE FALSE FALSE citation
#controlledVocabulary DatasetField Value displayOrder
- subject Arts and Architecture 0
+ subject Arts and Humanities 0
subject Astronomy and Astrophysics 1
subject Business and Management 2
subject Chemistry 3
- subject Earth Sciences 4
+ subject Earth and Environmental Sciences 4
subject Engineering 5
- subject Environmental Sciences 6
- subject Health Sciences 7
- subject Information Technology 8
- subject Law 9
- subject Life Sciences 10
- subject Mathematical Sciences 11
- subject Medical Sciences 12
- subject Physics 13
- subject Social Sciences and Humanities 14
- subject Other 15
+ subject Medicine, Health & Life Sciences 6
+ subject Computer and Information Science 7
+ subject Law 8
+ subject Mathematical Sciences 9
+ subject Physics 10
+ subject Social Sciences 11
+ subject Other 12
publicationIDType ark 0
publicationIDType arXiv 1
publicationIDType bibcode 2
diff --git a/scripts/api/data/metadatablocks/social_science.tsv b/scripts/api/data/metadatablocks/social_science.tsv
index ea632c4a0d7..7370a02124e 100644
--- a/scripts/api/data/metadatablocks/social_science.tsv
+++ b/scripts/api/data/metadatablocks/social_science.tsv
@@ -2,50 +2,52 @@
socialscience Social Science and Humanities Metadata
#datasetField name title description watermark fieldType displayOrder advancedSearchField allowControlledVocabulary allowmultiples facetable showabovefold required parent metadatablock_id
topicClassification Topic Classification The classification field indicates the broad important topic(s) and subjects that the data cover. Library of Congress subject terms may be used here. 0 FALSE FALSE TRUE FALSE FALSE FALSE socialscience
- topicClassValue Topic Classification Topic or Subject term. text 1 TRUE FALSE FALSE TRUE FALSE FALSE topicClassification socialscience
+ topicClassValue Term Topic or Subject term that is relevant to this Dataset. text 1 TRUE FALSE FALSE TRUE FALSE FALSE topicClassification socialscience
topicClassVocab Vocabulary Provided for specification of the controlled vocabulary in use, e.g., LCSH, MeSH, etc. text 2 FALSE FALSE FALSE FALSE FALSE FALSE topicClassification socialscience
topicClassVocabURI URL Specifies the URL location for the full controlled vocabulary. url 3 FALSE FALSE FALSE FALSE FALSE FALSE topicClassification socialscience
- software Software Software 4 FALSE FALSE TRUE FALSE FALSE FALSE socialscience
- softwareName Name Name of software used to generate the study. text 5 FALSE TRUE FALSE FALSE FALSE FALSE software socialscience
- softwareVersion Version Version of the software used to generate the study. text 6 FALSE FALSE FALSE FALSE FALSE FALSE software socialscience
- series Series Series 7 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ software Software Information about the software used to generate the Dataset. 4 FALSE FALSE TRUE FALSE FALSE FALSE socialscience
+ softwareName Name Name of software used to generate the Dataset. text 5 FALSE TRUE FALSE FALSE FALSE FALSE software socialscience
+ softwareVersion Version Version of the software used to generate the Dataset. text 6 FALSE FALSE FALSE FALSE FALSE FALSE software socialscience
+ series Series Information about the codebook series. 7 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
seriesName Name Name of the codebook series to which the codebook belongs. text 8 FALSE FALSE FALSE FALSE FALSE FALSE series socialscience
seriesInformation Information History of the series and summary of those features that apply to the series as a whole. text 9 FALSE FALSE FALSE FALSE FALSE FALSE series socialscience
- timePeriodCoveredStart Time Period Covered - Start Time period to which the data refer. This item reflects the time period covered by the data, not the dates of coding or making documents machine-readable or the dates the data were collected. Also known as span. The ISO standard for dates (YYYY-MM-DD) is recommended, although this form accepts YYYY or YYY-MM as well. Inclusion of this element is recommended. date 10 TRUE FALSE FALSE TRUE FALSE FALSE socialscience
- timePeriodCoveredEnd Time Period Covered - End Time period to which the data refer. This item reflects the time period covered by the data, not the dates of coding or making documents machine-readable or the dates the data were collected. Also known as span. The ISO standard for dates (YYYY-MM-DD) is recommended, although this form accepts YYYY or YYY-MM as well. Inclusion of this element is recommended. date 11 TRUE FALSE FALSE TRUE FALSE FALSE socialscience
- dateOfCollectionStart Date of Collection - Start Dates when the data were collected. Use the event attribute to specify start, end, or single for each date entered. The ISO standard for dates (YYYY-MM-DD) is recommended, although this form will accept YYYY or YYY-MM as well. Inclusion of this element in the codebook is recommended. date 12 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- dateOfCollectionEnd Date of Collection - End Dates when the data were collected. Use the event attribute to specify start, end, or single for each date entered. The ISO standard for dates (YYYY-MM-DD) is recommended, although this form accepts YYYY or YYY-MM as well. Inclusion of this element in the codebook is recommended. date 13 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- country Country/Nation Country were the data was collected. If more than one, they can be separated by commas. text 14 TRUE FALSE FALSE TRUE FALSE FALSE socialscience
- geographicCoverage Geographic Coverage Information on the geographic coverage of the data. Inclusion of this element in the codebook is recommended. text 15 TRUE FALSE FALSE FALSE FALSE FALSE socialscience
- geographicUnit Geographic Unit Information on the geographic coverage of the data. Inclusion of this element in the codebook is recommended. text 16 TRUE FALSE FALSE FALSE FALSE FALSE socialscience
- geographicBoundingBox Geographic Bounding Box The fundamental geometric description for any data set that models geography is the geographic bounding box. It describes the minimum box, defined by west and east longitudes and north and south latitudes, which includes the largest geographic extent of the dataset's geographic coverage. This element is used in the first pass of a coordinate-based search. Inclusion of this element in the codebook is recommended, but is required if the bound polygon box is included. text 17 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- westLongitude West Longitude Westernmost coordinate delimiting the geographic extent of the dataset. A valid range of values, expressed in decimal degrees, is -180,0 <= West Bounding Longitude Value <= 180,0. text 18 FALSE FALSE FALSE FALSE FALSE FALSE geographicBoundingBox socialscience
- eastLongitude East Longitude Easternmost coordinate delimiting the geographic extent of the dataset. A valid range of values, expressed in decimal degrees, is -180,0 <= East Bounding Longitude Value <= 180,0. text 19 FALSE FALSE FALSE FALSE FALSE FALSE geographicBoundingBox socialscience
- northLongitude North Latitude Northernmost coordinate delimiting the geographic extent of the dataset. A valid range of values, expressed in decimal degrees, is -90,0 <= North Bounding Latitude Value <= 90,0. text 20 FALSE FALSE FALSE FALSE FALSE FALSE geographicBoundingBox socialscience
- southLongitude South Latitude Southernmost coordinate delimiting the geographic extent of the dataset. A valid range of values, expressed in decimal degrees, is -90,0 <= South Bounding Latitude Value <= 90,0. text 21 FALSE FALSE FALSE FALSE FALSE FALSE geographicBoundingBox socialscience
- unitOfAnalysis Unit of Analysis Basic unit of analysis or observation that this file describes, such as individuals, families/households, groups, institutions/organizations, administrative units, and more. For information about the DDI's intention to prepare a controlled vocabulary for this element, please refer to the DDI web page at http://ww.icpsr.umich.edu/DDI/codebook.html. textbox 22 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- universe Universe Description of the population covered by the data in the file; the group of people or other elements that are the object of the study and to which the study results refer. Age, nationality, and residence commonly help to delineate a given universe, but any number of other factors may be used, such as age limits, sex, marital status, race, ethnic group, nationality, income, veteran status, criminal convictions, and more. The universe may consist of elements other than persons, such as housing units, court cases, deaths, countries, and so on. In general, it should be possible to tell from the description of the universe whether a given individual or element is a member of the population under study. Also known as the universe of interest, population of interest, and target population. textbox 23 TRUE FALSE FALSE FALSE FALSE FALSE socialscience
- kindOfData Kind of Data Type of data included in the file: survey data, census/enumeration data, aggregate data, clinical data, event/transaction data, program source code, machine-readable text, administrative records data, experimental data, psychological test, textual data, coded textual, coded documents, time budget diaries, observation data/ratings, process-produced data, or other. textbox 24 TRUE FALSE FALSE FALSE FALSE FALSE socialscience
- timeMethod Time Method The time method or time dimension of the data collection, such as panel, cross-sectional, trend, time- series, or other. For information about a DDI initiative to develop a controlled vocabulary for the time method, please see the DDI web page at http://www.icpsr.umich.edu/DDI/codebook.html. text 25 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- dataCollector Data Collector Individual, agency or organization responsible for administering the questionnaire or interview or compiling the data. text 26 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- collectorTraining Collector Training Type of training provided to the data collector text 27 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- frequencyOfDataCollection Frequency If the data collected includes more than one point in time, indicate the frequency with which the data was collected; that is, monthly, quarterly, or other. text 28 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- samplingProcedure Sampling Procedure Type of sample and sample design used to select the survey respondents to represent the population. May include reference to the target sample size and the sampling fraction. textbox 29 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- sampleFrame Sample Frame Details of the sample frame. text 30 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- targetSampleSize Target Sample Size Specific information regarding the target sample size, actual sample size, and the formula used to determine this. text 31 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- deviationsFromSampleDesign Major Deviations for Sample Design "Show correspondence as well as discrepancies between the sampled units (obtained) and available statistics for the population (age, sex-ratio, marital status, and so on) as a whole. textbox 32 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- collectionMode Collection Mode Method used to collect the data; instrumentation characteristics (for example, telephone interview, mail questionnaire, or other). text 33 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- researchInstrument Type of Research Instrument Type of data collection instrument used. Structured indicates an instrument in which all respondents are asked the same questions/tests, possibly with precoded answers. If a small portion of such a questionnaire includes open-ended questions, provide appropriate comments. Semi-structured indicates that the research instrument contains mainly open-ended questions. Unstructured indicates that in-depth interviews were conducted. text 34 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- originOfSources Origin of Sources For historical materials, information about the origin of the sources and the rules followed in establishing the sources should be specified. May not be relevant to survey data. text 35 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- characteristicOfSources Characteristic of Sources Noted Assessment of characteristics and source material. text 36 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- accessToSources Documentation and Access to Sources Level of documentation of the original sources. May not be relevant to survey data. textbox 37 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- dataCollectionSituation Characteristics of Data Collection Situation Description of noteworthy aspects of the data collection situation. Includes information on factors such as cooperativeness of respondents, duration of interviews, number of call backs, or similar. text 38 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- actionsToMinimizeLoss Actions to Minimize Losses Summary of actions taken to minimize data loss. Include information on actions such as follow-up visits, supervisory checks, historical matching, estimation, and so on. text 39 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- controlOperations Control Operations Control OperationsMethods to facilitate data control performed by the primary investigator or by the data archive. text 40 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- weighting Weighting The use of sampling procedures might make it necessary to apply weights to produce accurate statistical results. Describe here the criteria for using weights in analysis of a collection. If a weighting formula or coefficient was developed, provide this formula, define its elements, and indicate how the formula is applied to the data. text 41 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- cleaningOperations Cleaning Operations Methods used to clean the data collection, such as consistency checking, wildcode checking, or other. text 42 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- datasetLevelErrorNotes Study Level Error Notes Note element used for any information annotating or clarifying the methodology and processing of the study. For the element declaration for this tag, see the generic note declaration at the end of the DTD. text 43 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- responseRate Response Rate Percentage of sample members who provided information. text 44 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- samplingErrorEstimates Estimates of Sampling Error Measure of how precisely one can estimate a population value from a given sample. text 45 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
- otherDataAppraisal Other Forms of Data Appraisal Other issues pertaining to the data appraisal. Describe issues such as response variance, nonresponse rate and testing for bias, interviewer and response bias, confidence levels, question bias, or similar. text 46 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
-#controlledVocabulary DatasetField Value displayOrder
+ timePeriodCovered Time Period Covered Time period to which the data refer. This item reflects the time period covered by the data, not the dates of coding or making documents machine-readable or the dates the data were collected. Also known as span. 10 FALSE FALSE TRUE FALSE FALSE FALSE socialscience
+ timePeriodCoveredStart Start Start date which reflects the time period covered by the data, not the dates of coding or making documents machine-readable or the dates the data were collected. YYYY-MM-DD date 11 TRUE FALSE FALSE TRUE FALSE FALSE timePeriodCovered socialscience
+ timePeriodCoveredEnd End End date which reflects the time period covered by the data, not the dates of coding or making documents machine-readable or the dates the data were collected. YYYY-MM-DD date 12 TRUE FALSE FALSE TRUE FALSE FALSE timePeriodCovered socialscience
+ dateOfCollection Date of Collection Contains the date(s) when the data were collected. 13 FALSE FALSE TRUE FALSE FALSE FALSE socialscience
+ dateOfCollectionStart Start Date when the data collection started. YYYY-MM-DD date 14 FALSE FALSE FALSE FALSE FALSE FALSE dateOfCollection socialscience
+ dateOfCollectionEnd End Date when the data collection ended. YYYY-MM-DD date 15 FALSE FALSE FALSE FALSE FALSE FALSE dateOfCollection socialscience
+ country Country/Nation Country or nation where the data was collected. If more than one, they can be separated by commas. text 16 TRUE FALSE FALSE TRUE FALSE FALSE socialscience
+ geographicCoverage Geographic Coverage Information on the geographic coverage of the data. Includes the total geographic scope of the data. text 17 TRUE FALSE FALSE FALSE FALSE FALSE socialscience
+ geographicUnit Geographic Unit Lowest level of geographic aggregation covered by the dataset (e.g., country, state, province, city, etc). text 18 TRUE FALSE FALSE FALSE FALSE FALSE socialscience
+ geographicBoundingBox Geographic Bounding Box The fundamental geometric description for any Dataset that models geography is the geographic bounding box. It describes the minimum box, defined by west and east longitudes and north and south latitudes, which includes the largest geographic extent of the Dataset's geographic coverage. This element is used in the first pass of a coordinate-based search. Inclusion of this element in the codebook is recommended, but is required if the bound polygon box is included. text 19 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ westLongitude West Longitude Westernmost coordinate delimiting the geographic extent of the dataset. A valid range of values, expressed in decimal degrees, is -180,0 <= West Bounding Longitude Value <= 180,0. text 20 FALSE FALSE FALSE FALSE FALSE FALSE geographicBoundingBox socialscience
+ eastLongitude East Longitude Easternmost coordinate delimiting the geographic extent of the dataset. A valid range of values, expressed in decimal degrees, is -180,0 <= East Bounding Longitude Value <= 180,0. text 21 FALSE FALSE FALSE FALSE FALSE FALSE geographicBoundingBox socialscience
+ northLongitude North Latitude Northernmost coordinate delimiting the geographic extent of the dataset. A valid range of values, expressed in decimal degrees, is -90,0 <= North Bounding Latitude Value <= 90,0. text 22 FALSE FALSE FALSE FALSE FALSE FALSE geographicBoundingBox socialscience
+ southLongitude South Latitude Southernmost coordinate delimiting the geographic extent of the dataset. A valid range of values, expressed in decimal degrees, is -90,0 <= South Bounding Latitude Value <= 90,0. text 23 FALSE FALSE FALSE FALSE FALSE FALSE geographicBoundingBox socialscience
+ unitOfAnalysis Unit of Analysis Basic unit of analysis or observation that this Dataset describes, such as individuals, families/households, groups, institutions/organizations, administrative units, and more. For information about the DDI's controlled vocabulary for this element, please refer to the DDI web page at http://www.ddialliance.org/Specification/DDI-CV/. textbox 24 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ universe Universe Description of the population covered by the data in the file; the group of people or other elements that are the object of the study and to which the study results refer. Age, nationality, and residence commonly help to delineate a given universe, but any number of other factors may be used, such as age limits, sex, marital status, race, ethnic group, nationality, income, veteran status, criminal convictions, and more. The universe may consist of elements other than persons, such as housing units, court cases, deaths, countries, and so on. In general, it should be possible to tell from the description of the universe whether a given individual or element is a member of the population under study. Also known as the universe of interest, population of interest, and target population. textbox 25 TRUE FALSE FALSE FALSE FALSE FALSE socialscience
+ kindOfData Kind of Data Type of data included in the file: survey data, census/enumeration data, aggregate data, clinical data, event/transaction data, program source code, machine-readable text, administrative records data, experimental data, psychological test, textual data, coded textual, coded documents, time budget diaries, observation data/ratings, process-produced data, or other. textbox 26 TRUE FALSE FALSE FALSE FALSE FALSE socialscience
+ timeMethod Time Method The time method or time dimension of the data collection, such as panel, cross-sectional, trend, time-series, or other. text 27 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ collectorTraining Collector Training Type of training provided to the data collector. text 28 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ frequencyOfDataCollection Frequency If the data collected includes more than one point in time, indicate the frequency with which the data was collected; that is, monthly, quarterly, or other. text 29 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ samplingProcedure Sampling Procedure Type of sample and sample design used to select the survey respondents to represent the population. May include reference to the target sample size and the sampling fraction. textbox 30 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ sampleFrame Sample Frame Details of the sample frame. text 31 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ targetSampleSize Target Sample Size Specific information regarding the target sample size, actual sample size, and the formula used to determine this. text 32 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ deviationsFromSampleDesign Major Deviations for Sample Design Show correspondence as well as discrepancies between the sampled units (obtained) and available statistics for the population (age, sex-ratio, marital status, etc.) as a whole. text 33 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ collectionMode Collection Mode Method used to collect the data; instrumentation characteristics (e.g., telephone interview, mail questionnaire, or other). text 34 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ researchInstrument Type of Research Instrument Type of data collection instrument used. Structured indicates an instrument in which all respondents are asked the same questions/tests, possibly with precoded answers. If a small portion of such a questionnaire includes open-ended questions, provide appropriate comments. Semi-structured indicates that the research instrument contains mainly open-ended questions. Unstructured indicates that in-depth interviews were conducted. text 35 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ dataSources Data Sources List of books, articles, serials, or machine-readable data files that served as the sources of the data collection. text 36 FALSE FALSE TRUE FALSE FALSE FALSE socialscience
+ originOfSources Origin of Sources For historical materials, information about the origin of the sources and the rules followed in establishing the sources should be specified. May not be relevant to survey data. text 37 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ characteristicOfSources Characteristic of Sources Noted Assessment of characteristics and source material. text 38 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ accessToSources Documentation and Access to Sources Level of documentation of the original sources. May not be relevant to survey data. textbox 39 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ dataCollectionSituation Characteristics of Data Collection Situation Description of noteworthy aspects of the data collection situation. Includes information on factors such as cooperativeness of respondents, duration of interviews, number of call backs, or similar. text 40 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ actionsToMinimizeLoss Actions to Minimize Losses Summary of actions taken to minimize data loss. Include information on actions such as follow-up visits, supervisory checks, historical matching, estimation, and so on. text 41 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ controlOperations Control Operations Methods to facilitate data control performed by the primary investigator or by the data archive. text 42 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ weighting Weighting The use of sampling procedures might make it necessary to apply weights to produce accurate statistical results. Describes the criteria for using weights in analysis of a collection. If a weighting formula or coefficient was developed, the formula is provided, its elements are defined, and it is indicated how the formula was applied to the data. text 43 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ cleaningOperations Cleaning Operations Methods used to clean the data collection, such as consistency checking, wildcode checking, or other. text 44 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ datasetLevelErrorNotes Study Level Error Notes Note element used for any information annotating or clarifying the methodology and processing of the study. text 45 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ responseRate Response Rate Percentage of sample members who provided information. text 46 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ samplingErrorEstimates Estimates of Sampling Error Measure of how precisely one can estimate a population value from a given sample. text 47 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+ otherDataAppraisal Other Forms of Data Appraisal Other issues pertaining to the data appraisal. Describe issues such as response variance, nonresponse rate and testing for bias, interviewer and response bias, confidence levels, question bias, or similar. text 48 FALSE FALSE FALSE FALSE FALSE FALSE socialscience
+#controlledVocabulary DatasetField Value displayOrder
\ No newline at end of file
diff --git a/scripts/database/homebrew/run-reference_data.sql b/scripts/database/homebrew/run-reference_data.sql
index 389c812b414..99fa05b476b 100755
--- a/scripts/database/homebrew/run-reference_data.sql
+++ b/scripts/database/homebrew/run-reference_data.sql
@@ -1,2 +1,2 @@
#!/bin/sh
-~/.homebrew/bin/psql dataverse_db -f $HOME/NetBeansProjects/dataverse_temp/scripts/database/reference_data.sql
+~/.homebrew/bin/psql dataverse_db -f $HOME/NetBeansProjects/dataverse/scripts/database/reference_data.sql
diff --git a/scripts/search/populate b/scripts/search/populate
index 71bb4a21dbc..77b0a0fa976 100755
--- a/scripts/search/populate
+++ b/scripts/search/populate
@@ -1,10 +1,27 @@
#!/bin/bash
DVDIR='data/in/dataverses'
-DSDIR='data/in/datasets'
-FILESDIR='data/in/files'
+DVDIR_ROOT='data/in/dataverses.root'
+DVDIR_BIRDS='data/in/dataverses.birds'
+DVDIR_TREES='data/in/dataverses.trees'
+#DSDIR='data/in/datasets'
+#FILESDIR='data/in/files'
+#mkdir -p $DSDIR
+#mkdir -p $FILESDIR
+rm -rf data/in
mkdir -p $DVDIR
-mkdir -p $DSDIR
-mkdir -p $FILESDIR
+mkdir -p $DVDIR_ROOT
+mkdir -p $DVDIR_BIRDS
+mkdir -p $DVDIR_TREES
count=1; ./tab2json dv.tsv | while read i; do echo $i | python -m json.tool > $DVDIR/$count; let count++; done
-count=1; ./tab2json ds.tsv | while read i; do echo $i | python -m json.tool > $DSDIR/$count; let count++; done
+rm $DVDIR/1
+mv $DVDIR/2 $DVDIR_ROOT/2
+mv $DVDIR/3 $DVDIR_ROOT/3
+mv $DVDIR/4 $DVDIR_BIRDS/4
+mv $DVDIR/5 $DVDIR_BIRDS/5
+mv $DVDIR/6 $DVDIR_BIRDS/6
+mv $DVDIR/7 $DVDIR_TREES/7
+rm $DVDIR/8
+mv $DVDIR/9 $DVDIR_TREES/9
+rmdir $DVDIR
+#count=1; ./tab2json ds.tsv | while read i; do echo $i | python -m json.tool > $DSDIR/$count; let count++; done
#count=1; ./tab2json files.tsv | while read i; do echo $i | python -m json.tool > $FILESDIR/$count; let count++; done
diff --git a/scripts/search/tests/dataset-versioning02 b/scripts/search/tests/dataset-versioning02
index 2074e260027..a2523b74dca 100755
--- a/scripts/search/tests/dataset-versioning02
+++ b/scripts/search/tests/dataset-versioning02
@@ -2,9 +2,11 @@
# We assume you've done everything in scripts/search/tests/dataset-versioning01
#
# We assume you just released your dataset for the first time.
+#
+# To do this, you must also release the Trees and Root dataverses
# raw version
#diff <(curl -s 'http://localhost:8080/api/datasets/17/versions/1?key=pete') scripts/search/tests/expected/dataset-versioning02raw
-# anon can now see the dataset and file
+# anon can now see the dataset and file and parents
diff -u <(curl -s 'http://localhost:8080/api/search?q=trees&showrelevance=true') scripts/search/tests/expected/dataset-versioning02anon
# here's the solr doc for the dataset
#diff -u scripts/search/tests/expected/dataset-versioning02dataset_17solr <(curl -s 'http://localhost:8983/solr/collection1/select?rows=100&wt=json&indent=true&q=id:dataset_17') | egrep -v '_version_|release_or_create_date_dt'
diff --git a/scripts/search/tests/dataset-versioning03 b/scripts/search/tests/dataset-versioning03
index 6af6074bdfb..7b341ab4d38 100755
--- a/scripts/search/tests/dataset-versioning03
+++ b/scripts/search/tests/dataset-versioning03
@@ -8,10 +8,6 @@
#
# anon should be able to see the published 1.0 version but not the new draft (no change from dataset-versioning02anon)
diff -u <(curl -s 'http://localhost:8080/api/search?q=trees&showrelevance=true') scripts/search/tests/expected/dataset-versioning02anon
-# pete should be able to see the published version 1.0 with published=true (same as anon)
-diff -u <(curl -s 'http://localhost:8080/api/search?q=trees&key=pete&published=true') scripts/search/tests/expected/dataset-versioning03pete-published-only
-# pete should be able to see the newer draft version with unpublished=true
-diff -u <(curl -s 'http://localhost:8080/api/search?q=trees&key=pete&unpublished=true') scripts/search/tests/expected/dataset-versioning03pete-unpublished-only
# pete should be able to see both published and unpublished by not specifying either
diff -u <(curl -s 'http://localhost:8080/api/search?q=trees&key=pete') scripts/search/tests/expected/dataset-versioning03pete-both
# here's the solr doc for the dataset
diff --git a/scripts/search/tests/expected/dataset-versioning02anon b/scripts/search/tests/expected/dataset-versioning02anon
index eff0685ddbe..9517f19a28a 100644
--- a/scripts/search/tests/expected/dataset-versioning02anon
+++ b/scripts/search/tests/expected/dataset-versioning02anon
@@ -3,10 +3,10 @@
"q":"trees",
"fq_provided":"[]",
"fq_actual":"[{!join from=groups_s to=perms_ss}id:group_public]",
- "total_count":2,
+ "total_count":3,
"start":0,
- "count_in_response":2,
- "items":"[datafile_18:trees.png:18, dataset_17:Rings of Trees and Other Observations:17]",
+ "count_in_response":3,
+ "items":"[datafile_18:trees.png:18, dataset_17:Rings of Trees and Other Observations:17, dataverse_11:Trees:11]",
"relevance":[
{
"id":"datafile_18",
@@ -59,6 +59,17 @@
]
}
]
+ },
+ {
+ "id":"dataverse_11",
+ "matched_fields":"[name]",
+ "detailsArray":[
+ {
+ "name":[
+ "Trees"
+ ]
+ }
+ ]
}
]
}
\ No newline at end of file
diff --git a/scripts/search/tests/expected/dataset-versioning03pete-published-only b/scripts/search/tests/expected/dataset-versioning03pete-published-only
deleted file mode 100644
index 73ec649cd85..00000000000
--- a/scripts/search/tests/expected/dataset-versioning03pete-published-only
+++ /dev/null
@@ -1,10 +0,0 @@
-
-{
- "q":"trees",
- "fq_provided":"[published_s:Published]",
- "fq_actual":"[published_s:Published, ({!join from=groups_s to=perms_ss}id:group_public OR {!join from=groups_s to=perms_ss}id:group_user1)]",
- "total_count":2,
- "start":0,
- "count_in_response":2,
- "items":"[datafile_18:trees.png:18, dataset_17:Rings of Trees and Other Observations:17]"
-}
\ No newline at end of file
diff --git a/scripts/search/tests/expected/dataset-versioning03pete-unpublished-only b/scripts/search/tests/expected/dataset-versioning03pete-unpublished-only
deleted file mode 100644
index c4aef9e1f5c..00000000000
--- a/scripts/search/tests/expected/dataset-versioning03pete-unpublished-only
+++ /dev/null
@@ -1,10 +0,0 @@
-
-{
- "q":"trees",
- "fq_provided":"[published_s:Unpublished]",
- "fq_actual":"[published_s:Unpublished, ({!join from=groups_s to=perms_ss}id:group_public OR {!join from=groups_s to=perms_ss}id:group_user1)]",
- "total_count":4,
- "start":0,
- "count_in_response":4,
- "items":"[dataset_17_draft:Rings of Conifers and Other Observations:17, dataverse_10:Birds:10, dataverse_11:Trees:11, dataverse_16:Chestnut Trees:16]"
-}
\ No newline at end of file
diff --git a/scripts/search/tests/expected/highlighting-nick-trees b/scripts/search/tests/expected/highlighting-nick-trees
index 24a3dddd267..aa322b16d16 100644
--- a/scripts/search/tests/expected/highlighting-nick-trees
+++ b/scripts/search/tests/expected/highlighting-nick-trees
@@ -10,18 +10,23 @@
"relevance":[
{
"id":"datafile_18",
- "matched_fields":"[description]",
+ "matched_fields":"[description, filename_without_extension_en]",
"detailsArray":[
{
"description":[
"Trees are lovely."
]
+ },
+ {
+ "filename_without_extension_en":[
+ "trees"
+ ]
}
]
},
{
"id":"dataset_17_draft",
- "matched_fields":"[title, citation_t, description, notesText, authorAffiliation, keyword, contributorName]",
+ "matched_fields":"[title, citation_t, description, notesText, authorAffiliation, authorName, keyword, contributorName]",
"detailsArray":[
{
"title":[
@@ -30,7 +35,7 @@
},
{
"citation_t":[
- "Tree, Tony, 2014, \"Rings of Trees and Other Observations\", http://dx.doi.org/10.5072/FK2/17, Root"
+ "Tree, Tony, 2014, \"Rings of Trees and Other Observations\", http://dx.doi.org/10.5072/FK2/17, Root"
]
},
{
@@ -48,6 +53,11 @@
"Trees Inc."
]
},
+ {
+ "authorName":[
+ "Tree, Tony"
+ ]
+ },
{
"keyword":[
"trees"
@@ -73,8 +83,13 @@
},
{
"id":"dataverse_11",
- "matched_fields":"[name]",
+ "matched_fields":"[description, name]",
"detailsArray":[
+ {
+ "description":[
+ "A tree dataverse with some birds"
+ ]
+ },
{
"name":[
"Trees"
diff --git a/scripts/search/tests/expected/highlighting-pete-trees b/scripts/search/tests/expected/highlighting-pete-trees
index 622a008212e..a9aa8680150 100644
--- a/scripts/search/tests/expected/highlighting-pete-trees
+++ b/scripts/search/tests/expected/highlighting-pete-trees
@@ -10,18 +10,23 @@
"relevance":[
{
"id":"datafile_18",
- "matched_fields":"[description]",
+ "matched_fields":"[description, filename_without_extension_en]",
"detailsArray":[
{
"description":[
"Trees are lovely."
]
+ },
+ {
+ "filename_without_extension_en":[
+ "trees"
+ ]
}
]
},
{
"id":"dataset_17_draft",
- "matched_fields":"[title, citation_t, description, notesText, authorAffiliation, keyword, contributorName]",
+ "matched_fields":"[title, citation_t, description, notesText, authorAffiliation, authorName, keyword, contributorName]",
"detailsArray":[
{
"title":[
@@ -30,7 +35,7 @@
},
{
"citation_t":[
- "Tree, Tony, 2014, \"Rings of Trees and Other Observations\", http://dx.doi.org/10.5072/FK2/17, Root"
+ "Tree, Tony, 2014, \"Rings of Trees and Other Observations\", http://dx.doi.org/10.5072/FK2/17, Root"
]
},
{
@@ -48,6 +53,11 @@
"Trees Inc."
]
},
+ {
+ "authorName":[
+ "Tree, Tony"
+ ]
+ },
{
"keyword":[
"trees"
@@ -73,8 +83,13 @@
},
{
"id":"dataverse_11",
- "matched_fields":"[name]",
+ "matched_fields":"[description, name]",
"detailsArray":[
+ {
+ "description":[
+ "A tree dataverse with some birds"
+ ]
+ },
{
"name":[
"Trees"
diff --git a/scripts/search/tests/highlighting b/scripts/search/tests/highlighting
index 8393cd80b46..af3c19c97bf 100755
--- a/scripts/search/tests/highlighting
+++ b/scripts/search/tests/highlighting
@@ -3,6 +3,7 @@
# We assume you've added the bird and tree dataverses with this:
#
#cd scripts/search
+#./populate
#./create
#exit
#
diff --git a/src/main/java/edu/harvard/iq/dataverse/AdvancedSearchPage.java b/src/main/java/edu/harvard/iq/dataverse/AdvancedSearchPage.java
index 9385364aa6d..6d22ddff854 100644
--- a/src/main/java/edu/harvard/iq/dataverse/AdvancedSearchPage.java
+++ b/src/main/java/edu/harvard/iq/dataverse/AdvancedSearchPage.java
@@ -24,7 +24,6 @@ public class AdvancedSearchPage {
DatasetFieldServiceBean datasetFieldService;
private Dataverse dataverse;
- private String query;
private List metadataBlocks;
private Map> metadataFieldMap = new HashMap();
private List metadataFieldList;
@@ -33,7 +32,7 @@ public class AdvancedSearchPage {
public void init() {
/**
* @todo: support advanced search at any depth in the dataverse
- * hierarchy
+ * hierarchy https://redmine.hmdc.harvard.edu/issues/3894
*/
this.dataverse = dataverseServiceBean.findRootDataverse();
this.metadataBlocks = dataverseServiceBean.findAllMetadataBlocks();
@@ -69,7 +68,6 @@ public String find() throws IOException {
}
logger.info("query: " + query); */
StringBuilder queryBuilder = new StringBuilder();
- queryBuilder.append(query.trim());
String delimiter = "[\"]+";
for (DatasetFieldType dsfType : metadataFieldList) {
@@ -128,16 +126,6 @@ public void setDataverse(Dataverse dataverse) {
this.dataverse = dataverse;
}
- public String getQuery() {
- return query;
- }
-
- public void setQuery(String query) {
- this.query = query;
-
- }
-
-
public List getMetadataBlocks() {
return metadataBlocks;
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataFile.java b/src/main/java/edu/harvard/iq/dataverse/DataFile.java
index a3c812b7f03..c38a0e42ce1 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataFile.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataFile.java
@@ -25,7 +25,6 @@ public class DataFile extends DvObject {
private static final char INGEST_STATUS_INPROGRESS = 67;
private static final char INGEST_STATUS_ERROR = 68;
- @NotBlank
private String name;
@NotBlank
@@ -56,6 +55,16 @@ public DataFile() {
fileMetadataFieldValues = new ArrayList<>();
}
+ public DataFile(String contentType) {
+ this.contentType = contentType;
+ this.fileMetadatas = new ArrayList<>();
+ fileMetadataFieldValues = new ArrayList<>();
+ }
+
+ // The dvObject field "name" should not be used in
+ // datafile objects.
+ // The file name must be stored in the file metadata.
+ @Deprecated
public DataFile(String name, String contentType) {
this.name = name;
this.contentType = contentType;
@@ -131,11 +140,16 @@ public String getOriginalFormatLabel() {
return null;
}
-
+
+ // The dvObject field "name" should not be used in
+ // datafile objects.
+ // The file name must be stored in the file metadata.
+ @Deprecated
public String getName() {
return name;
}
+ @Deprecated
public void setName(String name) {
this.name = name;
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/Dataset.java b/src/main/java/edu/harvard/iq/dataverse/Dataset.java
index 18187bcee4f..4c4d6a972b3 100644
--- a/src/main/java/edu/harvard/iq/dataverse/Dataset.java
+++ b/src/main/java/edu/harvard/iq/dataverse/Dataset.java
@@ -30,10 +30,6 @@ public class Dataset extends DvObjectContainer {
private static final long serialVersionUID = 1L;
- // #VALIDATION: page defines maxlength in input:textarea component
- @Size(max = 1000, message = "Description must be at most 1000 characters.")
- private String description;
-
@OneToMany(mappedBy = "owner", cascade = CascadeType.MERGE)
private List files = new ArrayList();
@@ -77,14 +73,6 @@ public void setIdentifier(String identifier) {
this.identifier = identifier;
}
- public String getDescription() {
- return description;
- }
-
- public void setDescription(String description) {
- this.description = description;
- }
-
public String getPersistentURL() {
if (this.getProtocol().equals("hdl")) {
return getHandleURL();
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetDistributor.java b/src/main/java/edu/harvard/iq/dataverse/DatasetDistributor.java
index bd6e3f265e1..00936b9365a 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetDistributor.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetDistributor.java
@@ -6,6 +6,7 @@
package edu.harvard.iq.dataverse;
+import java.util.Comparator;
import javax.persistence.Version;
/**
@@ -13,7 +14,14 @@
* @author skraffmiller
*/
public class DatasetDistributor {
-
+
+ public static Comparator DisplayOrder = new Comparator() {
+ @Override
+ public int compare(DatasetDistributor o1, DatasetDistributor o2) {
+ return o1.getDisplayOrder()-o2.getDisplayOrder();
+ }
+ };
+
/** Creates a new instance of DatasetDistributor */
public DatasetDistributor() {
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldType.java b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldType.java
index 061eda80726..39a29a2a871 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetFieldType.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetFieldType.java
@@ -341,9 +341,9 @@ public String getDisplayName() {
}
public SolrField getSolrField() {
- SolrField.SolrType solrType2 = SolrField.SolrType.TEXT_GENERAL;
+ SolrField.SolrType solrType = SolrField.SolrType.TEXT_EN;
if (fieldType != null) {
- solrType2 = fieldType.equals("date") ? SolrField.SolrType.INTEGER : SolrField.SolrType.TEXT_GENERAL;
+ solrType = fieldType.equals("date") ? SolrField.SolrType.INTEGER : SolrField.SolrType.TEXT_EN;
Boolean parentAllowsMultiplesBoolean = false;
if (isHasParent()) {
@@ -355,13 +355,13 @@ public SolrField getSolrField() {
boolean makeSolrFieldMultivalued;
// http://stackoverflow.com/questions/5800762/what-is-the-use-of-multivalued-field-type-in-solr
- if (solrType2 == SolrField.SolrType.TEXT_GENERAL) {
+ if (solrType == SolrField.SolrType.TEXT_EN) {
makeSolrFieldMultivalued = (allowMultiples || parentAllowsMultiplesBoolean);
} else {
makeSolrFieldMultivalued = false;
}
- return new SolrField(name, solrType2, makeSolrFieldMultivalued, facetable);
+ return new SolrField(name, solrType, makeSolrFieldMultivalued, facetable);
} else {
/**
@@ -369,8 +369,8 @@ public SolrField getSolrField() {
*/
String oddValue = name + getTmpNullFieldTypeIdentifier();
boolean makeSolrFieldMultivalued = false;
- SolrField solrField2 = new SolrField(oddValue, solrType2, makeSolrFieldMultivalued, facetable);
- return solrField2;
+ SolrField solrField = new SolrField(oddValue, solrType, makeSolrFieldMultivalued, facetable);
+ return solrField;
}
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java b/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java
index 34c5f0d6ccc..d9d58b7e96d 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetPage.java
@@ -312,11 +312,11 @@ public void refresh(ActionEvent e) {
DataFile dataFile = fileMetadata.getDataFile();
// and see if any are marked as "ingest-in-progress":
if (dataFile.isIngestInProgress()) {
- Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Refreshing the status of the file " + dataFile.getName() + "...");
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Refreshing the status of the file " + fileMetadata.getLabel() + "...");
// and if so, reload the file object from the database...
dataFile = datafileService.find(dataFile.getId());
if (!dataFile.isIngestInProgress()) {
- Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "File " + dataFile.getName() + " finished ingesting.");
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "File " + fileMetadata.getLabel() + " finished ingesting.");
// and, if the status has changed - i.e., if the ingest has
// completed, or failed, update the object in the list of
// files visible to the page:
@@ -337,22 +337,6 @@ public String save() {
if (replicationFor) {
updateTitle();
}
- /*
- * The code below was likely added before real versioning has been
- * added to the application. It shouldn't be necessary anymore.
- * -- L.A.
- if (!(dataset.getVersions().get(0).getFileMetadatas() == null) && !dataset.getVersions().get(0).getFileMetadatas().isEmpty()) {
- int fmdIndex = 0;
- for (FileMetadata fmd : dataset.getVersions().get(0).getFileMetadatas()) {
- for (FileMetadata fmdTest : editVersion.getFileMetadatas()) {
- if (fmd.equals(fmdTest)) {
- dataset.getVersions().get(0).getFileMetadatas().get(fmdIndex).setDataFile(fmdTest.getDataFile());
- }
- }
- fmdIndex++;
- }
- }
- */
/*
* Save and/or ingest files, if there are any:
@@ -374,6 +358,15 @@ public String save() {
if (dataset.getFileSystemDirectory() != null && Files.exists(dataset.getFileSystemDirectory())) {
for (DataFile dFile : newFiles) {
String tempFileLocation = getFilesTempDirectory() + "/" + dFile.getFileSystemName();
+
+ // These are all brand new files, so they should all have
+ // one filemetadata total. You do NOT want to use
+ // getLatestFilemetadata() here - because it relies on
+ // comparing the object IDs of the corresponding datasetversions...
+ // Which may not have been persisted yet.
+ // -- L.A. 4.0 beta.
+ FileMetadata fileMetadata = dFile.getFileMetadatas().get(0);
+ String fileName = fileMetadata.getLabel();
//boolean ingestedAsTabular = false;
boolean metadataExtracted = false;
@@ -397,12 +390,12 @@ public String save() {
dFile.setContentType("application/fits");
metadataExtracted = ingestService.extractIndexableMetadata(tempFileLocation, dFile, editVersion);
} catch (IOException mex) {
- Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Caught exception trying to extract indexable metadata from file " + dFile.getName(), mex);
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.SEVERE, "Caught exception trying to extract indexable metadata from file " + fileName, mex);
}
if (metadataExtracted) {
- Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Successfully extracted indexable metadata from file " + dFile.getName());
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Successfully extracted indexable metadata from file " + fileName);
} else {
- Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Failed to extract indexable metadata from file " + dFile.getName());
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Failed to extract indexable metadata from file " + fileName);
}
}
@@ -417,7 +410,7 @@ public String save() {
try {
dFile.setmd5(md5Checksum.CalculateMD5(dFile.getFileSystemLocation().toString()));
} catch (Exception md5ex) {
- Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Could not calculate MD5 signature for the new file " + dFile.getName());
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Could not calculate MD5 signature for the new file " + fileName);
}
} catch (IOException ioex) {
@@ -460,12 +453,10 @@ public String save() {
editMode = null;
// Queue the ingest jobs for asynchronous execution:
- // TODO: instead of dataset.getFiles(), use
- // editversion.getFileMetadatas() ... -- L.A. 4.0 alpha
for (DataFile dataFile : dataset.getFiles()) {
if (dataFile.isIngestScheduled()) {
dataFile.SetIngestInProgress();
- Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Attempting to queue the file " + dataFile.getName() + " for ingest.");
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Attempting to queue the file " + dataFile.getFileMetadata().getLabel() + " for ingest.");
ingestService.asyncIngestAsTabular(dataFile);
}
}
@@ -536,7 +527,7 @@ public void handleDropBoxUpload(ActionEvent e) {
status = getClient().executeMethod(dropBoxMethod);
if (status == 200) {
dropBoxStream = dropBoxMethod.getResponseBodyAsStream();
- dFile = new DataFile(fileName, "application/octet-stream");
+ dFile = new DataFile("application/octet-stream");
dFile.setOwner(dataset);
// save the file, in the temporary location for now:
@@ -544,8 +535,13 @@ public void handleDropBoxUpload(ActionEvent e) {
if (getFilesTempDirectory() != null) {
Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "Will attempt to save the DropBox file as: " + getFilesTempDirectory() + "/" + dFile.getFileSystemName());
Files.copy(dropBoxStream, Paths.get(getFilesTempDirectory(), dFile.getFileSystemName()), StandardCopyOption.REPLACE_EXISTING);
- long writtenBytes = dFile.getFileSystemLocation().toFile().length();
- Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "File size, expected: " + fileSize + ", written: " + writtenBytes);
+ File tempFile = Paths.get(getFilesTempDirectory(), dFile.getFileSystemName()).toFile();
+ if (tempFile.exists()) {
+ long writtenBytes = tempFile.length();
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.INFO, "File size, expected: " + fileSize + ", written: " + writtenBytes);
+ } else {
+ throw new IOException();
+ }
}
}
} catch (IOException ex) {
@@ -569,7 +565,7 @@ public void handleDropBoxUpload(ActionEvent e) {
FileMetadata fmd = new FileMetadata();
fmd.setDataFile(dFile);
dFile.getFileMetadatas().add(fmd);
- fmd.setLabel(dFile.getName());
+ fmd.setLabel(fileName);
fmd.setCategory(dFile.getContentType());
if (editVersion.getFileMetadatas() == null) {
editVersion.setFileMetadatas(new ArrayList());
@@ -584,13 +580,13 @@ public void handleDropBoxUpload(ActionEvent e) {
// what it is.
String fileType = null;
try {
- fileType = FileUtil.determineFileType(Paths.get(getFilesTempDirectory(), dFile.getFileSystemName()).toFile(), dFile.getName());
+ fileType = FileUtil.determineFileType(Paths.get(getFilesTempDirectory(), dFile.getFileSystemName()).toFile(), fileName);
Logger.getLogger(DatasetPage.class.getName()).log(Level.FINE, "File utility recognized the file as " + fileType);
if (fileType != null && !fileType.equals("")) {
dFile.setContentType(fileType);
}
} catch (IOException ex) {
- Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to run the file utility mime type check on file " + dFile.getName());
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to run the file utility mime type check on file " + fileName);
}
newFiles.add(dFile);
@@ -599,16 +595,17 @@ public void handleDropBoxUpload(ActionEvent e) {
public void handleFileUpload(FileUploadEvent event) {
UploadedFile uFile = event.getFile();
- DataFile dFile = new DataFile(uFile.getFileName(), uFile.getContentType());
+ DataFile dFile = new DataFile(uFile.getContentType());
FileMetadata fmd = new FileMetadata();
+ fmd.setLabel(uFile.getFileName());
+ fmd.setCategory(dFile.getContentType());
+
dFile.setOwner(dataset);
fmd.setDataFile(dFile);
dFile.getFileMetadatas().add(fmd);
- fmd.setLabel(dFile.getName());
- fmd.setCategory(dFile.getContentType());
-
+
if (editVersion.getFileMetadatas() == null) {
editVersion.setFileMetadatas(new ArrayList());
}
@@ -635,7 +632,7 @@ public void handleFileUpload(FileUploadEvent event) {
// which may have already recognized the type correctly...)
String fileType = null;
try {
- fileType = FileUtil.determineFileType(Paths.get(getFilesTempDirectory(), dFile.getFileSystemName()).toFile(), dFile.getName());
+ fileType = FileUtil.determineFileType(Paths.get(getFilesTempDirectory(), dFile.getFileSystemName()).toFile(), fmd.getLabel());
Logger.getLogger(DatasetPage.class.getName()).log(Level.FINE, "File utility recognized the file as " + fileType);
if (fileType != null && !fileType.equals("")) {
// let's look at the browser's guess regarding the mime type
@@ -648,7 +645,7 @@ public void handleFileUpload(FileUploadEvent event) {
}
}
} catch (IOException ex) {
- Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to run the file utility mime type check on file " + dFile.getName());
+ Logger.getLogger(DatasetPage.class.getName()).log(Level.WARNING, "Failed to run the file utility mime type check on file " + fmd.getLabel());
}
newFiles.add(dFile);
diff --git a/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java b/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java
index 6aae5eea420..12d5d0699c8 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DatasetVersion.java
@@ -101,7 +101,15 @@ public List getDatasetFields() {
return datasetFields;
}
+ /**
+ * Sets the dataset fields for this version. Also updates the fields to
+ * have @{code this} as their dataset version.
+ * @param datasetFields
+ */
public void setDatasetFields(List datasetFields) {
+ for ( DatasetField dsf : datasetFields ) {
+ dsf.setDatasetVersion(this);
+ }
this.datasetFields = datasetFields;
}
@@ -358,6 +366,10 @@ public List getDatasetAuthors() {
return retList;
}
+ public void setDatasetAuthors( List authors ) {
+ // FIXME add the authores to the relevant fields
+ }
+
public String getCitation(boolean isOnlineVersion) {
@@ -451,7 +463,12 @@ public List getDatasetDistributors() {
//todo get distributors from DatasetfieldValues
return new ArrayList();
}
+
+ public void setDatasetDistributors( List distributors) {
+ //todo implement
+ }
+
public String getDistributorNames() {
String str = "";
for (DatasetDistributor sd : this.getDatasetDistributors()) {
diff --git a/src/main/java/edu/harvard/iq/dataverse/Dataverse.java b/src/main/java/edu/harvard/iq/dataverse/Dataverse.java
index 7a13ef56c28..1e54d97faac 100644
--- a/src/main/java/edu/harvard/iq/dataverse/Dataverse.java
+++ b/src/main/java/edu/harvard/iq/dataverse/Dataverse.java
@@ -12,6 +12,8 @@
import javax.persistence.CascadeType;
import javax.persistence.Column;
import javax.persistence.Entity;
+import javax.persistence.EnumType;
+import javax.persistence.Enumerated;
import javax.persistence.FetchType;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
@@ -75,6 +77,18 @@ public class Dataverse extends DvObjectContainer {
@OneToMany(mappedBy = "dataverse")
@OrderBy("displayOrder")
private List dataverseFacets = new ArrayList();
+
+ public enum ImageFormat { SQUARE, RECTANGLE }
+
+ @Enumerated(EnumType.STRING)
+ private ImageFormat logoFormat;
+ private String logo;
+ private String tagline;
+ private String linkUrl;
+ private String linkText;
+ private String linkColor;
+ private String textColor;
+ private String backgroundColor;
public List getMetadataBlocks() {
return getMetadataBlocks(false);
@@ -176,6 +190,72 @@ public void setFacetRoot(boolean facetRoot) {
this.facetRoot = facetRoot;
}
+ public ImageFormat getLogoFormat() {
+ return logoFormat;
+ }
+
+ public void setLogoFormat(ImageFormat logoFormat) {
+ this.logoFormat = logoFormat;
+ }
+
+ public String getLogo() {
+ return logo;
+ }
+
+ public void setLogo(String logo) {
+ this.logo = logo;
+ }
+
+ public String getTagline() {
+ return tagline;
+ }
+
+ public void setTagline(String tagline) {
+ this.tagline = tagline;
+ }
+
+ public String getLinkUrl() {
+ return linkUrl;
+ }
+
+ public void setLinkUrl(String linkUrl) {
+ this.linkUrl = linkUrl;
+ }
+
+ public String getLinkText() {
+ return linkText;
+ }
+
+ public void setLinkText(String linkText) {
+ this.linkText = linkText;
+ }
+
+ public String getLinkColor() {
+ return linkColor;
+ }
+
+ public void setLinkColor(String linkColor) {
+ this.linkColor = linkColor;
+ }
+
+ public String getTextColor() {
+ return textColor;
+ }
+
+ public void setTextColor(String textColor) {
+ this.textColor = textColor;
+ }
+
+ public String getBackgroundColor() {
+ return backgroundColor;
+ }
+
+ public void setBackgroundColor(String backgroundColor) {
+ this.backgroundColor = backgroundColor;
+ }
+
+
+
public void addRole(DataverseRole role) {
role.setOwner(this);
roles.add(role);
@@ -194,6 +274,7 @@ public List getOwners() {
return owners;
}
+
@Override
public boolean equals(Object object) {
// TODO: Warning - this method won't work in the case the id fields are not set
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataverseHeaderFragment.java b/src/main/java/edu/harvard/iq/dataverse/DataverseHeaderFragment.java
index 36bcf5fd325..abc3f5e8746 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataverseHeaderFragment.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataverseHeaderFragment.java
@@ -9,8 +9,8 @@
import java.util.List;
import javax.ejb.EJB;
import javax.faces.view.ViewScoped;
+import javax.inject.Inject;
import javax.inject.Named;
-import org.primefaces.event.NodeSelectEvent;
import org.primefaces.model.DefaultTreeNode;
import org.primefaces.model.TreeNode;
@@ -24,6 +24,9 @@ public class DataverseHeaderFragment implements java.io.Serializable {
@EJB
DataverseServiceBean dataverseService;
+
+ @Inject
+ DataverseSession dataverseSession;
public List getDataverses(Dataverse dataverse) {
List dataverses = new ArrayList();
@@ -50,8 +53,13 @@ private TreeNode getDataverseNode(Dataverse dataverse, TreeNode root, boolean ex
for (Dataverse child : childDataversesOfCurrentDataverse) {
getDataverseNode(child, dataverseNode, false);
}
-
+
return dataverseNode;
-}
-
+ }
+
+ public String logout() {
+ dataverseSession.setUser(null);
+ return "/dataverse.xhtml?faces-redirect=true";
+ }
+
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataversePage.java b/src/main/java/edu/harvard/iq/dataverse/DataversePage.java
index 4f27d3d1abf..22977f579ac 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataversePage.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataversePage.java
@@ -38,11 +38,12 @@ public class DataversePage implements java.io.Serializable {
private static final Logger logger = Logger.getLogger(DataversePage.class.getCanonicalName());
public enum EditMode {
- CREATE, INFO, PERMISSIONS, SETUP
+
+ CREATE, INFO, PERMISSIONS, SETUP, THEME
}
@EJB
- DataverseServiceBean dataverseService;
+ DataverseServiceBean dataverseService;
@EJB
DatasetServiceBean datasetService;
@Inject
@@ -52,16 +53,16 @@ public enum EditMode {
@EJB
SearchServiceBean searchService;
@EJB
- DatasetFieldServiceBean datasetFieldService;
+ DatasetFieldServiceBean datasetFieldService;
@EJB
- DataverseFacetServiceBean dataverseFacetService;
+ DataverseFacetServiceBean dataverseFacetService;
@EJB
UserNotificationServiceBean userNotificationService;
-
+
private Dataverse dataverse = new Dataverse();
private EditMode editMode;
private Long ownerId;
- private DualListModel facets;
+ private DualListModel facets;
// private TreeNode treeWidgetRootNode = new DefaultTreeNode("Root", null);
public Dataverse getDataverse() {
@@ -101,6 +102,7 @@ public void init() {
if (dataverse.getId() != null) { // view mode for a dataverse
dataverse = dataverseService.find(dataverse.getId());
ownerId = dataverse.getOwner() != null ? dataverse.getOwner().getId() : null;
+ setDataverseDescriptionPage(dataverse.getDescription());
} else if (ownerId != null) { // create mode for a new child dataverse
editMode = EditMode.INFO;
dataverse.setOwner(dataverseService.find(ownerId));
@@ -119,10 +121,10 @@ public void init() {
}
}
}
-
+
List facetsSource = new ArrayList<>();
List facetsTarget = new ArrayList<>();
-
+
facetsSource.addAll(datasetFieldService.findAllFacetableFieldTypes());
List facetsList = dataverseFacetService.findByDataverseId(dataverse.getId());
@@ -142,33 +144,36 @@ public List getContents() {
public void edit(EditMode editMode) {
this.editMode = editMode;
+ setDataverseDescriptionPage(dataverse.getDescription());
+ updateCountDisplay();
if (editMode == EditMode.INFO) {
FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Edit Dataverse", " - Edit your dataverse and click Save. Asterisks indicate required fields."));
} else if (editMode == EditMode.SETUP) {
FacesContext.getCurrentInstance().addMessage(null, new FacesMessage(FacesMessage.SEVERITY_INFO, "Edit Dataverse Setup", " - Edit the Metadata Blocks and Facets you want to associate with your dataverse. Note: facets will appear in the order shown on the list."));
- }
+ }
}
public String save() {
- Command cmd = null;
- //TODO change to Create - for now the page is expecting INFO instead.
- if (dataverse.getId() == null){
- dataverse.setOwner(ownerId != null ? dataverseService.find(ownerId) : null);
- cmd = new CreateDataverseCommand(dataverse, session.getUser());
- } else {
- cmd = new UpdateDataverseCommand(dataverse, facets.getTarget(), session.getUser());
- }
+ Command cmd = null;
+ //TODO change to Create - for now the page is expecting INFO instead.
+ dataverse.setDescription(dataverseDescriptionPage);
+ if (dataverse.getId() == null) {
+ dataverse.setOwner(ownerId != null ? dataverseService.find(ownerId) : null);
+ cmd = new CreateDataverseCommand(dataverse, session.getUser());
+ } else {
+ cmd = new UpdateDataverseCommand(dataverse, facets.getTarget(), session.getUser());
+ }
+
+ try {
+ dataverse = commandEngine.submit(cmd);
+ userNotificationService.sendNotification(session.getUser(), dataverse.getCreateDate(), Type.CREATEDV, dataverse.getId());
+ editMode = null;
+ } catch (CommandException ex) {
+ JH.addMessage(FacesMessage.SEVERITY_ERROR, ex.getMessage());
+ return null;
+ }
- try {
- dataverse = commandEngine.submit(cmd);
- userNotificationService.sendNotification(session.getUser(), dataverse.getCreateDate(), Type.CREATEDV, dataverse.getId());
- editMode = null;
- } catch (CommandException ex) {
- JH.addMessage(FacesMessage.SEVERITY_ERROR, ex.getMessage());
- return null;
- }
-
- return "/dataverse.xhtml?id=" + dataverse.getId() +"&faces-redirect=true";
+ return "/dataverse.xhtml?id=" + dataverse.getId() + "&faces-redirect=true";
}
public void cancel(ActionEvent e) {
@@ -202,7 +207,7 @@ public void editMetadataBlocks() {
dataverse.getMetadataBlocks(true).clear();
}
}
-
+
public boolean isInheritFacetFromParent() {
return !dataverse.isFacetRoot();
}
@@ -210,7 +215,7 @@ public boolean isInheritFacetFromParent() {
public void setInheritFacetFromParent(boolean inheritFacetFromParent) {
dataverse.setFacetRoot(!inheritFacetFromParent);
}
-
+
public void editFacets() {
if (dataverse.isFacetRoot()) {
dataverse.getDataverseFacets().addAll(dataverse.getOwner().getDataverseFacets());
@@ -218,15 +223,15 @@ public void editFacets() {
dataverse.getDataverseFacets(true).clear();
}
}
-
+
public DualListModel getFacets() {
return facets;
}
-
+
public void setFacets(DualListModel facets) {
this.facets = facets;
}
-
+
public String releaseDataverse() {
dataverse.setPublicationDate(new Timestamp(new Date().getTime()));
dataverse.setReleaseUser(session.getUser());
@@ -235,4 +240,55 @@ public String releaseDataverse() {
FacesContext.getCurrentInstance().addMessage(null, message);
return "/dataverse.xhtml?id=" + dataverse.getId() + "&faces-redirect=true";
}
+
+ public void updateCountDisplay() {
+ setDescriptionSize(new Integer(dataverseDescriptionPage.length()));
+ }
+
+ private String dataverseDescriptionPage;
+
+ public String getDataverseDescriptionPage() {
+ return dataverseDescriptionPage;
+ }
+
+ public void setDataverseDescriptionPage(String dataverseDescriptionPage) {
+ this.dataverseDescriptionPage = dataverseDescriptionPage;
+ }
+
+ private Integer descriptionSize = new Integer(0);
+
+ public Integer getDescriptionSize() {
+ return descriptionSize;
+ }
+
+ public void setDescriptionSize(Integer descriptionSize) {
+ this.descriptionSize = descriptionSize;
+ }
+
+ public String getCountString() {
+ return new Integer(1000 - descriptionSize.intValue()).toString() + " characters remaining";
+ }
+
+ public String getMetadataBlockPreview(MetadataBlock mdb, int numberOfItems) {
+ /// for beta, we will just preview the first n fields
+ StringBuilder mdbPreview = new StringBuilder();
+ int count = 0;
+ for (DatasetFieldType dsfType : mdb.getDatasetFieldTypes()) {
+ if (!dsfType.isChild()) {
+ if (count != 0) {
+ mdbPreview.append(", ");
+ if (count == numberOfItems) {
+ mdbPreview.append("etc.");
+ break;
+ }
+ }
+
+ mdbPreview.append(dsfType.getDisplayName());
+ count++;
+ }
+ }
+
+ return mdbPreview.toString();
+ }
+
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/DataverseRoleServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/DataverseRoleServiceBean.java
index 96fd0d726f2..12fdb266216 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DataverseRoleServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DataverseRoleServiceBean.java
@@ -97,20 +97,20 @@ public UserRoleAssignments assignmentsFor( final DataverseUser u, final DvObject
@Override
public UserRoleAssignments visit(Dataverse dv) {
- return roleAssignments(u, (Dataverse)d);
+ return roleAssignments(u, dv);
}
@Override
public UserRoleAssignments visit(Dataset ds) {
UserRoleAssignments asgn = ds.getOwner().accept(this);
- asgn.add( directRoleAssignments(u, d) );
+ asgn.add( directRoleAssignments(u, ds) );
return asgn;
}
@Override
public UserRoleAssignments visit(DataFile df) {
UserRoleAssignments asgn = df.getOwner().accept(this);
- asgn.add( directRoleAssignments(u, d) );
+ asgn.add( directRoleAssignments(u, df) );
return asgn;
}
});
diff --git a/src/main/java/edu/harvard/iq/dataverse/DvObject.java b/src/main/java/edu/harvard/iq/dataverse/DvObject.java
index 7db11369530..d7b012fc383 100644
--- a/src/main/java/edu/harvard/iq/dataverse/DvObject.java
+++ b/src/main/java/edu/harvard/iq/dataverse/DvObject.java
@@ -34,7 +34,7 @@ public String visit(Dataset ds) {
@Override
public String visit(DataFile df) {
- return df.getName();
+ return df.getFileMetadata().getLabel();
}
};
diff --git a/src/main/java/edu/harvard/iq/dataverse/FileMetadata.java b/src/main/java/edu/harvard/iq/dataverse/FileMetadata.java
index 1a02118e7bb..37945188ba5 100644
--- a/src/main/java/edu/harvard/iq/dataverse/FileMetadata.java
+++ b/src/main/java/edu/harvard/iq/dataverse/FileMetadata.java
@@ -15,6 +15,7 @@
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.Version;
+import org.hibernate.validator.constraints.NotBlank;
/**
*
@@ -23,7 +24,8 @@
@Entity
public class FileMetadata implements Serializable {
private static final long serialVersionUID = 1L;
-
+
+ @NotBlank(message = "Please specify a file name.")
private String label = "";
@Column(columnDefinition = "TEXT")
private String description = "";
@@ -40,7 +42,7 @@ public class FileMetadata implements Serializable {
public String getLabel() {
return label;
}
-
+
public void setLabel(String label) {
this.label = label;
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/IndexServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/IndexServiceBean.java
index 442c81cb6ad..3f76323fbb0 100644
--- a/src/main/java/edu/harvard/iq/dataverse/IndexServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/IndexServiceBean.java
@@ -49,6 +49,7 @@ public class IndexServiceBean {
private static final Long tmpNsaGroupId = 2L;
private static final String PUBLISHED_STRING = "Published";
private static final String UNPUBLISHED_STRING = "Unpublished";
+ private static final String DRAFT_STRING = "Draft";
public String indexAll() {
/**
@@ -332,6 +333,7 @@ private String addOrUpdateDataset(IndexableDataset indexableDataset) {
}
sortByDate = majorVersionReleaseDate;
} else {
+ solrInputDocument.addField(SearchFields.PUBLICATION_STATUS, UNPUBLISHED_STRING);
Date createDate = dataset.getCreateDate();
if (createDate != null) {
if (true) {
@@ -352,7 +354,7 @@ private String addOrUpdateDataset(IndexableDataset indexableDataset) {
// solrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, dataset.getPublicationDate());
solrInputDocument.addField(SearchFields.PERMS, publicGroupString);
} else if (state.equals(indexableDataset.getDatasetState().WORKING_COPY)) {
- solrInputDocument.addField(SearchFields.PUBLICATION_STATUS, UNPUBLISHED_STRING);
+ solrInputDocument.addField(SearchFields.PUBLICATION_STATUS, DRAFT_STRING);
DataverseUser creator = dataset.getCreator();
if (creator != null) {
solrInputDocument.addField(SearchFields.PERMS, groupPerUserPrefix + creator.getId());
@@ -552,14 +554,42 @@ private String addOrUpdateDataset(IndexableDataset indexableDataset) {
datafileSolrInputDocument.addField(SearchFields.ID, "datafile_" + dataFile.getId());
datafileSolrInputDocument.addField(SearchFields.ENTITY_ID, dataFile.getId());
datafileSolrInputDocument.addField(SearchFields.TYPE, "files");
- datafileSolrInputDocument.addField(SearchFields.NAME, dataFile.getName());
- datafileSolrInputDocument.addField(SearchFields.NAME_SORT, dataFile.getName());
+
+ FileMetadata fileMetadata = dataFile.getFileMetadata();
+ String filenameCompleteFinal = "";
+ if (fileMetadata != null) {
+ String filenameComplete = fileMetadata.getLabel();
+ if (filenameComplete != null) {
+ String filenameWithoutExtension = "";
+ // String extension = "";
+ int i = filenameComplete.lastIndexOf('.');
+ if (i > 0) {
+ // extension = filenameComplete.substring(i + 1);
+ try {
+ filenameWithoutExtension = filenameComplete.substring(0, i);
+ datafileSolrInputDocument.addField(SearchFields.FILENAME_WITHOUT_EXTENSION, filenameWithoutExtension);
+ } catch (IndexOutOfBoundsException ex) {
+ filenameWithoutExtension = "";
+ }
+ } else {
+ logger.info("problem with filename '" + filenameComplete + "': no extension? empty string as filename?");
+ filenameWithoutExtension = filenameComplete;
+ }
+ filenameCompleteFinal = filenameComplete;
+ }
+ }
+ datafileSolrInputDocument.addField(SearchFields.NAME, filenameCompleteFinal);
+ datafileSolrInputDocument.addField(SearchFields.NAME_SORT, filenameCompleteFinal);
+
datafileSolrInputDocument.addField(SearchFields.RELEASE_OR_CREATE_DATE, sortByDate);
+ if (majorVersionReleaseDate == null) {
+ datafileSolrInputDocument.addField(SearchFields.PUBLICATION_STATUS, UNPUBLISHED_STRING);
+ }
if (indexableDataset.getDatasetState().equals(indexableDataset.getDatasetState().PUBLISHED)) {
datafileSolrInputDocument.addField(SearchFields.PUBLICATION_STATUS, PUBLISHED_STRING);
datafileSolrInputDocument.addField(SearchFields.PERMS, publicGroupString);
} else if (indexableDataset.getDatasetState().equals(indexableDataset.getDatasetState().WORKING_COPY)) {
- datafileSolrInputDocument.addField(SearchFields.PUBLICATION_STATUS, UNPUBLISHED_STRING);
+ datafileSolrInputDocument.addField(SearchFields.PUBLICATION_STATUS, DRAFT_STRING);
DataverseUser creator = dataFile.getOwner().getCreator();
if (creator != null) {
datafileSolrInputDocument.addField(SearchFields.PERMS, groupPerUserPrefix + creator.getId());
@@ -826,6 +856,10 @@ public static String getUNPUBLISHED_STRING() {
return UNPUBLISHED_STRING;
}
+ public static String getDRAFT_STRING() {
+ return DRAFT_STRING;
+ }
+
public String delete(Dataverse doomed) {
/**
* @todo allow for configuration of hostname and port
diff --git a/src/main/java/edu/harvard/iq/dataverse/MetadataBlock.java b/src/main/java/edu/harvard/iq/dataverse/MetadataBlock.java
index 665b606ebb2..26076312522 100644
--- a/src/main/java/edu/harvard/iq/dataverse/MetadataBlock.java
+++ b/src/main/java/edu/harvard/iq/dataverse/MetadataBlock.java
@@ -7,7 +7,7 @@
package edu.harvard.iq.dataverse;
import java.io.Serializable;
-import java.util.Collection;
+import java.util.List;
import javax.persistence.CascadeType;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
@@ -52,12 +52,12 @@ public void setName(String name) {
}
@OneToMany(mappedBy = "metadataBlock", cascade = {CascadeType.REMOVE, CascadeType.MERGE, CascadeType.PERSIST})
- private Collection datasetFieldTypes;
- public Collection getDatasetFieldTypes() {
+ private List datasetFieldTypes;
+ public List getDatasetFieldTypes() {
return datasetFieldTypes;
}
- public void setDatasetFieldTypes(Collection datasetFieldTypes) {
+ public void setDatasetFieldTypes(List datasetFieldTypes) {
this.datasetFieldTypes = datasetFieldTypes;
}
@@ -77,6 +77,11 @@ public void setDisplayName(String displayName) {
this.displayName = displayName;
}
+ public boolean isRequired() {
+ // eventually this will be dynamic, for now only citation is required
+ return "citation".equals(name);
+ }
+
public int hashCode() {
int hash = 0;
hash += (this.id != null ? this.id.hashCode() : 0);
diff --git a/src/main/java/edu/harvard/iq/dataverse/SearchIncludeFragment.java b/src/main/java/edu/harvard/iq/dataverse/SearchIncludeFragment.java
index 7fb4fa40063..70c2191dff7 100644
--- a/src/main/java/edu/harvard/iq/dataverse/SearchIncludeFragment.java
+++ b/src/main/java/edu/harvard/iq/dataverse/SearchIncludeFragment.java
@@ -89,7 +89,7 @@ public class SearchIncludeFragment {
private Map numberOfFacets = new HashMap<>();
private List directChildDvObjectContainerList = new ArrayList<>();
private boolean debug = false;
- private boolean showUnpublished;
+// private boolean showUnpublished;
List filterQueriesDebug = new ArrayList<>();
// private Map friendlyName = new HashMap<>();
@@ -257,11 +257,11 @@ public void search() {
logger.info("queryToPassToSolr: " + queryToPassToSolr);
logger.info("sort by: " + sortField);
SearchServiceBean.PublishedToggle publishedToggle = null;
- if (showUnpublished) {
- publishedToggle = SearchServiceBean.PublishedToggle.UNPUBLISHED;
- } else {
- publishedToggle = SearchServiceBean.PublishedToggle.PUBLISHED;
- }
+// if (showUnpublished) {
+// publishedToggle = SearchServiceBean.PublishedToggle.UNPUBLISHED;
+// } else {
+// publishedToggle = SearchServiceBean.PublishedToggle.PUBLISHED;
+// }
solrQueryResponse = searchService.search(session.getUser(), dataverse, queryToPassToSolr, filterQueriesFinal, sortField, sortOrder, paginationStart, publishedToggle);
solrQueryResponseAllTypes = searchService.search(session.getUser(), dataverse, queryToPassToSolr, filterQueriesFinalAllTypes, sortField, sortOrder, paginationStart, publishedToggle);
} catch (EJBException ex) {
@@ -372,13 +372,13 @@ public void search() {
// friendlyName.put(SearchFields.DISTRIBUTION_DATE_YEAR_ONLY, "Distribution Date");
}
- public boolean isShowUnpublished() {
- return showUnpublished;
- }
-
- public void setShowUnpublished(boolean showUnpublished) {
- this.showUnpublished = showUnpublished;
- }
+// public boolean isShowUnpublished() {
+// return showUnpublished;
+// }
+//
+// public void setShowUnpublished(boolean showUnpublished) {
+// this.showUnpublished = showUnpublished;
+// }
public String getBrowseModeString() {
return browseModeString;
@@ -856,6 +856,10 @@ public String getUNPUBLISHED() {
return IndexServiceBean.getUNPUBLISHED_STRING();
}
+ public String getDRAFT() {
+ return IndexServiceBean.getDRAFT_STRING();
+ }
+
public List getFriendlyNamesFromFilterQuery(String filterQuery) {
String[] parts = filterQuery.split(":");
String key = parts[0];
diff --git a/src/main/java/edu/harvard/iq/dataverse/SearchServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/SearchServiceBean.java
index 0f5c022f7c6..27cbcdd25c1 100644
--- a/src/main/java/edu/harvard/iq/dataverse/SearchServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/SearchServiceBean.java
@@ -53,17 +53,21 @@ public class SearchServiceBean {
PublishedToggle publishedToggle = PublishedToggle.PUBLISHED;
+ /**
+ * @deprecated The Published/Unpublished toggle was an experiment: https://docs.google.com/a/harvard.edu/document/d/1clGJKOmrH8zhQyG_8vQHui5L4fszdqRjM4t3U6NFJXg/edit?usp=sharing
+ */
+ @Deprecated
public enum PublishedToggle {
PUBLISHED, UNPUBLISHED
};
public SolrQueryResponse search(DataverseUser dataverseUser, Dataverse dataverse, String query, List filterQueries, String sortField, String sortOrder, int paginationStart, PublishedToggle publishedToggle) {
- if (publishedToggle.equals(PublishedToggle.PUBLISHED)) {
- filterQueries.add(SearchFields.PUBLICATION_STATUS + ":" + IndexServiceBean.getPUBLISHED_STRING());
- } else {
- filterQueries.add(SearchFields.PUBLICATION_STATUS + ":" + IndexServiceBean.getUNPUBLISHED_STRING());
- }
+// if (publishedToggle.equals(PublishedToggle.PUBLISHED)) {
+// filterQueries.add(SearchFields.PUBLICATION_STATUS + ":" + IndexServiceBean.getPUBLISHED_STRING());
+// } else {
+// filterQueries.add(SearchFields.PUBLICATION_STATUS + ":" + IndexServiceBean.getUNPUBLISHED_STRING());
+// }
/**
* @todo make "localhost" and port number a config option
*/
@@ -86,6 +90,11 @@ public SolrQueryResponse search(DataverseUser dataverseUser, Dataverse dataverse
solrFieldsToHightlightOnMap.put(SearchFields.AFFILIATION, "Affiliation");
solrFieldsToHightlightOnMap.put(SearchFields.CITATION, "Citation");
solrFieldsToHightlightOnMap.put(SearchFields.FILE_TYPE_MIME, "File Type");
+ /**
+ * @todo: show highlight on file card?
+ * https://redmine.hmdc.harvard.edu/issues/3848
+ */
+ solrFieldsToHightlightOnMap.put(SearchFields.FILENAME_WITHOUT_EXTENSION, "Filename Without Extension");
List datasetFields = datasetFieldService.findAllOrderedById();
for (DatasetFieldType datasetFieldType: datasetFields) {
String solrField = datasetFieldType.getSolrField().getNameSearchable();
@@ -113,8 +122,9 @@ public SolrQueryResponse search(DataverseUser dataverseUser, Dataverse dataverse
if (dataverseUser != null) {
if (dataverseUser.isGuest()) {
permissionFilterQuery = publicOnly;
+// solrQuery.addFacetField(SearchFields.PUBLICATION_STATUS); // remove ... just for dev
} else {
-// solrQuery.addFacetField(SearchFields.PUBLICATION_STATUS);
+ solrQuery.addFacetField(SearchFields.PUBLICATION_STATUS);
/**
* Non-guests might get more than public stuff with an OR or
* two.
@@ -143,6 +153,16 @@ public SolrQueryResponse search(DataverseUser dataverseUser, Dataverse dataverse
}
}
}
+
+ /**
+ * @todo: Remove! Or at least keep this commented out! Very dangerous!
+ * If you pass in "null" for permissionFilterQuery then everyone, even
+ * guest, has "NSA Nick" privs and can see everything! This override
+ * should only be used during dev.
+ */
+// String dangerZone = null;
+// permissionFilterQuery = dangerZone;
+
solrQuery.addFilterQuery(permissionFilterQuery);
// solrQuery.addFacetField(SearchFields.HOST_DATAVERSE);
@@ -289,6 +309,14 @@ public SolrQueryResponse search(DataverseUser dataverseUser, Dataverse dataverse
/**
* @todo put all this in the constructor?
*/
+ List states = (ArrayList) solrDocument.getFieldValue(SearchFields.PUBLICATION_STATUS);
+ for (String state : states) {
+ if (state.equals(IndexServiceBean.getUNPUBLISHED_STRING())) {
+ solrSearchResult.setUnpublishedState(true);
+ } else if (state.equals(IndexServiceBean.getDRAFT_STRING())) {
+ solrSearchResult.setDraftState(true);
+ }
+ }
// logger.info(id + ": " + description);
solrSearchResult.setDescriptionNoSnippet(description);
solrSearchResult.setId(id);
@@ -334,6 +362,9 @@ public SolrQueryResponse search(DataverseUser dataverseUser, Dataverse dataverse
List facetCategoryList = new ArrayList();
List typeFacetCategories = new ArrayList<>();
+ boolean hidePublicationStatusFacet = true;
+ boolean draftsAvailable = false;
+ boolean unpublishedAvailable = false;
for (FacetField facetField : queryResponse.getFacetFields()) {
FacetCategory facetCategory = new FacetCategory();
List facetLabelList = new ArrayList<>();
@@ -347,6 +378,13 @@ public SolrQueryResponse search(DataverseUser dataverseUser, Dataverse dataverse
// quote field facets
facetLabel.setFilterQuery(facetField.getName() + ":\"" + facetFieldCount.getName() + "\"");
facetLabelList.add(facetLabel);
+ if (facetField.getName().equals(SearchFields.PUBLICATION_STATUS)) {
+ if (facetLabel.getName().equals(IndexServiceBean.getUNPUBLISHED_STRING())) {
+ unpublishedAvailable = true;
+ } else if (facetLabel.getName().equals(IndexServiceBean.getDRAFT_STRING())) {
+ draftsAvailable = true;
+ }
+ }
}
}
facetCategory.setName(facetField.getName());
@@ -410,6 +448,13 @@ public SolrQueryResponse search(DataverseUser dataverseUser, Dataverse dataverse
if (facetCategory.getName().equals(SearchFields.TYPE)) {
// the "type" facet is special, these are not
typeFacetCategories.add(facetCategory);
+ } else if (facetCategory.getName().equals(SearchFields.PUBLICATION_STATUS)) {
+ if (unpublishedAvailable || draftsAvailable) {
+ hidePublicationStatusFacet = false;
+ }
+ if (!hidePublicationStatusFacet) {
+ facetCategoryList.add(facetCategory);
+ }
} else {
facetCategoryList.add(facetCategory);
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/SolrField.java b/src/main/java/edu/harvard/iq/dataverse/SolrField.java
index 264ad521719..1940e8b37a9 100644
--- a/src/main/java/edu/harvard/iq/dataverse/SolrField.java
+++ b/src/main/java/edu/harvard/iq/dataverse/SolrField.java
@@ -46,7 +46,12 @@ public boolean isFacetable() {
public enum SolrType {
- STRING("string"), TEXT_GENERAL("text_general"), INTEGER("int"), LONG("long");
+ /**
+ * @todo: make this configurable from text_en to text_general or
+ * non-English languages? We changed it to text_en to improve English
+ * language searching in https://redmine.hmdc.harvard.edu/issues/3859
+ */
+ STRING("string"), TEXT_EN("text_en"), INTEGER("int"), LONG("long");
private String type;
diff --git a/src/main/java/edu/harvard/iq/dataverse/SolrSearchResult.java b/src/main/java/edu/harvard/iq/dataverse/SolrSearchResult.java
index 8a4101c6a29..98e3ba25c11 100644
--- a/src/main/java/edu/harvard/iq/dataverse/SolrSearchResult.java
+++ b/src/main/java/edu/harvard/iq/dataverse/SolrSearchResult.java
@@ -41,6 +41,33 @@ public class SolrSearchResult {
private String dataverseAffiliation;
private String citation;
private String filetype;
+// private boolean statePublished;
+ private boolean unpublishedState;
+ private boolean draftState;
+
+// public boolean isStatePublished() {
+// return statePublished;
+// }
+
+// public void setStatePublished(boolean statePublished) {
+// this.statePublished = statePublished;
+// }
+
+ public boolean isUnpublishedState() {
+ return unpublishedState;
+ }
+
+ public void setUnpublishedState(boolean unpublishedState) {
+ this.unpublishedState = unpublishedState;
+ }
+
+ public boolean isDraftState() {
+ return draftState;
+ }
+
+ public void setDraftState(boolean draftState) {
+ this.draftState = draftState;
+ }
/**
* @todo: used? remove
*/
@@ -215,6 +242,9 @@ public JsonObject toJsonObject() {
typeSpecificFields.add(SearchFields.NAME, this.name);
typeSpecificFields.add(SearchFields.FILE_TYPE_MIME, this.filetype);
}
+ if (this.id == null) {
+ this.id = "bug3809";
+ }
JsonObject jsonObject = Json.createObjectBuilder()
.add(SearchFields.ID, this.id)
.add(SearchFields.ENTITY_ID, this.entityId)
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Browse.java b/src/main/java/edu/harvard/iq/dataverse/api/Browse.java
index f439e2c4b21..2d3f245bd44 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/Browse.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/Browse.java
@@ -79,7 +79,7 @@ public String browse() throws FileNotFoundException {
datasetsArrayBuilder.add(datasetObjectBuilder);
List files = dataset.getFiles();
for (DataFile file : files) {
- logger.info("file: " + file.getName());
+ logger.info("file: " + file.getFileMetadata().getLabel());
String fileInfo = dataverse.getAlias();// + "|" + dataset.getTitle() + "|" + file.getName();
JsonObjectBuilder fileInfoBuilder = Json.createObjectBuilder().add("fileInfo", fileInfo);
filesArrayBuilder.add(fileInfoBuilder);
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Config.java b/src/main/java/edu/harvard/iq/dataverse/api/Config.java
index d1a751556b2..bdbff72a4c7 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/Config.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/Config.java
@@ -26,7 +26,7 @@ public String getSolrSchema() {
String type = datasetField.getSolrField().getSolrType().getType();
String multivalued = datasetField.getSolrField().isAllowedToBeMultivalued().toString();
//
- sb.append("\n");
+ sb.append(" \n");
}
List listOfStaticFields = new ArrayList();
@@ -70,7 +70,7 @@ public String getSolrSchema() {
}
//
- sb.append("\n");
+ sb.append(" \n");
}
return sb.toString();
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/DownloadInfo.java b/src/main/java/edu/harvard/iq/dataverse/api/DownloadInfo.java
index 0dffc2c641e..14f830b7ebf 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/DownloadInfo.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/DownloadInfo.java
@@ -136,7 +136,9 @@ public Long getDataFileId() {
public String getFileName() {
if (dataFile != null) {
- return dataFile.getName();
+ if (dataFile.getFileMetadata() != null) {
+ return dataFile.getFileMetadata().getLabel();
+ }
}
return null;
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Files.java b/src/main/java/edu/harvard/iq/dataverse/api/Files.java
index 269570c46a9..a6fb013426d 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/Files.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/Files.java
@@ -42,7 +42,11 @@ public String add(DataFile dataFile, @QueryParam("key") String apiKey) {
return error("Invalid apikey '" + apiKey + "'");
}
engineSvc.submit(new UpdateDatasetCommand(dataset, u));
- return "file " + dataFile.getName() + " created/updated with dataset " + dataset.getId() + " (and probably indexed, check server.log)\n";
+ String fileName = "[No name?]";
+ if (dataFile.getFileMetadata() != null) {
+ fileName = dataFile.getFileMetadata().getLabel();
+ }
+ return "file " + fileName + " created/updated with dataset " + dataset.getId() + " (and probably indexed, check server.log)\n";
} catch (EJBException ex) {
Throwable cause = ex;
StringBuilder sb = new StringBuilder();
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/Search.java b/src/main/java/edu/harvard/iq/dataverse/api/Search.java
index 74c16d69e12..bd4221859c1 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/Search.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/Search.java
@@ -42,8 +42,6 @@ public String search(@QueryParam("key") String apiKey,
@QueryParam("sort") String sortField,
@QueryParam("order") String sortOrder,
@QueryParam("start") final int paginationStart,
- @QueryParam("published") boolean publishedOnly,
- @QueryParam("unpublished") boolean unpublishedOnly,
@QueryParam("showrelevance") boolean showRelevance) {
if (query != null) {
if (sortField == null) {
@@ -65,12 +63,6 @@ public String search(@QueryParam("key") String apiKey,
}
}
SearchServiceBean.PublishedToggle publishedToggle = SearchServiceBean.PublishedToggle.PUBLISHED;
- if (publishedOnly) {
- publishedToggle = SearchServiceBean.PublishedToggle.PUBLISHED;
- }
- if (unpublishedOnly) {
- publishedToggle = SearchServiceBean.PublishedToggle.UNPUBLISHED;
- }
solrQueryResponse = searchService.search(dataverseUser, dataverseService.findRootDataverse(), query, filterQueries, sortField, sortOrder, paginationStart, publishedToggle);
} catch (EJBException ex) {
Throwable cause = ex;
diff --git a/src/main/java/edu/harvard/iq/dataverse/api/SearchFields.java b/src/main/java/edu/harvard/iq/dataverse/api/SearchFields.java
index 915e76e69e2..90eb064993d 100644
--- a/src/main/java/edu/harvard/iq/dataverse/api/SearchFields.java
+++ b/src/main/java/edu/harvard/iq/dataverse/api/SearchFields.java
@@ -47,6 +47,10 @@ public class SearchFields {
*/
public static final String FILE_TYPE_MIME = "filetypemime_s";
public static final String FILE_TYPE = "filetype_s";
+ /**
+ * @todo change from dynamic to static?
+ */
+ public static final String FILENAME_WITHOUT_EXTENSION = "filename_without_extension_en";
// removing Host Dataverse facets per https://redmine.hmdc.harvard.edu/issues/3777#note-5
// public static final String HOST_DATAVERSE = "hostdataverse_s";
@@ -68,7 +72,7 @@ public class SearchFields {
public static final String RELEASE_OR_CREATE_DATE = "release_or_create_date_dt";
public static final String GROUPS = "groups_s";
public static final String PERMS = "perms_ss";
- public static final String PUBLICATION_STATUS = "published_s";
+ public static final String PUBLICATION_STATUS = "published_ss";
// Used for performance. Why hit the db if solr has the data?
public static final String ENTITY_ID = "entityid";
public static final String PARENT_NAME = "parentname";
diff --git a/src/main/java/edu/harvard/iq/dataverse/dataaccess/DataFileConverter.java b/src/main/java/edu/harvard/iq/dataverse/dataaccess/DataFileConverter.java
index 0eba4426f94..c585ec9cbff 100644
--- a/src/main/java/edu/harvard/iq/dataverse/dataaccess/DataFileConverter.java
+++ b/src/main/java/edu/harvard/iq/dataverse/dataaccess/DataFileConverter.java
@@ -150,7 +150,7 @@ public static FileAccessObject performFormatConversion (DataFile file, FileAcces
fileDownload.setIsLocalFile(true);
fileDownload.setMimeType(formatType);
- String dbFileName = file.getName();
+ String dbFileName = fileDownload.getFileName();
if (dbFileName == null || dbFileName.equals("")) {
dbFileName = "f" + file.getId().toString();
diff --git a/src/main/java/edu/harvard/iq/dataverse/dataaccess/FileAccessObject.java b/src/main/java/edu/harvard/iq/dataverse/dataaccess/FileAccessObject.java
index bb762b7c5c1..fa336a71477 100644
--- a/src/main/java/edu/harvard/iq/dataverse/dataaccess/FileAccessObject.java
+++ b/src/main/java/edu/harvard/iq/dataverse/dataaccess/FileAccessObject.java
@@ -96,7 +96,7 @@ public void open () throws IOException {
this.setSize(getLocalFileSize(file));
this.setMimeType(file.getContentType());
- this.setFileName(file.getName());
+ this.setFileName(file.getFileMetadata().getLabel());
if (file.getContentType() != null &&
diff --git a/src/main/java/edu/harvard/iq/dataverse/dataaccess/StoredOriginalFile.java b/src/main/java/edu/harvard/iq/dataverse/dataaccess/StoredOriginalFile.java
index fdad5774de3..86e3b64292c 100644
--- a/src/main/java/edu/harvard/iq/dataverse/dataaccess/StoredOriginalFile.java
+++ b/src/main/java/edu/harvard/iq/dataverse/dataaccess/StoredOriginalFile.java
@@ -78,12 +78,13 @@ public static FileAccessObject retrieve (DataFile dataFile, FileAccessObject fil
fileDownload.setMimeType("application/x-unknown");
}
- if (dataFile.getName() != null) {
+ String fileName = fileDownload.getFileName();
+ if (fileName != null) {
if ( originalMimeType != null) {
String origFileExtension = generateOriginalExtension(originalMimeType);
- fileDownload.setFileName(dataFile.getName().replaceAll(".tab$", origFileExtension));
+ fileDownload.setFileName(fileName.replaceAll(".tab$", origFileExtension));
} else {
- fileDownload.setFileName(dataFile.getName().replaceAll(".tab$", ""));
+ fileDownload.setFileName(fileName.replaceAll(".tab$", ""));
}
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/ReleaseDatasetCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/ReleaseDatasetCommand.java
index deb7c52bec1..8dc6a55dbf8 100644
--- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/ReleaseDatasetCommand.java
+++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/ReleaseDatasetCommand.java
@@ -24,8 +24,7 @@
* @author skraffmiller
*/
@RequiredPermissionsMap({
- @RequiredPermissions(dataverseName = "", value = Permission.Release),
- @RequiredPermissions(dataverseName = "", value = Permission.EditMetadata)
+ @RequiredPermissions(dataverseName = "", value = Permission.Release)
})
public class ReleaseDatasetCommand extends AbstractCommand {
private static final Logger logger = Logger.getLogger(ReleaseDatasetCommand.class.getCanonicalName());
@@ -33,7 +32,7 @@ public class ReleaseDatasetCommand extends AbstractCommand {
Dataset theDataset;
public ReleaseDatasetCommand(Dataset datasetIn, DataverseUser user, boolean minor) {
- super(user, datasetIn.getOwner());
+ super(user, datasetIn);
minorRelease = minor;
theDataset = datasetIn;
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetCommand.java
index ded316b61a8..31771d35a80 100644
--- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetCommand.java
+++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDatasetCommand.java
@@ -30,7 +30,7 @@ public class UpdateDatasetCommand extends AbstractCommand {
private final Dataset theDataset;
public UpdateDatasetCommand(Dataset theDataset, DataverseUser user) {
- super(user, theDataset.getOwner());
+ super(user, theDataset);
this.theDataset = theDataset;
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDataverseCommand.java b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDataverseCommand.java
index 40c0c801cd7..0b9dac2ada8 100644
--- a/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDataverseCommand.java
+++ b/src/main/java/edu/harvard/iq/dataverse/engine/command/impl/UpdateDataverseCommand.java
@@ -39,7 +39,7 @@ public Dataverse execute(CommandContext ctxt) throws CommandException {
for ( DatasetFieldType df : facetList ) {
ctxt.facets().create(i++, df.getId(), result.getId());
}
-
+ ctxt.index().indexDataverse(result);
return result;
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/export/DDIExportServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/export/DDIExportServiceBean.java
index 296f7084f4e..e7f60ae44b9 100644
--- a/src/main/java/edu/harvard/iq/dataverse/export/DDIExportServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/export/DDIExportServiceBean.java
@@ -533,7 +533,7 @@ private void createFileDscr(XMLStreamWriter xmlw, Set excludedFieldSet,
if (checkField("fileTxt", excludedFieldSet, includedFieldSet)) {
xmlw.writeStartElement("fileName");
- xmlw.writeCharacters(df.getName());
+ xmlw.writeCharacters(df.getFileMetadata().getLabel());
xmlw.writeEndElement(); // fileName
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/ingest/IngestServiceBean.java b/src/main/java/edu/harvard/iq/dataverse/ingest/IngestServiceBean.java
index 5d6e7043c21..b957eace269 100644
--- a/src/main/java/edu/harvard/iq/dataverse/ingest/IngestServiceBean.java
+++ b/src/main/java/edu/harvard/iq/dataverse/ingest/IngestServiceBean.java
@@ -205,10 +205,11 @@ public boolean ingestAsTabular(String tempFileLocation, DataFile dataFile) throw
// it up with the Ingest Service Provider Registry:
//TabularDataFileReader ingestPlugin = IngestSP.getTabDataReaderByMIMEType(dFile.getContentType());
//TabularDataFileReader ingestPlugin = new DTAFileReader(new DTAFileReaderSpi());
+ String fileName = dataFile.getFileMetadata().getLabel();
TabularDataFileReader ingestPlugin = getTabDataReaderByMimeType(dataFile);
if (ingestPlugin == null) {
- throw new IOException("Could not find ingest plugin for the file " + dataFile.getName());
+ throw new IOException("Could not find ingest plugin for the file " + fileName);
}
FileInputStream tempFileInputStream = null;
@@ -248,7 +249,7 @@ public boolean ingestAsTabular(String tempFileLocation, DataFile dataFile) throw
// and replace (or add) the extension ".tab" to the filename:
dataFile.setContentType(MIME_TYPE_TAB);
- dataFile.getFileMetadata().setLabel(FileUtil.replaceExtension(dataFile.getName(), "tab"));
+ dataFile.getFileMetadata().setLabel(FileUtil.replaceExtension(fileName, "tab"));
dataFile.setDataTable(tabDataIngest.getDataTable());
tabDataIngest.getDataTable().setDataFile(dataFile);
diff --git a/src/main/java/edu/harvard/iq/dataverse/ingest/tabulardata/impl/plugins/csv/CSVFileReader.java b/src/main/java/edu/harvard/iq/dataverse/ingest/tabulardata/impl/plugins/csv/CSVFileReader.java
index 48f58e5eea9..0399baddd4d 100644
--- a/src/main/java/edu/harvard/iq/dataverse/ingest/tabulardata/impl/plugins/csv/CSVFileReader.java
+++ b/src/main/java/edu/harvard/iq/dataverse/ingest/tabulardata/impl/plugins/csv/CSVFileReader.java
@@ -29,17 +29,8 @@
import javax.inject.Inject;
-// Rosuda Wrappers and Methods for R-calls to Rserve
-import org.rosuda.REngine.REXP;
-import org.rosuda.REngine.REXPMismatchException;
-import org.rosuda.REngine.RList;
-import org.rosuda.REngine.Rserve.RFileInputStream;
-import org.rosuda.REngine.Rserve.RFileOutputStream;
-import org.rosuda.REngine.Rserve.*;
-
import edu.harvard.iq.dataverse.DataTable;
import edu.harvard.iq.dataverse.datavariable.DataVariable;
-import edu.harvard.iq.dataverse.datavariable.VariableCategory;
import edu.harvard.iq.dataverse.datavariable.VariableFormatType;
import edu.harvard.iq.dataverse.datavariable.VariableServiceBean;
@@ -47,7 +38,9 @@
import edu.harvard.iq.dataverse.ingest.tabulardata.TabularDataFileReader;
import edu.harvard.iq.dataverse.ingest.tabulardata.spi.TabularDataFileReaderSpi;
import edu.harvard.iq.dataverse.ingest.tabulardata.TabularDataIngest;
-import edu.harvard.iq.dataverse.rserve.*;
+import java.math.BigDecimal;
+import java.math.MathContext;
+import java.math.RoundingMode;
import javax.naming.Context;
import javax.naming.InitialContext;
import javax.naming.NamingException;
@@ -71,13 +64,30 @@ public class CSVFileReader extends TabularDataFileReader {
VariableServiceBean varService;
private static final Logger dbglog = Logger.getLogger(CSVFileReader.class.getPackage().getName());
+ private static final int DIGITS_OF_PRECISION_DOUBLE = 15;
+ private static final String FORMAT_IEEE754 = "%+#." + DIGITS_OF_PRECISION_DOUBLE + "e";
+ private MathContext doubleMathContext;
private char delimiterChar = ',';
+
+ // DATE FORMATS
+ private static SimpleDateFormat[] DATE_FORMATS = new SimpleDateFormat[] {
+ new SimpleDateFormat("yyyy-MM-dd")
+ };
+
+ // TIME FORMATS
+ private static SimpleDateFormat[] TIME_FORMATS = new SimpleDateFormat[] {
+ // Date-time up to seconds with timezone, e.g. 2013-04-08 13:14:23 -0500
+ new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z"),
+ // Date-time up to seconds and no timezone, e.g. 2013-04-08 13:14:23
+ new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
+ };
public CSVFileReader(TabularDataFileReaderSpi originator) {
super(originator);
}
private void init() throws IOException {
+ doubleMathContext = new MathContext(DIGITS_OF_PRECISION_DOUBLE, RoundingMode.HALF_EVEN);
Context ctx = null;
try {
ctx = new InitialContext();
@@ -132,7 +142,7 @@ public int readFile(BufferedReader csvReader, DataTable dataTable, PrintWriter f
String[] valueTokens;
int lineCounter = 0;
-
+
// Read first line:
line = csvReader.readLine();
@@ -178,7 +188,10 @@ public int readFile(BufferedReader csvReader, DataTable dataTable, PrintWriter f
dataTable.setVarQuantity(new Long(varQnty));
dataTable.setDataVariables(variableList);
- boolean[] isNumericVariable = new boolean[varQnty];
+ boolean[] isNumericVariable = new boolean[varQnty];
+ boolean[] isIntegerVariable = new boolean[varQnty];
+ boolean[] isTimeVariable = new boolean[varQnty];
+ boolean[] isDateVariable = new boolean[varQnty];
for (int i = 0; i < varQnty; i++) {
// OK, let's assume that every variable is numeric;
@@ -186,11 +199,18 @@ public int readFile(BufferedReader csvReader, DataTable dataTable, PrintWriter f
// moment we find a value that's not a legit numeric one, we'll
// assume that it is in fact a String.
isNumericVariable[i] = true;
+ isIntegerVariable[i] = true;
+ isDateVariable[i] = true;
+ isTimeVariable[i] = true;
}
// First, "learning" pass.
// (we'll save the incoming stream in another temp file:)
+ SimpleDateFormat[] selectedDateTimeFormat = new SimpleDateFormat[varQnty];
+ SimpleDateFormat[] selectedDateFormat = new SimpleDateFormat[varQnty];
+
+
File firstPassTempFile = File.createTempFile("firstpass-", ".tab");
PrintWriter firstPassWriter = new PrintWriter(firstPassTempFile.getAbsolutePath());
@@ -217,6 +237,7 @@ public int readFile(BufferedReader csvReader, DataTable dataTable, PrintWriter f
if (valueTokens[i] != null && (!valueTokens[i].equals(""))) {
boolean isNumeric = false;
+ boolean isInteger = false;
if (valueTokens[i].equalsIgnoreCase("NaN")
|| valueTokens[i].equalsIgnoreCase("NA")
@@ -229,16 +250,123 @@ public int readFile(BufferedReader csvReader, DataTable dataTable, PrintWriter f
try {
Double testDoubleValue = new Double(valueTokens[i]);
isNumeric = true;
- } catch (Exception ex) {
+ } catch (NumberFormatException ex) {
// the token failed to parse as a double number;
// so we'll have to assume it's just a string variable.
}
}
+
if (!isNumeric) {
isNumericVariable[i] = false;
+ } else if (isIntegerVariable[i]) {
+ try {
+ Integer testIntegerValue = new Integer(valueTokens[i]);
+ isInteger = true;
+ } catch (NumberFormatException ex) {
+ // the token failed to parse as an integer number;
+ // we'll assume it's a non-integer numeric...
+ }
+ if (!isInteger) {
+ isIntegerVariable[i] = false;
+ }
}
}
- }
+ }
+
+ // And if we have concluded that this is not a numeric column,
+ // let's see if we can parse the string token as a date or
+ // a date-time value:
+
+ if (!isNumericVariable[i]) {
+
+ Date dateResult = null;
+
+ if (isTimeVariable[i]) {
+ if (valueTokens[i] != null && (!valueTokens[i].equals(""))) {
+ boolean isTime = false;
+
+ if (selectedDateTimeFormat[i] != null) {
+ dbglog.info("will try selected format " + selectedDateTimeFormat[i].toPattern());
+ ParsePosition pos = new ParsePosition(0);
+ dateResult = selectedDateTimeFormat[i].parse(valueTokens[i], pos);
+
+ if (dateResult == null) {
+ dbglog.info(selectedDateTimeFormat[i].toPattern() + ": null result.");
+ } else if (pos.getIndex() != valueTokens[i].length()) {
+ dbglog.info(selectedDateTimeFormat[i].toPattern() + ": didn't parse to the end - bad time zone?");
+ } else {
+ // OK, successfully parsed a value!
+ isTime = true;
+ dbglog.info(selectedDateTimeFormat[i].toPattern() + " worked!");
+ }
+ } else {
+ for (SimpleDateFormat format : TIME_FORMATS) {
+ dbglog.info("will try format " + format.toPattern());
+ ParsePosition pos = new ParsePosition(0);
+ dateResult = format.parse(valueTokens[i], pos);
+ if (dateResult == null) {
+ dbglog.info(format.toPattern() + ": null result.");
+ continue;
+ }
+ if (pos.getIndex() != valueTokens[i].length()) {
+ dbglog.info(format.toPattern() + ": didn't parse to the end - bad time zone?");
+ continue;
+ }
+ // OK, successfully parsed a value!
+ isTime = true;
+ dbglog.info(format.toPattern() + " worked!");
+ selectedDateTimeFormat[i] = format;
+ break;
+ }
+ }
+ if (!isTime) {
+ isTimeVariable[i] = false;
+ // OK, the token didn't parse as a time value;
+ // But we will still try to parse it as a date, below.
+ // unless of course we have already decided that this column
+ // is NOT a date.
+ } else {
+ // And if it is a time value, we are going to assume it's
+ // NOT a date.
+ isDateVariable[i] = false;
+ }
+ }
+ }
+
+ if (isDateVariable[i]) {
+ if (valueTokens[i] != null && (!valueTokens[i].equals(""))) {
+ boolean isDate = false;
+
+ // TODO:
+ // Strictly speaking, we should be doing the same thing
+ // here as with the time formats above; select the
+ // first one that works, then insist that all the
+ // other values in this column match it... but we
+ // only have one, as of now, so it should be ok.
+ // -- L.A. 4.0 beta
+
+ for (SimpleDateFormat format : DATE_FORMATS) {
+ // Strict parsing - it will throw an
+ // exception if it doesn't parse!
+ format.setLenient(false);
+ dbglog.info("will try format " + format.toPattern());
+ try {
+ dateResult = format.parse(valueTokens[i]);
+ dbglog.info("format " + format.toPattern() + " worked!");
+ isDate = true;
+ selectedDateFormat[i] = format;
+ break;
+ } catch (ParseException ex) {
+ //Do nothing
+ dbglog.info("format " + format.toPattern() + " didn't work.");
+ }
+ }
+ if (!isDate) {
+ isDateVariable[i] = false;
+ }
+ }
+ }
+ }
}
firstPassWriter.println(line);
@@ -255,7 +383,21 @@ public int readFile(BufferedReader csvReader, DataTable dataTable, PrintWriter f
for (int i = 0; i < varQnty; i++) {
if (isNumericVariable[i]) {
dataTable.getDataVariables().get(i).setVariableFormatType(varService.findVariableFormatTypeByName("numeric"));
- dataTable.getDataVariables().get(i).setVariableIntervalType(varService.findVariableIntervalTypeByName("continuous"));
+
+ if (isIntegerVariable[i]) {
+ dataTable.getDataVariables().get(i).setVariableIntervalType(varService.findVariableIntervalTypeByName("discrete"));
+ } else {
+ dataTable.getDataVariables().get(i).setVariableIntervalType(varService.findVariableIntervalTypeByName("continuous"));
+ }
+ } else if (isDateVariable[i] && selectedDateFormat[i] != null) {
+ // Dates are still Strings, i.e., they are "character" and "discrete";
+ // But we add special format values for them:
+ dataTable.getDataVariables().get(i).setFormatSchemaName(DATE_FORMATS[0].toPattern());
+ dataTable.getDataVariables().get(i).setFormatCategory("date");
+ } else if (isTimeVariable[i] && selectedDateTimeFormat[i] != null) {
+ // Same for time values:
+ dataTable.getDataVariables().get(i).setFormatSchemaName(selectedDateTimeFormat[i].toPattern());
+ dataTable.getDataVariables().get(i).setFormatCategory("time");
}
}
@@ -303,13 +445,60 @@ public int readFile(BufferedReader csvReader, DataTable dataTable, PrintWriter f
// numeric zero:
caseRow[i] = "0";
} else {
- try {
- Double testDoubleValue = new Double(valueTokens[i]);
- caseRow[i] = testDoubleValue.toString();
- } catch (Exception ex) {
- throw new IOException ("Failed to parse a value recognized as numeric in the first pass! (?)");
+ if (isIntegerVariable[i]) {
+ try {
+ Integer testIntegerValue = new Integer(valueTokens[i]);
+ caseRow[i] = testIntegerValue.toString();
+ } catch (NumberFormatException ex) {
+ throw new IOException ("Failed to parse a value recognized as an integer in the first pass! (?)");
+ }
+ } else {
+ try {
+ Double testDoubleValue = new Double(valueTokens[i]);
+ if (testDoubleValue.equals(0.0)) {
+ caseRow[i] = "0.0";
+ } else {
+ // One possible implementation:
+ //
+ // Round our fractional values to 15 digits
+ // (minimum number of digits of precision guaranteed by
+ // type Double) and format the resulting representations
+ // in an IEEE 754-like "scientific notation" - for ex.,
+ // 753.24 will be encoded as 7.5324e2
+ BigDecimal testBigDecimal = new BigDecimal(valueTokens[i], doubleMathContext);
+ /*
+ // an experiment - what's gonna happen if we just
+ // use the string representation of the bigdecimal object
+ // above?
+ caseRow[i] = testBigDecimal.toString();
+ */
+
+ caseRow[i] = String.format(FORMAT_IEEE754, testBigDecimal);
+
+ // Strip meaningless zeros and extra + signs:
+ caseRow[i] = caseRow[i].replaceFirst("00*e", "e");
+ caseRow[i] = caseRow[i].replaceFirst("\\.e", ".0e");
+ caseRow[i] = caseRow[i].replaceFirst("e\\+00", "");
+ caseRow[i] = caseRow[i].replaceFirst("^\\+", "");
+ }
+
+ } catch (NumberFormatException ex) {
+ throw new IOException("Failed to parse a value recognized as numeric in the first pass! (?)");
+ }
}
}
+ } else if (isTimeVariable[i] || isDateVariable[i]) {
+ // Time and Dates are stored NOT quoted (don't ask).
+ if (valueTokens[i] != null) {
+ String charToken = valueTokens[i];
+ // Dealing with quotes:
+ // remove the leading and trailing quotes, if present:
+ charToken = charToken.replaceFirst("^\"*", "");
+ charToken = charToken.replaceFirst("\"*$", "");
+ caseRow[i] = charToken;
+ } else {
+ caseRow[i] = "";
+ }
} else {
// Treat as a String:
// Strings are stored in tab files quoted;
diff --git a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonParser.java b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonParser.java
index 3ca3f24bb48..8a01b3b7b9b 100644
--- a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonParser.java
+++ b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonParser.java
@@ -1,14 +1,23 @@
package edu.harvard.iq.dataverse.util.json;
import edu.harvard.iq.dataverse.ControlledVocabularyValue;
+import edu.harvard.iq.dataverse.DatasetAuthor;
import edu.harvard.iq.dataverse.DatasetField;
import edu.harvard.iq.dataverse.DatasetFieldCompoundValue;
import edu.harvard.iq.dataverse.DatasetFieldServiceBean;
import edu.harvard.iq.dataverse.DatasetFieldType;
import edu.harvard.iq.dataverse.DatasetFieldValue;
+import edu.harvard.iq.dataverse.DatasetVersion;
+import edu.harvard.iq.dataverse.MetadataBlockServiceBean;
+import java.text.DateFormat;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
import java.util.Collections;
+import java.util.Date;
import java.util.LinkedList;
import java.util.List;
+import java.util.Set;
import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.json.JsonString;
@@ -19,13 +28,85 @@
*/
public class JsonParser {
+ private final DateFormat dateFormat = new SimpleDateFormat( JsonPrinter.TIME_FORMAT_STRING );
+
DatasetFieldServiceBean datasetFieldSvc;
+ MetadataBlockServiceBean blockService;
- public JsonParser(DatasetFieldServiceBean datasetFieldSvc) {
+ public JsonParser(DatasetFieldServiceBean datasetFieldSvc, MetadataBlockServiceBean blockService) {
this.datasetFieldSvc = datasetFieldSvc;
+ this.blockService = blockService;
+ }
+
+ public DatasetVersion parseDatasetVersion( JsonObject obj ) throws JsonParseException {
+ try {
+ DatasetVersion dsv = new DatasetVersion();
+
+ dsv.setArchiveNote( obj.getString("archiveNote", null) );
+ dsv.setDeaccessionLink( obj.getString("deaccessionLink", null) );
+ dsv.setVersion( parseLong(obj.getString("version")) );
+ dsv.setVersionNumber( parseLong(obj.getString("versionNumber")) );
+ dsv.setMinorVersionNumber( parseLong(obj.getString("minorVersionNumber")) );
+ dsv.setId( parseLong(obj.getString("id")) );
+
+ String versionStateStr = obj.getString("versionState");
+ if ( versionStateStr != null ) {
+ dsv.setVersionState( DatasetVersion.VersionState.valueOf(versionStateStr) );
+ }
+
+ dsv.setReleaseTime( parseDate(obj.getString("releaseDate")) );
+ dsv.setLastUpdateTime( parseDate(obj.getString("lastUpdateTime")) );
+ dsv.setCreateTime( parseDate(obj.getString("createTime")) );
+ dsv.setArchiveTime( parseDate(obj.getString("archiveTime")) );
+
+ dsv.setDatasetFields( parseMetadataBlocks(obj.getJsonObject("metadataBlocks")) );
+
+ // parse authors
+ JsonArray authorsJson = obj.getJsonArray("authors");
+ List authors = new ArrayList<>( authorsJson.size() );
+ for ( JsonObject authorJson : authorsJson.getValuesAs(JsonObject.class) ) {
+ DatasetAuthor author = new DatasetAuthor();
+ author.setAffiliation( parseField( authorJson.getJsonObject("affiliation")) );
+ author.setIdType( authorJson.getString("idType") );
+ author.setIdValue( authorJson.getString("idValue"));
+ author.setDisplayOrder( parsePrimitiveInt(authorJson.getString("displayOrder"), 0) );
+ author.setName( parseField( authorJson.getJsonObject("name")) );
+
+ authors.add( author );
+ author.setDatasetVersion(dsv);
+ }
+ dsv.setDatasetAuthors(authors);
+
+ // parse distributors
+ // CONTPOINT
+
+ return dsv;
+
+ } catch (ParseException ex) {
+ throw new JsonParseException("Error parsing date:" + ex.getMessage(), ex);
+ } catch ( NumberFormatException ex ) {
+ throw new JsonParseException("Error parsing number:" + ex.getMessage(), ex);
+ }
+ }
+
+ public List parseMetadataBlocks( JsonObject json ) throws JsonParseException {
+ Set keys = json.keySet();
+ List fields = new LinkedList<>();
+
+ for ( String blockName : keys ) {
+ blockService.findByName(blockName);
+ JsonObject blockJson = json.getJsonObject(blockName);
+ JsonArray fieldsJson = blockJson.getJsonArray("fields");
+ for ( JsonObject fieldJson : fieldsJson.getValuesAs(JsonObject.class) ) {
+ fields.add( parseField(fieldJson) );
+ }
+ }
+
+ return fields;
}
public DatasetField parseField( JsonObject json ) throws JsonParseException {
+ if ( json == null ) return null;
DatasetField ret = new DatasetField();
DatasetFieldType type = datasetFieldSvc.findByName(json.getString("typeName"));
if ( type == null ) {
@@ -136,4 +217,16 @@ public List parseControlledVocabularyValue( DatasetFi
}
}
+ Date parseDate( String str ) throws ParseException {
+ return str==null ? null : dateFormat.parse(str);
+ }
+
+ Long parseLong( String str ) throws NumberFormatException {
+ return (str==null) ? null : Long.valueOf(str);
+ }
+
+ int parsePrimitiveInt( String str, int defaultValue ) {
+ return str==null ? defaultValue : Integer.parseInt(str);
+ }
+
}
diff --git a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonPrinter.java b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonPrinter.java
index f5eb33d8066..596399d8fa3 100644
--- a/src/main/java/edu/harvard/iq/dataverse/util/json/JsonPrinter.java
+++ b/src/main/java/edu/harvard/iq/dataverse/util/json/JsonPrinter.java
@@ -4,6 +4,7 @@
import edu.harvard.iq.dataverse.DataFile;
import edu.harvard.iq.dataverse.Dataset;
import edu.harvard.iq.dataverse.DatasetAuthor;
+import edu.harvard.iq.dataverse.DatasetDistributor;
import edu.harvard.iq.dataverse.DatasetFieldType;
import edu.harvard.iq.dataverse.DatasetField;
import edu.harvard.iq.dataverse.DatasetFieldCompoundValue;
@@ -30,17 +31,18 @@
import static edu.harvard.iq.dataverse.util.json.NullSafeJsonBuilder.jsonObjectBuilder;
import java.util.Deque;
-import java.util.HashMap;
import java.util.LinkedList;
import java.util.Map;
+import javax.json.JsonObject;
/**
* Convert objects to Json.
* @author michael
*/
public class JsonPrinter {
-
- private static final DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss X");
+ public static final String TIME_FORMAT_STRING = "yyyy-MM-dd hh:mm:ss X";
+
+ private static final DateFormat dateFormat = new SimpleDateFormat(TIME_FORMAT_STRING);
public static final BriefJsonPrinter brief = new BriefJsonPrinter();
@@ -127,18 +129,35 @@ public static JsonObjectBuilder json( DatasetVersion dsv ) {
JsonObjectBuilder bld = jsonObjectBuilder()
.add("id", dsv.getId())
.add("version", dsv.getVersion() )
+ .add("versionNumber", dsv.getVersionNumber())
+ .add("versionMinorNumber", dsv.getMinorVersionNumber())
.add("versionState", dsv.getVersionState().name() )
.add("versionNote", dsv.getVersionNote())
.add("title", dsv.getTitle())
.add("archiveNote", dsv.getArchiveNote())
.add("deaccessionLink", dsv.getDeaccessionLink())
.add("distributionDate", dsv.getDistributionDate())
- .add("distributorNames", dsv.getDistributorNames())
.add("productionDate", dsv.getProductionDate())
.add("UNF", dsv.getUNF())
.add("archiveTime", format(dsv.getArchiveTime()) )
+ .add("lastUpdateTime", format(dsv.getLastUpdateTime()) )
+ .add("releaseTime", format(dsv.getReleaseTime()) )
+ .add("createTime", format(dsv.getCreateTime()) )
;
-
+
+ // Add distributors
+ List dists = dsv.getDatasetDistributors();
+ if ( ! dists.isEmpty() ) {
+ if ( dists.size() > 1 ) {
+ Collections.sort(dists, DatasetDistributor.DisplayOrder );
+ }
+ JsonArrayBuilder ab = Json.createArrayBuilder();
+ for ( DatasetDistributor dist : dists ) {
+ ab.add( json(dist) );
+ }
+ bld.add( "distributors", ab );
+ }
+
// Add authors
List auth = dsv.getDatasetAuthors();
if ( ! auth.isEmpty() ) {
@@ -157,6 +176,18 @@ public static JsonObjectBuilder json( DatasetVersion dsv ) {
return bld;
}
+ public static JsonObjectBuilder json( DatasetDistributor dist ) {
+ return jsonObjectBuilder()
+ .add( "displayOrder",dist.getDisplayOrder())
+ .add( "version",dist.getVersion())
+ .add( "abbreviation", json(dist.getAbbreviation()) )
+ .add( "affiliation", json(dist.getAffiliation()) )
+ .add( "logo", json(dist.getLogo()) )
+ .add( "name", json(dist.getName()) )
+ .add( "url", json(dist.getUrl()) )
+ ;
+ }
+
public static JsonObjectBuilder jsonByBlocks( List fields ) {
JsonObjectBuilder blocksBld = jsonObjectBuilder();
@@ -176,7 +207,7 @@ public static JsonObjectBuilder jsonByBlocks( List fields ) {
* @param fields
* @return JSON Object builder with the block and fields information.
*/
- public static JsonObjectBuilder json( MetadataBlock block, List fields) {
+ public static JsonObjectBuilder json( MetadataBlock block, List fields ) {
JsonObjectBuilder blockBld = jsonObjectBuilder();
blockBld.add("displayName", block.getDisplayName());
@@ -254,9 +285,13 @@ public static JsonObjectBuilder json( FileMetadata fmd ) {
}
public static JsonObjectBuilder json( DataFile df ) {
+ String fileName = "";
+ if (df.getFileMetadata() != null) {
+ fileName = df.getFileMetadata().getLabel();
+ }
return jsonObjectBuilder()
.add("id", df.getId() )
- .add("name", df.getName())
+ .add("name", fileName)
.add("contentType", df.getContentType())
.add("filename", df.getFilename())
.add("originalFileFormat", df.getOriginalFileFormat())
@@ -271,8 +306,8 @@ public static JsonObjectBuilder json( DatasetAuthor da ) {
return jsonObjectBuilder()
.add( "idType", da.getIdType() )
.add( "idValue", da.getIdValue() )
- .addStrValue( "name", da.getName() )
- .addStrValue( "affiliation", da.getAffiliation() )
+ .add( "name", json(da.getName()) )
+ .add( "affiliation", json(da.getAffiliation()) )
.add( "displayOrder", da.getDisplayOrder() )
;
}
diff --git a/src/main/webapp/WEB-INF/glassfish-web.xml b/src/main/webapp/WEB-INF/glassfish-web.xml
index 618629b364f..80a56f29cd0 100644
--- a/src/main/webapp/WEB-INF/glassfish-web.xml
+++ b/src/main/webapp/WEB-INF/glassfish-web.xml
@@ -10,4 +10,5 @@
+
diff --git a/src/main/webapp/dataset.xhtml b/src/main/webapp/dataset.xhtml
index d129e81c9e6..8dd80a36f76 100644
--- a/src/main/webapp/dataset.xhtml
+++ b/src/main/webapp/dataset.xhtml
@@ -58,7 +58,7 @@
+ and permissionServiceBean.on(DatasetPage.dataset).canIssueCommand('UpdateDatasetCommand')}">