diff --git a/conf/solr/8.11.1/readme.md b/conf/solr/8.11.1/readme.md deleted file mode 100644 index 4457cf9a7df..00000000000 --- a/conf/solr/8.11.1/readme.md +++ /dev/null @@ -1 +0,0 @@ -Please see the dev guide for what to do with Solr config files. \ No newline at end of file diff --git a/conf/solr/8.11.1/solrconfig.xml b/conf/solr/8.11.1/solrconfig.xml deleted file mode 100644 index 3e4e5adc7b6..00000000000 --- a/conf/solr/8.11.1/solrconfig.xml +++ /dev/null @@ -1,1410 +0,0 @@ - - - - - - - - - 7.3.0 - - - - - - - - - - - - - - - - - - - - ${solr.data.dir:} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ${solr.lock.type:native} - - - - - - - - - - - - - - - - - - - - - - - - - - - - - ${solr.ulog.dir:} - ${solr.ulog.numVersionBuckets:65536} - - - - - ${solr.autoCommit.maxTime:15000} - false - - - - - - ${solr.autoSoftCommit.maxTime:-1} - - - - - - - - - - - - - - 1024 - - - - - - - - - - - - - - - - - - - - - - - - true - - - - - - 20 - - - 200 - - - - - - - - - - - - - - - - false - - - - - - - - - - - - - - - - - - - - - - explicit - 10 - edismax - 0.075 - - dvName^400 - authorName^180 - dvSubject^190 - dvDescription^180 - dvAffiliation^170 - title^130 - subject^120 - keyword^110 - topicClassValue^100 - dsDescriptionValue^90 - authorAffiliation^80 - publicationCitation^60 - producerName^50 - fileName^30 - fileDescription^30 - variableLabel^20 - variableName^10 - _text_^1.0 - - - dvName^200 - authorName^100 - dvSubject^100 - dvDescription^100 - dvAffiliation^100 - title^75 - subject^75 - keyword^75 - topicClassValue^75 - dsDescriptionValue^75 - authorAffiliation^75 - publicationCitation^75 - producerName^75 - - - - isHarvested:false^25000 - - - - - - - - - - - - - - - - - - explicit - json - true - - - - - - - - explicit - - - - - - _text_ - - - - - - - true - ignored_ - _text_ - - - - - - - - - text_general - - - - - - default - _text_ - solr.DirectSolrSpellChecker - - internal - - 0.5 - - 2 - - 1 - - 5 - - 4 - - 0.01 - - - - - - - - - - - - default 
- on - true - 10 - 5 - 5 - true - true - 10 - 5 - - - spellcheck - - - - - - - - - - true - - - tvComponent - - - - - - - - - - - - true - false - - - terms - - - - - - - - string - - - - - - explicit - - - elevator - - - - - - - - - - - 100 - - - - - - - - 70 - - 0.5 - - [-\w ,/\n\"']{20,200} - - - - - - - ]]> - ]]> - - - - - - - - - - - - - - - - - - - - - - - - ,, - ,, - ,, - ,, - ,]]> - ]]> - - - - - - 10 - .,!? - - - - - - - WORD - - - en - US - - - - - - - - - - - - - - [^\w-\.] - _ - - - - - - - yyyy-MM-dd'T'HH:mm:ss.SSSZ - yyyy-MM-dd'T'HH:mm:ss,SSSZ - yyyy-MM-dd'T'HH:mm:ss.SSS - yyyy-MM-dd'T'HH:mm:ss,SSS - yyyy-MM-dd'T'HH:mm:ssZ - yyyy-MM-dd'T'HH:mm:ss - yyyy-MM-dd'T'HH:mmZ - yyyy-MM-dd'T'HH:mm - yyyy-MM-dd HH:mm:ss.SSSZ - yyyy-MM-dd HH:mm:ss,SSSZ - yyyy-MM-dd HH:mm:ss.SSS - yyyy-MM-dd HH:mm:ss,SSS - yyyy-MM-dd HH:mm:ssZ - yyyy-MM-dd HH:mm:ss - yyyy-MM-dd HH:mmZ - yyyy-MM-dd HH:mm - yyyy-MM-dd - - - - - - - - - - - - - - - - - - - - - - - - - - - - - text/plain; charset=UTF-8 - - - - - ${velocity.template.base.dir:} - ${velocity.solr.resource.loader.enabled:true} - ${velocity.params.resource.loader.enabled:false} - - - - - 5 - - - - - - - - - - - - - - diff --git a/doc/sphinx-guides/source/_static/installation/files/etc/systemd/solr.service b/doc/sphinx-guides/source/_static/installation/files/etc/systemd/solr.service index d89ee108377..8bb21e38083 100644 --- a/doc/sphinx-guides/source/_static/installation/files/etc/systemd/solr.service +++ b/doc/sphinx-guides/source/_static/installation/files/etc/systemd/solr.service @@ -8,6 +8,7 @@ Type = forking WorkingDirectory = /usr/local/solr/solr-8.11.1 ExecStart = /usr/local/solr/solr-8.11.1/bin/solr start -m 1g -j "jetty.host=127.0.0.1" ExecStop = /usr/local/solr/solr-8.11.1/bin/solr stop +Environment="SOLR_OPTS=-Dsolr.jetty.request.header.size=102400" LimitNOFILE=65000 LimitNPROC=65000 Restart=on-failure diff --git a/doc/sphinx-guides/source/admin/metadatacustomization.rst 
b/doc/sphinx-guides/source/admin/metadatacustomization.rst index 53bc32eca3d..d635dbc3643 100644 --- a/doc/sphinx-guides/source/admin/metadatacustomization.rst +++ b/doc/sphinx-guides/source/admin/metadatacustomization.rst @@ -516,7 +516,7 @@ the Solr schema configuration, including any enabled metadata schemas: ``curl "http://localhost:8080/api/admin/index/solr/schema"`` -You can use :download:`update-fields.sh <../../../../conf/solr/8.11.1/update-fields.sh>` to easily add these to the +You can use :download:`update-fields.sh <../../../../modules/solr-configset/src/main/scripts/update-fields.sh>` to easily add these to the Solr schema you installed for your Dataverse installation. The script needs a target XML file containing your Solr schema. (See the :doc:`/installation/prerequisites/` section of diff --git a/doc/sphinx-guides/source/conf.py b/doc/sphinx-guides/source/conf.py index 2c2ddf1bdf6..1cd1505a69f 100755 --- a/doc/sphinx-guides/source/conf.py +++ b/doc/sphinx-guides/source/conf.py @@ -18,6 +18,9 @@ sys.path.insert(0, os.path.abspath('../../')) import sphinx_bootstrap_theme +import xml.etree.ElementTree as et +pom = et.parse("../../../modules/dataverse-parent/pom.xml") +ns = {"mvn": "http://maven.apache.org/POM/4.0.0"} # Activate the theme. # html_theme = 'bootstrap' @@ -438,4 +441,5 @@ rst_prolog = """ .. |toctitle| replace:: Contents: .. |anotherSub| replace:: Yes, there can be multiple. -""" +.. 
|solr_version| replace:: {solr_version} +""".format(solr_version=pom.find("./mvn:properties/mvn:solr.version", ns).text) diff --git a/doc/sphinx-guides/source/installation/prerequisites.rst b/doc/sphinx-guides/source/installation/prerequisites.rst index 0d1610be8bf..0061c28878e 100644 --- a/doc/sphinx-guides/source/installation/prerequisites.rst +++ b/doc/sphinx-guides/source/installation/prerequisites.rst @@ -154,86 +154,164 @@ Configuring Database Access for the Dataverse Installation (and the Dataverse So Solr ---- -The Dataverse Software search index is powered by Solr. +The Dataverse Software search index is powered by `Solr `_. Supported Versions ================== -The Dataverse Software has been tested with Solr version 8.11.1. Future releases in the 8.x series are likely to be compatible; however, this cannot be confirmed until they are officially tested. Major releases above 8.x (e.g. 9.x) are not supported. +The Dataverse Software has been tested with Solr version |solr_version|. Future releases in the 8.x series are likely to +be compatible; however, this cannot be confirmed until they are officially tested. Major releases above 8.x (e.g. 9.x) +are not supported. + +- Releases up to 4.20 supported Solr 7.x.x. +- Releases 5.0 to 5.3 supported Solr 7.7.2. +- Releases 5.4 to 5.9 supported Solr 8.8.1. +- Releases since 5.10 support Solr |solr_version| + Installing Solr =============== -You should not run Solr as root. Create a user called ``solr`` and a directory to install Solr into:: +Note: this guide describes setting up a small installation, using the Solr *standalone* mode. For larger installations or +higher availability requirements, please take a look at `Solr Cloud `_ mode. + +Optional Step 0: - useradd solr +- Solr launches asynchronously and attempts to use the ``lsof`` binary to watch for its own availability. + Installation of this package isn't required but will prevent a warning in the log at startup. 
(Use ``dnf``, ``yum`` or + ``apt-get`` to install this standard package.) +- Solr 8.x runs on Java 11 (same as your Dataverse installation). Remember to install it when running Solr on a + separated machine. + +**Step 1**: You should **not** run Solr as ``root``! Create a user and group called ``solr`` (as root) and a directory to +install Solr into: + +.. parsed-literal:: mkdir /usr/local/solr - chown solr:solr /usr/local/solr + groupadd -r --gid 8983 solr + useradd -r --home-dir /usr/local/solr --uid 8983 --gid 8983 solr + chown solr: /usr/local/solr -Become the ``solr`` user and then download and configure Solr:: +**Step 2:** Become the ``solr`` user and then download and configure Solr: - su - solr +.. parsed-literal:: + sudo -u solr -s cd /usr/local/solr - wget https://archive.apache.org/dist/lucene/solr/8.11.1/solr-8.11.1.tgz - tar xvzf solr-8.11.1.tgz - cd solr-8.11.1 - cp -r server/solr/configsets/_default server/solr/collection1 + wget https://archive.apache.org/dist/lucene/solr/|solr_version|/solr-|solr_version|.tgz + tar xvzf solr-|solr_version|.tgz + exit -You should already have a "dvinstall.zip" file that you downloaded from https://github.com/IQSS/dataverse/releases . Unzip it into ``/tmp``. Then copy the files into place:: - cp /tmp/dvinstall/schema*.xml /usr/local/solr/solr-8.11.1/server/solr/collection1/conf - cp /tmp/dvinstall/solrconfig.xml /usr/local/solr/solr-8.11.1/server/solr/collection1/conf +Solr Init Script +================ -Note: The Dataverse Project team has customized Solr to boost results that come from certain indexed elements inside the Dataverse installation, for example prioritizing results from Dataverse collections over Datasets. If you would like to remove this, edit your ``solrconfig.xml`` and remove the ```` element and its contents. If you have ideas about how this boosting could be improved, feel free to contact us through our Google Group https://groups.google.com/forum/#!forum/dataverse-dev . 
+**Step 3:** Once you installed Solr, you need to add to the init system to start on boot, stop on shutdown etc. Please choose the +right option for your underlying Linux operating system. *It will not be necessary to execute both!* -A Dataverse installation requires a change to the ``jetty.xml`` file that ships with Solr. Edit ``/usr/local/solr/solr-8.11.1/server/etc/jetty.xml`` , increasing ``requestHeaderSize`` from ``8192`` to ``102400`` +SystemD based systems +^^^^^^^^^^^^^^^^^^^^^ -Solr will warn about needing to increase the number of file descriptors and max processes in a production environment but will still run with defaults. We have increased these values to the recommended levels by adding ulimit -n 65000 to the init script, and the following to ``/etc/security/limits.conf``:: +For systems running systemd (like RedHat or derivatives since 7, Debian since 9, Ubuntu since 15.04), as root, download +:download:`solr.service<../_static/installation/files/etc/systemd/solr.service>` and place it in ``/tmp``. Then start +Solr and configure it to start at boot with the following commands (run as root again): - solr soft nproc 65000 - solr hard nproc 65000 - solr soft nofile 65000 - solr hard nofile 65000 +.. parsed-literal:: + cp /tmp/solr.service /etc/systemd/system + systemctl daemon-reload + systemctl start solr.service + systemctl enable solr.service -On operating systems which use systemd such as RHEL/derivative, you may then add a line like LimitNOFILE=65000 for the number of open file descriptors and a line with LimitNPROC=65000 for the max processes to the systemd unit file, or adjust the limits on a running process using the prlimit tool:: +SysVinit based systems +^^^^^^^^^^^^^^^^^^^^^^ - # sudo prlimit --pid pid --nofile=65000:65000 +For (older) systems using init.d (like CentOS 6 or Devuan), download this :download:`Solr init script <../_static/installation/files/etc/init.d/solr>` +and place it in ``/tmp``. 
Then start Solr and configure it to start at boot with the following commands (run as root again): -Solr launches asynchronously and attempts to use the ``lsof`` binary to watch for its own availability. Installation of this package isn't required but will prevent a warning in the log at startup:: +.. parsed-literal:: + cp /tmp/solr /etc/init.d + service start solr + chkconfig solr on - # yum install lsof -Finally, you need to tell Solr to create the core "collection1" on startup:: +Creating Solr Core +================== - echo "name=collection1" > /usr/local/solr/solr-8.11.1/server/solr/collection1/core.properties +Solr Cores hold the actual data of your index. They get created from templates called "config sets". We provide a +template that has been tuned carefully for usage within a Dataverse installation and is distributed as a ZIP file. -Solr Init Script -================ +Note: The Dataverse Project team has customized the cores ``solrconfig.xml`` to boost Solr search results that come from +certain indexed elements inside the Dataverse installation, for example prioritizing results from Dataverse collections +over Datasets. If you would like to remove this, edit this file and remove the ```` element and its +contents. If you have ideas about how this boosting could be improved, feel free to contact us through our +`Google Group `_. -Please choose the right option for your underlying Linux operating system. -It will not be necessary to execute both! +**Step 4:** If not already done, please download the latest release package ``dvinstall.zip`` at +https://github.com/IQSS/dataverse/releases. -For systems running systemd (like RedHat or derivatives since 7, Debian since 9, Ubuntu since 15.04), as root, download :download:`solr.service<../_static/installation/files/etc/systemd/solr.service>` and place it in ``/tmp``. 
Then start Solr and configure it to start at boot with the following commands:: +**Step 5:** Extract our Solr Dataverse config set from it and unpack the configset directory: - cp /tmp/solr.service /etc/systemd/system - systemctl daemon-reload - systemctl start solr.service - systemctl enable solr.service +.. parsed-literal:: + sudo -u solr -s + cd solr-|solr_version|/server/solr/configsets + unzip path/to/dvinstall.zip solr-configset.zip + unzip solr-configset.zip -For systems using init.d (like CentOS 6), download this :download:`Solr init script <../_static/installation/files/etc/init.d/solr>` and place it in ``/tmp``. Then start Solr and configure it to start at boot with the following commands:: +**Step 6:** Create the core within your running Solr instance: - cp /tmp/solr /etc/init.d - service start solr - chkconfig solr on +.. parsed-literal:: + /usr/local/solr/solr-|solr_version|/bin/solr create -c collection1 -d dataverse + + +Tuning Solr +=========== + +The next steps are mostly extracted from the recommendations for +`"Taking Solr to Production" `_. + +They are mostly necessary for older Linux distributions using System V init systems. If you are using our +SystemD unit file (see above), they may be skipped. + +1. A Dataverse installation requires a change to the ``jetty.xml`` file that ships with Solr. + Edit ``/usr/local/solr/*/server/etc/jetty.xml`` , increasing ``requestHeaderSize`` from ``8192`` to ``102400``. + + Alternative: use ``SOLR_OPTS`` to set the system property (see Solr docs linked above). + +2. Solr will warn about needing to increase the number of file descriptors and max processes in a production environment + but will still run with defaults. We have increased these values to the recommended levels by adding ulimit -n 65000 + to the init script, and the following to ``/etc/security/limits.conf``: + + .. 
parsed-literal:: + solr soft nproc 65000 + solr hard nproc 65000 + solr soft nofile 65000 + solr hard nofile 65000 + + Note: This is not necessary with SystemD, which ignores these settings (see unit file instead)! + If not using our unit file, you may need to add a line like ``LimitNOFILE=65000`` for the number of open file + descriptors and a line with ``LimitNPROC=65000`` for the max processes to the systemd unit file. + + Alternative: adjust the limits on a running process using the ``prlimit`` tool: + + .. parsed-literal:: + sudo prlimit --pid pid --nofile=65000:65000 Securing Solr ============= -Our sample init script and systemd service file linked above tell Solr to only listen on localhost (127.0.0.1). We strongly recommend that you also use a firewall to block access to the Solr port (8983) from outside networks, for added redundancy. +Our sample init script and systemd service file linked above tell Solr to only listen on localhost (127.0.0.1). We +strongly recommend that you also use a firewall to block access to the Solr port (8983) from outside networks, for +added redundancy. -It is **very important** not to allow direct access to the Solr API from outside networks! Otherwise, any host that can reach the Solr port (8983 by default) can add or delete data, search unpublished data, and even reconfigure Solr. For more information, please see https://lucene.apache.org/solr/guide/7_3/securing-solr.html. A particularly serious security issue that has been identified recently allows a potential intruder to remotely execute arbitrary code on the system. See `RCE in Solr via Velocity Template `_ for more information. +It is **very important** not to allow direct access to the Solr API from outside networks! Otherwise, any host that can +reach the Solr port (8983 by default) can add or delete data, search unpublished data, and even reconfigure Solr. For +more information, please see https://lucene.apache.org/solr/guide/7_3/securing-solr.html. 
A particularly serious +security issue that has been identified recently allows a potential intruder to remotely execute arbitrary code on the +system. See `RCE in Solr via Velocity Template `_ +for more information. -If you're running your Dataverse installation across multiple service hosts you'll want to remove the jetty.host argument (``-j jetty.host=127.0.0.1``) from the startup command line, but make sure Solr is behind a firewall and only accessible by the Dataverse installation host(s), by specific ip address(es). +If you're running your Dataverse installation across multiple service hosts you'll want to remove the jetty.host +argument (``-j jetty.host=127.0.0.1``) from the startup command line, but make sure Solr is behind a firewall and only +accessible by the Dataverse installation host(s), by specific ip address(es). We additionally recommend that the Solr service account's shell be disabled, as it isn't necessary for daily operation:: @@ -247,7 +325,10 @@ or simply prepend each command you would run as the Solr user with "sudo -u solr # sudo -u solr command -Finally, we would like to reiterate that it is simply never a good idea to run Solr as root! Running the process as a non-privileged user would substantially minimize any potential damage even in the event that the instance is compromised. +Finally, we would like to reiterate that it is simply never a good idea to run Solr as root! Running the process as +a non-privileged user would substantially minimize any potential damage even in the event that the instance is compromised. 
+ + jq -- diff --git a/modules/dataverse-parent/pom.xml b/modules/dataverse-parent/pom.xml index 3a8e0bc1d06..f3c661b3791 100644 --- a/modules/dataverse-parent/pom.xml +++ b/modules/dataverse-parent/pom.xml @@ -15,6 +15,7 @@ ../../scripts/zipdownload ../container-base ../dataverse-spi + ../solr-configset 0.43.0 + collection1 + dataverse @@ -407,7 +421,6 @@ --> - @@ -433,8 +446,6 @@ - - diff --git a/modules/solr-configset/pom.xml b/modules/solr-configset/pom.xml new file mode 100644 index 00000000000..1fab865dd77 --- /dev/null +++ b/modules/solr-configset/pom.xml @@ -0,0 +1,295 @@ + + + 4.0.0 + + + edu.harvard.iq + dataverse-parent + ${revision} + ../dataverse-parent + + + solr-configset + + ${solr.packaging.type} + + + ${project.build.directory}/${solr.configset} + jar + ${project.build.sourceDirectory}/io/gdcc/solrteur/solrteur.java + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + ${target.java.version} + + ${compilerArgument} + + + + + + com.googlecode.maven-download-plugin + download-maven-plugin + 1.6.8 + + + download-solr + initialize + + wget + + + https://archive.apache.org/dist/lucene/solr/${solr.version}/solr-${solr.version}.zip + false + ${project.build.directory} + true + + + + + + + dev.jbang + jbang-maven-plugin + 0.0.7 + + + extract-template + generate-resources + + run + + + + --solr-version="${solr.version}" + --target="${solr.configset.directory}" + extract-zip + --zip="${project.build.directory}/solr-${solr.version}.zip" + + + + + + compile-solrconfig-xml + process-resources + + run + + + + --solr-version="${solr.version}" + --target="${solr.configset.directory}" + solrconfig + --xslts="${project.resources[0].directory}/solrconfig-xslt" + + + + + + compile-schema + process-resources + + run + + + + --solr-version="${solr.version}" + --target="${solr.configset.directory}" + schema + + --tsvs="${project.resources[0].directory}/tsv" + --base="${project.resources[0].directory}/schema.xml" + + + + + + + + + org.apache.maven.plugins + 
maven-surefire-plugin + + + test-solr-config + test + + test + + + + ${project.build.directory} + + + SolrConfig + + **/*Test.java + **/*IT.java + + + + + + + + + + + info.picocli + picocli + 4.6.3 + provided + + + net.sf.saxon + Saxon-HE + 10.6 + provided + + + + org.junit.jupiter + junit-jupiter + test + + + org.apache.solr + solr-solrj + test + + + org.apache.solr + solr-core + ${solr.version} + test + + + + + + zip + + true + + + + + maven-assembly-plugin + ${maven-assembly-plugin.version} + + false + ${project.artifactId} + + src/assembly/default.xml + + + + + create-archive + package + + single + + + + + + + + + + ct + + docker + /opt/solr/server/solr/configsets + 1073741824 + + + + + + io.fabric8 + docker-maven-plugin + true + + true + + + solr + ${ct.orgname}/solr:${revision} + ${ct.registry} + + solr:${solr.version} + + + + + -XX:MaxRAMPercentage=80.0 -XX:MinHeapFreeRatio=20 -XX:MaxHeapFreeRatio=40 + + -Dsolr.jetty.request.header.size=102400 + + + + ${git.build.time} + ]]> + https://k8s-docs.gdcc.io + https://k8s-docs.gdcc.io + https://github.com/gdcc/dataverse/tree/develop%2Bct/modules/solr-configset + ${project.version} + ${git.commit.id.abbrev} + Global Dataverse Community Consortium + Apache-2.0 + gdcc/solr :: Dataverse-ready Solr + This container image provides a Dataverse-ready Solr Search Index. 
+ + + ${solr.configsets.path} + ${project.basedir}/src/assembly/default.xml + + + + + + solr-precreate ${solr.collection} ${solr.configsets.path}/${solr.configset} + + + ${solr.memory} + + 8983:8983 + + + + solr-temp-volume:/var/solr + + + + + + + + solr-temp-volume + local + + tmpfs + tmpfs + size=256m,uid=8939 + + + true + + + + + + + + + + + \ No newline at end of file diff --git a/modules/solr-configset/src/assembly/default.xml b/modules/solr-configset/src/assembly/default.xml new file mode 100644 index 00000000000..87487791d81 --- /dev/null +++ b/modules/solr-configset/src/assembly/default.xml @@ -0,0 +1,19 @@ + + default + + zip + + false + + + ${solr.configset.directory} + ${solr.configset} + + **/* + + + + + diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/cmd/CompileSchema.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/cmd/CompileSchema.java new file mode 100644 index 00000000000..c63232b1edc --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/cmd/CompileSchema.java @@ -0,0 +1,222 @@ +package io.gdcc.solrteur.cmd; + +import io.gdcc.solrteur.mdb.tsv.Block; +import io.gdcc.solrteur.mdb.MetadataBlockTSVReader; +import io.gdcc.solrteur.mdb.tsv.Field; +import io.gdcc.solrteur.mdb.tsv.ParserException; +import io.gdcc.solrteur.solrteur; +import picocli.CommandLine; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +@CommandLine.Command( + name = "schema", + mixinStandardHelpOptions = true, + usageHelpAutoWidth = true, + showDefaultValues = true, + sortOptions = false, + description = "Compile the schema.xml from 
a template and add metadata fields from blocks%n") +public class CompileSchema implements Callable { + + @CommandLine.ParentCommand + private solrteur cliParent; + + @CommandLine.Option(required = true, + names = {"--base"}, + paramLabel = "", + description = "Path to the base schema.xml, to be enriched with fields from blocks") + private String baseSchemaPath; + + @CommandLine.Option(required = true, + names = {"--tsvs"}, + paramLabel = "", + description = "Repeatable argument to directories and/or TSV files with metadata blocks.") + private Path[] mdbTsvPaths; + + @CommandLine.Option(required = false, + names = {"--mfb"}, + paramLabel = "", + description = "String to indicate a mark after which all should be inserted.", + defaultValue = "SCHEMA-FIELDS::BEGIN") + private String markFieldsBegin; + + @CommandLine.Option(required = false, + names = {"--mfe"}, + paramLabel = "", + description = "String to indicate a mark where insertion of all shall end.", + defaultValue = "SCHEMA-FIELDS::END") + private String markFieldsEnd; + + @CommandLine.Option(required = false, + names = {"--mcfb"}, + paramLabel = "", + description = "String to indicate a mark after which all should be inserted.", + defaultValue = "SCHEMA-COPY-FIELDS::BEGIN") + private String markCopyFieldsBegin; + + @CommandLine.Option(required = false, + names = {"--mcfe"}, + paramLabel = "", + description = "String to indicate a mark where insertion of all shall end.", + defaultValue = "SCHEMA-COPY-FIELDS::END") + private String markCopyFieldsEnd; + + /** + * Business logic routine, calling all the execution steps. 
+ * @return The exit code + */ + @Override + public Integer call() throws Exception { + getFieldsFromBlocks(); + + return CommandLine.ExitCode.OK; + } + + private void getFieldsFromBlocks() throws solrteur.AbortScriptException { + + MetadataBlockTSVReader reader = new MetadataBlockTSVReader(); + + // Walk all locations and add files to a large list + List allTsvFiles = new ArrayList<>(); + for (Path path : mdbTsvPaths) { + if (Files.isDirectory(path)) { + allTsvFiles.addAll(findTsvFiles(path)); + } else if (Files.isReadable(path) && Files.isRegularFile(path)) { + allTsvFiles.add(path); + } + } + + // Much nicer in output if sorted + Collections.sort(allTsvFiles); + + // Iterate the files and read the blocks to have a set of 'em (necessary for cross-checking fields) + Map blockPathMap = new HashMap<>(); + boolean hadErrors = false; + for (Path path : allTsvFiles) { + try { + List lines = Files.readAllLines(path, StandardCharsets.UTF_8); + Block block = reader.retrieveBlock(lines); + + if (blockPathMap.containsKey(block)) { + throw new ParserException("Metadata block " + block.getName() + "already present in " + blockPathMap.get(block) + "!"); + } + blockPathMap.put(block, path); + + } catch (ParserException e) { + // Log a warning but continue parsing with next file to get done as much as possible. + hadErrors = true; + logErrors(path, e); + } catch (IOException e) { + // Log a warning but continue parsing with next file to get done as much as possible. 
+ hadErrors = true; + solrteur.Logger.warn(new solrteur.AbortScriptException("Could not read "+path, e)); + } + } + + // Abort here if there were errors + if (hadErrors) { + throw new solrteur.AbortScriptException("Experienced parsing errors, fix your block definitions first to continue", null); + } + + // If all blocks could be read and are valid, let's extract the fields + Map> blockFieldsMap = new HashMap<>(); + Set blocks = blockPathMap.keySet(); + + for (Map.Entry mdb : blockPathMap.entrySet()) { + Block block = mdb.getKey(); + Path path = mdb.getValue(); + + try { + List lines = Files.readAllLines(path, StandardCharsets.UTF_8); + // First store all retrieved fields, we check on uniqueness later + blockFieldsMap.put(block, reader.retrieveFields(lines, blocks)); + } catch (ParserException e) { + // Log a warning but continue parsing with next file to get done as much as possible. + hadErrors = true; + logErrors(path, e); + } catch (IOException e) { + // Log a warning but continue parsing with next file to get done as much as possible. 
+ hadErrors = true; + solrteur.Logger.warn(new solrteur.AbortScriptException("Could not read "+path, e)); + } + } + + // Abort here if there were errors + if (hadErrors) { + throw new solrteur.AbortScriptException("Experienced parsing errors, fix your field definitions first to continue", null); + } + + // We need to check uniqueness of fields across blocks + Map fieldBlockMap = new HashMap<>(); + for (Map.Entry> entry : blockFieldsMap.entrySet()) { + Block block = entry.getKey(); + System.out.println("BLOCK: " + block.getName()); + + for (Field field : entry.getValue()) { + System.out.println(" FIELD: " + field.getName()); + + if (fieldBlockMap.containsKey(field)) { + hadErrors = true; + solrteur.Logger.warn("Duplicate field '" + field.getName() + "' in block '" + block.getName() + + "' from '" + blockPathMap.get(block) + "': already defined by block '" + + fieldBlockMap.get(field).getName() + "' from '" + blockPathMap.get(fieldBlockMap.get(field)) + "'"); + } else { + fieldBlockMap.put(field, block); + } + } + } + + // Abort here if there were errors + if (hadErrors) { + throw new solrteur.AbortScriptException("Stopping analysis", null); + } + + } + private void injectFields() {} + private void injectCopyFields() {} + + private List findTsvFiles(Path dir) throws solrteur.AbortScriptException { + if (!Files.isDirectory(dir)) { + return List.of(); + } + + try (Stream walk = Files.walk(dir, 2)) { + return walk + .filter(Files::isRegularFile) + .filter(Files::isReadable) + .filter(path -> path.toString().endsWith(".tsv")) + .sorted() + .collect(Collectors.toList()); + } catch (IOException e) { + throw new solrteur.AbortScriptException("Could not walk over TSV files at " + dir, e); + } + } + + private void logErrors(final Path path, final ParserException e) { + String fileName = path.getFileName().toString(); + logPE(fileName, "", "", e); + } + + private void logPE(final String fileName, final String lineNumber, final String indent, final ParserException e) { + String 
//DEPS net.sf.saxon:Saxon-HE:10.6

package io.gdcc.solrteur.cmd;

import io.gdcc.solrteur.solrteur;
import net.sf.saxon.s9api.Processor;
import net.sf.saxon.s9api.SaxonApiException;
import net.sf.saxon.s9api.Serializer;
import net.sf.saxon.s9api.Xslt30Transformer;
import net.sf.saxon.s9api.XsltCompiler;
import net.sf.saxon.s9api.XsltExecutable;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.ParentCommand;
import picocli.CommandLine.Option;

import javax.xml.transform.stream.StreamSource;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.StandardOpenOption;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import io.gdcc.solrteur.solrteur.AbortScriptException;
import io.gdcc.solrteur.solrteur.Logger;

@Command(name = "solrconfig",
    mixinStandardHelpOptions = true,
    usageHelpAutoWidth = true,
    showDefaultValues = true,
    sortOptions = false,
    description = "Compile the solrconfig.xml from a source and XSLT files%n")
public class CompileSolrConfig implements Callable<Integer> {

    @ParentCommand
    private solrteur cliParent;

    // NOTE(review): the paramLabel text was lost in extraction — confirm against upstream.
    @Option(required = true,
        names = {"--xslts"},
        paramLabel = "<xslt-dir>",
        description = "Path to the directory with XSLTs to adapt solrconfig.xml")
    private String solrConfigXSLTDir;

    /**
     * Business logic routine, calling all the execution steps.
     * @return The exit code
     */
    @Override
    public Integer call() throws Exception {
        applySolrConfigXSLT();
        return CommandLine.ExitCode.OK;
    }

    /**
     * Find all XSLT files below the given directory (up to 2 levels deep), sorted by path
     * so transformations are applied in a deterministic order.
     *
     * @param xsltDir directory to scan
     * @return sorted list of readable, regular *.xslt files
     * @throws AbortScriptException when the directory cannot be walked
     */
    private List<Path> findXsltFiles(final Path xsltDir) throws AbortScriptException {
        try (Stream<Path> walk = Files.walk(xsltDir, 2)) {
            return walk
                .filter(Files::isRegularFile)
                // intentionally matches any name ending in "xslt" (with or without dot),
                // preserving the original behavior
                .filter(path -> path.toString().endsWith("xslt"))
                .sorted()
                .collect(Collectors.toList());
        } catch (IOException e) {
            throw new AbortScriptException("Could not walk over XSLT files at " + xsltDir, e);
        }
    }

    /**
     * Apply every XSLT found in the configured directory to conf/solrconfig.xml below the
     * target dir, chaining the output of one transformation into the input of the next.
     * The final result replaces the original solrconfig.xml in place.
     *
     * @throws AbortScriptException on I/O or XSLT transformation failures
     */
    private void applySolrConfigXSLT() throws AbortScriptException {
        Logger.info("Starting to transform solrconfig.xml...");

        final Path solrConfig = cliParent.getTargetDir().resolve(Path.of("conf", "solrconfig.xml"));
        final Path xsltDir = Path.of(solrConfigXSLTDir);

        final List<Path> xsltFiles = findXsltFiles(xsltDir);

        // Log found XSLT files (relative to the XSLT dir for brevity)
        Logger.info("Found XSLT files in " + solrConfigXSLTDir + ":");
        for (Path xsltFile : xsltFiles) {
            Logger.info(xsltDir.relativize(xsltFile).toString());
        }

        // Setup the XSLT processor
        final Processor processor = new Processor(false);
        final XsltCompiler compiler = processor.newXsltCompiler();

        try {
            // First iteration uses the initial solrconfig.xml as input source
            InputStream currentInput = Files.newInputStream(solrConfig);

            // For every XSLT, do the transformation and rotate the input source afterwards,
            // so the next transformation is applied to the already transformed content.
            for (Path xsltFile : xsltFiles) {
                final XsltExecutable stylesheet;
                // close the stylesheet stream as soon as it has been compiled (was leaked before)
                try (InputStream xsltStream = Files.newInputStream(xsltFile)) {
                    stylesheet = compiler.compile(new StreamSource(xsltStream));
                }

                // Fresh temporary file; it is deleted when read back for the next iteration
                final Path tmpFile = Files.createTempFile(null, null);

                // try-with-resources closes both the current input and the temp output,
                // preventing the stream leaks of the previous implementation
                try (InputStream in = currentInput;
                     OutputStream outStream = Files.newOutputStream(tmpFile)) {
                    Serializer out = processor.newSerializer(outStream);
                    Xslt30Transformer transformer = stylesheet.load30();
                    transformer.transform(new StreamSource(in), out);
                }

                // Rotate the source: DELETE_ON_CLOSE removes the temp file once consumed.
                currentInput = Files.newInputStream(tmpFile, StandardOpenOption.DELETE_ON_CLOSE);
            }

            // Push the final transformed content back over the original solrconfig.xml
            try (InputStream in = currentInput) {
                Files.copy(in, solrConfig, StandardCopyOption.REPLACE_EXISTING);
            }
        } catch (IOException e) {
            throw new AbortScriptException("Could not complete solrconfig.xml compilation", e);
        } catch (SaxonApiException e) {
            throw new AbortScriptException("XML transformation failed", e);
        }

        Logger.info("Finished applying XSLT transformations, saved to " + solrConfig);
    }
}
package io.gdcc.solrteur.cmd;

import io.gdcc.solrteur.solrteur;
import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.ParentCommand;
import picocli.CommandLine.Option;

import java.io.IOException;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.FileSystem;
import java.nio.file.FileSystems;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.concurrent.Callable;

import io.gdcc.solrteur.solrteur.AbortScriptException;
import io.gdcc.solrteur.solrteur.Logger;

@Command(
    name = "extract-zip",
    mixinStandardHelpOptions = true,
    usageHelpAutoWidth = true,
    showDefaultValues = true,
    sortOptions = false,
    description = "Extract default configset from Solr ZIP distribution%n")
public class ExtractConfigSet implements Callable<Integer> {

    @ParentCommand
    private solrteur cliParent;

    @Option(required = true,
        names = {"--zip"},
        description = "Path to local Solr ZIP distribution file")
    private Path solrZipFile;

    // "{{ solr.version }}" is substituted with the parent command's Solr version before use
    @Option(required = false,
        names = {"--zip-subpath"},
        description = "Relative path within ZIP to _default configset",
        defaultValue = "solr-{{ solr.version }}/server/solr/configsets/_default")
    private String solrConfigSetZipPath;

    /**
     * Business logic routine, calling all the execution steps.
     * @return The exit code
     */
    @Override
    public Integer call() throws Exception {
        replaceVariables();
        extractConfigSet();
        return CommandLine.ExitCode.OK;
    }

    /** Substitute the Solr version placeholder in the configured ZIP subpath. */
    private void replaceVariables() throws AbortScriptException {
        this.solrConfigSetZipPath =
            this.solrConfigSetZipPath.replaceAll("\\Q{{ solr.version }}\\E", cliParent.getSolrVersion());
    }

    /**
     * Map an entry inside the ZIP onto the corresponding location below the target directory,
     * stripping the leading configset path from the ZIP entry.
     */
    private Path mapToTarget(final Path zippedEntry) {
        String relative = zippedEntry.toString().substring(solrConfigSetZipPath.length());
        return Path.of(cliParent.getTargetDir().toString(), relative);
    }

    /**
     * Open the Solr distribution ZIP as a file system and copy the _default configset
     * tree into the target directory. Directories already present are reused; files are
     * overwritten, which makes repeated runs idempotent.
     *
     * @throws AbortScriptException when reading from or copying out of the ZIP fails
     */
    private void extractConfigSet() throws AbortScriptException {
        // try-with-resources closes the ZIP file system and prevents a leak.
        // NOTE: the explicit (ClassLoader) cast keeps this compatible with JDK 11 and 17.
        try (FileSystem zipFs = FileSystems.newFileSystem(solrZipFile, (ClassLoader) null)) {
            final Path zipRoot = zipFs.getPath(solrConfigSetZipPath);

            // TODO: should we delete the target before copying the new content?

            Logger.info("Extracting " + solrConfigSetZipPath + " from " + solrZipFile + " into " + cliParent.getTargetDir());
            Files.walkFileTree(zipRoot, new SimpleFileVisitor<>() {
                // Recreate the directory structure, skipping directories that already exist
                @Override
                public FileVisitResult preVisitDirectory(Path zippedDir, BasicFileAttributes attrs) throws IOException {
                    try {
                        Files.copy(zippedDir, mapToTarget(zippedDir), StandardCopyOption.COPY_ATTRIBUTES);
                    } catch (FileAlreadyExistsException e) {
                        // intentional ignore - simply reuse the existing directory
                    }
                    return FileVisitResult.CONTINUE;
                }

                // Copy files, replacing any left over from a former run
                @Override
                public FileVisitResult visitFile(Path zippedFile, BasicFileAttributes attrs) throws IOException {
                    Files.copy(zippedFile, mapToTarget(zippedFile),
                        StandardCopyOption.REPLACE_EXISTING, StandardCopyOption.COPY_ATTRIBUTES);
                    return FileVisitResult.CONTINUE;
                }
            });

        } catch (IOException e) {
            throw new AbortScriptException("Extracting from ZIP file " + solrZipFile + " failed", e);
        }
    }
}
MetadataBlockTSVReader(Configuration config) { + this.config = config; + } + + /** + * Extract the metadata block definition from a single TSV file and return it. + * @param lines + * @return + * @throws ParserException + */ + public Block retrieveBlock(final List lines) throws ParserException { + // Assertions + Objects.requireNonNull(lines, "You must provide a list of strings, it may be empty but not null"); + + ParsingState state = ParsingState.Init; + Block.BlockBuilder blockBuilder = null; + ParserException parentException = new ParserException("Has errors:"); + int lineIndex = 0; + + for (String line : lines) { + // Skip lines that are empty, blanks only or comments + if (line.isBlank() || line.startsWith(config.commentIndicator())) { + // Increment line counter before skipping to next line + lineIndex++; + continue; + } + + // If the line starts with a new section trigger, analyse the state + if (line.startsWith(config.triggerIndicator())) { + state = state.transitionState(line, config); + + // Being here means we transitioned from one state to the next - otherwise there would have been an exception. + switch (state) { + case MetadataBlock: + try { + blockBuilder = new Block.BlockBuilder(line, config); + } catch (ParserException e) { + // This is critical, as we cannot parse the following lines with a broken header + throw e.withLineNumber(lineIndex); + } + break; + case Fields: + Objects.requireNonNull(blockBuilder, "BlockBuilder not initialized, cannot build block"); + + // In case there had been parsing errors, stop here + if (parentException.hasSubExceptions()) { + throw parentException; + } + + // We managed to complete parsing the block section, return the block now. + // The last line of the block section is the line before this one (which transitioned the state) + return blockBuilder.build(lineIndex-1); + default: + // Intentionally left blank, as the other sections are of no interest to us here. 
+ } + } else { + // Proceed analysis + switch(state) { + case Init: throw new ParserException("Only comments, empty or blank lines allowed before block definition") + .withLineNumber(lineIndex); + case MetadataBlock: + Objects.requireNonNull(blockBuilder, "BlockBuilder not initialized, cannot parse"); + try { + blockBuilder.parseAndValidateLine(line); + } catch (ParserException e) { + parentException.addSubException(e.withLineNumber(lineIndex)); + } + break; + default: + throw new ParserException("We should never see this exception, as we looked for the block only") + .withLineNumber(lineIndex); + } + } + + // Increment line counter + lineIndex++; + } + + // The trigger switch did not kick in - only one explanation. + throw new ParserException("Missing fields section."); + } + + public List retrieveFields(final List lines, final Set knownBlocks) throws ParserException { + Objects.requireNonNull(lines, "You must provide a list of strings, it may be empty but not null"); + Objects.requireNonNull(knownBlocks, "You must provide a set of known blocks, it may be empty"); + + // Read the block again, so we are at that stage and can continue with fields + Block currentBlock = retrieveBlock(lines); + ParsingState state = ParsingState.MetadataBlock; + + int lineIndex = currentBlock.getIndexLastLineofBlockSection()+1; + List linesAfterBlock = lines.stream() + .skip(lineIndex) + .collect(Collectors.toUnmodifiableList()); + + Field.FieldsBuilder fieldsBuilder = null; + ParserException parentException = new ParserException("Has errors:"); + + for (String line : linesAfterBlock) { + // Skip lines that are empty, blanks only or comments + if (line.isBlank() || line.startsWith(config.commentIndicator())) { + // Increment line counter + lineIndex++; + continue; + } + + // If the line starts with a new section trigger, analyse the state + if (line.startsWith(config.triggerIndicator())) { + state = state.transitionState(line, config); + + // Being here means we transitioned from one 
state to the next - otherwise there would have been an exception. + switch (state) { + case Fields: + try { + fieldsBuilder = new Field.FieldsBuilder(line, currentBlock.getName(), config); + } catch (ParserException e) { + // This is critical, as we cannot parse the following lines with a broken header + throw e.withLineNumber(lineIndex); + } + break; + case Vocabularies: + // We managed to get to the vocab section, meaning the fields are all done. Return fields! + Objects.requireNonNull(fieldsBuilder, "FieldsBuilder not initialized, cannot build fields"); + + // In case there had been parsing errors, stop here + if (parentException.hasSubExceptions()) { + throw parentException; + } + + return fieldsBuilder.build(); + default: + // Intentionally left blank, as the other sections are of no interest to us here. + } + } else { + // Proceed analysis + switch (state) { + case Fields: + Objects.requireNonNull(fieldsBuilder, "FieldsBuilder not initialized, cannot parse"); + + try{ + // TODO: Extend with checking if this field is for a different block (which is allowed by spec) + fieldsBuilder.parseAndValidateLine(lineIndex, line); + } catch (ParserException e) { + parentException.addSubException(e.withLineNumber(lineIndex)); + } + + break; + default: + throw new ParserException("We should never see this exception, as we looked for the fields only") + .withLineNumber(lineIndex); + } + } + + // Increment line counter + lineIndex++; + } + + // The trigger switch did not kick in - only one explanation. 
+ throw new ParserException("Missing fields section."); + } + +} diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/package-info.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/package-info.java new file mode 100644 index 00000000000..a74044f0e2b --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/package-info.java @@ -0,0 +1 @@ +package io.gdcc.solrteur.mdb; \ No newline at end of file diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Block.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Block.java new file mode 100644 index 00000000000..b294b949e4f --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Block.java @@ -0,0 +1,288 @@ +package io.gdcc.solrteur.mdb.tsv; + +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +public final class Block { + public static final String KEYWORD = "metadataBlock"; + public static final String NAME_PATTERN = "[a-z]+(([\\d_])|([A-Z0-9][a-z0-9]+))*([A-Z])?"; + + /** + * Programmatic variant of the spec of a #metadataBlock. List all the column headers and associated restrictions + * on the values of a column. 
+ */ + public enum Header { + KEYWORD( + Block.KEYWORD, + String::isEmpty, + "must have no value (be empty)" + ), + NAME( + "name", + Predicate.not(String::isBlank).and(Pattern.compile(Block.NAME_PATTERN).asMatchPredicate()), + "must not be blank and match regex pattern " + Block.NAME_PATTERN + ), + DATAVERSE_ALIAS( + "dataverseAlias", + Predicate.not(String::isBlank).or(String::isEmpty), + "must be either empty or not blank" + ), + DISPLAY_NAME( + "displayName", + Predicate.not(String::isBlank).and(h -> h.length() < 257), + "must not be blank and shorter than 256 chars" + ), + BLOCK_URI( + "blockURI", + Validator::isValidUrl, + "must be a valid URL" + ); + + private final String value; + private final Predicate test; + private final String errorMessage; + + Header(final String value, final Predicate test, final String errorMessage) { + this.value = value; + this.test = test; + this.errorMessage = errorMessage; + } + + private static final Map valueMap; + static { + Map map = new ConcurrentHashMap<>(); + Arrays.stream(Header.values()).forEach(h -> map.put(h.toString(), h)); + valueMap = Collections.unmodifiableMap(map); + } + + /** + * Inverse lookup of a {@link Header} from a {@link String}. + * + * @param value A textual string to look up. + * @return Matching {@link Header} wrapped in {@link Optional} or an empty {@link Optional}. + */ + public static Optional
getByValue(String value) { + return Optional.ofNullable(valueMap.get(value)); + } + + /** + * Retrieve all column headers of a metadata block definition as a spec-like list of strings, + * usable for validation and more. The list is ordered as the spec defines the order of the headers. + * + * @return List of the column headers, in order + */ + public static List getHeaders() { + return Arrays.stream(Header.values()).map(Header::toString).collect(Collectors.toUnmodifiableList()); + } + + /** + * Parse a {@link String} as a header of a metadata block definition. Will validate the presence or absence + * of column headers as defined by the spec. This is not a lenient parser - headers need to comply with order + * from the spec. On the other hand, it is case-insensitive. + * + * @param line The textual line to parse for column headers + * @param config The parser configuration to be used + * @return A list of {@link Header} build from the line of text + * @throws ParserException When presented column headers are missing, invalid or the complete line is just wrong. + * @throws IllegalStateException When a column header cannot be found within the enum {@link Header}. + * This should never happen, as the validation would fail before! + */ + public static List
parseAndValidate(final String line, final Configuration config) throws ParserException { + List validatedColumns = Validator.validateHeaderLine(line, getHeaders(), config); + // the IllegalStateException shall never happen, as we already validated the header! + return validatedColumns.stream() + .map(Header::getByValue) + .map(o -> o.orElseThrow(IllegalStateException::new)) + .collect(Collectors.toUnmodifiableList()); + } + + @Override + public String toString() { + return value; + } + + /** + * Test a given {@link String} if it matches the restrictions applied on values of this type. + * + * @param sut The textual string to test. + * @return True if matching or false in every other case. + */ + public boolean isValid(final String sut) { + return sut != null && test.test(sut); + } + + /** + * Receive a proper error message for this type of value (should be extended with more context in calling code!). + * + * @return The error message, always in the form of "must ...". (Create a sentence with it.) + */ + public String getErrorMessage() { + return errorMessage; + } + } + + /** + * Blocks should not be build directly, but using this builder pattern. This allows for validation before handing + * over an object to work with and containing a complete POJO representation of a (custom) metadata block. + */ + public static final class BlockBuilder { + private final Configuration config; + private final List
header; + + private Block block; + private boolean hasErrors = false; + + /** + * Create a builder with a line containing the header of the metadata block definition, so the order of + * the columns can be determined from it. (The builder is stateful.) + * + * @param header The textual line with the column headers. + * @throws ParserException + */ + public BlockBuilder(final String header, final Configuration config) throws ParserException { + this.config = config; + this.header = Block.Header.parseAndValidate(header, config); + } + + /** + * Analyse a line containing a concrete metadata block definition by parsing and validating it. + * + * This will fail: + * - when the line is null or blanks only + * - when another line has been analysed before (spec allows only 1 definition in a single custom metadata block) + * - when the columns within the line do not match the length of the header + * - when the column values do not match the column type restrictions (as implied by the header) + * + * The exception might contain sub-exceptions, as the parser will do its best to keep going and find as many + * problems as possible to avoid unnecessary (pesky) re-iterations. + * + * @param line The metadata block definition line to analyse. + * @throws ParserException If the parsing fails (see description). + */ + public void parseAndValidateLine(final String line) throws ParserException { + // no null or blank lines for the parser. 
(blank lines can be skipped and not sent here by calling code) + if (line == null || line.isBlank()) { + this.hasErrors = true; + throw new ParserException("Must not be empty nor blanks only nor null."); + } + + // only 1 block definition allowed as per spec + if (this.block != null) { + this.hasErrors = true; + throw new ParserException("Must not add more than one metadata block definition"); + } else { + this.block = parseAndValidateColumns(line.split(config.columnSeparator())); + } + } + + /** + * Parse and validate the columns (usually given by {@code parseAndValidateLine}). + * This is package private, becoming testable this way. + * + * @param lineParts + * @return A {@link Block} object (modifiable for builder internal use) + * @throws ParserException + */ + Block parseAndValidateColumns(final String[] lineParts) throws ParserException { + if (lineParts == null || lineParts.length != header.size()) { + throw new ParserException("Does not match length of metadata block headline"); + } + + Block block = new Block(); + ParserException parserException = new ParserException("Has validation errors:"); + + for (int i = 0; i < lineParts.length; i++) { + Block.Header column = header.get(i); + String value = lineParts[i]; + if( ! column.isValid(value)) { + parserException.addSubException( + "Invalid value '" + value + "' for column '" + column + "', " + column.getErrorMessage()); + } else { + block.set(column, value); + } + } + + if (parserException.hasSubExceptions()) { + // setting this to true to ensure no block will be created accidentally via build(). + this.hasErrors = true; + throw parserException; + } else { + return block; + } + } + + public boolean hasSucceeded() { + return ! this.hasErrors && this.block != null; + } + + /** + * Execute the builder to create the {@link Block} POJO, containing the representation of the custom metadata + * block that has been analysed. 
Will execute associated field builders (which will execute associated + * vocabulary builders). + */ + public Block build(int indexLastLineofBlockSection) { + if (hasSucceeded()) { + block.indexLastLineofBlockSection = indexLastLineofBlockSection; + return block; + } else { + throw new IllegalStateException("Trying to build a block with errors or without parsing a line first"); + } + } + } + + /* ---- Actual Block Class starting here ---- */ + + private final Map properties = new EnumMap<>(Header.class); + private List fields = Collections.emptyList(); + private int indexLastLineofBlockSection; + + private Block() {} + + private void set(Header column, String value) { + this.properties.put(column, value); + } + public Optional get(Header column) { + return Optional.ofNullable(this.properties.get(column)); + } + public String get(Header column, String defaultValue) { + return this.properties.getOrDefault(column, defaultValue); + } + + public int getIndexLastLineofBlockSection() { + return indexLastLineofBlockSection; + } + + public String getName() { + return this.properties.get(Header.NAME); + } + + /** + * Get fields for this metadata block. + * @return List of fields. May be empty, but never null. 
+ */ + public List getFields() { + return fields; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof Block)) return false; + Block block = (Block) o; + return this.getName().equals(block.getName()); + } + + @Override + public int hashCode() { + return Objects.hash(properties); + } +} diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Configuration.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Configuration.java new file mode 100644 index 00000000000..5bce96576a8 --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Configuration.java @@ -0,0 +1,67 @@ +package io.gdcc.solrteur.mdb.tsv; + +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +public final class Configuration { + + private final String comment; + private final String trigger; + private final String column; + private final Matcher rtrimMatcher; + private final Boolean allowDeepFieldNesting; + + public Configuration( + String comment, + String trigger, + String column, + boolean allowDeepFieldNesting + ) { + notNullNotEmpty("Comment indicator", comment); + this.comment = comment; + + notNullNotEmpty("Triggering indicator (keyword prefix)", trigger); + this.trigger = trigger; + + notNullNotEmpty("Column separator", column); + this.column = column; + + this.rtrimMatcher = Pattern.compile("(" + this.column + ")+$").matcher(""); + + this.allowDeepFieldNesting = allowDeepFieldNesting; + } + + public static Configuration defaultConfig() { + return new Configuration("%%", "#", "\t", false); + } + + private static void notNullNotEmpty(String optionName, String value) { + if (value == null || value.isEmpty()) { + throw new IllegalArgumentException(optionName + " may not be null or empty"); + } + } + + public String commentIndicator() { + return comment; + } + + public String triggerIndicator() { + return trigger; + } + + public String columnSeparator() { + return column; 
+ } + + public String rtrimColumns(String line) { + return line == null ? null : rtrimMatcher.reset(line).replaceAll(""); + } + + public String trigger(String keyword) { + return this.triggerIndicator() + keyword; + } + + public boolean deepFieldNestingEnabled() { + return this.allowDeepFieldNesting; + } +} diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ControlledVocabulary.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ControlledVocabulary.java new file mode 100644 index 00000000000..487b2198079 --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ControlledVocabulary.java @@ -0,0 +1,114 @@ +package io.gdcc.solrteur.mdb.tsv; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +public final class ControlledVocabulary { + + public static final String KEYWORD = "controlledVocabulary"; + + /** + * Programmatic variant of the spec of a #controlledVocabulary. List all the column headers and associated restrictions + * on the values of a column. 
+ */ + public enum Header { + // TODO: add the rest of the fields + KEYWORD( + ControlledVocabulary.KEYWORD, + String::isEmpty, + "must have no value (be empty)" + ); + + private final String value; + private final Predicate test; + private final String errorMessage; + + Header(final String value, final Predicate test, final String errorMessage) { + this.value = value; + this.test = test; + this.errorMessage = errorMessage; + } + + Header(final String value) { + this.value = value; + this.test = Predicate.not(String::isBlank); + this.errorMessage = "must not be empty or blank"; + } + + private static final Map valueMap; + static { + Map map = new ConcurrentHashMap<>(); + Arrays.stream(Header.values()).forEach(h -> map.put(h.toString(), h)); + valueMap = Collections.unmodifiableMap(map); + } + + /** + * Inverse lookup of a {@link ControlledVocabulary.Header} from a {@link String}. + * + * @param value A textual string to look up. + * @return Matching {@link ControlledVocabulary.Header} wrapped in {@link Optional} or an empty {@link Optional}. + */ + public static Optional getByValue(String value) { + return Optional.ofNullable(valueMap.get(value)); + } + + /** + * Retrieve all column headers of controlled vocabulary definitions as a spec-like list of strings, + * usable for validation and more. The list is ordered as the spec defines the order of the headers. + * + * @return List of the column headers, in order + */ + public static List getHeaders() { + return List.copyOf(valueMap.keySet()); + } + + /** + * Parse a {@link String} as a header of a metadata block definition. Will validate the presence or absence + * of column headers as defined by the spec. This is not a lenient parser - headers need to comply with order + * from the spec. On the other hand, it is case-insensitive. 
+ * + * @param line The textual line to parse for column headers + * @param config The parser configuration to be used + * @return A list of {@link ControlledVocabulary.Header} build from the line of text + * @throws ParserException When presented column headers are missing, invalid or the complete line is just wrong. + * @throws IllegalStateException When a column header cannot be found within the enum {@link ControlledVocabulary.Header}. + * This should never happen, as the validation would fail before! + */ + public static List parseAndValidate(final String line, final Configuration config) throws ParserException { + List validatedColumns = Validator.validateHeaderLine(line, getHeaders(), config); + // the exception shall never happen, as we already validated the header! + return validatedColumns.stream() + .map(ControlledVocabulary.Header::getByValue) + .map(o -> o.orElseThrow(IllegalStateException::new)) + .collect(Collectors.toUnmodifiableList()); + } + + @Override + public String toString() { + return value; + } + + public boolean isValid(final String sut) { + return sut != null && test.test(sut); + } + + public String getErrorMessage() { + return errorMessage; + } + } + + public static final class ControlledVocabularyBuilder { + // TODO: extend! 
+ public ControlledVocabulary build() { + return new ControlledVocabulary(); + } + } + + private ControlledVocabulary() {} +} diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Field.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Field.java new file mode 100644 index 00000000000..d6eff8a25ef --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Field.java @@ -0,0 +1,500 @@ +package io.gdcc.solrteur.mdb.tsv; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.Predicate; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +public final class Field { + + public static final String KEYWORD = "datasetField"; + + /** + * Currently, the spec says we need to comply with Solr and DDI rules. + * De-facto commonly used are camel-cased names. + * See also https://solr.apache.org/guide/8_11/defining-fields.html#field-properties + * + * This regex matches the strict Solr name spec plus adds "." as valid chars + * (we need it for comptability with astrophysics.tsv) + */ + public static final String NAME_PATTERN = "^(?=_[\\w.]+[^_]$|[^_][\\w.]+_?$)[a-zA-Z_][\\w.]+$"; + public static final String WHITESPACE_ONLY = "^\\s+$"; + + public static final Predicate matchBoolean = s -> "TRUE".equals(s) || "FALSE".equals(s); + public static final Predicate matchParentDisplayFormat = Pattern.compile("^[;,:]?$").asMatchPredicate(); + + /** + * Programmatic variant of the spec of a #datasetField. List all the column headers and associated restrictions + * on the values of a column. 
+ */ + public enum Header { + KEYWORD( + Field.KEYWORD, + String::isEmpty, + "must have no value (be empty)" + ), + + NAME( + "name", + Predicate.not(String::isBlank).and(Pattern.compile(Field.NAME_PATTERN).asMatchPredicate()), + "must not be blank and match regex pattern " + Field.NAME_PATTERN + ), + TITLE("title"), + DESCRIPTION("description"), + WATERMARK( + "watermark", + Predicate.not(Pattern.compile(WHITESPACE_ONLY).asMatchPredicate()), + "must not be whitespace only" + ), + + FIELDTYPE( + "fieldType", + Types.matchesTypes(), + "must be one of [" + String.join(", ", Types.getTypesList()) + "]" + ), + + DISPLAY_ORDER( + "displayOrder", + Pattern.compile("\\d+").asMatchPredicate(), + "must be a non-negative integer" + ), + DISPLAY_FORMAT( + "displayFormat", + Predicate.not(Pattern.compile(WHITESPACE_ONLY).asMatchPredicate()), + "must not be whitespace only" + ), + + ADVANCED_SEARCH_FIELD("advancedSearchField", matchBoolean, "must be 'TRUE' or 'FALSE"), + ALLOW_CONTROLLED_VOCABULARY("allowControlledVocabulary", matchBoolean, "must be 'TRUE' or 'FALSE"), + ALLOW_MULTIPLES("allowmultiples", matchBoolean, "must be 'TRUE' or 'FALSE"), + FACETABLE("facetable", matchBoolean, "must be 'TRUE' or 'FALSE"), + DISPLAY_ON_CREATE("displayoncreate", matchBoolean, "must be 'TRUE' or 'FALSE"), + REQUIRED("required", matchBoolean, "must be 'TRUE' or 'FALSE"), + + PARENT( + "parent", + Pattern.compile(Field.NAME_PATTERN).asMatchPredicate().or(String::isEmpty), + "must be either empty or match regex pattern " + Field.NAME_PATTERN + ), + METADATABLOCK_ID( + "metadatablock_id", + Predicate.not(String::isBlank).and(Pattern.compile(Block.NAME_PATTERN).asMatchPredicate()), + "must not be blank and match regex pattern " + Block.NAME_PATTERN + ), + TERM_URI( + "termURI", + s -> s.isEmpty() || Validator.isValidUrl(s), + "must be empty or a valid URL" + ); + + private final String value; + private final Predicate test; + private final String errorMessage; + + Header(final String value, 
final Predicate test, final String errorMessage) { + this.value = value; + this.test = test; + this.errorMessage = errorMessage; + } + + Header(final String value) { + this.value = value; + this.test = Predicate.not(String::isBlank); + this.errorMessage = "must not be empty or blank"; + } + + private static final Map valueMap; + static { + Map map = new ConcurrentHashMap<>(); + Arrays.stream(Header.values()).forEach(h -> map.put(h.toString(), h)); + valueMap = Collections.unmodifiableMap(map); + } + + /** + * Inverse lookup of a {@link Field.Header} from a {@link String}. + * + * @param value A textual string to look up. + * @return Matching {@link Field.Header} wrapped in {@link Optional} or an empty {@link Optional}. + */ + public static Optional
getByValue(String value) { + return Optional.ofNullable(valueMap.get(value)); + } + + /** + * Retrieve all column headers of field definitions as a spec-like list of strings, + * usable for validation and more. The list is ordered as the spec defines the order of the headers. + * + * @return List of the column headers, in order + */ + public static List getHeaders() { + return Arrays.stream(Header.values()).map(Header::toString).collect(Collectors.toUnmodifiableList()); + } + + /** + * Parse a {@link String} as a header of field definitions. Will validate the presence or absence + * of column headers as defined by the spec. This is not a lenient parser - headers need to comply with order + * from the spec. On the other hand, it is case-insensitive. + * + * @param line The textual line to parse for column headers + * @param config The parser configuration to be used + * @return A list of {@link Field.Header} build from the line of text + * @throws ParserException When presented column headers are missing, invalid or the complete line is just wrong. + * @throws IllegalStateException When a column header cannot be found within the enum {@link Field.Header}. + * This should never happen, as the validation would fail before! + */ + public static List
parseAndValidate(final String line, final Configuration config) throws ParserException { + List validatedColumns = Validator.validateHeaderLine(line, getHeaders(), config); + // the exception shall never happen, as we already validated the header! + return validatedColumns.stream() + .map(Header::getByValue) + .map(o -> o.orElseThrow(IllegalStateException::new)) + .collect(Collectors.toUnmodifiableList()); + } + + @Override + public String toString() { + return value; + } + + public boolean isValid(final String sut) { + return sut != null && test.test(sut); + } + + public String getErrorMessage() { + return errorMessage; + } + } + + public enum Types implements Predicate { + NONE("none"), + DATE("date"), + EMAIL("email"), + TEXT("text"), + TEXTBOX("textbox"), + URL("url"), + INT("int"), + FLOAT("float"); + + private final String name; + + Types(final String name) { + this.name = name; + } + + private static final Map valueMap; + static { + Map map = new ConcurrentHashMap<>(); + Arrays.stream(Types.values()).forEach(type -> map.put(type.toString(), type)); + valueMap = Collections.unmodifiableMap(map); + } + + @Override + public boolean test(String sut) { + // we demand correct case! + return this.toString().equals(sut); + } + + public static Predicate matchesTypes() { + Predicate test = NONE; + for (Types type : Types.values()) { + test = test.or(type); + } + return test; + } + + public static List getTypesList() { + return valueMap.keySet().stream().collect(Collectors.toUnmodifiableList()); + } + + @Override + public String toString() { + return this.name; + } + } + + public static final class FieldsBuilder { + private final Configuration config; + private final String containingBlockName; + private final List
header; + private final List fields; + + private int parsedLines = 0; + + /** + * Create a parser for field lines. As this is on the block it + * @param headerLine + * @param containingBlockName + * @param config + * @throws ParserException + */ + public FieldsBuilder(final String headerLine, String containingBlockName, final Configuration config) throws ParserException { + this.header = Header.parseAndValidate(headerLine, config); + this.containingBlockName = containingBlockName; + this.config = config; + this.fields = new ArrayList<>(); + } + + /** + * Analyse a line containing a concrete dataset field definition by parsing and validating it. + * + * This will fail: + * - when the line is null or blanks only + * - when the columns within the line do not match the length of the header + * - when the column values do not match the column type restrictions (as implied by the header) + * + * The exception might contain sub-exceptions, as the parser will do its best to keep going and find as many + * problems as possible to avoid unnecessary (pesky) re-iterations. + * + * Beware: + * 1. This method does not check for parent/child dependencies, as the spec does not say parent fields + * must be defined first. This needs to be done by {@link #build()}, as this is the indication all lines + * have been attempted to parse. + * + * @param line The dataset field definition line to analyse. + * @throws ParserException If the parsing fails (see description). + */ + public void parseAndValidateLine(final int lineIndex, final String line) throws ParserException { + // no null or blank lines for the parser. 
(blank lines can be skipped and not sent here by calling code) + if (line == null || line.isBlank()) { + throw new ParserException("must not be empty nor blanks only nor null.").withLineNumber(lineIndex); + } + + // save the field for further builder internal manipulation + // (if validation fails, the exception will prevent it) + this.fields.add(parseAndValidateColumns(lineIndex, line.split(config.columnSeparator()))); + } + + /** + * Parse and validate the columns (usually given by {@code parseAndValidateLine}). + * This is package private, becoming testable this way. + * + * @param lineParts + * @return A {@link Block} object (modifiable for builder internal use) + * @throws ParserException + */ + Field parseAndValidateColumns(final int lineIndex, final String[] lineParts) throws ParserException { + if (lineParts == null || lineParts.length > header.size()) { + throw new ParserException("Not matching length of dataset fields headline"); + } + + Field field = new Field(lineIndex); + ParserException parserException = new ParserException("Has validation errors:").withLineNumber(lineIndex); + + for (int i = 0; i < lineParts.length; i++) { + Field.Header column = header.get(i); + String value = lineParts[i]; + if( ! column.isValid(value)) { + parserException.addSubException( + "Invalid value '" + value + "' for column '" + column + "', " + column.getErrorMessage()); + } else { + field.set(column, value); + } + } + + // TODO: extend with the possibility to reference a different, but existing other block (also not recommended) + // check the metadata block reference matches the block this field is defined in + field.get(Header.METADATABLOCK_ID).ifPresent(id -> { + if (! 
this.containingBlockName.equals(id)) { + parserException.addSubException( + "Metadata block reference '" + id + + "' does not match parsed containing block '" + this.containingBlockName + "'"); + } + }); + + if (parserException.hasSubExceptions()) { + throw parserException; + } else { + return field; + } + } + + // TODO: extract distinct checks and move to separate methods to reduce complexity + public List build() throws ParserException { + // Check if all fields are unique within this block + List duplicates = fields.stream() + .collect(Collectors.groupingBy(Field::getName)) + .entrySet() + .stream() + .filter(entry -> entry.getValue().size() > 1) + .map(Map.Entry::getKey) + .collect(Collectors.toUnmodifiableList()); + + // This is critical - we cannot build relations when duplicates are present. Step away now. + if (!duplicates.isEmpty()) { + throw new ParserException("Found duplicate field definitions for '" + String.join("', '", duplicates) + "'"); + } + + // Create parent / child relations for compound fields and validate + // a) Get all fields that claim to have a parent + List children = fields.stream() + .filter(field -> !field.get(Header.PARENT, "").isEmpty()) + .collect(Collectors.toUnmodifiableList()); + + ParserException parentException = new ParserException("Compound field errors:"); + // b) Search and associate the parent field to these children + for (Field child : children) { + // Always non-null as we filtered for this before, so will never be "" (but we avoid the Optional) + String parentName = child.get(Header.PARENT, ""); + + Optional parentCandidate = fields.stream() + .filter(field -> field.getName().equals(parentName)) + .findFirst(); + + if (parentCandidate.isEmpty()) { + parentException.addSubException( + new ParserException("'" + child.getName() + "' misses its parent '" + parentName + "'") + .withLineNumber(child.lineIndex)); + } else { + // TODO: when this parsing is extended to allow fields belonging to other blocks, this would + // 
need extension to verify the parent candidate is within the same metadata block + Field parent = parentCandidate.get(); + + // Associate children and parents + child.parent = parent; + parent.children.add(child); + } + } + + // Check no cyclic dependencies present / deep nesting is optional. + // First, fetch fields that are children and parents at the same time: + List parentChilds = fields.stream() + .filter(field -> field.isParent() && field.isChild()) + .collect(Collectors.toUnmodifiableList()); + + // When deep nesting is disabled, this list must be empty (only 1 level of nesting allowed means + // there cannot be any parents that are children at the same time) + if (!config.deepFieldNestingEnabled() && !parentChilds.isEmpty()) { + parentChilds.forEach(field -> + parentException.addSubException( + new ParserException("'" + field.getName() + "' is not allowed to be parent and child at once") + .withLineNumber(field.lineIndex))); + // With deep nesting enabled, cyclic dependencies might happen. Need to recurse over these elements. + } else if (config.deepFieldNestingEnabled() && !parentChilds.isEmpty()) { + parentChilds.forEach(field -> { + if (hasCyclicDependency(field, field.parent)) { + parentException.addSubException( + new ParserException("'" + field.getName() + "' is part of a cyclic dependency") + .withLineNumber(field.lineIndex)); + } + }); + } + + // Ensure compound parents are a) of type "none", b) have no displayFormat other than ":", ";" or "," and + // are c) not facetable and d) not allowed to use a CV + List parents = fields.stream() + .filter(Field::isParent) + .collect(Collectors.toUnmodifiableList()); + + parents.forEach(field -> { + // a) type none + // The first check shall never be true, but just in case... 
+ if (field.get(Header.FIELDTYPE).isEmpty() || !Types.NONE.test(field.get(Header.FIELDTYPE).get())) { + parentException.addSubException( + new ParserException("'" + field.getName() + "' is a parent but does not have 'fieldType'='none'") + .withLineNumber(field.lineIndex)); + } + // b) displayFormat contains only ":", ";" or "," or empty + if (field.get(Header.DISPLAY_FORMAT).isEmpty() || !matchParentDisplayFormat.test(field.get(Header.DISPLAY_FORMAT).get())) { + parentException.addSubException( + new ParserException("'" + field.getName() + "' is a parent but 'displayFormat' is not empty or from [;,:]") + .withLineNumber(field.lineIndex)); + } + // c) not facetable + if (field.get(Header.FACETABLE).isEmpty() || field.get(Header.FACETABLE).get().equals("TRUE")) { + parentException.addSubException( + new ParserException("'" + field.getName() + "' is a parent but has 'facetable'='TRUE'") + .withLineNumber(field.lineIndex)); + } + // c) not using a CV + if (field.get(Header.ALLOW_CONTROLLED_VOCABULARY).isEmpty() || field.get(Header.ALLOW_CONTROLLED_VOCABULARY).get().equals("TRUE")) { + parentException.addSubException( + new ParserException("'" + field.getName() + "' is a parent but has 'allowControlledVocabulary'='TRUE'") + .withLineNumber(field.lineIndex)); + } + }); + + // TODO: Extend check here with: + // 1) Unique numbers in displayOrder (attention: compound fields!) - should it print warnings when unsorted? + // 2) Warnings for facetable fields with suboptimal field types + // 3) Check if any compound fields are optional but have required children and warn about potential unwanted conditional requirements + // 4) Check no primitive (non-parent) field has type "none" + // 5) ... + + // Now either die or return fields + if (parentException.hasSubExceptions()) { + throw parentException; + } + return List.copyOf(fields); + } + + private boolean hasCyclicDependency(final Field root, final Field parent) { + // Found ourselves - stop recursion here. 
+ if (parent == root) { + return true; + } + // Simple recursive search. Should be cheap enough for our expected workload. + if (parent.parent != null) { + return hasCyclicDependency(root, parent.parent); + } else { + return false; + } + } + } + + /* ---- Actual Field Class starting here ---- */ + + private Field parent; + private final List children = new ArrayList<>(); + private final Map properties = new EnumMap<>(Header.class); + private final int lineIndex; + + private Field(int lineIndex) { + this.lineIndex = lineIndex; + } + + private void set(final Header column, final String value) { + this.properties.put(column, value); + } + public Optional get(final Header column) { + return Optional.ofNullable(properties.get(column)); + } + public String get(final Header column, final String defaultValue) { + return properties.getOrDefault(column, defaultValue); + } + + public String getName() { + // Will always be non-null after creating the field with the builder + return properties.get(Header.NAME); + } + + public boolean isParent() { + return !this.children.isEmpty(); + } + + public boolean isChild() { + return this.parent != null; + } + + List controlledVocabularyValues = List.of(); + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (!(o instanceof Field)) return false; + Field field = (Field) o; + return field.getName().equals(this.getName()); + } + + @Override + public int hashCode() { + return Objects.hash(properties); + } +} diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ParserError.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ParserError.java new file mode 100644 index 00000000000..fd120ce4f1e --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ParserError.java @@ -0,0 +1,16 @@ +package io.gdcc.solrteur.mdb.tsv; + +public final class ParserError { + String message; + Integer lineNumber; + + public ParserError(String message, Integer lineNumber) 
{ + this.message = message; + this.lineNumber = lineNumber; + } + + public ParserError(ParserException exception, Integer lineNumber) { + this.message = exception.getMessage(); + this.lineNumber = lineNumber; + } +} diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ParserException.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ParserException.java new file mode 100644 index 00000000000..dca7f86b44a --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ParserException.java @@ -0,0 +1,39 @@ +package io.gdcc.solrteur.mdb.tsv; + +import java.util.ArrayList; +import java.util.List; + +public final class ParserException extends Throwable { + + private final List subExceptions = new ArrayList<>(0); + private int lineNumber = -1; + + public ParserException(String message) { + super(message); + } + + public boolean hasSubExceptions() { + return !subExceptions.isEmpty(); + } + + public void addSubException(String message) { + this.subExceptions.add(new ParserException(message)); + } + + public void addSubException(ParserException ex) { + this.subExceptions.add(ex); + } + + public List getSubExceptions() { + return List.copyOf(subExceptions); + } + + public String getLineNumber() { + return lineNumber > 0 ? 
":"+lineNumber : ""; + } + + public ParserException withLineNumber(int lineIndex) { + this.lineNumber = lineIndex + 1; + return this; + } +} diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ParsingState.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ParsingState.java new file mode 100644 index 00000000000..731cbc939c5 --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/ParsingState.java @@ -0,0 +1,54 @@ +package io.gdcc.solrteur.mdb.tsv; + +public enum ParsingState { + Vocabularies(ControlledVocabulary.KEYWORD), + Fields(Field.KEYWORD, Vocabularies), + MetadataBlock(Block.KEYWORD, Fields), + // This state is only used exactly once and should never be reached from input. + // For safety, make the validation fail. + Init(null, MetadataBlock); + + private final String stateKeyword; + private final ParsingState nextState; + + ParsingState(String keyword, ParsingState next) { + this.stateKeyword = keyword; + this.nextState = next; + } + + /** + * Create final state (no next step) + * @param trigger + */ + ParsingState(String keyword) { + this.stateKeyword = keyword; + this.nextState = this; + } + + public boolean isAllowedFinalState() { + return this == Fields || this == Vocabularies; + } + + /** + * Return the next state if the given line is triggering the change correctly. + * Wrong header lines will trigger exceptions. + * + * @param headerLine The line to analyse for switching to the next parsing state + * @param config The parser configuration + * @return The new state (will throw if line not valid) + * @throws ParserException When the given line wasn't the expected one to transition the state + */ + public ParsingState transitionState(String headerLine, Configuration config) throws ParserException { + // if not null, not starting the same state again (no loops allowed) and starting the correct next state, return the next state + if(headerLine != null && + ! 
headerLine.startsWith(config.trigger(this.stateKeyword)) && + headerLine.startsWith(config.trigger(this.nextState.stateKeyword))) { + return this.nextState; + } + // otherwise, throw a parsing exception + throw new ParserException("Found invalid header '" + + (headerLine == null ? "null" : headerLine.substring(0, Math.min(25, headerLine.length()))) + + "...' while " + + (this.stateKeyword == null ? "initializing." : "in section '" + this.stateKeyword + "'.")); + } +} diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Validator.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Validator.java new file mode 100644 index 00000000000..2c950f1fd91 --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/mdb/tsv/Validator.java @@ -0,0 +1,120 @@ +package io.gdcc.solrteur.mdb.tsv; + +import java.net.MalformedURLException; +import java.net.URISyntaxException; +import java.net.URL; +import java.util.ArrayList; +import java.util.List; + +public final class Validator { + + /** + * Test if a given value is a valid {@link java.net.URL} + * + * Remember, Java only supports HTTP/S, file and JAR protocols by default! + * Any URL not using such a protocol will not be considered a valid URL! + * {@see URL Constructor Summary} + * + * @param url The value to test + * @return True if valid URL, false otherwise + */ + public static boolean isValidUrl(String url) { + try { + new URL(url).toURI(); + return true; + } catch (MalformedURLException | URISyntaxException e) { + return false; + } + } + + /** + * Split and validate a textual line declared to be a header of custom metadata block definition section + * (the block, the fields or controlled vocabularies). Will return a list of the headers found (if they match) and + * when being a spec conform header line. + * + * As this function retrieves the relevant spec parts as parameters, it can be reused for all sections. 
+ * You will need to transform into the resulting list into real Header enum values within calling code. + * + * This validator is strict with naming and order of appearance (must be same as spec), but is lenient + * about case (so you might use camel/pascal case variants). + * + * @param headerLine The textual line to analyse. + * @param validOrderedHeaders A list of Strings with the column headers from the spec in order of appearance. + * @param config The configuration to use + * @return A list of the found headers in normalized form if matching the spec + * @throws ParserException If any validation fails. Contains sub-exceptions with validation details. + */ + static List validateHeaderLine(final String headerLine, + final List validOrderedHeaders, + final Configuration config) throws ParserException { + // start a parenting parser exception to be filled with errors as subexceptions + ParserException ex = new ParserException("Invalid headline:"); + + if (headerLine == null || headerLine.isBlank()) { + ex.addSubException("Header may not be null, empty or whitespace only"); + throw ex; + } + + // test for trigger being present + if (! headerLine.startsWith(config.triggerIndicator())) { + ex.addSubException("Trigger sign '" + config.triggerIndicator() + "' not found"); + throw ex; + } + + // the actual split and validate length + String[] headerSplit = headerLine + .substring(config.triggerIndicator().length()) // remove the trigger indicator first + .split(config.columnSeparator()); + // missing headers? 
+ if (headerSplit.length < validOrderedHeaders.size()) { + ex.addSubException( + "Less fields (" + headerSplit.length + ") found than required (" + validOrderedHeaders.size() + ")."); + } else if (headerSplit.length > validOrderedHeaders.size()) { + ex.addSubException( + "More fields (" + headerSplit.length + ") found than required (" + validOrderedHeaders.size() + ")."); + } + + // allocate a list of validated columns + List validatedColumns = new ArrayList<>(); + + // iterate the found header values + for (int i = 0; i < headerSplit.length; i++) { + String columnHeader = headerSplit[i]; + + // is the value a valid one? (in order of appearance and existing, but ignoring case) + if (i < validOrderedHeaders.size() && validOrderedHeaders.get(i).equalsIgnoreCase(columnHeader)) { + // add as entry of validated and present headers (to be used for line mapping) + // BUT use the normalized variant (makes comparisons easier) + validatedColumns.add(validOrderedHeaders.get(i)); + // when invalid, mark as such + } else { + ex.addSubException( + "Column " + (i+1) + " contains '" + columnHeader + "', but spec expects " + + (i < validOrderedHeaders.size() ? "'"+validOrderedHeaders.get(i)+"'" : "nothing") + " to be here." + ); + // additional hint when valid, but accidentally already present + if (validatedColumns.stream().anyMatch(columnHeader::equalsIgnoreCase)) { + ex.addSubException("Column " + (i+1) + " contains valid '" + columnHeader + "' already present."); + } + } + } + + // when there are headers missing, report them + if ( validatedColumns.size() < validOrderedHeaders.size() ) { + for (int i = 0; i < validOrderedHeaders.size(); i++) { + String missingHeader = validOrderedHeaders.get(i); + if (validatedColumns.stream().noneMatch(missingHeader::equalsIgnoreCase)) { + ex.addSubException("Missing column '" + missingHeader + "' from position " + (i+1) + "."); + } + } + } + + // Will only return the header column mapping if and only if the validation did not find errors. 
+ // use an unmodifiable version of the list to avoid accidents without notice. Else throw the exception. + if (ex.hasSubExceptions()) { + throw ex; + } else { + return List.copyOf(validatedColumns); + } + } +} diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/package-info.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/package-info.java new file mode 100644 index 00000000000..ef7bd55f222 --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/package-info.java @@ -0,0 +1 @@ +package io.gdcc.solrteur; \ No newline at end of file diff --git a/modules/solr-configset/src/main/java/io/gdcc/solrteur/solrteur.java b/modules/solr-configset/src/main/java/io/gdcc/solrteur/solrteur.java new file mode 100644 index 00000000000..1a8fa5ae69b --- /dev/null +++ b/modules/solr-configset/src/main/java/io/gdcc/solrteur/solrteur.java @@ -0,0 +1,117 @@ +///usr/bin/env jbang "$0" "$@" ; exit $? +// +//SOURCES cmd/*.java +//SOURCES mdb/*.java +//SOURCES mdb/**/*.java +// +//DEPS info.picocli:picocli:4.6.3 + +package io.gdcc.solrteur; + +import io.gdcc.solrteur.cmd.CompileSchema; +import io.gdcc.solrteur.cmd.ExtractConfigSet; +import picocli.CommandLine; +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +import io.gdcc.solrteur.cmd.CompileSolrConfig; + +import java.nio.file.Path; + +/** + * This class is the main entry point into the different functions of handling different aspects of + * the Dataverse Solr flavor. + * + * (The name "solrteur" is a remix of the German word "Solarteur" which means "solar technician".) 
+ */ +@Command(name = solrteur.CLI_NAME, + mixinStandardHelpOptions = true, + usageHelpAutoWidth = true, + version = solrteur.CLI_NAME+" "+ solrteur.CLI_VERSION, + description = "Execute different tasks around Dataverse and Solr", + subcommands = { + ExtractConfigSet.class, + CompileSolrConfig.class, + CompileSchema.class + }, + synopsisSubcommandLabel = "COMMAND") +public class solrteur { + public final static String CLI_NAME = "solrteur"; + public final static String CLI_VERSION = "1.0"; + + @Option( + required = true, + names = {"--solr-version", "-s"}, + paramLabel = "", + description = "Which version of Solr to use, e. g. 8.9 or 8.11.1") + private String solrVersion; + + public String getSolrVersion() { + return this.solrVersion; + } + + @Option(required = true, + names = {"--target", "-t"}, + paramLabel = "", + description = "Path to a target directory") + private Path targetDir; + + public Path getTargetDir() { + return this.targetDir; + } + + @Option( + names = {"--quiet", "-q"}, + description = "Decrease verbosity" + ) + static boolean quiet; + + /** + * A wrapper for Throwables to create a checked exception that leads to aborting the execution + */ + public static final class AbortScriptException extends Exception { + private AbortScriptException() {} + public AbortScriptException(String msg, Throwable cause) { + super(msg, cause); + } + } + /** + * Static inner logging wrapper for convenience. + * This is here because we don't want to add more clutter of a logging framework + * to the Maven output where we use this from. 
+ */ + public static final class Logger { + static void log(String message) { + System.out.println(message); + } + static void logError(String message) { + System.err.println(message); + } + + public static void info(String message) { + if (!quiet) { + log(message); + } + } + public static void info(AbortScriptException ex) { + if (!quiet) { + log(ex.getMessage()); + log(ex.getCause().getMessage()); + } + } + + public static void warn(String message) { + logError(message); + } + + public static void warn(AbortScriptException ex) { + logError(ex.getMessage()); + logError(ex.getCause().getMessage()); + } + } + + public static void main(String... args) { + int exitCode = new CommandLine(new solrteur()).execute(args); + System.exit(exitCode); + } +} diff --git a/conf/solr/8.11.1/schema.xml b/modules/solr-configset/src/main/resources/schema.xml similarity index 100% rename from conf/solr/8.11.1/schema.xml rename to modules/solr-configset/src/main/resources/schema.xml diff --git a/modules/solr-configset/src/main/resources/solrconfig-xslt/01-disable-schemaless.xslt b/modules/solr-configset/src/main/resources/solrconfig-xslt/01-disable-schemaless.xslt new file mode 100644 index 00000000000..a9a9da12229 --- /dev/null +++ b/modules/solr-configset/src/main/resources/solrconfig-xslt/01-disable-schemaless.xslt @@ -0,0 +1,26 @@ + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/modules/solr-configset/src/main/resources/solrconfig-xslt/02-search-boosting.xslt b/modules/solr-configset/src/main/resources/solrconfig-xslt/02-search-boosting.xslt new file mode 100644 index 00000000000..2d4e8d39987 --- /dev/null +++ b/modules/solr-configset/src/main/resources/solrconfig-xslt/02-search-boosting.xslt @@ -0,0 +1,68 @@ + + + + + + + + + + + + + + + + + This boosting configuration has been + first introduced in 2015, see https://github.com/IQSS/dataverse/issues/1928#issuecomment-91651853, + been re-introduced in 2018 for Solr 7.2.1 update, see 
https://github.com/IQSS/dataverse/issues/4158, + and finally evolved to the current state later in 2018 https://github.com/IQSS/dataverse/issues/4938 + (merged with https://github.com/IQSS/dataverse/commit/3843e5366845d55c327cdb252dd9b4e4125b9b88) + + Since then, this has not been touched again (2021-12-21). + + edismax + 0.075 + + dvName^400 + authorName^180 + dvSubject^190 + dvDescription^180 + dvAffiliation^170 + title^130 + subject^120 + keyword^110 + topicClassValue^100 + dsDescriptionValue^90 + authorAffiliation^80 + publicationCitation^60 + producerName^50 + fileName^30 + fileDescription^30 + variableLabel^20 + variableName^10 + _text_^1.0 + + + dvName^200 + authorName^100 + dvSubject^100 + dvDescription^100 + dvAffiliation^100 + title^75 + subject^75 + keyword^75 + topicClassValue^75 + dsDescriptionValue^75 + authorAffiliation^75 + publicationCitation^75 + producerName^75 + + Even though this number is huge it only seems to apply a boost of ~1.5x to final result -MAD 4.9.3 + + isHarvested:false^25000 + + + + \ No newline at end of file diff --git a/modules/solr-configset/src/main/resources/solrconfig-xslt/03-static-schema-factory.xslt b/modules/solr-configset/src/main/resources/solrconfig-xslt/03-static-schema-factory.xslt new file mode 100644 index 00000000000..f154965b30a --- /dev/null +++ b/modules/solr-configset/src/main/resources/solrconfig-xslt/03-static-schema-factory.xslt @@ -0,0 +1,22 @@ + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/modules/solr-configset/src/main/resources/solrconfig-xslt/99-fix-special-chars.xslt b/modules/solr-configset/src/main/resources/solrconfig-xslt/99-fix-special-chars.xslt new file mode 100644 index 00000000000..9d6a3e2ee5e --- /dev/null +++ b/modules/solr-configset/src/main/resources/solrconfig-xslt/99-fix-special-chars.xslt @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/conf/solr/8.11.1/update-fields.sh 
b/modules/solr-configset/src/main/scripts/update-fields.sh similarity index 100% rename from conf/solr/8.11.1/update-fields.sh rename to modules/solr-configset/src/main/scripts/update-fields.sh diff --git a/modules/solr-configset/src/test/java/Constants.java b/modules/solr-configset/src/test/java/Constants.java new file mode 100644 index 00000000000..88a6abb8a24 --- /dev/null +++ b/modules/solr-configset/src/test/java/Constants.java @@ -0,0 +1,3 @@ +public class Constants { + public final static String TAG_CONFIG = "SolrConfig"; +} diff --git a/modules/solr-configset/src/test/java/EmbeddedSolrConfigIT.java b/modules/solr-configset/src/test/java/EmbeddedSolrConfigIT.java new file mode 100644 index 00000000000..00abf2929c0 --- /dev/null +++ b/modules/solr-configset/src/test/java/EmbeddedSolrConfigIT.java @@ -0,0 +1,106 @@ +import org.apache.solr.client.solrj.SolrClient; +import org.apache.solr.client.solrj.SolrServerException; +import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer; +import org.apache.solr.client.solrj.request.CoreAdminRequest; +import org.apache.solr.client.solrj.request.CoreStatus; +import org.apache.solr.client.solrj.request.SolrPing; +import org.apache.solr.client.solrj.response.SolrPingResponse; +import org.apache.solr.core.NodeConfig; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Calendar; +import java.util.Comparator; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +@Tag(Constants.TAG_CONFIG) +@DisplayName("Embedded Solr Configuration Integration Test") +public class EmbeddedSolrConfigIT { + + 
final static String buildDirectory = System.getProperty("buildDirectory"); + final static String[] solrHomeSubPath = {"solr"}; + + static Path solrHome; + static Path solrConfigSets; + + final static String collectionName = "collection1"; + final static String configSetName = "dataverse"; + static SolrClient solrClient; + + @BeforeAll + public static void setUp() throws IOException { + checkAndSetupSolrDirectories(); + + // build a node config, so we can start an embedded server with it + final NodeConfig config = new NodeConfig.NodeConfigBuilder("embeddedSolrServerNode", solrHome) + .setConfigSetBaseDirectory(solrConfigSets.toString()) + .build(); + + // create the server + final EmbeddedSolrServer embeddedSolrServer = new EmbeddedSolrServer(config, collectionName); + solrClient = embeddedSolrServer; + } + + @AfterAll + public static void tearDown() throws IOException { + // delete the solr home (so we can run the test again without mvn clean) + try (Stream walk = Files.walk(solrHome)) { + walk.sorted(Comparator.reverseOrder()) + .map(Path::toFile) + //.peek(System.out::println) + .forEach(File::delete); + } + } + + @Test + @DisplayName("Deploy Solr Core from Dataverse ConfigSet") + public void deployDataverseCore() throws SolrServerException, IOException { + assumeTrue(solrClient != null); + + // create the core from our configset + final CoreAdminRequest.Create createRequest = new CoreAdminRequest.Create(); + createRequest.setCoreName(collectionName); + createRequest.setConfigSet(configSetName); + createRequest.process(solrClient); + + // get the core status + final CoreStatus coreStatus = CoreAdminRequest.getCoreStatus(collectionName, solrClient); + assertNotNull(coreStatus); + assertTrue(coreStatus.getCoreStartTime().before(Calendar.getInstance().getTime())); + + // check ping + final SolrPing ping = new SolrPing(); + SolrPingResponse pingResponse = ping.process(solrClient); + assertNotNull(pingResponse); + 
assertTrue(pingResponse.toString().contains("status=OK")); + } + + + private static void checkAndSetupSolrDirectories() throws IOException { + assertNotNull(buildDirectory); + final Path buildDir = Path.of(buildDirectory); + assertTrue(buildDir.isAbsolute() && + Files.exists(buildDir) && + Files.isDirectory(buildDir) && + Files.isReadable(buildDir) && + Files.isWritable(buildDir)); + + // create the solr home (might be replaced with a memory fs) + solrHome = Path.of(buildDirectory, solrHomeSubPath); + Files.createDirectories(solrHome); + + // we simply reuse the parent directory from the build as our configsets source directory + solrConfigSets = buildDir; + } +} diff --git a/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/BlockTest.java b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/BlockTest.java new file mode 100644 index 00000000000..b12da3702e3 --- /dev/null +++ b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/BlockTest.java @@ -0,0 +1,108 @@ +package io.gdcc.solrteur.mdb.tsv; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.NullAndEmptySource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +class BlockTest { + + private static final Logger logger = Logger.getLogger(BlockTest.class.getCanonicalName()); + + static final Configuration config = Configuration.defaultConfig(); + static final String validHeaderLine = "#metadataBlock\tname\tdataverseAlias\tdisplayName\tblockURI"; + 
static final String validBlockDef1 = "\tmyblock\t\tFooBar Block\thttps://foobar.com/"; + static final String validBlockDef2 = "\tmyblock\tdataverse\tFooBar Block\thttps://foobar.com/"; + + @Nested + class HeaderTest { + @ParameterizedTest + @ValueSource(strings = { + validHeaderLine, + "#metadataBlock\tNAME\tDataversealias\tDisplayname\tBlockURI" + }) + void successfulParseAndValidateHeaderLine(String headerLine) throws ParserException { + List headers = Block.Header.parseAndValidate(headerLine, config); + assertFalse(headers.isEmpty()); + assertEquals(List.of(Block.Header.values()), headers); + } + + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = { + "hello", + "#metadataBlock test", + "#metadataBlock\tname\tdataverseAlias\tdisplayName", + "\t#metadataBlock\tname\tdataverseAlias\tdisplayName\tblockURI", + "#metadataBlock\tname\tdataverseAlias\tdisplayName\tblockURI\tfoobar", + "#metadataBlock\tname\tdataverseAlias\tdisplayName\tdisplayName\tblockURI", + "dataverseAlias\tdisplayName\tblockURI\t#metadataBlock\tname" + }) + void failingParseAndValidateHeaderLine(String headerLine) throws ParserException { + ParserException exception = assertThrows(ParserException.class, () -> Block.Header.parseAndValidate(headerLine, config)); + assertTrue(exception.hasSubExceptions()); + logger.log(Level.FINE, + exception.getSubExceptions().stream().map(Throwable::getMessage).collect(Collectors.joining("\n")) + ); + } + } + + @Nested + class ParseLineTest { + Block.BlockBuilder builder; + + @BeforeEach + void setUp() throws ParserException { + builder = new Block.BlockBuilder(validHeaderLine, config); + } + + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = { + "\t", + "myblock", + "\tmyblock\t", + "\tmyblock\tdataverse", + "\tmyblock\tdataverse\tFooBar Block", + "\tmyblock\tdataverse\tFooBar Block\thttps://", + "\tmyblock\tdataverse\tFooBar Block\thttps://foobar.com/\thello", + "\tmyblock\t\tFooBar Block\thttps://foobar.com/\thello", + 
"myblock\tdataverse\tFooBar Block\thttps://foobar.com/", + "myblock\t\tFooBar Block\thttps://foobar.com/" + }) + void failingParseLine(String line) throws ParserException { + ParserException exception = assertThrows(ParserException.class, () -> builder.parseAndValidateLine(line)); + assertFalse(builder.hasSucceeded()); + } + + @ParameterizedTest + @ValueSource(strings = { + validBlockDef1, validBlockDef2 + }) + void succeedingParseLine(String line) throws ParserException { + builder.parseAndValidateLine(line); + assertTrue(builder.hasSucceeded()); + } + + @Test + void failingDoubleAdditionAttempt() throws ParserException { + builder.parseAndValidateLine(validBlockDef1); + assertTrue(builder.hasSucceeded()); + ParserException exception = assertThrows(ParserException.class, () -> builder.parseAndValidateLine(validBlockDef1)); + assertFalse(builder.hasSucceeded()); + } + } + +} diff --git a/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/ConfigurationTest.java b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/ConfigurationTest.java new file mode 100644 index 00000000000..5b632955bac --- /dev/null +++ b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/ConfigurationTest.java @@ -0,0 +1,28 @@ +package io.gdcc.solrteur.mdb.tsv; + +import io.gdcc.solrteur.mdb.tsv.Configuration; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import static org.junit.jupiter.api.Assertions.*; + +class ConfigurationTest { + + private final Configuration testConfiguration = Configuration.defaultConfig(); + + @ParameterizedTest + @CsvSource(nullValues = "NULL", + value = { + "NULL,NULL", + "hello,hello", + "' hello',' hello'", + "' hello ',' hello '", + "' hello',' hello\t\t\t'", + "'\t\t\thello','\t\t\thello\t\t\t'", + "'\t\t\thello\ttest','\t\t\thello\ttest\t\t'", + "'\t\t\thello\ttest\t\t ','\t\t\thello\ttest\t\t '", + }) + void trimming(String expected, String sut) { + assertEquals(expected, 
testConfiguration.rtrimColumns(sut)); + } +} \ No newline at end of file diff --git a/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/FieldTest.java b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/FieldTest.java new file mode 100644 index 00000000000..028296ed290 --- /dev/null +++ b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/FieldTest.java @@ -0,0 +1,209 @@ +package io.gdcc.solrteur.mdb.tsv; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvFileSource; +import org.junit.jupiter.params.provider.EmptySource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.NullAndEmptySource; +import org.junit.jupiter.params.provider.NullSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.File; +import java.io.IOException; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.function.Predicate; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +class FieldTest { + + private static final Logger logger = Logger.getLogger(FieldTest.class.getCanonicalName()); + + static final Configuration config = Configuration.defaultConfig(); + static final String validHeaderLine = 
"#datasetField\tname\ttitle\tdescription\twatermark\tfieldType" + + "\tdisplayOrder\tdisplayFormat\tadvancedSearchField\tallowControlledVocabulary\tallowmultiples\tfacetable" + + "\tdisplayoncreate\trequired\tparent\tmetadatablock_id\ttermURI"; + static final String validContainingBlockName = "citation"; + static final String validFieldDef = "\ttitle\tTitle\tThe main title of the Dataset\t\ttext" + + "\t0\t\tTRUE\tFALSE\tFALSE\tFALSE\tTRUE\tTRUE\t\tcitation\thttp://purl.org/dc/terms/title"; + + @Nested + class TypesTest { + Predicate allowedTypes = Field.Types.matchesTypes(); + + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = {"foobar", "hello_hello", "NONE", "DATE"}) + void failing(String subject) { + assertFalse(allowedTypes.test(subject)); + } + + @ParameterizedTest + @ValueSource(strings = {"none", "text", "textbox", "date", "email", "int", "float"}) + void succeeding(String subject) { + assertTrue(allowedTypes.test(subject)); + } + } + + @Nested + class HeaderFieldValueValidationTest { + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = {" ", "\t", "_foobar_", "_foo_bar_"}) + void invalidNames(String subject) { + assertFalse(Field.Header.NAME.isValid(subject)); + } + + @ParameterizedTest + @ValueSource(strings = {"foobar_", "foo_bar_", "_foobar", "_foo_bar", "foobar", "foobar1234", "foo_bar_1234"}) + void validNames(String subject) { + assertTrue(Field.Header.NAME.isValid(subject)); + assertTrue(Field.Header.PARENT.isValid(subject)); + } + + @ParameterizedTest + @EmptySource + void validParentName(String subject) { + assertTrue(Field.Header.PARENT.isValid(subject)); + } + + @ParameterizedTest + @NullSource + @ValueSource(strings = {" ", "\t"}) + void invalidEmptyOrText(String subject) { + assertFalse(Field.Header.WATERMARK.isValid(subject)); + assertFalse(Field.Header.DISPLAY_FORMAT.isValid(subject)); + } + + @ParameterizedTest + @ValueSource(strings = { "", "foobar", "My name is Hase, I know about nothing."}) + void 
validEmptyOrText(String subject) { + assertTrue(Field.Header.WATERMARK.isValid(subject)); + assertTrue(Field.Header.DISPLAY_FORMAT.isValid(subject)); + } + + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = { "true", "false", "0", "1", "foobar"}) + void invalidBoolean(String subject) { + assertFalse(Field.Header.ADVANCED_SEARCH_FIELD.isValid(subject)); + assertFalse(Field.Header.ALLOW_CONTROLLED_VOCABULARY.isValid(subject)); + assertFalse(Field.Header.ALLOW_MULTIPLES.isValid(subject)); + assertFalse(Field.Header.FACETABLE.isValid(subject)); + assertFalse(Field.Header.DISPLAY_ON_CREATE.isValid(subject)); + assertFalse(Field.Header.REQUIRED.isValid(subject)); + } + + @ParameterizedTest + @ValueSource(strings = { "TRUE", "FALSE" }) + void validBoolean(String subject) { + assertTrue(Field.Header.ADVANCED_SEARCH_FIELD.isValid(subject)); + assertTrue(Field.Header.ALLOW_CONTROLLED_VOCABULARY.isValid(subject)); + assertTrue(Field.Header.ALLOW_MULTIPLES.isValid(subject)); + assertTrue(Field.Header.FACETABLE.isValid(subject)); + assertTrue(Field.Header.DISPLAY_ON_CREATE.isValid(subject)); + assertTrue(Field.Header.REQUIRED.isValid(subject)); + } + } + + @Nested + class HeaderLineTest { + @ParameterizedTest + @ValueSource(strings = { + validHeaderLine, + "#datasetfield\tName\tTITLE\tdescription\tWAtermark\tfieldType\tdisplayOrder\tdisplayFormat" + + "\tadvancedSearchField\tallowControlledVocabulary\tallowmultiples\tfacetable" + + "\tdisplayOnCreate\trequired\tparent\tmetadataBLOCK_ID\ttermUri" + }) + void successfulParseAndValidateHeaderLine(String headerLine) throws ParserException { + List headers = Field.Header.parseAndValidate(headerLine, config); + assertFalse(headers.isEmpty()); + assertEquals(List.of(Field.Header.values()), headers); + } + + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = { + "hello", + "datasetField", + "#datasetField test", + "#datasetField\tname\ttitle\tdescription\twatermark\tfieldType", + 
"#datasetField\tname\ttitle\tdescription\twatermark\tfieldType\t\tdisplayoncreate\trequired\tparent\tmetadatablock_id\ttermURI" + }) + void failingParseAndValidateHeaderLine(String headerLine) throws ParserException { + ParserException exception = assertThrows(ParserException.class, () -> Field.Header.parseAndValidate(headerLine, config)); + assertTrue(exception.hasSubExceptions()); + logger.log(Level.FINE, + exception.getSubExceptions().stream().map(Throwable::getMessage).collect(Collectors.joining("\n")) + ); + } + } + + @Nested + @TestInstance(TestInstance.Lifecycle.PER_CLASS) + class ParseLineTest { + Field.FieldsBuilder builder; + + @BeforeEach + void setUp() throws ParserException { + builder = new Field.FieldsBuilder(validHeaderLine, validContainingBlockName, config); + } + + @ParameterizedTest + @NullAndEmptySource + @MethodSource("invalidFieldExamples") + void failingParseLine(String line) throws ParserException { + ParserException exception = assertThrows(ParserException.class, () -> builder.parseAndValidateLine(0, line)); + //assertFalse(builder.hasSucceeded()); + } + + @ParameterizedTest + @ValueSource(strings = {validFieldDef}) + @MethodSource("validFieldExamples") + void succeedingParseLine(String line) throws ParserException { + try { + System.out.println(line); + builder.parseAndValidateLine(0, line); + } catch (ParserException e) { + e.getSubExceptions().forEach(System.out::println); + fail(e); + } + //assertTrue(builder.hasSucceeded()); + } + + Stream validFieldExamples() throws IOException { + Path file = Path.of("", "src/test/resources", "fields", "valid_fields.csv"); + return Files.readAllLines(file, StandardCharsets.UTF_8).stream().map(s -> s.replaceAll(";", "\t")); + } + + Stream invalidFieldExamples() throws IOException { + // TODO: write a file with such examples that fail already at parsing the line + Path file = Path.of("", "src/test/resources", "fields", "invalid_fields.csv"); + return Files.readAllLines(file, 
StandardCharsets.UTF_8).stream().map(s -> s.replaceAll(";", "\t")); + } + } + + @Nested + @TestInstance(TestInstance.Lifecycle.PER_CLASS) + class BuildFieldListTest { + // TODO: write tests for the checks at build time + } +} \ No newline at end of file diff --git a/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/ParsingStateTest.java b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/ParsingStateTest.java new file mode 100644 index 00000000000..3722eb78289 --- /dev/null +++ b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/ParsingStateTest.java @@ -0,0 +1,78 @@ +package io.gdcc.solrteur.mdb.tsv; + +import io.gdcc.solrteur.mdb.tsv.Configuration; +import io.gdcc.solrteur.mdb.tsv.Block; +import io.gdcc.solrteur.mdb.tsv.ControlledVocabulary; +import io.gdcc.solrteur.mdb.tsv.Field; +import io.gdcc.solrteur.mdb.tsv.ParserException; +import io.gdcc.solrteur.mdb.tsv.ParsingState; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertEquals; + +class ParsingStateTest { + + static Configuration config = Configuration.defaultConfig(); + + static Stream failingStateTransitionExamples() { + return Stream.of( + Arguments.of(ParsingState.Init, null), + Arguments.of(ParsingState.MetadataBlock, null), + Arguments.of(ParsingState.Fields, null), + Arguments.of(ParsingState.Vocabularies, null), + + Arguments.of(ParsingState.Init, ""), + Arguments.of(ParsingState.MetadataBlock, ""), + Arguments.of(ParsingState.Fields, ""), + Arguments.of(ParsingState.Vocabularies, ""), + + Arguments.of(ParsingState.Init, "foobar"), + Arguments.of(ParsingState.MetadataBlock, "foobar"), + Arguments.of(ParsingState.Fields, "foobar"), + Arguments.of(ParsingState.Vocabularies, "foobar"), + + 
Arguments.of(ParsingState.Init, config.triggerIndicator()), + Arguments.of(ParsingState.Init, config.commentIndicator()), + Arguments.of(ParsingState.Init, config.columnSeparator()), + + Arguments.of(ParsingState.Init, config.trigger(Field.KEYWORD)), + Arguments.of(ParsingState.Init, config.trigger(ControlledVocabulary.KEYWORD)), + + Arguments.of(ParsingState.MetadataBlock, config.commentIndicator()), + Arguments.of(ParsingState.MetadataBlock, config.trigger(ControlledVocabulary.KEYWORD)), + + Arguments.of(ParsingState.Fields, config.commentIndicator()), + Arguments.of(ParsingState.Fields, config.trigger(Block.KEYWORD)), + + Arguments.of(ParsingState.Vocabularies, config.commentIndicator()), + Arguments.of(ParsingState.Vocabularies, config.trigger(Block.KEYWORD)), + Arguments.of(ParsingState.Vocabularies, config.trigger(Field.KEYWORD)) + ); + } + + @ParameterizedTest + @MethodSource("failingStateTransitionExamples") + void failingTransitions(final ParsingState source, final String triggerLine) throws ParserException { + ParserException ex = assertThrows(ParserException.class, () -> source.transitionState(triggerLine, config)); + } + + static Stream successfulStateTransitionExamples() { + return Stream.of( + Arguments.of(ParsingState.Init, config.trigger(Block.KEYWORD), ParsingState.MetadataBlock), + Arguments.of(ParsingState.MetadataBlock, config.trigger(Field.KEYWORD), ParsingState.Fields), + Arguments.of(ParsingState.Fields, config.trigger(ControlledVocabulary.KEYWORD), ParsingState.Vocabularies) + ); + } + + @ParameterizedTest + @MethodSource("successfulStateTransitionExamples") + void successfulTransitions(final ParsingState source, final String triggerLine, final ParsingState expected) throws ParserException { + assertEquals(expected, source.transitionState(triggerLine, config)); + } + +} \ No newline at end of file diff --git a/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/ValidatorTest.java 
b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/ValidatorTest.java new file mode 100644 index 00000000000..5462c41130c --- /dev/null +++ b/modules/solr-configset/src/test/java/io/gdcc/solrteur/mdb/tsv/ValidatorTest.java @@ -0,0 +1,80 @@ +package io.gdcc.solrteur.mdb.tsv; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.NullAndEmptySource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.List; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class ValidatorTest { + + private static final Logger logger = Logger.getLogger(ValidatorTest.class.getCanonicalName()); + + @Nested + class UtilsTest { + @ParameterizedTest + @CsvSource(nullValues = "NULL", + value = { + "false,NULL", + "false,''", + "false,hello", + "false,https://", + "false,www.foo.bar", + "false,://foo.bar.com", + "true,https://wwww.foobar.com", + "true,https://wwww.foobar.com/hello", + "true,https://wwww.foobar.com:1214/hello", + "true,https://host/hello", + }) + void urlValidation(boolean expected, String sut) { + Assertions.assertEquals(expected, Validator.isValidUrl(sut)); + } + } + + @Nested + class ValidateBlockHeader { + List blockHeaders = Block.Header.getHeaders(); + + @ParameterizedTest + @NullAndEmptySource + @ValueSource(strings = { + "hello", + "#metadataBlock test", + "#metadataBlock\tname\tdataverseAlias\tdisplayName", + "\t#metadataBlock\tname\tdataverseAlias\tdisplayName\tblockURI", + "#metadataBlock\tname\tdataverseAlias\tdisplayName\tblockURI\tfoobar", + 
"#metadataBlock\tname\tdataverseAlias\tdisplayName\tdisplayName\tblockURI", + "dataverseAlias\tdisplayName\tblockURI\t#metadataBlock\tname" + }) + void validateHeaderLine_Block_Throws(String line) { + ParserException exception = assertThrows(ParserException.class, () -> Validator.validateHeaderLine(line, blockHeaders, Configuration.defaultConfig())); + assertTrue(exception.hasSubExceptions()); + logger.log(Level.FINE, + exception.getSubExceptions().stream().map(Throwable::getMessage).collect(Collectors.joining("\n")) + ); + } + + @ParameterizedTest + @ValueSource(strings = { + "#metadataBlock\tname\tdataverseAlias\tdisplayName\tblockURI", + "#metadataBlock\tNAME\tDataversealias\tDisplayname\tBlockURI" + }) + void validateHeaderLine_Block_True(String line) throws ParserException { + List headers = Validator.validateHeaderLine(line, blockHeaders, Configuration.defaultConfig()); + assertFalse(headers.isEmpty()); + // we expect the normalized form, so the arrays should match! + assertEquals(blockHeaders, headers); + } + } +} \ No newline at end of file diff --git a/modules/solr-configset/src/test/resources/fields/valid_fields.csv b/modules/solr-configset/src/test/resources/fields/valid_fields.csv new file mode 100644 index 00000000000..13fd8c956ee --- /dev/null +++ b/modules/solr-configset/src/test/resources/fields/valid_fields.csv @@ -0,0 +1,12 @@ +;title;Title;The main title of the Dataset;;text;0;;TRUE;FALSE;FALSE;FALSE;TRUE;TRUE;;citation;http://purl.org/dc/terms/title +;subtitle;Subtitle;A secondary title that amplifies or states certain limitations on the main title;;text;1;;FALSE;FALSE;FALSE;FALSE;FALSE;FALSE;;citation; +;alternativeURL;Alternative URL;Another URL where one can view or access the data in the Dataset, e.g. a project or personal webpage;https://;url;3;#VALUE;FALSE;FALSE;FALSE;FALSE;FALSE;FALSE;;citation;https://schema.org/distribution +;author;Author;The entity, e.g. 
a person or organization, that created the Dataset;;none;7;;FALSE;FALSE;TRUE;FALSE;TRUE;TRUE;;citation;http://purl.org/dc/terms/creator +;authorName;Name;The name of the author, such as the person's name or the name of an organization;1) Family Name, Given Name or 2) Organization XYZ;text;8;#VALUE;TRUE;FALSE;FALSE;TRUE;TRUE;TRUE;author;citation; +;authorAffiliation;Affiliation;The name of the entity affiliated with the author, e.g. an organization's name;Organization XYZ;text;9;(#VALUE);TRUE;FALSE;FALSE;TRUE;TRUE;FALSE;author;citation; +;authorIdentifierScheme;Identifier Type;The type of identifier that uniquely identifies the author (e.g. ORCID, ISNI);;text;10;- #VALUE:;FALSE;TRUE;FALSE;FALSE;TRUE;FALSE;author;citation;http://purl.org/spar/datacite/AgentIdentifierScheme +;authorIdentifier;Identifier;Uniquely identifies the author when paired with an identifier type;;text;11;#VALUE;FALSE;FALSE;FALSE;FALSE;TRUE;FALSE;author;citation;http://purl.org/spar/datacite/AgentIdentifier +;datasetContactEmail;E-mail;The point of contact's email address;name@email.xyz;email;15;#EMAIL;FALSE;FALSE;FALSE;FALSE;TRUE;TRUE;datasetContact;citation; +;dsDescriptionValue;Text;A summary describing the purpose, nature, and scope of the Dataset;;textbox;17;#VALUE;TRUE;FALSE;FALSE;FALSE;TRUE;TRUE;dsDescription;citation; +;dsDescriptionDate;Date;The date when the description was added to the Dataset. If the Dataset contains more than one description, e.g. 
the data producer supplied one description and the data repository supplied another, this date is used to distinguish between the descriptions;YYYY-MM-DD;date;18;(#VALUE);FALSE;FALSE;FALSE;FALSE;TRUE;FALSE;dsDescription;citation; +;keywordVocabularyURI;Controlled Vocabulary URL;The URL where one can access information about the term's controlled vocabulary;https://;url;23;#VALUE;FALSE;FALSE;FALSE;FALSE;TRUE;FALSE;keyword;citation; diff --git a/pom.xml b/pom.xml index b4636c9aac6..3a1134d697b 100644 --- a/pom.xml +++ b/pom.xml @@ -283,7 +283,6 @@ org.apache.solr solr-solrj - 8.11.1 colt @@ -539,7 +538,6 @@ org.junit.jupiter junit-jupiter - ${junit.jupiter.version} test diff --git a/scripts/installer/Makefile b/scripts/installer/Makefile index a1fbfab782e..5fffcd774c1 100644 --- a/scripts/installer/Makefile +++ b/scripts/installer/Makefile @@ -55,9 +55,9 @@ ${JHOVE_SCHEMA}: ../../conf/jhove/jhoveConfig.xsd ${INSTALLER_ZIP_DIR} @echo copying jhove schema file /bin/cp ../../conf/jhove/jhoveConfig.xsd ${INSTALLER_ZIP_DIR} -${SOLR_SCHEMA}: ../../conf/solr/8.11.1/schema.xml ../../conf/solr/8.11.1/update-fields.sh ${INSTALLER_ZIP_DIR} +${SOLR_SCHEMA}: ../../conf/solr/8.11.1/schema.xml ../../modules/solr-configset/src/main/scripts/update-fields.sh ${INSTALLER_ZIP_DIR} @echo copying Solr schema file - /bin/cp ../../conf/solr/8.11.1/schema.xml ../../conf/solr/8.11.1/update-fields.sh ${INSTALLER_ZIP_DIR} + /bin/cp ../../conf/solr/8.11.1/schema.xml ../../modules/solr-configset/src/main/scripts/update-fields.sh ${INSTALLER_ZIP_DIR} ${SOLR_CONFIG}: ../../conf/solr/8.11.1/solrconfig.xml ${INSTALLER_ZIP_DIR} @echo copying Solr config file diff --git a/tests/shell/spec/update_fields_spec.sh b/tests/shell/spec/update_fields_spec.sh index e77121672dd..be86444d8a6 100644 --- a/tests/shell/spec/update_fields_spec.sh +++ b/tests/shell/spec/update_fields_spec.sh @@ -1,16 +1,16 @@ #shellcheck shell=sh update_fields() { - ../../conf/solr/8.11.1/update-fields.sh "$@" + 
../../modules/solr-configset/src/main/scripts/update-fields.sh "$@" } Describe "Update fields command" Describe "can operate on upstream data" - copyUpstreamSchema() { cp ../../conf/solr/8.11.1/schema.xml data/solr/upstream-schema.xml; } + copyUpstreamSchema() { cp ../../modules/solr-configset/src/main/resources/schema.xml data/solr/upstream-schema.xml; } AfterAll 'copyUpstreamSchema' - Path schema-xml="../../conf/solr/8.11.1/schema.xml" + Path schema-xml="../../modules/solr-configset/src/main/resources/schema.xml" It "needs upstream schema.xml" The path schema-xml should be exist End