diff --git a/docs/source/asr/intro.rst b/docs/source/asr/intro.rst
index 8e2e3ba699ec..e655da836a76 100644
--- a/docs/source/asr/intro.rst
+++ b/docs/source/asr/intro.rst
@@ -28,6 +28,18 @@ we could integrate a language model that would improve our predictions, as well.
And the entire end-to-end ASR model can be trained at once--a much easier pipeline to handle!
+The demo below allows evaluation of NeMo ASR models in multiple languages directly from the browser:
+
+.. raw:: html
+
+
+
+
+
+
The full documentation tree is as follows:
.. toctree::
diff --git a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb
index 1ecb17e83b06..a13174033e0c 100644
--- a/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb
+++ b/tutorials/Publish_NeMo_Model_On_Hugging_Face_Hub.ipynb
@@ -458,6 +458,7 @@
{
"cell_type": "code",
"source": [
+ "# Replace all spaces with `-`\n",
"DATASETS = [\n",
" \"librispeech_asr\",\n",
" \"mozilla-foundation/common_voice_7_0\",\n",
@@ -466,11 +467,11 @@
" \"Switchboard-1\",\n",
" \"WSJ-0\",\n",
" \"WSJ-1\",\n",
- " \"National Singapore Corpus Part 1\",\n",
- " \"National Singapore Corpus Part 6\",\n",
- " \"VoxPopuli (EN)\",\n",
- " \"Europarl-ASR (EN)\",\n",
- " \"Multilingual LibriSpeech (2000 hours)\",\n",
+ " \"National-Singapore-Corpus-Part-1\",\n",
+ " \"National-Singapore-Corpus-Part-6\",\n",
+ " \"VoxPopuli-(EN)\",\n",
+ " \"Europarl-ASR-(EN)\",\n",
+ " \"Multilingual-LibriSpeech-(2000-hours)\",\n",
"]"
],
"metadata": {
@@ -520,9 +521,14 @@
"config = OmegaConf.structured(config)\n",
"\n",
"with open_dict(config):\n",
+ " # Update `model_index` to `model-index`\n",
" model_index = config.pop('model_index')\n",
" config['model-index'] = model_index\n",
"\n",
+ " # Replace all spaces with `-` in datasets\n",
+ " normalized_datasets = [ds_name.replace(\" \", \"-\") for ds_name in config['datasets']]\n",
+ " config['datasets'] = OmegaConf.create(normalized_datasets)\n",
+ "\n",
"print(OmegaConf.to_yaml(config))"
],
"metadata": {