diff --git a/tutorials/02_NeMo_Adapters.ipynb b/tutorials/02_NeMo_Adapters.ipynb
index 51a91a3c7053..289426f3bc2b 100644
--- a/tutorials/02_NeMo_Adapters.ipynb
+++ b/tutorials/02_NeMo_Adapters.ipynb
@@ -1985,4 +1985,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 0
-}
\ No newline at end of file
+}
diff --git a/tutorials/asr/ASR_TTS_Tutorial.ipynb b/tutorials/asr/ASR_TTS_Tutorial.ipynb
index 007713ee3cc2..9bbcc8e4aa34 100644
--- a/tutorials/asr/ASR_TTS_Tutorial.ipynb
+++ b/tutorials/asr/ASR_TTS_Tutorial.ipynb
@@ -553,7 +553,7 @@
 "config.trainer.max_epochs = NUM_EPOCHS\n",
 "\n",
 "config.trainer.devices = 1\n",
-"config.trainer.strategy = None # use 1 device, no need for ddp strategy\n",
+"config.trainer.strategy = 'auto' # use 1 device, no need for ddp strategy\n",
 "\n",
 "OmegaConf.resolve(config)"
 ]
diff --git a/tutorials/asr/Self_Supervised_Pre_Training.ipynb b/tutorials/asr/Self_Supervised_Pre_Training.ipynb
index 04998f68f23e..6f977df492a1 100644
--- a/tutorials/asr/Self_Supervised_Pre_Training.ipynb
+++ b/tutorials/asr/Self_Supervised_Pre_Training.ipynb
@@ -316,7 +316,7 @@
 " cfg.trainer.gpus = 1\n",
 "else:\n",
 " cfg.trainer.accelerator = 'cpu'\n",
-" cfg.trainer.strategy = None\n",
+" cfg.trainer.strategy = 'auto'\n",
 " cfg.trainer.gpus = 0\n",
 "\n",
 "cfg.exp_manager.exp_dir = data_dir + \"/content/exp\"\n",
@@ -538,7 +538,7 @@
 " cfg.trainer.gpus = 1\n",
 "else:\n",
 " cfg.trainer.accelerator = 'cpu'\n",
-" cfg.trainer.strategy = None\n",
+" cfg.trainer.strategy = 'auto'\n",
 " cfg.trainer.gpus = 0\n",
 "\n",
 "cfg.model.tokenizer.dir = data_dir + \"/tokenizers/an4/tokenizer_spe_unigram_v128/\" # note this is a directory, not a path to a vocabulary file\n",
@@ -725,4 +725,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 0
-}
\ No newline at end of file
+}
diff --git a/tutorials/asr/Speech_Commands.ipynb b/tutorials/asr/Speech_Commands.ipynb
index 208752347d64..c1566ae71850 100644
--- a/tutorials/asr/Speech_Commands.ipynb
+++ b/tutorials/asr/Speech_Commands.ipynb
@@ -441,7 +441,7 @@
 "config.trainer.max_epochs = 5\n",
 "\n",
 "# Remove distributed training flags\n",
-"config.trainer.strategy = None"
+"config.trainer.strategy = 'auto'"
 ],
 "execution_count": null,
 "outputs": []
diff --git a/tutorials/asr/Voice_Activity_Detection.ipynb b/tutorials/asr/Voice_Activity_Detection.ipynb
index b1bdd434511b..7c7b95e99416 100644
--- a/tutorials/asr/Voice_Activity_Detection.ipynb
+++ b/tutorials/asr/Voice_Activity_Detection.ipynb
@@ -462,7 +462,7 @@
 "config.trainer.max_epochs = 5\n",
 "\n",
 "# Remove distributed training flags\n",
-"config.trainer.strategy = None"
+"config.trainer.strategy = 'auto'"
 ]
 },
 {
diff --git a/tutorials/audio_tasks/speech_enhancement/Speech_Enhancement_with_NeMo.ipynb b/tutorials/audio_tasks/speech_enhancement/Speech_Enhancement_with_NeMo.ipynb
index 41a49688d35e..d8a15cbd5e1c 100644
--- a/tutorials/audio_tasks/speech_enhancement/Speech_Enhancement_with_NeMo.ipynb
+++ b/tutorials/audio_tasks/speech_enhancement/Speech_Enhancement_with_NeMo.ipynb
@@ -667,7 +667,7 @@
 "config.trainer.max_epochs = 10\n",
 "\n",
 "# Remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "# Instantiate the trainer\n",
 "trainer = pl.Trainer(**config.trainer)"
@@ -1144,7 +1144,7 @@
 "config_dual_output.trainer.max_epochs = 10\n",
 "\n",
 "# Remove distributed training flags\n",
-"config_dual_output.trainer.strategy = None\n",
+"config_dual_output.trainer.strategy = 'auto'\n",
 "\n",
 "# Instantiate the trainer\n",
 "trainer = pl.Trainer(**config_dual_output.trainer)\n",
@@ -1313,4 +1313,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 5
-}
\ No newline at end of file
+}
diff --git a/tutorials/nlp/Entity_Linking_Medical.ipynb b/tutorials/nlp/Entity_Linking_Medical.ipynb
index ff8eda123b7f..4f56a85eabfa 100644
--- a/tutorials/nlp/Entity_Linking_Medical.ipynb
+++ b/tutorials/nlp/Entity_Linking_Medical.ipynb
@@ -187,7 +187,7 @@
 "cfg.model.validation_ds.data_file = os.path.join(DATA_DIR, \"tiny_example_validation_pairs.tsv\")\n",
 "\n",
 "# remove distributed training flags\n",
-"cfg.trainer.strategy = None\n",
+"cfg.trainer.strategy = 'auto'\n",
 "cfg.trainer.accelerator = None"
 ]
 },
diff --git a/tutorials/nlp/GLUE_Benchmark.ipynb b/tutorials/nlp/GLUE_Benchmark.ipynb
index d8fe75940b09..445ff6705028 100644
--- a/tutorials/nlp/GLUE_Benchmark.ipynb
+++ b/tutorials/nlp/GLUE_Benchmark.ipynb
@@ -342,7 +342,7 @@
 "# config.trainer.amp_level = O1\n",
 "\n",
 "# remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "# setup max number of steps to reduce training time for demonstration purposes of this tutorial\n",
 "config.trainer.max_steps = 128\n",
@@ -563,4 +563,4 @@
 ]
 }
 ]
-}
\ No newline at end of file
+}
diff --git a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb
index 104d69df18e2..7513183fba28 100644
--- a/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb
+++ b/tutorials/nlp/Joint_Intent_and_Slot_Classification.ipynb
@@ -286,7 +286,7 @@
 "# config.trainer.amp_level = O1\n",
 "\n",
 "# remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "# setup a small number of epochs for demonstration purposes of this tutorial\n",
 "config.trainer.max_epochs = 5\n",
@@ -705,7 +705,7 @@
 "config.trainer.accelerator = accelerator\n",
 "\n",
 "# remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "trainer = pl.Trainer(**config.trainer)\n",
 "config.exp_manager.exp_dir = os.path.join(DATA_DIR, \"output/\" + run_name)\n",
diff --git a/tutorials/nlp/Punctuation_and_Capitalization.ipynb b/tutorials/nlp/Punctuation_and_Capitalization.ipynb
index 1519c234372b..e9d1060f6442 100644
--- a/tutorials/nlp/Punctuation_and_Capitalization.ipynb
+++ b/tutorials/nlp/Punctuation_and_Capitalization.ipynb
@@ -550,7 +550,7 @@
 "config.trainer.max_epochs = 1\n",
 "\n",
 "# Remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "trainer = pl.Trainer(**config.trainer)"
 ]
@@ -745,7 +745,7 @@
 "config.trainer.accelerator = accelerator\n",
 "config.trainer.precision = 16 if torch.cuda.is_available() else 32\n",
 "config.trainer.max_epochs = 1\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "# Exp manager\n",
 "config.exp_manager.explicit_log_dir = 'tarred_experiment'\n",
@@ -1043,4 +1043,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 1
-}
\ No newline at end of file
+}
diff --git a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb
index 5580bc4cf946..778e14e63b70 100644
--- a/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb
+++ b/tutorials/nlp/Punctuation_and_Capitalization_Lexical_Audio.ipynb
@@ -645,7 +645,7 @@
 "config.trainer.max_epochs = 1\n",
 "\n",
 "# Remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "config.exp_manager.use_datetime_version=False\n",
 "config.exp_manager.explicit_log_dir='Punctuation_And_Capitalization_Lexical_Audio'\n",
 "\n",
@@ -860,7 +860,7 @@
 "config.trainer.accelerator = accelerator\n",
 "config.trainer.precision = 16 if torch.cuda.is_available() else 32\n",
 "config.trainer.max_epochs = 1\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "# Exp manager\n",
 "config.exp_manager.explicit_log_dir = 'tarred_experiment'\n",
diff --git a/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb b/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb
index b7c25cb416ef..451c40152c8d 100644
--- a/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb
+++ b/tutorials/nlp/Relation_Extraction-BioMegatron.ipynb
@@ -403,7 +403,7 @@
 "config.trainer.precision = 16 if torch.cuda.is_available() else 32\n",
 "\n",
 "# remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "trainer = pl.Trainer(**config.trainer)"
 ]
@@ -652,4 +652,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 1
-}
\ No newline at end of file
+}
diff --git a/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb b/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb
index 5b5b74e7bf11..8e44aca9d0d1 100644
--- a/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb
+++ b/tutorials/nlp/Text_Classification_Sentiment_Analysis.ipynb
@@ -370,7 +370,7 @@
 "# config.trainer.amp_level = O1\n",
 "\n",
 "# disable distributed training when using Colab to prevent the errors\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "# setup max number of steps to reduce training time for demonstration purposes of this tutorial\n",
 "# Training stops when max_step or max_epochs is reached (earliest)\n",
@@ -573,7 +573,7 @@
 "# create a copy of the trainer config and update it to be used for final evaluation\n",
 "eval_trainer_cfg = config.trainer.copy()\n",
 "eval_trainer_cfg.accelerator = 'gpu' if torch.cuda.is_available() else 'cpu' # it is safer to perform evaluation on single GPU as PT is buggy with the last batch on multi-GPUs\n",
-"eval_trainer_cfg.strategy = None # 'ddp' is buggy with test process in the current PT, it looks like it has been fixed in the latest master\n",
+"eval_trainer_cfg.strategy = 'auto' # 'ddp' is buggy with test process in the current PT, it looks like it has been fixed in the latest master\n",
 "eval_trainer = pl.Trainer(**eval_trainer_cfg)\n",
 "\n",
 "eval_trainer.test(model=eval_model, verbose=False) # test_dataloaders=eval_dataloader,\n"
@@ -832,4 +832,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 4
-}
\ No newline at end of file
+}
diff --git a/tutorials/nlp/Token_Classification-BioMegatron.ipynb b/tutorials/nlp/Token_Classification-BioMegatron.ipynb
index 517f2e557743..e5e5aa81b859 100644
--- a/tutorials/nlp/Token_Classification-BioMegatron.ipynb
+++ b/tutorials/nlp/Token_Classification-BioMegatron.ipynb
@@ -434,7 +434,7 @@
 "config.trainer.precision = 16 if torch.cuda.is_available() else 32\n",
 "\n",
 "# remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "trainer = pl.Trainer(**config.trainer)"
 ]
diff --git a/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb b/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb
index c3f7e28b6b1f..1c1999cc08c1 100644
--- a/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb
+++ b/tutorials/nlp/Token_Classification_Named_Entity_Recognition.ipynb
@@ -533,7 +533,7 @@
 "# config.trainer.amp_level = O1\n",
 "\n",
 "# remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "# setup max number of steps to reduce training time for demonstration purposes of this tutorial\n",
 "config.trainer.max_steps = 32\n",
@@ -847,4 +847,4 @@
 "metadata": {}
 }
 ]
-}
\ No newline at end of file
+}
diff --git a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb
index 69df7b27b02d..f571fa176e96 100644
--- a/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb
+++ b/tutorials/nlp/Zero_Shot_Intent_Recognition.ipynb
@@ -400,7 +400,7 @@
 "# config.trainer.amp_level = O1\n",
 "\n",
 "# remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "# setup max number of steps to reduce training time for demonstration purposes of this tutorial\n",
 "config.trainer.max_steps = 128\n",
@@ -671,4 +671,4 @@
 },
 "nbformat": 4,
 "nbformat_minor": 4
-}
\ No newline at end of file
+}
diff --git a/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb b/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb
index 3c56df2bbba0..efd86e1ef242 100644
--- a/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb
+++ b/tutorials/speaker_tasks/Speaker_Diarization_Training.ipynb
@@ -761,7 +761,7 @@
 "source": [
 "config.model.diarizer.speaker_embeddings.model_path=\"titanet_large\"\n",
 "config.trainer.max_epochs = 5\n",
-"config.trainer.strategy = None"
+"config.trainer.strategy = 'auto'"
 ]
 },
 {
diff --git a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb
index dce8c46df1b0..f0ad1c19f5c9 100644
--- a/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb
+++ b/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb
@@ -475,7 +475,7 @@
 "config.trainer.max_epochs = 10\n",
 "\n",
 "# Remove distributed training flags\n",
-"config.trainer.strategy = None\n",
+"config.trainer.strategy = 'auto'\n",
 "\n",
 "# Remove augmentations\n",
 "config.model.train_ds.augmentor=None"
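
Every hunk above applies the same pattern: the legacy `strategy=None` setting, which recent PyTorch Lightning releases no longer accept, becomes the string `'auto'`. Note the quotes: a bare `auto` would be an undefined name and raise `NameError` when the notebook cell runs. Below is a minimal sketch of the resulting single-device trainer setup, assuming pytorch_lightning 2.x and omegaconf are installed; the config values are illustrative rather than copied from any one tutorial.

import pytorch_lightning as pl
from omegaconf import OmegaConf

config = OmegaConf.create({
    "trainer": {
        "devices": 1,            # single device, so no DDP strategy is needed
        "accelerator": "auto",   # pick a GPU if available, otherwise CPU
        "strategy": "auto",      # a string, not a bare name; with one device
                                 # Lightning resolves it to a single-device strategy
        "max_epochs": 5,
    }
})

# Same construction used throughout the tutorials touched by this diff.
trainer = pl.Trainer(**config.trainer)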