Mirror of https://github.com/davesarmoury/GLaDOS.git
Synced 2025-09-26 22:31:26 +08:00
Added training with NeMo
This commit is contained in:
parent 89b90e7346
commit 01da2c4777
README.md (14 lines removed)
@@ -1,14 +0,0 @@
GLaDOS RIVA
===========

This package will download audio for training data, then train a riva TTS network

Install
-------

pip3 install -r requirements.txt

Usage
-----

./download_audio.py
(deleted Jupyter notebook; filename not shown in this view)
@@ -1,401 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "b276bbe0",
"metadata": {},
"outputs": [],
"source": [
"#!sudo apt install sox libsndfile1 ffmpeg\n",
"#!pip3 install wget unidecode pynini==2.1.4\n",
"#!pip3 install git+https://github.com/NVIDIA/NeMo.git@v1.12.0#egg=nemo_toolkit[all]\n",
"#!wget https://raw.githubusercontent.com/NVIDIA/NeMo/main/nemo_text_processing/install_pynini.sh\n",
"#!bash install_pynini.sh"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c3b7b7e3",
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"from multiprocessing import cpu_count\n",
"from multiprocessing.pool import ThreadPool\n",
"import shutil\n",
"import os\n",
"from bs4 import BeautifulSoup\n",
"import soundfile as sf\n",
"import string\n",
"import json\n",
"import re\n",
"import num2words\n",
"\n",
"class bcolors:\n",
"    HEADER = '\\033[95m'\n",
"    OKBLUE = '\\033[94m'\n",
"    OKCYAN = '\\033[96m'\n",
"    OKGREEN = '\\033[92m'\n",
"    WARNING = '\\033[93m'\n",
"    FAIL = '\\033[91m'\n",
"    ENDC = '\\033[0m'\n",
"    BOLD = '\\033[1m'\n",
"    UNDERLINE = '\\033[4m'\n",
"\n",
"blocklist = [\"potato\", \"_ding_\", \"00_part1_entry-6\"]\n",
"audio_dir = 'audio'\n",
"download_threads = 64\n",
"\n",
"def prep(args, overwrite=False):\n",
"    already_exists = os.path.exists(audio_dir)\n",
"\n",
"    if already_exists and not overwrite:\n",
"        print(\"Data already downloaded\")\n",
"        return\n",
"\n",
"    if already_exists:\n",
"        print(\"Deleting previously downloaded audio\")\n",
"        shutil.rmtree(audio_dir)\n",
"\n",
"    os.mkdir(audio_dir)\n",
"    download_parallel(args)\n",
"\n",
"def remove_punctuation(str):\n",
"    return str.translate(str.maketrans('', '', string.punctuation))\n",
"\n",
"def audio_duration(fn):\n",
"    f = sf.SoundFile(fn)\n",
"    return f.frames / f.samplerate\n",
"\n",
"def download_file(args):\n",
"    url, filename = args[0], args[1]\n",
"\n",
"    try:\n",
"        response = requests.get(url)\n",
"        open(os.path.join(audio_dir, filename), \"wb\").write(response.content)\n",
"        return filename, True\n",
"    except:\n",
"        return filename, False\n",
"\n",
"def download_parallel(args):\n",
"    results = ThreadPool(download_threads).imap_unordered(download_file, args)\n",
"    for result in results:\n",
"        if result[1]:\n",
"            print(bcolors.OKGREEN + \"[\" + u'\\u2713' + \"] \" + bcolors.ENDC + result[0])\n",
"        else:\n",
"            print(bcolors.FAIL + \"[\" + u'\\u2715' + \"] \" + bcolors.ENDC + result[0])\n",
"\n",
"def main():\n",
"    r = requests.get(\"https://theportalwiki.com/wiki/GLaDOS_voice_lines\")\n",
"\n",
"    urls = []\n",
"    filenames = []\n",
"    texts = []\n",
"\n",
"    soup = BeautifulSoup(r.text.encode('utf-8').decode('ascii', 'ignore'), 'html.parser')\n",
"    for link_item in soup.find_all('a'):\n",
"        url = link_item.get(\"href\", None)\n",
"        if url:\n",
"            if \"https:\" in url and \".wav\" in url:\n",
"                list_item = link_item.find_parent(\"li\")\n",
"                ital_item = list_item.find_all('i')\n",
"                if ital_item:\n",
"                    text = ital_item[0].text\n",
"                    text = text.replace('\"', '')\n",
"                    filename = url[url.rindex(\"/\")+1:]\n",
"\n",
"                    if \"[\" not in text and \"]\" not in text and \"$\" not in text:\n",
"                        if url not in urls:\n",
"                            for s in blocklist:\n",
"                                if s in url:\n",
"                                    break\n",
"                            else:\n",
"                                urls.append(url)\n",
"                                filenames.append(filename)\n",
"                                text = text.replace('*', '')\n",
"                                text = re.sub(r\"(\\d+)\", lambda x: num2words.num2words(int(x.group(0))), text)\n",
"                                texts.append(text)\n",
"\n",
"    print(\"Found \" + str(len(urls)) + \" urls\")\n",
"\n",
"    args = zip(urls, filenames)\n",
"\n",
"    prep(args)\n",
"\n",
"    total_audio_time = 0\n",
"    outFile=open(os.path.join(audio_dir, \"manifest.json\"), 'w')\n",
"    for i in range(len(urls)):\n",
"        item = {}\n",
"        text = texts[i]\n",
"        filename = filenames[i]\n",
"        item[\"audio_filepath\"] = os.path.join(audio_dir, filename)\n",
"        #item[\"text_normalized\"] = text\n",
"        #item[\"text_no_preprocessing\"] = text\n",
"        item[\"text\"] = text.lower()\n",
"        item[\"duration\"] = audio_duration(os.path.join(audio_dir, filename))\n",
"        total_audio_time = total_audio_time + item[\"duration\"]\n",
"        outFile.write(json.dumps(item, ensure_ascii=True, sort_keys=True) + \"\\n\")\n",
"\n",
"    outFile.close()\n",
"    print(str(total_audio_time/60.0) + \" min\")\n",
"\n",
"main()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "82a7d268",
"metadata": {},
"outputs": [],
"source": [
"!head -n 1 ./audio/manifest.json"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "832fcd67",
"metadata": {},
"outputs": [],
"source": [
"!cat ./audio/manifest.json | tail -n 2 > ./manifest_validation.json\n",
"!cat ./audio/manifest.json | head -n -2 > ./manifest_train.json"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e56749bf",
"metadata": {},
"outputs": [],
"source": [
"home_path = !(echo $HOME)\n",
"home_path = home_path[0]\n",
"print(home_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9bf1fb18",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"\n",
"import torch\n",
"import IPython.display as ipd\n",
"from matplotlib.pyplot import imshow\n",
"from matplotlib import pyplot as plt\n",
"\n",
"from nemo.collections.tts.models import FastPitchModel\n",
"FastPitchModel.from_pretrained(\"tts_en_fastpitch\")\n",
"\n",
"from pathlib import Path\n",
"nemo_files = [p for p in Path(f\"{home_path}/.cache/torch/NeMo/\").glob(\"**/tts_en_fastpitch_align.nemo\")]\n",
"print(f\"Copying {nemo_files[0]} to ./\")\n",
"Path(\"./tts_en_fastpitch_align.nemo\").write_bytes(nemo_files[0].read_bytes())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "81741e30",
"metadata": {},
"outputs": [],
"source": [
"!wget https://raw.githubusercontent.com/nvidia/NeMo/v1.12.0/examples/tts/fastpitch_finetune.py\n",
"!wget https://raw.githubusercontent.com/NVIDIA/NeMo/v1.12.0/examples/tts/hifigan_finetune.py\n",
"\n",
"!mkdir -p conf\n",
"!cd conf \\\n",
"&& wget https://raw.githubusercontent.com/nvidia/NeMo/v1.12.0/examples/tts/conf/fastpitch_align_v1.05.yaml \\\n",
"&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/v1.12.0/examples/tts/conf/hifigan/hifigan.yaml \\\n",
"&& cd .."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "64b51e94",
"metadata": {},
"outputs": [],
"source": [
"# additional files\n",
"!mkdir -p tts_dataset_files && cd tts_dataset_files \\\n",
"&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/v1.12.0/scripts/tts_dataset_files/cmudict-0.7b_nv22.08 \\\n",
"&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/v1.12.0/scripts/tts_dataset_files/heteronyms-052722 \\\n",
"&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/v1.12.0/nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv \\\n",
"&& cd .."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b92a469",
"metadata": {},
"outputs": [],
"source": [
"!(python3 fastpitch_finetune.py --config-name=fastpitch_align_v1.05.yaml \\\n",
"  train_dataset=./manifest_train.json \\\n",
"  validation_datasets=./manifest_validation.json \\\n",
"  sup_data_path=./fastpitch_sup_data \\\n",
"  phoneme_dict_path=tts_dataset_files/cmudict-0.7b_nv22.08 \\\n",
"  heteronyms_path=tts_dataset_files/heteronyms-052722 \\\n",
"  whitelist_path=tts_dataset_files/lj_speech.tsv \\\n",
"  exp_manager.exp_dir=./glados_out \\\n",
"  +init_from_nemo_model=./tts_en_fastpitch_align.nemo \\\n",
"  trainer.max_epochs=100 \\\n",
"  trainer.check_val_every_n_epoch=25 \\\n",
"  model.train_ds.dataloader_params.batch_size=12 model.validation_ds.dataloader_params.batch_size=12 \\\n",
"  model.n_speakers=1 model.pitch_mean=121.9 model.pitch_std=23.1 \\\n",
"  model.pitch_fmin=30 model.pitch_fmax=512 model.optim.lr=2e-4 \\\n",
"  ~model.optim.sched model.optim.name=adam trainer.devices=1 trainer.strategy=null \\\n",
"  +model.text_tokenizer.add_blank_at=true \\\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "780dba9a",
"metadata": {},
"outputs": [],
"source": [
"from nemo.collections.tts.models import HifiGanModel\n",
"from nemo.collections.tts.models import FastPitchModel\n",
"\n",
"vocoder = HifiGanModel.from_pretrained(\"tts_hifigan\")\n",
"vocoder = vocoder.eval().cuda()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "831239e2",
"metadata": {},
"outputs": [],
"source": [
"def infer(spec_gen_model, vocoder_model, str_input, speaker=None):\n",
"    \"\"\"\n",
"    Synthesizes spectrogram and audio from a text string given a spectrogram synthesis and vocoder model.\n",
"\n",
"    Args:\n",
"        spec_gen_model: Spectrogram generator model (FastPitch in our case)\n",
"        vocoder_model: Vocoder model (HiFiGAN in our case)\n",
"        str_input: Text input for the synthesis\n",
"        speaker: Speaker ID\n",
"\n",
"    Returns:\n",
"        spectrogram and waveform of the synthesized audio.\n",
"    \"\"\"\n",
"    with torch.no_grad():\n",
"        parsed = spec_gen_model.parse(str_input)\n",
"        if speaker is not None:\n",
"            speaker = torch.tensor([speaker]).long().to(device=spec_gen_model.device)\n",
"        spectrogram = spec_gen_model.generate_spectrogram(tokens=parsed, speaker=speaker)\n",
"        audio = vocoder_model.convert_spectrogram_to_audio(spec=spectrogram)\n",
"\n",
"    if spectrogram is not None:\n",
"        if isinstance(spectrogram, torch.Tensor):\n",
"            spectrogram = spectrogram.to('cpu').numpy()\n",
"        if len(spectrogram.shape) == 3:\n",
"            spectrogram = spectrogram[0]\n",
"    if isinstance(audio, torch.Tensor):\n",
"        audio = audio.to('cpu').numpy()\n",
"    return spectrogram, audio\n",
"\n",
"def get_best_ckpt_from_last_run(\n",
"        base_dir,\n",
"        new_speaker_id,\n",
"        duration_mins,\n",
"        mixing_enabled,\n",
"        original_speaker_id,\n",
"        model_name=\"FastPitch\"\n",
"    ):\n",
"    mixing = \"no_mixing\" if not mixing_enabled else \"mixing\"\n",
"\n",
"    d = \"glados_out\"\n",
"\n",
"    exp_dirs = list([i for i in (Path(base_dir) / d / model_name).iterdir() if i.is_dir()])\n",
"    last_exp_dir = sorted(exp_dirs)[-1]\n",
"\n",
"    last_checkpoint_dir = last_exp_dir / \"checkpoints\"\n",
"\n",
"    last_ckpt = list(last_checkpoint_dir.glob('*-last.ckpt'))\n",
"\n",
"    if len(last_ckpt) == 0:\n",
"        raise ValueError(f\"There is no last checkpoint in {last_checkpoint_dir}.\")\n",
"\n",
"    return str(last_ckpt[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2fad0610",
"metadata": {},
"outputs": [],
"source": [
"new_speaker_id = 6097\n",
"duration_mins = 5\n",
"mixing = False\n",
"original_speaker_id = \"ljspeech\"\n",
"\n",
"last_ckpt = get_best_ckpt_from_last_run(\"./\", new_speaker_id, duration_mins, mixing, original_speaker_id)\n",
"print(last_ckpt)\n",
"\n",
"spec_model = FastPitchModel.load_from_checkpoint(last_ckpt)\n",
"spec_model.eval().cuda()\n",
"\n",
"# Only need to set speaker_id if there is more than one speaker\n",
"speaker_id = None\n",
"if mixing:\n",
"    speaker_id = 1\n",
"\n",
"num_val = 2 # Number of validation samples\n",
"val_records = []\n",
"with open(\"manifest_validation.json\", \"r\") as f:\n",
"    for i, line in enumerate(f):\n",
"        val_records.append(json.loads(line))\n",
"        if len(val_records) >= num_val:\n",
"            break\n",
"\n",
"for val_record in val_records:\n",
"    print(\"Real validation audio\")\n",
"    ipd.display(ipd.Audio(val_record['audio_filepath'], rate=22050))\n",
"    print(f\"SYNTHESIZED FOR -- Speaker: {new_speaker_id} | Dataset size: {duration_mins} mins | Mixing:{mixing} | Text: {val_record['text']}\")\n",
"    spec, audio = infer(spec_model, vocoder, val_record['text'], speaker=speaker_id)\n",
"    ipd.display(ipd.Audio(audio, rate=22050))\n",
"    %matplotlib inline\n",
"    imshow(spec, origin=\"lower\", aspect=\"auto\")\n",
"    plt.show()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
File diff suppressed because it is too large

TrainNemo/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
pip3 uninstall numpy pesq
pip3 install numpy==1.21.4
pip3 install pesq
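
The numpy pin is presumably there because pesq compiles against numpy's C API, so the two must be installed in that order. A quick import check (an editorial sketch, not part of the commit) to confirm the pairing took:

import numpy
from pesq import pesq  # import fails with a binary-incompatibility error if the numpy ABI mismatches

print(numpy.__version__)  # expect 1.21.4, per the pins above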
TrainNemo/TrainGLaDOS_NeMo.ipynb (new file, 631 lines)
@@ -0,0 +1,631 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "b276bbe0",
"metadata": {},
"outputs": [],
"source": [
"#!apt install sox libsndfile1 ffmpeg\n",
"#!pip3 install wheel\n",
"#!pip3 install -r requirements.txt\n",
"#!pip3 install wget unidecode\n",
"#!pip3 install resampy==0.3.1\n",
"#!pip3 install nemo_toolkit[all]\n",
"#!pip3 install numba==0.48\n",
"#!pip3 install librosa==0.8.1\n",
"#!pip3 install pynini"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a9bb6563",
"metadata": {},
"outputs": [],
"source": [
"import soundfile\n",
"import librosa\n",
"import json\n",
"import os\n",
"\n",
"print(librosa.__version__)\n",
"def resample_audio(input_file_path, output_path, target_sampling_rate=22050):\n",
"    if not input_file_path.endswith(\".wav\"):\n",
"        raise NotImplementedError(\"Loading only implemented for wav files.\")\n",
"    if not os.path.exists(input_file_path):\n",
"        raise FileNotFoundError(f\"Cannot find input file at {input_file_path}\")\n",
"    audio, sampling_rate = librosa.load(\n",
"        input_file_path,\n",
"        sr=target_sampling_rate\n",
"    )\n",
"\n",
"    soundfile.write(\n",
"        output_path,\n",
"        audio,\n",
"        samplerate=target_sampling_rate,\n",
"        format=\"wav\"\n",
"    )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c3b7b7e3",
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"from multiprocessing import cpu_count\n",
"from multiprocessing.pool import ThreadPool\n",
"import shutil\n",
"import os\n",
"from bs4 import BeautifulSoup\n",
"import soundfile as sf\n",
"import string\n",
"import json\n",
"import re\n",
"import num2words\n",
"from tqdm.notebook import tqdm\n",
"\n",
"class bcolors:\n",
"    HEADER = '\\033[95m'\n",
"    OKBLUE = '\\033[94m'\n",
"    OKCYAN = '\\033[96m'\n",
"    OKGREEN = '\\033[92m'\n",
"    WARNING = '\\033[93m'\n",
"    FAIL = '\\033[91m'\n",
"    ENDC = '\\033[0m'\n",
"    BOLD = '\\033[1m'\n",
"    UNDERLINE = '\\033[4m'\n",
"\n",
"blocklist = [\"potato\", \"_ding_\", \"00_part1_entry-6\", \"_escape_\"]\n",
"audio_dir = 'audio'\n",
"download_threads = 64\n",
"\n",
"temp_path = \"temp_audio\"\n",
"sampling_rate = 22050\n",
"\n",
"def prep(args, overwrite=True):\n",
"    already_exists = os.path.exists(audio_dir)\n",
"\n",
"    if already_exists and not overwrite:\n",
"        print(\"Data already downloaded\")\n",
"        return\n",
"\n",
"    if already_exists:\n",
"        print(\"Deleting previously downloaded audio\")\n",
"        shutil.rmtree(audio_dir)\n",
"\n",
"    if os.path.exists(temp_path):\n",
"        shutil.rmtree(temp_path)\n",
"\n",
"    os.mkdir(audio_dir)\n",
"    download_parallel(args)\n",
"\n",
"def remove_punctuation(str):\n",
"    return str.translate(str.maketrans('', '', string.punctuation))\n",
"\n",
"def audio_duration(fn):\n",
"    f = sf.SoundFile(fn)\n",
"    return f.frames / f.samplerate\n",
"\n",
"def download_file(args):\n",
"    url, filename = args[0], args[1]\n",
"\n",
"    try:\n",
"        response = requests.get(url)\n",
"        open(os.path.join(audio_dir, filename), \"wb\").write(response.content)\n",
"        return filename, True\n",
"    except:\n",
"        return filename, False\n",
"\n",
"def download_parallel(args):\n",
"    results = ThreadPool(download_threads).imap_unordered(download_file, args)\n",
"    for result in results:\n",
"        if result[1]:\n",
"            print(bcolors.OKGREEN + \"[\" + u'\\u2713' + \"] \" + bcolors.ENDC + result[0])\n",
"        else:\n",
"            print(bcolors.FAIL + \"[\" + u'\\u2715' + \"] \" + bcolors.ENDC + result[0])\n",
"\n",
"def main():\n",
"    r = requests.get(\"https://theportalwiki.com/wiki/GLaDOS_voice_lines\")\n",
"\n",
"    urls = []\n",
"    filenames = []\n",
"    texts = []\n",
"\n",
"    soup = BeautifulSoup(r.text.encode('utf-8').decode('ascii', 'ignore'), 'html.parser')\n",
"    for link_item in soup.find_all('a'):\n",
"        url = link_item.get(\"href\", None)\n",
"        if url:\n",
"            if \"https:\" in url and \".wav\" in url:\n",
"                list_item = link_item.find_parent(\"li\")\n",
"                ital_item = list_item.find_all('i')\n",
"                if ital_item:\n",
"                    text = ital_item[0].text\n",
"                    text = text.replace('\"', '')\n",
"                    filename = url[url.rindex(\"/\")+1:]\n",
"\n",
"                    if \"[\" not in text and \"]\" not in text and \"$\" not in text:\n",
"                        if url not in urls:\n",
"                            for s in blocklist:\n",
"                                if s in url:\n",
"                                    break\n",
"                            else:\n",
"                                urls.append(url)\n",
"                                filenames.append(filename)\n",
"                                text = text.replace('*', '')\n",
"                                texts.append(text)\n",
"\n",
"    print(\"Found \" + str(len(urls)) + \" urls\")\n",
"\n",
"    args = zip(urls, filenames)\n",
"\n",
"    prep(args)\n",
"\n",
"    total_audio_time = 0\n",
"    outFile=open(os.path.join(audio_dir, \"manifest.json\"), 'w')\n",
"    for i in range(len(urls)):\n",
"        item = {}\n",
"        text = texts[i]\n",
"        filename = filenames[i]\n",
"        item[\"audio_filepath\"] = os.path.join(audio_dir, filename)\n",
"        #item[\"text_normalized\"] = re.sub(r\"(\\d+)\", lambda x: num2words.num2words(int(x.group(0))), text)\n",
"        item[\"text\"] = re.sub(r\"(\\d+)\", lambda x: num2words.num2words(int(x.group(0))), text).lower()\n",
"        item[\"duration\"] = audio_duration(os.path.join(audio_dir, filename))\n",
"        total_audio_time = total_audio_time + item[\"duration\"]\n",
"        outFile.write(json.dumps(item, ensure_ascii=True, sort_keys=True) + \"\\n\")\n",
"\n",
"    outFile.close()\n",
"    print(\"\\n\" + str(total_audio_time/60.0) + \" min\\n\")\n",
"\n",
"main()\n",
"\n",
"shutil.copytree(audio_dir, temp_path)\n",
"\n",
"print(\"Resampling Audio...\")\n",
"for filename in tqdm(os.listdir(temp_path)):\n",
"    if \".wav\" in filename:\n",
"        source_name = os.path.join(temp_path, filename)\n",
"        destination_name = os.path.join(audio_dir, filename)\n",
"        resample_audio(source_name, destination_name, target_sampling_rate=sampling_rate)\n",
"\n",
"if os.path.exists(temp_path):\n",
"    shutil.rmtree(temp_path)"
]
},
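{
"cell_type": "code",
"execution_count": null,
"id": "editorial-manifest-check",
"metadata": {},
"outputs": [],
"source": [
"# Editorial sketch, not part of the original commit: spot-check that every\n",
"# manifest entry points at a readable wav resampled to 22050 Hz before training.\n",
"import json\n",
"import soundfile as sf\n",
"\n",
"with open(\"audio/manifest.json\") as f:\n",
"    for line in f:\n",
"        rec = json.loads(line)\n",
"        assert sf.SoundFile(rec[\"audio_filepath\"]).samplerate == 22050"
]
},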
{
"cell_type": "code",
"execution_count": null,
"id": "82a7d268",
"metadata": {},
"outputs": [],
"source": [
"!head -n 1 ./audio/manifest.json"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "832fcd67",
"metadata": {},
"outputs": [],
"source": [
"!cat ./audio/manifest.json | tail -n 5 > ./manifest_validation.json\n",
"!cat ./audio/manifest.json | head -n -5 > ./manifest_train.json"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e56749bf",
"metadata": {},
"outputs": [],
"source": [
"home_path = !(echo $HOME)\n",
"home_path = home_path[0]\n",
"print(home_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9bf1fb18",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"\n",
"import torch\n",
"import IPython.display as ipd\n",
"from matplotlib.pyplot import imshow\n",
"from matplotlib import pyplot as plt\n",
"\n",
"from nemo.collections.tts.models import FastPitchModel\n",
"FastPitchModel.from_pretrained(\"tts_en_fastpitch\")\n",
"\n",
"from pathlib import Path\n",
"nemo_files = [p for p in Path(f\"{home_path}/.cache/torch/NeMo/\").glob(\"**/tts_en_fastpitch_align.nemo\")]\n",
"print(f\"Copying {nemo_files[0]} to ./\")\n",
"Path(\"./tts_en_fastpitch_align.nemo\").write_bytes(nemo_files[0].read_bytes())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aefc57ff",
"metadata": {},
"outputs": [],
"source": [
"#!wget https://raw.githubusercontent.com/nvidia/NeMo/r1.14.0/examples/tts/fastpitch_finetune.py\n",
"#!wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/examples/tts/hifigan_finetune.py\n",
"#\n",
"#!mkdir -p conf\n",
"#!cd conf \\\n",
"#&& wget https://raw.githubusercontent.com/nvidia/NeMo/r1.14.0/examples/tts/conf/fastpitch_align_v1.05.yaml \\\n",
"#&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/examples/tts/conf/hifigan/hifigan.yaml \\\n",
"#&& cd .."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "64b51e94",
"metadata": {},
"outputs": [],
"source": [
"# additional files\n",
"#!mkdir -p tts_dataset_files && cd tts_dataset_files \\\n",
"#&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/scripts/tts_dataset_files/cmudict-0.7b_nv22.10 \\\n",
"#&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/scripts/tts_dataset_files/heteronyms-052722 \\\n",
"#&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv \\\n",
"#&& cd .."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6b92a469",
"metadata": {},
"outputs": [],
"source": [
"!(python3 fastpitch_finetune.py --config-name=fastpitch_align_v1.05.yaml \\\n",
"  train_dataset=./manifest_train.json \\\n",
"  validation_datasets=./manifest_validation.json \\\n",
"  sup_data_path=./fastpitch_sup_data \\\n",
"  phoneme_dict_path=tts_dataset_files/cmudict-0.7b_nv22.10 \\\n",
"  heteronyms_path=tts_dataset_files/heteronyms-052722 \\\n",
"  whitelist_path=tts_dataset_files/lj_speech.tsv \\\n",
"  exp_manager.exp_dir=./glados_out \\\n",
"  +init_from_nemo_model=./tts_en_fastpitch_align.nemo \\\n",
"  trainer.max_epochs=150 \\\n",
"  trainer.check_val_every_n_epoch=25 \\\n",
"  model.train_ds.dataloader_params.batch_size=12 model.validation_ds.dataloader_params.batch_size=12 \\\n",
"  model.n_speakers=1 model.pitch_mean=165.458 model.pitch_std=40.1891 \\\n",
"  model.pitch_fmin=80.0 model.pitch_fmax=2048.0 model.optim.lr=2e-4 \\\n",
"  ~model.optim.sched model.optim.name=adam trainer.devices=1 trainer.strategy=null \\\n",
"  +model.text_tokenizer.add_blank_at=true \\\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "780dba9a",
"metadata": {},
"outputs": [],
"source": [
"from nemo.collections.tts.models import HifiGanModel\n",
"from nemo.collections.tts.models import FastPitchModel\n",
"\n",
"vocoder = HifiGanModel.from_pretrained(\"tts_hifigan\")\n",
"vocoder = vocoder.eval().cuda()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "831239e2",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"\n",
"def infer(spec_gen_model, vocoder_model, str_input, speaker=None):\n",
"    \"\"\"\n",
"    Synthesizes spectrogram and audio from a text string given a spectrogram synthesis and vocoder model.\n",
"\n",
"    Args:\n",
"        spec_gen_model: Spectrogram generator model (FastPitch in our case)\n",
"        vocoder_model: Vocoder model (HiFiGAN in our case)\n",
"        str_input: Text input for the synthesis\n",
"        speaker: Speaker ID\n",
"\n",
"    Returns:\n",
"        spectrogram and waveform of the synthesized audio.\n",
"    \"\"\"\n",
"    with torch.no_grad():\n",
"        parsed = spec_gen_model.parse(str_input)\n",
"        if speaker is not None:\n",
"            speaker = torch.tensor([speaker]).long().to(device=spec_gen_model.device)\n",
"        spectrogram = spec_gen_model.generate_spectrogram(tokens=parsed, speaker=speaker)\n",
"        audio = vocoder_model.convert_spectrogram_to_audio(spec=spectrogram)\n",
"\n",
"    if spectrogram is not None:\n",
"        if isinstance(spectrogram, torch.Tensor):\n",
"            spectrogram = spectrogram.to('cpu').numpy()\n",
"        if len(spectrogram.shape) == 3:\n",
"            spectrogram = spectrogram[0]\n",
"    if isinstance(audio, torch.Tensor):\n",
"        audio = audio.to('cpu').numpy()\n",
"    return spectrogram, audio\n",
"\n",
"def get_best_ckpt_from_last_run(\n",
"        base_dir=\"./glados_out\",\n",
"        model_name=\"FastPitch\"\n",
"    ):\n",
"\n",
"    exp_dirs = list([i for i in (Path(base_dir) / model_name).iterdir() if i.is_dir()])\n",
"    last_exp_dir = sorted(exp_dirs)[-1]\n",
"\n",
"    last_checkpoint_dir = last_exp_dir / \"checkpoints\"\n",
"\n",
"    last_ckpt = list(last_checkpoint_dir.glob('*-last.ckpt'))\n",
"\n",
"    if len(last_ckpt) == 0:\n",
"        raise ValueError(f\"There is no last checkpoint in {last_checkpoint_dir}.\")\n",
"\n",
"    return str(last_ckpt[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2fad0610",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import IPython.display as ipd\n",
"\n",
"# Only need to set speaker_id if there is more than one speaker\n",
"speaker_id = None\n",
"\n",
"last_ckpt = get_best_ckpt_from_last_run(model_name=\"FastPitch\")\n",
"print(last_ckpt)\n",
"\n",
"spec_model = FastPitchModel.load_from_checkpoint(last_ckpt)\n",
"spec_model.eval().cuda()\n",
"\n",
"num_val = 5 # Number of validation samples\n",
"val_records = []\n",
"with open(\"manifest_validation.json\", \"r\") as f:\n",
"    for i, line in enumerate(f):\n",
"        val_records.append(json.loads(line))\n",
"        if len(val_records) >= num_val:\n",
"            break\n",
"\n",
"for val_record in val_records:\n",
"    print(\"Real validation audio\")\n",
"    ipd.display(ipd.Audio(val_record['audio_filepath'], rate=22050))\n",
"    print(f\"Text: {val_record['text']}\")\n",
"    spec, audio = infer(spec_model, vocoder, val_record['text'], speaker=speaker_id)\n",
"    ipd.display(ipd.Audio(audio, rate=22050))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0454967c",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import numpy as np\n",
"import torch\n",
"import soundfile as sf\n",
"\n",
"from pathlib import Path\n",
"\n",
"from nemo.collections.tts.torch.helpers import BetaBinomialInterpolator\n",
"\n",
"def load_wav(audio_file, target_sr=None):\n",
"    with sf.SoundFile(audio_file, 'r') as f:\n",
"        samples = f.read(dtype='float32')\n",
"        sample_rate = f.samplerate\n",
"        if target_sr is not None and target_sr != sample_rate:\n",
"            samples = librosa.core.resample(samples, orig_sr=sample_rate, target_sr=target_sr)\n",
"    return samples.transpose()\n",
"\n",
"def generateMels(manifest_path, save_path, hifigan_manifest_path):\n",
"    # Get records from the training manifest\n",
"\n",
"    records = []\n",
"    with open(manifest_path, \"r\") as f:\n",
"        for i, line in enumerate(f):\n",
"            records.append(json.loads(line))\n",
"\n",
"    beta_binomial_interpolator = BetaBinomialInterpolator()\n",
"    spec_model.eval()\n",
"\n",
"    device = spec_model.device\n",
"\n",
"    save_dir = Path(save_path)\n",
"    save_dir.mkdir(exist_ok=True, parents=True)\n",
"\n",
"    # Generate a spectrograms (we need to use ground truth alignment for correct matching between audio and mels)\n",
"    for i, r in enumerate(records):\n",
"        audio = load_wav(r[\"audio_filepath\"])\n",
"        audio = torch.from_numpy(audio).unsqueeze(0).to(device)\n",
"        audio_len = torch.tensor(audio.shape[1], dtype=torch.long, device=device).unsqueeze(0)\n",
"\n",
"        # Again, our finetuned FastPitch model doesn't use multiple speakers,\n",
"        # but we keep the code to support it here for reference\n",
"        if spec_model.fastpitch.speaker_emb is not None and \"speaker\" in r:\n",
"            speaker = torch.tensor([r['speaker']]).to(device)\n",
"        else:\n",
"            speaker = None\n",
"\n",
"        with torch.no_grad():\n",
"            if \"normalized_text\" in r:\n",
"                text = spec_model.parse(r[\"normalized_text\"], normalize=False)\n",
"            else:\n",
"                text = spec_model.parse(r['text'])\n",
"\n",
"            text_len = torch.tensor(text.shape[-1], dtype=torch.long, device=device).unsqueeze(0)\n",
"\n",
"            spect, spect_len = spec_model.preprocessor(input_signal=audio, length=audio_len)\n",
"\n",
"            # Generate attention prior and spectrogram inputs for HiFi-GAN\n",
"            attn_prior = torch.from_numpy(\n",
"                beta_binomial_interpolator(spect_len.item(), text_len.item())\n",
"            ).unsqueeze(0).to(text.device)\n",
"\n",
"            spectrogram = spec_model.forward(\n",
"                text=text,\n",
"                input_lens=text_len,\n",
"                spec=spect,\n",
"                mel_lens=spect_len,\n",
"                attn_prior=attn_prior,\n",
"                speaker=speaker,\n",
"            )[0]\n",
"\n",
"            save_path = save_dir / f\"mel_{i}.npy\"\n",
"            np.save(save_path, spectrogram[0].to('cpu').numpy())\n",
"            r[\"mel_filepath\"] = str(save_path)\n",
"\n",
"    with open(hifigan_manifest_path, \"w\") as f:\n",
"        for r in records:\n",
"            f.write(json.dumps(r) + '\\n')\n",
"\n",
"generateMels(\"./manifest_train.json\", \"./training_mels\", \"hifigan_manifest_train.json\")\n",
"generateMels(\"./manifest_validation.json\", \"./validation_mels\", \"hifigan_manifest_validation.json\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9d6dc1ee",
"metadata": {},
"outputs": [],
"source": [
"!(python3 hifigan_finetune.py \\\n",
"  --config-name=hifigan.yaml \\\n",
"  model.train_ds.dataloader_params.batch_size=32 \\\n",
"  model.max_steps=7000 \\\n",
"  model.optim.lr=0.00001 \\\n",
"  ~model.optim.sched \\\n",
"  train_dataset=./hifigan_manifest_train.json \\\n",
"  validation_datasets=./hifigan_manifest_validation.json \\\n",
"  exp_manager.exp_dir=./glados_out \\\n",
"  +init_from_pretrained_model=tts_hifigan \\\n",
"  trainer.check_val_every_n_epoch=10 \\\n",
"  model/train_ds=train_ds_finetune \\\n",
"  model/validation_ds=val_ds_finetune)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "45085a4e",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import IPython.display as ipd\n",
"from nemo.collections.tts.models import HifiGanModel\n",
"from nemo.collections.tts.models import FastPitchModel\n",
"import shutil\n",
"import os\n",
"\n",
"# Only need to set speaker_id if there is more than one speaker\n",
"speaker_id = None\n",
"\n",
"base_dir = \"glados_out\"\n",
"\n",
"last_fastpitch_ckpt = get_best_ckpt_from_last_run(base_dir=base_dir, model_name=\"FastPitch\")\n",
"last_hifigan_ckpt = get_best_ckpt_from_last_run(base_dir=base_dir, model_name=\"HifiGan\")\n",
"\n",
"print(\"Fastpitch: \" + last_fastpitch_ckpt)\n",
"print(\"HifiGan: \" + last_hifigan_ckpt)\n",
"\n",
"vocoder = HifiGanModel.load_from_checkpoint(last_hifigan_ckpt)\n",
"vocoder = vocoder.eval().cuda()\n",
"spec_model = FastPitchModel.load_from_checkpoint(last_fastpitch_ckpt)\n",
"spec_model.eval().cuda()\n",
"\n",
"num_val = 5 # Number of validation samples\n",
"val_records = []\n",
"with open(\"manifest_validation.json\", \"r\") as f:\n",
"    for i, line in enumerate(f):\n",
"        val_records.append(json.loads(line))\n",
"        if len(val_records) >= num_val:\n",
"            break\n",
"\n",
"for val_record in val_records:\n",
"    print(\"Real validation audio\")\n",
"    ipd.display(ipd.Audio(val_record['audio_filepath'], rate=22050))\n",
"    print(f\"Text: {val_record['text']}\")\n",
"    spec, audio = infer(spec_model, vocoder, val_record['text'], speaker=speaker_id)\n",
"    ipd.display(ipd.Audio(audio, rate=22050))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "50b4c3be",
"metadata": {},
"outputs": [],
"source": [
"spec, audio = infer(spec_model, vocoder, \"Robots are awesome\", speaker=speaker_id)\n",
"ipd.display(ipd.Audio(audio, rate=22050))\n",
"spec, audio = infer(spec_model, vocoder, \"Welcome back to the armoury\", speaker=speaker_id)\n",
"ipd.display(ipd.Audio(audio, rate=22050))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "584dc353",
"metadata": {},
"outputs": [],
"source": [
"spec_model.save_to(\"fastpitch.nemo\")\n",
"vocoder.save_to(\"hifigan.nemo\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "52c6d0a9",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
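
The notebook exports fastpitch.nemo and hifigan.nemo as its final artifacts. A minimal inference sketch using those files (editorial addition, not in the commit; assumes a CUDA device and the two exported files in the working directory):

import soundfile as sf
import torch
from nemo.collections.tts.models import FastPitchModel, HifiGanModel

# Load the finetuned spectrogram generator and vocoder exported above
spec_model = FastPitchModel.restore_from("fastpitch.nemo").eval().cuda()
vocoder = HifiGanModel.restore_from("hifigan.nemo").eval().cuda()

with torch.no_grad():
    tokens = spec_model.parse("The cake is a lie.")
    spec = spec_model.generate_spectrogram(tokens=tokens)
    audio = vocoder.convert_spectrogram_to_audio(spec=spec)

sf.write("glados_line.wav", audio.squeeze().to("cpu").numpy(), 22050)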
TrainNemo/conf/fastpitch_align_v1.05.yaml (new file, 250 lines)
@@ -0,0 +1,250 @@
# This config contains the default values for training FastPitch model with aligner on LJSpeech dataset.
# If you want to train model on other dataset, you can change config values according to your dataset.
# Most dataset-specific arguments are in the head of the config file, see below.

name: FastPitch

train_dataset: ???
validation_datasets: ???
sup_data_path: ???
sup_data_types: [ "align_prior_matrix", "pitch" ]

# Default values from librosa.pyin
pitch_fmin: 65.40639132514966
pitch_fmax: 2093.004522404789

# these frame-wise values depend on pitch_fmin and pitch_fmax, you can get values
# by running `scripts/dataset_processing/tts/extract_sup_data.py`
pitch_mean: ??? # e.g. 212.35873413085938 for LJSpeech
pitch_std: ??? # e.g. 68.52806091308594 for LJSpeech

# Default values for dataset with sample_rate=22050
sample_rate: 22050
n_mel_channels: 80
n_window_size: 1024
n_window_stride: 256
n_fft: 1024
lowfreq: 0
highfreq: 8000
window: hann

phoneme_dict_path: "scripts/tts_dataset_files/cmudict-0.7b_nv22.10"
heteronyms_path: "scripts/tts_dataset_files/heteronyms-052722"
whitelist_path: "nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv"

model:
  learn_alignment: true
  bin_loss_warmup_epochs: 100

  n_speakers: 1
  max_token_duration: 75
  symbols_embedding_dim: 384
  pitch_embedding_kernel_size: 3

  pitch_fmin: ${pitch_fmin}
  pitch_fmax: ${pitch_fmax}

  pitch_mean: ${pitch_mean}
  pitch_std: ${pitch_std}

  sample_rate: ${sample_rate}
  n_mel_channels: ${n_mel_channels}
  n_window_size: ${n_window_size}
  n_window_stride: ${n_window_stride}
  n_fft: ${n_fft}
  lowfreq: ${lowfreq}
  highfreq: ${highfreq}
  window: ${window}

  text_normalizer:
    _target_: nemo_text_processing.text_normalization.normalize.Normalizer
    lang: en
    input_case: cased
    whitelist: ${whitelist_path}

  text_normalizer_call_kwargs:
    verbose: false
    punct_pre_process: true
    punct_post_process: true

  text_tokenizer:
    _target_: nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers.EnglishPhonemesTokenizer
    punct: true
    stresses: true
    chars: true
    apostrophe: true
    pad_with_space: true
    g2p:
      _target_: nemo_text_processing.g2p.modules.EnglishG2p
      phoneme_dict: ${phoneme_dict_path}
      heteronyms: ${heteronyms_path}
      phoneme_probability: 0.5

  train_ds:
    dataset:
      _target_: nemo.collections.tts.torch.data.TTSDataset
      manifest_filepath: ${train_dataset}
      sample_rate: ${model.sample_rate}
      sup_data_path: ${sup_data_path}
      sup_data_types: ${sup_data_types}
      n_fft: ${model.n_fft}
      win_length: ${model.n_window_size}
      hop_length: ${model.n_window_stride}
      window: ${model.window}
      n_mels: ${model.n_mel_channels}
      lowfreq: ${model.lowfreq}
      highfreq: ${model.highfreq}
      max_duration: null
      min_duration: 0.1
      ignore_file: null
      trim: false
      pitch_fmin: ${model.pitch_fmin}
      pitch_fmax: ${model.pitch_fmax}
      pitch_norm: true
      pitch_mean: ${model.pitch_mean}
      pitch_std: ${model.pitch_std}
      use_beta_binomial_interpolator: true

    dataloader_params:
      drop_last: false
      shuffle: true
      batch_size: 32
      num_workers: 12
      pin_memory: true

  validation_ds:
    dataset:
      _target_: nemo.collections.tts.torch.data.TTSDataset
      manifest_filepath: ${validation_datasets}
      sample_rate: ${model.sample_rate}
      sup_data_path: ${sup_data_path}
      sup_data_types: ${sup_data_types}
      n_fft: ${model.n_fft}
      win_length: ${model.n_window_size}
      hop_length: ${model.n_window_stride}
      window: ${model.window}
      n_mels: ${model.n_mel_channels}
      lowfreq: ${model.lowfreq}
      highfreq: ${model.highfreq}
      max_duration: null
      min_duration: null
      ignore_file: null
      trim: false
      pitch_fmin: ${model.pitch_fmin}
      pitch_fmax: ${model.pitch_fmax}
      pitch_norm: true
      pitch_mean: ${model.pitch_mean}
      pitch_std: ${model.pitch_std}
      use_beta_binomial_interpolator: true

    dataloader_params:
      drop_last: false
      shuffle: false
      batch_size: 32
      num_workers: 8
      pin_memory: true

  preprocessor:
    _target_: nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor
    features: ${model.n_mel_channels}
    lowfreq: ${model.lowfreq}
    highfreq: ${model.highfreq}
    n_fft: ${model.n_fft}
    n_window_size: ${model.n_window_size}
    window_size: false
    n_window_stride: ${model.n_window_stride}
    window_stride: false
    pad_to: 1
    pad_value: 0
    sample_rate: ${model.sample_rate}
    window: ${model.window}
    normalize: null
    preemph: null
    dither: 0.0
    frame_splicing: 1
    log: true
    log_zero_guard_type: add
    log_zero_guard_value: 1e-05
    mag_power: 1.0

  input_fft: #n_embed and padding_idx are added by the model
    _target_: nemo.collections.tts.modules.transformer.FFTransformerEncoder
    n_layer: 6
    n_head: 1
    d_model: ${model.symbols_embedding_dim}
    d_head: 64
    d_inner: 1536
    kernel_size: 3
    dropout: 0.1
    dropatt: 0.1
    dropemb: 0.0
    d_embed: ${model.symbols_embedding_dim}

  output_fft:
    _target_: nemo.collections.tts.modules.transformer.FFTransformerDecoder
    n_layer: 6
    n_head: 1
    d_model: ${model.symbols_embedding_dim}
    d_head: 64
    d_inner: 1536
    kernel_size: 3
    dropout: 0.1
    dropatt: 0.1
    dropemb: 0.0

  alignment_module:
    _target_: nemo.collections.tts.modules.aligner.AlignmentEncoder
    n_text_channels: ${model.symbols_embedding_dim}

  duration_predictor:
    _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor
    input_size: ${model.symbols_embedding_dim}
    kernel_size: 3
    filter_size: 256
    dropout: 0.1
    n_layers: 2

  pitch_predictor:
    _target_: nemo.collections.tts.modules.fastpitch.TemporalPredictor
    input_size: ${model.symbols_embedding_dim}
    kernel_size: 3
    filter_size: 256
    dropout: 0.1
    n_layers: 2

  optim:
    name: adamw
    lr: 1e-3
    betas: [0.9, 0.999]
    weight_decay: 1e-6

    sched:
      name: NoamAnnealing
      warmup_steps: 1000
      last_epoch: -1
      d_model: 1 # Disable scaling based on model dim

trainer:
  num_nodes: 1
  devices: 1
  accelerator: gpu
  strategy: ddp
  precision: 16
  max_epochs: 1000
  accumulate_grad_batches: 1
  gradient_clip_val: 1000.0
  enable_checkpointing: False # Provided by exp_manager
  logger: false # Provided by exp_manager
  log_every_n_steps: 100
  check_val_every_n_epoch: 5
  benchmark: false

exp_manager:
  exp_dir: null
  name: ${name}
  create_tensorboard_logger: true
  create_checkpoint_callback: true
  checkpoint_callback_params:
    monitor: val_loss
  resume_if_exists: false
  resume_ignore_no_checkpoint: false
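
The comment in the config points at scripts/dataset_processing/tts/extract_sup_data.py for computing pitch_mean and pitch_std; in this commit the values are passed on the command line instead (model.pitch_mean=165.458 model.pitch_std=40.1891). For orientation only, a rough editorial approximation of those statistics with librosa.pyin, using the default fmin/fmax above:

import json
import librosa
import numpy as np

voiced_f0 = []
with open("manifest_train.json") as f:
    for line in f:
        rec = json.loads(line)
        audio, sr = librosa.load(rec["audio_filepath"], sr=22050)
        # pyin returns NaN for unvoiced frames; keep only voiced estimates
        f0, voiced_flag, _ = librosa.pyin(audio, fmin=65.41, fmax=2093.0, sr=sr)
        voiced_f0.append(f0[voiced_flag])

voiced_f0 = np.concatenate(voiced_f0)
print("pitch_mean:", voiced_f0.mean(), "pitch_std:", voiced_f0.std())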
TrainNemo/conf/hifigan.yaml (new file, 99 lines)
@@ -0,0 +1,99 @@
# This config contains the default values for training HiFi-GAN model on LJSpeech dataset.
# If you want to train model on other dataset, you can change config values according to your dataset.
# Most dataset-specific arguments are in the head of the config file, see below.

name: "HifiGan"

train_dataset: ???
validation_datasets: ???

# Default values for dataset with sample_rate=22050
sample_rate: 22050
n_mel_channels: 80
n_window_size: 1024
n_window_stride: 256
n_fft: 1024
lowfreq: 0
highfreq: 8000
window: hann

train_n_segments: 8192
train_max_duration: null
train_min_duration: 0.75

val_n_segments: 66048
val_max_duration: null
val_min_duration: 3

defaults:
  - model/generator: v1
  - model/train_ds: train_ds
  - model/validation_ds: val_ds

model:
  preprocessor:
    _target_: nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures
    nfilt: ${n_mel_channels}
    lowfreq: ${lowfreq}
    highfreq: ${highfreq}
    n_fft: ${n_fft}
    n_window_size: ${n_window_size}
    n_window_stride: ${n_window_stride}
    pad_to: 0
    pad_value: -11.52
    sample_rate: ${sample_rate}
    window: ${window}
    normalize: null
    preemph: null
    dither: 0.0
    frame_splicing: 1
    log: true
    log_zero_guard_type: clamp
    log_zero_guard_value: 1e-05
    mag_power: 1.0
    use_grads: false
    exact_pad: true

  optim:
    _target_: torch.optim.AdamW
    lr: 0.0002
    betas: [0.8, 0.99]

    sched:
      name: CosineAnnealing
      min_lr: 1e-5
      warmup_ratio: 0.02

  max_steps: 2500000
  l1_loss_factor: 45
  denoise_strength: 0.0025

trainer:
  num_nodes: 1
  devices: 1
  accelerator: gpu
  strategy: ddp
  precision: 32
  max_steps: ${model.max_steps}
  accumulate_grad_batches: 1
  enable_checkpointing: False # Provided by exp_manager
  logger: false # Provided by exp_manager
  log_every_n_steps: 100
  check_val_every_n_epoch: 10
  benchmark: false

exp_manager:
  exp_dir: null
  name: ${name}
  create_tensorboard_logger: true
  create_checkpoint_callback: true
  checkpoint_callback_params:
    monitor: val_loss
    mode: min
  create_wandb_logger: false
  wandb_logger_kwargs:
    name: null
    project: null
    entity: null
  resume_if_exists: false
  resume_ignore_no_checkpoint: false
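
The head comments say dataset-specific values can be changed per dataset; in this commit that is done from the notebook via Hydra command-line overrides rather than by editing the file. An editorial illustration of the same mechanism with OmegaConf (which Hydra builds on):

from omegaconf import OmegaConf

# Load the config and apply the same overrides the notebook passes on the CLI
cfg = OmegaConf.load("conf/hifigan.yaml")
overrides = OmegaConf.from_dotlist(["model.max_steps=7000", "model.optim.lr=0.00001"])
cfg = OmegaConf.merge(cfg, overrides)
print(cfg.model.max_steps)  # 7000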
TrainNemo/conf/hifigan/hifigan.yaml (new file, 99 lines)
@@ -0,0 +1,99 @@
(contents identical to TrainNemo/conf/hifigan.yaml above)
TrainNemo/conf/hifigan/model/generator/v1.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
_target_: nemo.collections.tts.modules.hifigan_modules.Generator
resblock: 1
upsample_rates: [8,8,2,2]
upsample_kernel_sizes: [16,16,4,4]
upsample_initial_channel: 512
resblock_kernel_sizes: [3,7,11]
resblock_dilation_sizes: [[1,3,5], [1,3,5], [1,3,5]]
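
For orientation (editorial sketch, not in the commit): Hydra instantiates the class named by _target_ with the remaining keys as keyword arguments, so the v1 config above is equivalent to constructing the generator directly:

from nemo.collections.tts.modules.hifigan_modules import Generator

# Values copied from v1.yaml above; resblock=1 selects the larger ResBlock variant.
generator = Generator(
    resblock=1,
    upsample_rates=[8, 8, 2, 2],
    upsample_kernel_sizes=[16, 16, 4, 4],
    upsample_initial_channel=512,
    resblock_kernel_sizes=[3, 7, 11],
    resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
)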
TrainNemo/conf/hifigan/model/train_ds/train_ds.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
dataset:
  _target_: "nemo.collections.tts.torch.data.VocoderDataset"
  manifest_filepath: ${train_dataset}
  sample_rate: ${sample_rate}
  n_segments: ${train_n_segments}
  max_duration: ${train_max_duration}
  min_duration: ${train_min_duration}
dataloader_params:
  drop_last: false
  shuffle: true
  batch_size: 16
  num_workers: 4
  pin_memory: true
TrainNemo/conf/hifigan/model/train_ds/train_ds_finetune.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
dataset:
  _target_: "nemo.collections.tts.torch.data.VocoderDataset"
  manifest_filepath: ${train_dataset}
  sample_rate: ${sample_rate}
  n_segments: ${train_n_segments}
  max_duration: ${train_max_duration}
  min_duration: ${train_min_duration}
  load_precomputed_mel: true
  hop_length: ${n_window_stride}
dataloader_params:
  drop_last: false
  shuffle: true
  batch_size: 16
  num_workers: 4
  pin_memory: true
TrainNemo/conf/hifigan/model/validation_ds/val_ds.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
dataset:
  _target_: "nemo.collections.tts.torch.data.VocoderDataset"
  manifest_filepath: ${validation_datasets}
  sample_rate: ${sample_rate}
  n_segments: ${val_n_segments}
  max_duration: ${val_max_duration}
  min_duration: ${val_min_duration}
dataloader_params:
  drop_last: false
  shuffle: false
  batch_size: 16
  num_workers: 1
  pin_memory: true
TrainNemo/conf/hifigan/model/validation_ds/val_ds_finetune.yaml (new file, 15 lines; header missing in this view, name inferred from the hifigan_finetune overrides)
@@ -0,0 +1,15 @@
dataset:
  _target_: "nemo.collections.tts.torch.data.VocoderDataset"
  manifest_filepath: ${validation_datasets}
  sample_rate: ${sample_rate}
  n_segments: ${val_n_segments}
  max_duration: ${val_max_duration}
  min_duration: ${val_min_duration}
  load_precomputed_mel: true
  hop_length: ${n_window_stride}
dataloader_params:
  drop_last: false
  shuffle: false
  batch_size: 16
  num_workers: 4
  pin_memory: true
TrainNemo/fastpitch_finetune.py (new file, 41 lines)
@@ -0,0 +1,41 @@
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytorch_lightning as pl

from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import FastPitchModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager


@hydra_runner(config_path="conf", config_name="fastpitch_align_44100")
def main(cfg):
    if hasattr(cfg.model.optim, 'sched'):
        logging.warning("You are using an optimizer scheduler while finetuning. Are you sure this is intended?")
    if cfg.model.optim.lr > 1e-3 or cfg.model.optim.lr < 1e-5:
        logging.warning("The recommended learning rate for finetuning is 2e-4")
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = FastPitchModel(cfg=cfg.model, trainer=trainer)
    model.maybe_init_from_pretrained_checkpoint(cfg=cfg)
    lr_logger = pl.callbacks.LearningRateMonitor()
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([lr_logger, epoch_time_logger])
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
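maybe_init_from_pretrained_checkpoint() only warm-starts the model if the config carries one of the init_from_* keys, which are normally supplied as Hydra overrides at launch time. A hedged sketch of the relevant overrides, built programmatically; the dataset paths and the NGC model name are assumed examples, not values taken from this commit.

# Hedged sketch of the keys maybe_init_from_pretrained_checkpoint() looks for.
# Exactly one of init_from_nemo_model / init_from_pretrained_model /
# init_from_ptl_ckpt should be set; all values below are assumptions.
from omegaconf import OmegaConf

finetune_overrides = OmegaConf.create(
    {
        "train_dataset": "audio/manifest.json",            # assumed manifest path
        "validation_datasets": "audio/manifest.json",      # assumed manifest path
        "init_from_pretrained_model": "tts_en_fastpitch",  # pretrained NGC checkpoint
        "model": {"optim": {"lr": 2e-4}},                  # the LR the script recommends
    }
)
print(OmegaConf.to_yaml(finetune_overrides))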
32
TrainNemo/hifigan_finetune.py
Normal file
@ -0,0 +1,32 @@
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import pytorch_lightning as pl

from nemo.collections.tts.models import HifiGanModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager


@hydra_runner(config_path="conf/hifigan", config_name="hifigan_44100")
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = HifiGanModel(cfg=cfg.model, trainer=trainer)
    model.maybe_init_from_pretrained_checkpoint(cfg=cfg)
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
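Once both models are finetuned, synthesis chains them: FastPitch produces a spectrogram and HiFi-GAN converts it to audio. A minimal sketch, assuming the exported checkpoints are named glados_fastpitch.nemo and glados_hifigan.nemo and that the 44100 Hz configs above were used.

# Minimal end-to-end synthesis sketch; the checkpoint names are assumptions.
import soundfile as sf
from nemo.collections.tts.models import FastPitchModel, HifiGanModel

spec_gen = FastPitchModel.restore_from("glados_fastpitch.nemo").eval()
vocoder = HifiGanModel.restore_from("glados_hifigan.nemo").eval()

tokens = spec_gen.parse("The cake is a lie.")
spectrogram = spec_gen.generate_spectrogram(tokens=tokens)
audio = vocoder.convert_spectrogram_to_audio(spec=spectrogram)

# 44100 matches the sample_rate these configs were set up for.
sf.write("glados_line.wav", audio.detach().cpu().numpy()[0], 44100)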
9
TrainNemo/requirements.txt
Normal file
@ -0,0 +1,9 @@
beautifulsoup4==4.11.1
ipython==8.7.0
matplotlib==3.1.2
nemo==4.3.2
nemo_toolkit==1.13.0
num2words==0.5.12
requests==2.22.0
soundfile==0.11.0
torch==1.13.1
134776
TrainNemo/tts_dataset_files/cmudict-0.7b_nv22.10
Normal file
File diff suppressed because it is too large
204
TrainNemo/tts_dataset_files/heteronyms-052722
Normal file
@ -0,0 +1,204 @@
abstract
abuse
abuses
addict
address
advocate
affect
affiliate
agape
aged
aggregate
allied
alternate
alum
analyses
animate
appropriate
approximate
articulate
associate
attribute
august
axes
bases
bass
blessed
bologna
bow
buffet
celtic
close
combine
compact
complex
compound
compress
concert
concrete
conduct
confines
conflict
conglomerate
conjugate
conscript
console
consort
construct
consummate
content
contest
contract
contrast
converse
convert
convict
coordinate
correlate
crooked
decrease
defect
defense
degenerate
delegate
deliberate
desert
deviate
diagnoses
diffuse
discard
discharge
discount
do
document
does
dogged
dove
duplicate
elaborate
ellipses
entrance
escort
estimate
excuse
expatriate
exploit
export
expose
extract
fragment
frequent
graduate
house
impact
implant
implement
import
incense
incline
increase
increment
initiate
insert
instrument
insult
interchange
intimate
intrigue
invalid
invert
invite
isolate
jesus
job
laminate
lead
learned
lima
live
lives
mate
merchandise
minute
misuse
mobile
moderate
moped
mouth
mow
multiply
nestle
number
object
ornament
overall
overthrow
pasty
perfect
perfume
periodic
permit
pervert
pigment
polish
postulate
precipitate
predicate
present
primer
proceeds
produce
progress
project
protest
putting
ragged
ravel
read
reading
rebel
recall
record
recount
recreate
refund
refuse
reject
rerelease
resign
resume
retard
retreat
rodeo
root
row
sake
second
separate
sewer
shower
sin
slough
sow
subject
subordinate
supplement
supposed
suspect
syndicate
tarry
tear
tier
transfer
transform
transplant
transport
unionized
upset
use
uses
whoop
wicked
wind
winds
wound
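These words are pronounced differently depending on part of speech, so the grapheme-to-phoneme step should not blindly look them up in the dictionary. In NeMo's TTS data pipeline the file is handed to the G2P module alongside the CMU dictionary above; a hedged sketch of standalone use (module path per the NeMo 1.12-era releases; behavior as I understand it, not verified against this commit):

# Hedged sketch: heteronyms are left as graphemes rather than dictionary-looked-up,
# so the model learns their pronunciation from context.
from nemo.collections.tts.torch.g2ps import EnglishG2p

g2p = EnglishG2p(
    phoneme_dict="TrainNemo/tts_dataset_files/cmudict-0.7b_nv22.10",
    heteronyms="TrainNemo/tts_dataset_files/heteronyms-052722",
)
print(g2p("I read the book"))  # "read" stays as graphemes; the rest become phonemes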
21
TrainNemo/tts_dataset_files/lj_speech.tsv
Normal file
@ -0,0 +1,21 @@
Mr. mister
Mrs. misses
Dr. doctor
Drs. doctors
Co. company
Lt. lieutenant
Sgt. sergeant
St. saint
Jr. junior
Maj. major
Hon. honorable
Gov. governor
Capt. captain
Esq. esquire
Gen. general
Ltd. limited
Rev. reverend
Col. colonel
Mt. mount
Ft. fort
etc. et cetera
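This TSV is presumably the whitelist the FastPitch config's text normalizer uses to expand abbreviations before phonemization. A hedged sketch of standalone use, assuming the nemo_text_processing Normalizer and its whitelist argument:

# Hedged sketch: expand abbreviations like "Dr." -> "doctor" during text
# normalization, using this TSV as the whitelist.
from nemo_text_processing.text_normalization.normalize import Normalizer

normalizer = Normalizer(
    input_case="cased",
    lang="en",
    whitelist="TrainNemo/tts_dataset_files/lj_speech.tsv",
)
# Expands "Dr." and "Mt." according to the whitelist rows above.
print(normalizer.normalize("Dr. Johnson lives on Mt. Pleasant."))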
127
download_audio.py
download_audio.py
@ -1,127 +0,0 @@
import requests
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
import shutil
import os
from bs4 import BeautifulSoup
import soundfile as sf
import string
import json
import re
import num2words

class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

blocklist = ["potato", "_ding_", "00_part1_entry-6"]
audio_dir = 'audio'
download_threads = 64

def prep(args, overwrite=False):
    already_exists = os.path.exists(audio_dir)

    if already_exists and not overwrite:
        print("Data already downloaded")
        return

    if already_exists:
        print("Deleting previously downloaded audio")
        shutil.rmtree(audio_dir)

    os.mkdir(audio_dir)
    download_parallel(args)

def remove_punctuation(str):
    return str.translate(str.maketrans('', '', string.punctuation))

def audio_duration(fn):
    f = sf.SoundFile(fn)
    return f.frames / f.samplerate

def download_file(args):
    url, filename = args[0], args[1]

    try:
        response = requests.get(url)
        open(os.path.join(audio_dir, filename), "wb").write(response.content)
        return filename, True
    except:
        return filename, False

def download_parallel(args):
    results = ThreadPool(download_threads).imap_unordered(download_file, args)
    for result in results:
        if result[1]:
            print(bcolors.OKGREEN + "[" + u'\u2713' + "] " + bcolors.ENDC + result[0])
        else:
            print(bcolors.FAIL + "[" + u'\u2715' + "] " + bcolors.ENDC + result[0])

def main():
    r = requests.get("https://theportalwiki.com/wiki/GLaDOS_voice_lines")

    urls = []
    filenames = []
    texts = []

    soup = BeautifulSoup(r.text.encode('utf-8').decode('ascii', 'ignore'), 'html.parser')
    for link_item in soup.find_all('a'):
        url = link_item.get("href", None)
        if url:
            if "https:" in url and ".wav" in url:
                list_item = link_item.find_parent("li")
                ital_item = list_item.find_all('i')
                if ital_item:
                    text = ital_item[0].text
                    text = text.replace('"', '')
                    filename = url[url.rindex("/")+1:]

                    if "[" not in text and "]" not in text:
                        if url not in urls:
                            for s in blocklist:
                                if s in url:
                                    break
                            else:
                                urls.append(url)
                                filenames.append(filename)
                                text = text.replace('*', '')
                                text = re.sub(r"(\d+)", lambda x: num2words.num2words(int(x.group(0))), text)
                                texts.append(text)

    print("Found " + str(len(urls)) + " urls")

    args = zip(urls, filenames)

    prep(args)

    #{"audio_filepath": "audio/nada_lily_21_haggard_0316.wav",
    #"text": "awake ye kings",
    #"duration": 1.3,
    #"text_no_preprocessing": "\u201cAwake, ye kings,\u201d",
    #"text_normalized": "\"Awake, ye kings,\""}

    total_audio_time = 0
    outFile=open(os.path.join(audio_dir, "manifest.json"), 'w')
    for i in range(len(urls)):
        item = {}
        text = texts[i]
        filename = filenames[i]
        item["audio_filepath"] = os.path.join(audio_dir, filename)
        item["text_normalized"] = text
        item["text_no_preprocessing"] = text
        item["text"] = text.lower()
        item["duration"] = audio_duration(os.path.join(audio_dir, filename))
        total_audio_time = total_audio_time + item["duration"]
        outFile.write(json.dumps(item, ensure_ascii=True, sort_keys=True) + "\n")

    outFile.close()
    print(str(total_audio_time/60.0) + " min")

main()
3
requirements.txt
requirements.txt
@ -1,3 +0,0 @@
beautifulsoup4
requests
soundfile