Consolidating

Dave Niewinski 2024-02-14 12:54:03 -05:00
parent 5e76654e1d
commit a7030ca8ac
2 changed files with 0 additions and 666 deletions

@@ -1,666 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "a81468a6",
"metadata": {},
"outputs": [],
"source": [
"#!apt install sox libsndfile1 ffmpeg\n",
"#!pip3 install wheel\n",
"#!pip3 install -r requirements.txt\n",
"#!pip3 install wget unidecode\n",
"#!pip3 install resampy==0.3.1\n",
"#!pip3 install nemo_toolkit[all]\n",
"#!pip3 install numba==0.48\n",
"#!pip3 install librosa==0.8.1\n",
"#!pip3 install pynini"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "290b2ddd",
"metadata": {},
"outputs": [],
"source": [
"import soundfile\n",
"import librosa\n",
"import json\n",
"import os\n",
"\n",
"print(librosa.__version__)\n",
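"# Load a wav at the target sampling rate (librosa resamples on load) and write it back out with soundfile\n",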
"def resample_audio(input_file_path, output_path, target_sampling_rate=22050):\n",
" if not input_file_path.endswith(\".wav\"):\n",
" raise NotImplementedError(\"Loading only implemented for wav files.\")\n",
" if not os.path.exists(input_file_path):\n",
" raise FileNotFoundError(f\"Cannot file input file at {input_file_path}\")\n",
" audio, sampling_rate = librosa.load(\n",
" input_file_path,\n",
" sr=target_sampling_rate\n",
" )\n",
"\n",
" soundfile.write(\n",
" output_path,\n",
" audio,\n",
" samplerate=target_sampling_rate,\n",
" format=\"wav\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dce26c49",
"metadata": {},
"outputs": [],
"source": [
"import requests\n",
"from multiprocessing import cpu_count\n",
"from multiprocessing.pool import ThreadPool\n",
"import shutil\n",
"import os\n",
"from bs4 import BeautifulSoup\n",
"import soundfile as sf\n",
"import string\n",
"import json\n",
"import re\n",
"import num2words\n",
"from tqdm.notebook import tqdm\n",
"\n",
"class bcolors:\n",
" HEADER = '\\033[95m'\n",
" OKBLUE = '\\033[94m'\n",
" OKCYAN = '\\033[96m'\n",
" OKGREEN = '\\033[92m'\n",
" WARNING = '\\033[93m'\n",
" FAIL = '\\033[91m'\n",
" ENDC = '\\033[0m'\n",
" BOLD = '\\033[1m'\n",
" UNDERLINE = '\\033[4m'\n",
"\n",
"blocklist = [\"potato\", \"_ding_\", \"00_part1_entry-6\", \"_escape_\"]\n",
"audio_dir = 'audio'\n",
"download_threads = 64\n",
"\n",
"temp_path = \"temp_audio\"\n",
"sampling_rate = 22050\n",
" \n",
"def prep(args, overwrite=True):\n",
" already_exists = os.path.exists(audio_dir)\n",
" \n",
" if already_exists and not overwrite:\n",
" print(\"Data already downloaded\")\n",
" return\n",
" \n",
" if already_exists:\n",
" print(\"Deleting previously downloaded audio\")\n",
" shutil.rmtree(audio_dir)\n",
" \n",
" if os.path.exists(temp_path):\n",
" shutil.rmtree(temp_path)\n",
" \n",
" os.mkdir(audio_dir)\n",
" download_parallel(args)\n",
"\n",
"def remove_punctuation(str):\n",
" return str.translate(str.maketrans('', '', string.punctuation))\n",
" \n",
"def audio_duration(fn):\n",
" f = sf.SoundFile(fn)\n",
" return f.frames / f.samplerate\n",
"\n",
"def download_file(args):\n",
" url, filename = args[0], args[1]\n",
"\n",
" try:\n",
" response = requests.get(url, allow_redirects=False)\n",
"\n",
" open(os.path.join(audio_dir, filename), \"wb\").write(response.content)\n",
" return filename, True\n",
" except:\n",
" return filename, False\n",
"\n",
"def download_parallel(args):\n",
" results = ThreadPool(download_threads).imap_unordered(download_file, args)\n",
" for result in results:\n",
" if result[1]:\n",
" print(bcolors.OKGREEN + \"[\" + u'\\u2713' + \"] \" + bcolors.ENDC + result[0])\n",
" else:\n",
" print(bcolors.FAIL + \"[\" + u'\\u2715' + \"] \" + bcolors.ENDC + result[0])\n",
"\n",
"def main():\n",
" r = requests.get(\"https://theportalwiki.com/wiki/GLaDOS_voice_lines\", allow_redirects=False)\n",
"\n",
" urls = []\n",
" filenames = []\n",
" texts = []\n",
"\n",
" soup = BeautifulSoup(r.text.encode('utf-8').decode('ascii', 'ignore'), 'html.parser')\n",
" for link_item in soup.find_all('a'):\n",
" url = link_item.get(\"href\", None)\n",
" if url:\n",
" if \"https:\" in url and \".wav\" in url:\n",
" list_item = link_item.find_parent(\"li\")\n",
" ital_item = list_item.find_all('i')\n",
" if ital_item:\n",
" text = ital_item[0].text\n",
" text = text.replace('\"', '')\n",
" filename = url[url.rindex(\"/\")+1:]\n",
"\n",
" if \"[\" not in text and \"]\" not in text and \"$\" not in text:\n",
" if url not in urls:\n",
" for s in blocklist:\n",
" if s in url:\n",
" break\n",
" else:\n",
" urls.append(url)\n",
" filenames.append(filename)\n",
" text = text.replace('*', '')\n",
" texts.append(text)\n",
"\n",
" print(\"Found \" + str(len(urls)) + \" urls\")\n",
"\n",
" args = zip(urls, filenames)\n",
"\n",
" prep(args)\n",
" \n",
" total_audio_time = 0\n",
" outFile=open(os.path.join(audio_dir, \"manifest.json\"), 'w')\n",
" for i in range(len(urls)):\n",
" item = {}\n",
" text = texts[i]\n",
" filename = filenames[i]\n",
" item[\"audio_filepath\"] = os.path.join(audio_dir, filename)\n",
" #item[\"text_normalized\"] = re.sub(r\"(\\d+)\", lambda x: num2words.num2words(int(x.group(0))), text)\n",
" item[\"text\"] = re.sub(r\"(\\d+)\", lambda x: num2words.num2words(int(x.group(0))), text).lower()\n",
" item[\"duration\"] = audio_duration(os.path.join(audio_dir, filename))\n",
" total_audio_time = total_audio_time + item[\"duration\"]\n",
" outFile.write(json.dumps(item, ensure_ascii=True, sort_keys=True) + \"\\n\")\n",
" \n",
" outFile.close()\n",
" print(\"\\n\" + str(total_audio_time/60.0) + \" min\\n\")\n",
"\n",
"main()\n",
"\n",
"shutil.copytree(audio_dir, temp_path)\n",
"\n",
"print(\"Resampling Audio...\")\n",
"for filename in tqdm(os.listdir(temp_path)):\n",
" if \".wav\" in filename:\n",
" source_name = os.path.join(temp_path, filename)\n",
" destination_name = os.path.join(audio_dir, filename)\n",
" resample_audio(source_name, destination_name, target_sampling_rate=sampling_rate)\n",
" \n",
"if os.path.exists(temp_path):\n",
" shutil.rmtree(temp_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "50aa0395",
"metadata": {},
"outputs": [],
"source": [
"!head -n 1 ./audio/manifest.json"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "04d4dd5c",
"metadata": {},
"outputs": [],
"source": [
"!cat ./audio/manifest.json | tail -n 5 > ./manifest_validation.json\n",
"!cat ./audio/manifest.json | head -n -5 > ./manifest_train.json"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1d6d4a0d",
"metadata": {},
"outputs": [],
"source": [
"home_path = !(echo $HOME)\n",
"home_path = home_path[0]\n",
"print(home_path)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1773ddff",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import json\n",
"\n",
"import torch\n",
"import IPython.display as ipd\n",
"from matplotlib.pyplot import imshow\n",
"from matplotlib import pyplot as plt\n",
"\n",
"from nemo.collections.tts.models import FastPitchModel\n",
"FastPitchModel.from_pretrained(\"tts_en_fastpitch\")\n",
"\n",
"from pathlib import Path\n",
"nemo_files = [p for p in Path(f\"{home_path}/.cache/torch/NeMo/\").glob(\"**/tts_en_fastpitch_align.nemo\")]\n",
"print(f\"Copying {nemo_files[0]} to ./\")\n",
"Path(\"./tts_en_fastpitch_align.nemo\").write_bytes(nemo_files[0].read_bytes())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "39bd6009",
"metadata": {},
"outputs": [],
"source": [
"#!wget https://raw.githubusercontent.com/nvidia/NeMo/r1.14.0/examples/tts/fastpitch_finetune.py\n",
"#!wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/examples/tts/hifigan_finetune.py\n",
"# \n",
"#!mkdir -p conf\n",
"#!cd conf \\\n",
"#&& wget https://raw.githubusercontent.com/nvidia/NeMo/r1.14.0/examples/tts/conf/fastpitch_align_v1.05.yaml \\\n",
"#&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/examples/tts/conf/hifigan/hifigan.yaml \\\n",
"#&& cd .."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f9aa135f",
"metadata": {},
"outputs": [],
"source": [
"# additional files\n",
"#!mkdir -p tts_dataset_files && cd tts_dataset_files \\\n",
"#&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/scripts/tts_dataset_files/cmudict-0.7b_nv22.10 \\\n",
"#&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/scripts/tts_dataset_files/heteronyms-052722 \\\n",
"#&& wget https://raw.githubusercontent.com/NVIDIA/NeMo/r1.14.0/nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv \\\n",
"#&& cd .."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f2d1e52d",
"metadata": {},
"outputs": [],
"source": [
"!(python3 fastpitch_finetune.py --config-name=fastpitch_align_v1.05.yaml \\\n",
" train_dataset=./manifest_train.json \\\n",
" validation_datasets=./manifest_validation.json \\\n",
" sup_data_path=./fastpitch_sup_data \\\n",
" phoneme_dict_path=tts_dataset_files/cmudict-0.7b_nv22.10 \\\n",
" heteronyms_path=tts_dataset_files/heteronyms-052722 \\\n",
" whitelist_path=tts_dataset_files/lj_speech.tsv \\\n",
" exp_manager.exp_dir=./glados_out \\\n",
" +init_from_nemo_model=./tts_en_fastpitch_align.nemo \\\n",
" trainer.max_epochs=150 \\\n",
" trainer.check_val_every_n_epoch=25 \\\n",
" model.train_ds.dataloader_params.batch_size=12 model.validation_ds.dataloader_params.batch_size=12 \\\n",
" model.n_speakers=1 model.pitch_mean=165.458 model.pitch_std=40.1891 \\\n",
" model.pitch_fmin=80.0 model.pitch_fmax=2048.0 model.optim.lr=2e-4 \\\n",
" ~model.optim.sched model.optim.name=adam trainer.devices=1 trainer.strategy=null \\\n",
" +model.text_tokenizer.add_blank_at=true \\\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "aaefb5e3",
"metadata": {},
"outputs": [],
"source": [
"from nemo.collections.tts.models import HifiGanModel\n",
"from nemo.collections.tts.models import FastPitchModel\n",
"\n",
"vocoder = HifiGanModel.from_pretrained(\"tts_hifigan\")\n",
"vocoder = vocoder.eval().cuda()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "120d8ba0",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"\n",
"def infer(spec_gen_model, vocoder_model, str_input, speaker=None):\n",
" \"\"\"\n",
" Synthesizes spectrogram and audio from a text string given a spectrogram synthesis and vocoder model.\n",
" \n",
" Args:\n",
" spec_gen_model: Spectrogram generator model (FastPitch in our case)\n",
" vocoder_model: Vocoder model (HiFiGAN in our case)\n",
" str_input: Text input for the synthesis\n",
" speaker: Speaker ID\n",
" \n",
" Returns:\n",
" spectrogram and waveform of the synthesized audio.\n",
" \"\"\"\n",
" with torch.no_grad():\n",
" parsed = spec_gen_model.parse(str_input)\n",
" if speaker is not None:\n",
" speaker = torch.tensor([speaker]).long().to(device=spec_gen_model.device)\n",
" spectrogram = spec_gen_model.generate_spectrogram(tokens=parsed, speaker=speaker)\n",
" audio = vocoder_model.convert_spectrogram_to_audio(spec=spectrogram)\n",
" \n",
" if spectrogram is not None:\n",
" if isinstance(spectrogram, torch.Tensor):\n",
" spectrogram = spectrogram.to('cpu').numpy()\n",
" if len(spectrogram.shape) == 3:\n",
" spectrogram = spectrogram[0]\n",
" if isinstance(audio, torch.Tensor):\n",
" audio = audio.to('cpu').numpy()\n",
" return spectrogram, audio\n",
"\n",
"def get_best_ckpt_from_last_run(\n",
" base_dir=\"./glados_out\", \n",
" model_name=\"FastPitch\"\n",
" ): \n",
" \n",
" exp_dirs = list([i for i in (Path(base_dir) / model_name).iterdir() if i.is_dir()])\n",
" last_exp_dir = sorted(exp_dirs)[-1]\n",
" \n",
" last_checkpoint_dir = last_exp_dir / \"checkpoints\"\n",
" \n",
" last_ckpt = list(last_checkpoint_dir.glob('*-last.ckpt'))\n",
"\n",
" if len(last_ckpt) == 0:\n",
" raise ValueError(f\"There is no last checkpoint in {last_checkpoint_dir}.\")\n",
" \n",
" return str(last_ckpt[0])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "187d85ae",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import IPython.display as ipd\n",
"\n",
"# Only need to set speaker_id if there is more than one speaker\n",
"speaker_id = None\n",
"\n",
"last_ckpt = get_best_ckpt_from_last_run(model_name=\"FastPitch\")\n",
"print(last_ckpt)\n",
"\n",
"spec_model = FastPitchModel.load_from_checkpoint(last_ckpt)\n",
"spec_model.eval().cuda()\n",
"\n",
"num_val = 5 # Number of validation samples\n",
"val_records = []\n",
"with open(\"manifest_validation.json\", \"r\") as f:\n",
" for i, line in enumerate(f):\n",
" val_records.append(json.loads(line))\n",
" if len(val_records) >= num_val:\n",
" break\n",
" \n",
"for val_record in val_records:\n",
" print(\"Real validation audio\")\n",
" ipd.display(ipd.Audio(val_record['audio_filepath'], rate=22050))\n",
" print(f\"Text: {val_record['text']}\")\n",
" spec, audio = infer(spec_model, vocoder, val_record['text'], speaker=speaker_id)\n",
" ipd.display(ipd.Audio(audio, rate=22050))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e01801b6",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import numpy as np\n",
"import torch\n",
"import soundfile as sf\n",
"\n",
"from pathlib import Path\n",
"\n",
"from nemo.collections.tts.torch.helpers import BetaBinomialInterpolator\n",
"\n",
"def load_wav(audio_file, target_sr=None):\n",
" with sf.SoundFile(audio_file, 'r') as f:\n",
" samples = f.read(dtype='float32')\n",
" sample_rate = f.samplerate\n",
" if target_sr is not None and target_sr != sample_rate:\n",
" samples = librosa.core.resample(samples, orig_sr=sample_rate, target_sr=target_sr)\n",
" return samples.transpose()\n",
" \n",
"def generateMels(manifest_path, save_path, hifigan_manifest_path):\n",
" # Get records from the training manifest\n",
" \n",
" records = []\n",
" with open(manifest_path, \"r\") as f:\n",
" for i, line in enumerate(f):\n",
" records.append(json.loads(line))\n",
"\n",
" beta_binomial_interpolator = BetaBinomialInterpolator()\n",
" spec_model.eval()\n",
"\n",
" device = spec_model.device\n",
"\n",
" save_dir = Path(save_path)\n",
" save_dir.mkdir(exist_ok=True, parents=True)\n",
"\n",
" # Generate a spectrograms (we need to use ground truth alignment for correct matching between audio and mels)\n",
" for i, r in enumerate(records):\n",
" audio = load_wav(r[\"audio_filepath\"])\n",
" audio = torch.from_numpy(audio).unsqueeze(0).to(device)\n",
" audio_len = torch.tensor(audio.shape[1], dtype=torch.long, device=device).unsqueeze(0)\n",
"\n",
" # Again, our finetuned FastPitch model doesn't use multiple speakers,\n",
" # but we keep the code to support it here for reference\n",
" if spec_model.fastpitch.speaker_emb is not None and \"speaker\" in r:\n",
" speaker = torch.tensor([r['speaker']]).to(device)\n",
" else:\n",
" speaker = None\n",
"\n",
" with torch.no_grad():\n",
" if \"normalized_text\" in r:\n",
" text = spec_model.parse(r[\"normalized_text\"], normalize=False)\n",
" else:\n",
" text = spec_model.parse(r['text'])\n",
"\n",
" text_len = torch.tensor(text.shape[-1], dtype=torch.long, device=device).unsqueeze(0)\n",
"\n",
" spect, spect_len = spec_model.preprocessor(input_signal=audio, length=audio_len)\n",
"\n",
" # Generate attention prior and spectrogram inputs for HiFi-GAN\n",
" attn_prior = torch.from_numpy(\n",
" beta_binomial_interpolator(spect_len.item(), text_len.item())\n",
" ).unsqueeze(0).to(text.device)\n",
"\n",
" spectrogram = spec_model.forward(\n",
" text=text, \n",
" input_lens=text_len, \n",
" spec=spect, \n",
" mel_lens=spect_len, \n",
" attn_prior=attn_prior,\n",
" speaker=speaker,\n",
" )[0]\n",
"\n",
" save_path = save_dir / f\"mel_{i}.npy\"\n",
" np.save(save_path, spectrogram[0].to('cpu').numpy())\n",
" r[\"mel_filepath\"] = str(save_path)\n",
"\n",
" \n",
" with open(hifigan_manifest_path, \"w\") as f:\n",
" for r in records:\n",
" f.write(json.dumps(r) + '\\n')\n",
"\n",
"generateMels(\"./manifest_train.json\", \"./training_mels\", \"hifigan_manifest_train.json\")\n",
"generateMels(\"./manifest_validation.json\", \"./validation_mels\", \"hifigan_manifest_validation.json\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1f792773",
"metadata": {},
"outputs": [],
"source": [
"!(python3 hifigan_finetune.py \\\n",
" --config-name=hifigan.yaml \\\n",
" model.train_ds.dataloader_params.batch_size=32 \\\n",
" model.max_steps=7000 \\\n",
" model.optim.lr=0.00001 \\\n",
" ~model.optim.sched \\\n",
" train_dataset=./hifigan_manifest_train.json \\\n",
" validation_datasets=./hifigan_manifest_validation.json \\\n",
" exp_manager.exp_dir=./glados_out \\\n",
" +init_from_pretrained_model=tts_hifigan \\\n",
" trainer.check_val_every_n_epoch=10 \\\n",
" model/train_ds=train_ds_finetune \\\n",
" model/validation_ds=val_ds_finetune)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b0d15d4d",
"metadata": {},
"outputs": [],
"source": [
"import json\n",
"import IPython.display as ipd\n",
"from nemo.collections.tts.models import HifiGanModel\n",
"from nemo.collections.tts.models import FastPitchModel\n",
"import shutil\n",
"import os\n",
"\n",
"# Only need to set speaker_id if there is more than one speaker\n",
"speaker_id = None\n",
"\n",
"base_dir=\"glados_out\"\n",
"\n",
"last_fastpitch_ckpt = get_best_ckpt_from_last_run(base_dir=base_dir, model_name=\"FastPitch\")\n",
"last_hifigan_ckpt = get_best_ckpt_from_last_run(base_dir=base_dir, model_name=\"HifiGan\")\n",
"\n",
"print(\"Fastpitch: \" + last_fastpitch_ckpt)\n",
"print(\"HifiGan: \" + last_hifigan_ckpt)\n",
"\n",
"vocoder = HifiGanModel.load_from_checkpoint(last_hifigan_ckpt)\n",
"vocoder = vocoder.eval().cuda()\n",
"spec_model = FastPitchModel.load_from_checkpoint(last_fastpitch_ckpt)\n",
"spec_model.eval().cuda()\n",
"\n",
"num_val = 5 # Number of validation samples\n",
"val_records = []\n",
"with open(\"manifest_validation.json\", \"r\") as f:\n",
" for i, line in enumerate(f):\n",
" val_records.append(json.loads(line))\n",
" if len(val_records) >= num_val:\n",
" break\n",
" \n",
"for val_record in val_records:\n",
" print(\"Real validation audio\")\n",
" ipd.display(ipd.Audio(val_record['audio_filepath'], rate=22050))\n",
" print(f\"Text: {val_record['text']}\")\n",
" spec, audio = infer(spec_model, vocoder, val_record['text'], speaker=speaker_id)\n",
" ipd.display(ipd.Audio(audio, rate=22050))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "bad6e34f",
"metadata": {},
"outputs": [],
"source": [
"spec, audio = infer(spec_model, vocoder, \"Robots are awesome\", speaker=speaker_id)\n",
"ipd.display(ipd.Audio(audio, rate=22050))\n",
"spec, audio = infer(spec_model, vocoder, \"Welcome back to the armoury\", speaker=speaker_id)\n",
"ipd.display(ipd.Audio(audio, rate=22050))"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f23644bc",
"metadata": {},
"outputs": [],
"source": [
"spec_model.save_to(\"fastpitch.nemo\")\n",
"vocoder.save_to(\"hifigan.nemo\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "63485879",
"metadata": {},
"outputs": [],
"source": [
"# pip3 install whl\n",
"# pip3 install nemo2riva\n",
"\n",
"# Run on training machine\n",
"!nemo2riva --out hifigan.riva hifigan.nemo --key None\n",
"!nemo2riva --out fastpitch.riva fastpitch.nemo --key None\n",
"\n",
"# scp .riva files to jetson under /home/davesarmoury/RIVA/artifacts\n",
"# Everything below is run on the jetson\n",
"\n",
"# Servicemaker docker\n",
"docker run --gpus all -it --rm \\\n",
" -v /home/davesarmoury/RIVA/artifacts:/servicemaker-dev \\\n",
" -v /home/davesarmoury/RIVA/riva_repo:/data \\\n",
" --entrypoint=\\\"/bin/bash\\\" \\\n",
" nvcr.io/nvidia/riva/riva-speech:2.13.1-servicemaker-l4t-aarch64\n",
"\n",
"riva-build speech_synthesis \\\n",
" /servicemaker-dev/glados.rmir:tlt_encode \\\n",
" /servicemaker-dev/glados_fastpitch.riva:tlt_encode \\\n",
" /servicemaker-dev/glados_hifigan.riva:tlt_encode \\\n",
" --voice_name=GLaDOS \\\n",
" --sample_rate 22050\n",
"\n",
"riva-deploy /servicemaker-dev/glados.rmir:tlt_encode /data/models\n",
"\n",
"# Exit docker\n",
"\n",
"ngc registry resource download-version nvidia/riva/riva_quickstart\n",
"cd riva_quickstart\n",
"bash riva_init.sh\n",
"# copy glados.riva files into riva models dir\n",
"bash riva_start.sh"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}