Updated to create manifest for NeMo retraining

Dave Niewinski 2022-12-06 22:29:57 -05:00
parent 689c9ed154
commit 19a60e54f0
3 changed files with 67 additions and 22 deletions
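
The script changes below make the downloader emit audio/manifest.json with one JSON object per line (audio_filepath, text, text_normalized, text_no_preprocessing, duration), the manifest layout NeMo-style TTS training reads. A minimal sketch for sanity-checking the output, assuming the script has been run with prep() and download_parallel() re-enabled so the manifest and .wav files exist (not part of the commit):

# Sketch: sanity-check the generated manifest (not part of this commit).
# Assumes audio/manifest.json was written as in the diff below.
import json

with open("audio/manifest.json") as f:
    entries = [json.loads(line) for line in f]   # one JSON object per line

total_sec = sum(e["duration"] for e in entries)
print(len(entries), "clips,", round(total_sec / 60, 1), "minutes of audio")
print(entries[0]["audio_filepath"], "->", entries[0]["text"])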

.gitignore
@@ -1 +1,2 @@
+manifest.json
 *.wav

@@ -5,6 +5,10 @@ from multiprocessing import cpu_count
 from multiprocessing.pool import ThreadPool
 import shutil
 import os
+from bs4 import BeautifulSoup
+import soundfile as sf
+import string
+import json
 
 class bcolors:
     HEADER = '\033[95m'
@ -17,14 +21,23 @@ class bcolors:
BOLD = '\033[1m' BOLD = '\033[1m'
UNDERLINE = '\033[4m' UNDERLINE = '\033[4m'
blocklist = ["potato", "_ding_"] blocklist = ["potato", "_ding_", "00_part1_entry-6"]
audio_dir = 'audio' audio_dir = 'audio'
if os.path.exists(audio_dir): download_threads = 64
print("Deleting previously downloaded audio")
shutil.rmtree(audio_dir)
os.mkdir(audio_dir) def prep():
if os.path.exists(audio_dir):
print("Deleting previously downloaded audio")
shutil.rmtree(audio_dir)
os.mkdir(audio_dir)
def remove_punctuation(str):
return str.translate(str.maketrans('', '', string.punctuation))
def audio_duration(fn):
f = sf.SoundFile(fn)
return f.frames / f.samplerate
def download_file(args): def download_file(args):
url, filename = args[0], args[1] url, filename = args[0], args[1]
@@ -37,8 +50,7 @@ def download_file(args):
         return filename, False
 
 def download_parallel(args):
-    cpus = cpu_count()
-    results = ThreadPool(cpus - 1).imap_unordered(download_file, args)
+    results = ThreadPool(download_threads).imap_unordered(download_file, args)
     for result in results:
         if result[1]:
             print(bcolors.OKGREEN + "[" + u'\u2713' + "] " + bcolors.ENDC + result[0])
@@ -46,28 +58,58 @@ def download_parallel(args):
             print(bcolors.FAIL + "[" + u'\u2715' + "] " + bcolors.ENDC + result[0])
 
 def main():
-    global blocklist
-
     r = requests.get("https://theportalwiki.com/wiki/GLaDOS_voice_lines")
 
-    split_text = r.text.split()
-
     urls = []
     filenames = []
+    texts = []
 
-    for chunk in split_text:
-        if "https:" in chunk and ".wav" in chunk:
-            url = chunk.replace('"', '').replace("href=", '')
-            if url not in urls:
-                for s in blocklist:
-                    if s in url:
-                        break
-                else:
-                    urls.append(url)
-                    filenames.append(url[url.rindex("/")+1:])
+    soup = BeautifulSoup(r.text, 'html.parser')
+    for link_item in soup.find_all('a'):
+        url = link_item.get("href", None)
+        if url:
+            if "https:" in url and ".wav" in url:
+                list_item = link_item.find_parent("li")
+                ital_item = list_item.find_all('i')
+                if ital_item:
+                    text = ital_item[0].text
+                    text = text.replace('"', '')
+                    filename = url[url.rindex("/")+1:]
+                    if "[" not in text and "]" not in text:
+                        if url not in urls:
+                            for s in blocklist:
+                                if s in url:
+                                    break
+                            else:
+                                urls.append(url)
+                                filenames.append(filename)
+                                texts.append(text)
 
     print("Found " + str(len(urls)) + " urls")
     args = zip(urls, filenames)
-    download_parallel(args)
-
-main()
+    #prep()
+    #download_parallel(args)
+
+    #{"audio_filepath": "audio/nada_lily_21_haggard_0316.wav",
+    #"text": "awake ye kings",
+    #"duration": 1.3,
+    #"text_no_preprocessing": "\u201cAwake, ye kings,\u201d",
+    #"text_normalized": "\"Awake, ye kings,\""}
+
+    outFile = open(os.path.join(audio_dir, "manifest.json"), 'w')
+
+    for i in range(len(urls)):
+        item = {}
+        text = texts[i]
+        filename = filenames[i]
+
+        item["audio_filepath"] = filename
+        item["text_normalized"] = text
+        item["text_no_preprocessing"] = text
+        item["text"] = text.lower()
+        item["duration"] = audio_duration(os.path.join(audio_dir, filename))
+
+        outFile.write(json.dumps(item, ensure_ascii=True, sort_keys=True) + "\n")
+
+    outFile.close()
+
+main()

@@ -1 +1,3 @@
+beautifulsoup4
 requests
+soundfile
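
The two new dependencies (beautifulsoup4 for parsing the wiki page, soundfile for reading clip durations) can be installed alongside requests before running the script, for example:

pip install beautifulsoup4 requests soundfile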