Added parallel downloads so it doesn't take FOREVER

Dave Niewinski 2022-12-06 19:09:17 -05:00
parent f833bb99ce
commit d83036ae34


@@ -1,10 +1,22 @@
 #!/usr/bin/env python3
 import requests
-from tqdm import tqdm
+from multiprocessing import cpu_count
+from multiprocessing.pool import ThreadPool
 import shutil
 import os
 
+class bcolors:
+    HEADER = '\033[95m'
+    OKBLUE = '\033[94m'
+    OKCYAN = '\033[96m'
+    OKGREEN = '\033[92m'
+    WARNING = '\033[93m'
+    FAIL = '\033[91m'
+    ENDC = '\033[0m'
+    BOLD = '\033[1m'
+    UNDERLINE = '\033[4m'
+
 blocklist = ["potato", "_ding_"]
 
 audio_dir = 'audio'
@@ -14,25 +26,48 @@ if os.path.exists(audio_dir):
 os.mkdir(audio_dir)
 
-r = requests.get("https://theportalwiki.com/wiki/GLaDOS_voice_lines")
-
-split_text = r.text.split()
-urls = []
-
-for chunk in split_text:
-    if "https:" in chunk and ".wav" in chunk:
-        url = chunk.replace('"', '').replace("href=", '')
-        if url not in urls:
-            for s in blocklist:
-                if s in url:
-                    break
-            else:
-                urls.append(url)
-
-print("Found " + str(len(urls)) + " urls")
-
-for url in tqdm(urls):
-    filename = url[url.rindex("/")+1:]
-
-    response = requests.get(url)
-    open(os.path.join(audio_dir, filename), "wb").write(response.content)
+def download_file(args):
+    url, filename = args[0], args[1]
+    try:
+        response = requests.get(url)
+        open(os.path.join(audio_dir, filename), "wb").write(response.content)
+        return filename, True
+    except:
+        return filename, False
+
+def download_parallel(args):
+    cpus = cpu_count()
+    results = ThreadPool(cpus - 1).imap_unordered(download_file, args)
+    for result in results:
+        if result[1]:
+            print(bcolors.OKGREEN + "[" + u'\u2713' + "] " + bcolors.ENDC + result[0])
+        else:
+            print(bcolors.FAIL + "[" + u'\u2715' + "] " + bcolors.ENDC + result[0])
+
+def main():
+    global blocklist
+    r = requests.get("https://theportalwiki.com/wiki/GLaDOS_voice_lines")
+
+    split_text = r.text.split()
+    urls = []
+    filenames = []
+
+    for chunk in split_text:
+        if "https:" in chunk and ".wav" in chunk:
+            url = chunk.replace('"', '').replace("href=", '')
+            if url not in urls:
+                for s in blocklist:
+                    if s in url:
+                        break
+                else:
+                    urls.append(url)
+                    filenames.append(url[url.rindex("/")+1:])
+
+    print("Found " + str(len(urls)) + " urls")
+
+    args = zip(urls, filenames)
+
+    download_parallel(args)
+
+main()
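
The change swaps the sequential tqdm loop for a `multiprocessing.pool.ThreadPool`, which suits this network-bound workload: threads spend most of their time waiting on HTTP responses, so they overlap well even under the GIL. Below is a minimal standalone sketch of the same pattern; the URLs and filenames are hypothetical placeholders, and it adds `raise_for_status()` plus a narrower exception, which the commit itself does not use.

#!/usr/bin/env python3
# Sketch of the ThreadPool download pattern used in this commit.
# The example URLs below are placeholders, not real files.
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool

import requests

def fetch(args):
    url, filename = args
    try:
        response = requests.get(url)
        response.raise_for_status()  # treat HTTP errors as failures
        with open(filename, "wb") as f:
            f.write(response.content)
        return filename, True
    except requests.RequestException:
        return filename, False

jobs = [
    ("https://example.com/a.wav", "a.wav"),  # placeholder
    ("https://example.com/b.wav", "b.wav"),  # placeholder
]

# imap_unordered yields each result as soon as that download finishes,
# so progress is reported in completion order, not submission order.
# max(1, ...) guards against a zero-sized pool on single-core machines.
for filename, ok in ThreadPool(max(1, cpu_count() - 1)).imap_unordered(fetch, jobs):
    print(("[ok] " if ok else "[failed] ") + filename)

Sizing the pool at `cpu_count() - 1` keeps one core free; for purely I/O-bound downloads the pool could safely be larger than the core count.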