feat(audio_prompts): add default audio prompts for narrator

feat(audio_prompts): add en_narrator_deep audio prompt for narrator
feat(audio_prompts): add en_narrator_light_bg audio prompt for narrator
fix(video.py): fix indentation and add prompt for generating thumbnail
fix(montage.py): fix indentation and add prompt for generating thumbnail
fix(montage.py): fix image download for wikimage slides

fix(speak.py): remove unused import statement
fix(speak.py): remove unused variable 'fakenames'
feat(speak.py): add function 'remove_blank_moments' to remove silent parts from audio file
feat(speak.py): add function 'optimize_string_groups' to optimize string groups for audio generation
fix(speak.py): fix comment indentation in 'generate_voice' function
fix(speak.py): remove unused imports in 'generate_voice' function
fix(speak.py): remove unused variable 'reduced_noise' in 'generate_voice' function
fix(speak.py): remove unused import statements in 'generate_voice' function
fix(speak.py): remove unused import statement for 'logging' module
fix(speak.py): remove unused print statements in 'main' function

fix(wiki_downloader.py): fix Google search URL to include correct query parameter
fix(wiki_downloader.py): reduce sleep time after page load to 1 second
fix(wiki_downloader.py): increase sleep time after image click to 5 seconds
Paillat
2023-07-02 11:17:10 +02:00
parent f1de2ad596
commit f7835f6604
13 changed files with 206 additions and 114 deletions

BIN  audio_prompts/default.npz (new binary file, content not shown)
BIN  audio_prompts/en_narrator_deep.npz (new binary file, content not shown)
BIN  audio_prompts/en_narrator_light_bg.npz (new binary file, content not shown)

video.py

@@ -48,7 +48,7 @@ class Video:
             script = await generate_script(self.idea['title'], self.idea['description'])
             script = json.loads(script)
             with open(os.path.join( self.path, "script.json"), "w") as f:
-                json.dump(script, f)
+                json.dump(script, f, indent=4)
                 f.close()
         else:
             with open(os.path.join(self.path, "script.json"), "r") as f:
@@ -60,7 +60,8 @@ class Video:
             "title": self.idea['title'],
             "description": self.idea['description'] + "\n\n" + credits,
         }
-        await generate_thumbnail( self.path, self.idea['title'], self.idea['description'])
+        if input("Do you want to generate a thumbnail ? (y/N) : ").lower() == "y":
+            await generate_thumbnail( self.path, self.idea['title'], self.idea['description'])
         videoid = await upload_video( self.path, self.idea['title'], self.metadata['description'], 28, "", "private", self.parent.path)
         printm(f"Your video is ready! You can find it in { self.path}")
         video_meta_file = {

montage.py

@@ -10,7 +10,7 @@ from moviepy.editor import concatenate_videoclips, CompositeAudioClip, concatena
 from moviepy.audio.io.AudioFileClip import AudioFileClip
 from moviepy.audio.fx.all import volumex, audio_fadein, audio_fadeout # type: ignore
 from utils.misc import getenv
+from utils.wiki_downloader import download_image as wiki_download_image

 unsplash_access = getenv("unsplash_access_key")
 if not unsplash_access:
@@ -22,64 +22,82 @@ async def prepare(path):
         script = json.load(f)
         f.close()
     if not os.path.exists(path + "/slides"): os.mkdir(path + "/slides")
-    fresh = False
-    if not os.path.exists(path + "/audio"):
-        os.mkdir(path + "/audio")
-        fresh = True
-    with open("prompts/marp.md", 'r', encoding='utf-8') as f:
+    if not os.path.exists(path + "/audio"): os.mkdir(path + "/audio")
+    choosen_voice = random.choice(voices)
+    with open(os.path.join(os.getcwd(), "prompts", "marp.md"), 'r', encoding='utf-8') as f:
         marp = f.read()
         f.close()
-    if fresh:
-        choosen_voice = random.choice(voices)
-        generator = VoiceGenerator(speaker=choosen_voice)
-        for i in range(len(script)):
-            audio_path = path + "/audio/audio" + str(i) + ".wav"
-            if not os.path.exists(audio_path):
-                generator.generate_voice(audio_path, script[i]['spoken'])
-            if "image" in script[i]:
-                if os.path.exists(path + "/slides/assets/slide" + str(i) + ".md"):
-                    #skip this slide
-                    continue
-                if not os.path.exists(path + "/slides/assets"):
-                    os.mkdir(path + "/slides/assets")
-                url= unsplash_url + script[i]['image']
-                r = requests.get(url)
-                real_url = r.json()['urls']['raw']
-                with open(path + "/slides/assets/slide" + str(i) + ".jpg", 'wb') as f:
-                    f.write(requests.get(real_url).content)
-                    f.close()
-                content = marp + f"\n\n![bg 70%](assets/slide{i}.jpg)"
-                with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
-                    f.write(content)
-            elif "markdown" in script[i]:
-                if os.path.exists(path + "/slides/slide" + str(i) + ".md"):
-                    #skip this slide
-                    continue
-                with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
-                    f.write(marp + "\n\n" + script[i]['markdown'])
-            elif "huge" in script[i]:
-                #use fit
-                if os.path.exists(path + "/slides/slide" + str(i) + ".md"):
-                    #skip this slide
-                    continue
-                with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
-                    f.write(marp + "\n\n# <!-- fit --> " + script[i]['huge'])
-            else:
-                if os.path.exists(path + "/slides/slide" + str(i) + ".md"):
-                    #skip this slide
-                    continue
-                with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
-                    f.write(marp + "\n\n") # blank slide
+    for i in range(len(script)):
+        audio_path = os.path.join(path, "audio", "audio" + str(i) + ".wav")
+        generator = None
+        if not os.path.exists(audio_path):
+            if not generator:
+                generator = VoiceGenerator(speaker=choosen_voice)
+            print("Generating audio for slide " + str(i))
+            generator.generate_voice(audio_path, script[i]['spoken'])
+        if "image" in script[i]:
+            if os.path.exists(os.path.join(path, "slides", "slide" + str(i) + ".md")) and os.path.exists(os.path.join(path, "slides", "slide" + str(i) + ".png")):
+                #skip this slide
+                continue
+            if not os.path.exists(path + "/slides/assets"):
+                os.mkdir(path + "/slides/assets")
+            url= unsplash_url + script[i]['image'].replace("+", ",")
+            r = requests.get(url)
+            real_url = r.json()['urls']['raw']
+            with open(path + "/slides/assets/slide" + str(i) + ".jpg", 'wb') as f:
+                f.write(requests.get(real_url).content)
+                f.close()
+            content = marp + f"\n\n![bg 70%](assets/slide{i}.jpg)"
+            with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
+                f.write(content)
+        elif "wikimage" in script[i]:
+            if os.path.exists(os.path.join(path, "slides", "slide" + str(i) + ".md")) and os.path.exists(os.path.join(path, "slides", "slide" + str(i) + ".png")):
+                #skip this slide
+                continue
+            if not os.path.exists(path + "/slides/assets"):
+                os.mkdir(path + "/slides/assets")
+            r = 0
+            while True:
+                try:
+                    print("Trying to download image for slide " + str(i))
+                    wiki_download_image(script[i]['wikimage'], os.path.abspath(os.path.join(path, "slides", "assets", "slide" + str(i) + ".jpg")))
+                    print("Downloaded image for slide with wikiimage " + str(i))
+                    break
+                except:
+                    r += 1
+                    if r > 5:
+                        break
+                    continue
+            content = marp + f"\n\n![bg 70%](assets/slide{i}.jpg)"
+            with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
+                f.write(content)
+        elif "markdown" in script[i]:
+            if os.path.exists(path + "/slides/slide" + str(i) + ".md") and os.path.exists(path + "/slides/slide" + str(i) + ".png"):
+                #skip this slide
+                continue
+            with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
+                f.write(marp + "\n\n" + script[i]['markdown'])
+        elif "huge" in script[i]:
+            #use fit
+            if os.path.exists(path + "/slides/slide" + str(i) + ".md") and os.path.exists(path + "/slides/slide" + str(i) + ".png"):
+                #skip this slide
+                continue
+            with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
+                f.write(marp + "\n\n# <!-- fit --> " + script[i]['huge'])
+        else:
+            if os.path.exists(path + "/slides/slide" + str(i) + ".md") and os.path.exists(path + "/slides/slide" + str(i) + ".png"):
+                #skip this slide
+                continue
+            with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
+                f.write(marp + "\n\n") # blank slide
     for i in range(len(script)):
         markdown_path = os.path.join(path, f"slides/slide{i}.md")
         markdown_path = os.path.abspath(markdown_path)
         image_path = os.path.join(path, f"slides/slide{i}.png")
         image_path = os.path.abspath(image_path)
-        if os.path.exists(markdown_path):
-            #skip this slide
-            continue
-        command = f'marp.exe "{markdown_path}" -o "{image_path}" --allow-local-files'
-        os.system(command)
+        if not os.path.exists(image_path):
+            command = f'marp.exe "{markdown_path}" -o "{image_path}" --allow-local-files'
+            os.system(command)
     return script

 def convert_seconds_to_time_string(seconds):
@@ -113,8 +131,6 @@ async def mount(path, script):
         ])
         length = complete_audio.duration
         total_length += length
-        print(script[i])
-        print(script[i]['spoken'])
         srt = subs(length, total_length, script[i]['spoken'], srt, i)
         slide = ImageClip(path + "/slides/slide" + str(i) + ".png").set_duration(length)
         slide = slide.set_audio(complete_audio)
@@ -122,7 +138,7 @@ async def mount(path, script):
     randmusic = random.choice(os.listdir("musics"))
     while randmusic.endswith(".txt"): randmusic = random.choice(os.listdir("musics"))
     randpath = "musics/" + randmusic
-    music = AudioFileClip(randpath).set_duration(total_length)
+    music = AudioFileClip(randpath)
     music = audio_fadein(music, 20)
     music = audio_fadeout(music, 20)
     music = volumex(music, 0.2)
@@ -131,6 +147,7 @@ async def mount(path, script):
     for i in range(int(total_length / music.duration)):
         musics.append(music)
     music = concatenate_audioclips(musics)
+    music = music.set_duration(total_length)
     final_clip = concatenate_videoclips(clips, method="compose")
     existing_audio = final_clip.audio
     final_audio = CompositeAudioClip([existing_audio, music])
@@ -142,4 +159,4 @@ async def mount(path, script):
         f.close()
         return music_credit or ""
     else:
-        return None
+        return ""
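
The change to the background-music handling above moves the duration cut to after the looping: the track is no longer trimmed to total_length before the fades, it is repeated enough times to cover the whole video and only then cut. A minimal sketch of that loop-then-trim pattern, assuming moviepy 1.x; the helper name and the example file path are illustrative, not from this repository:

    import math
    from moviepy.editor import concatenate_audioclips
    from moviepy.audio.io.AudioFileClip import AudioFileClip

    def looped_background(music_path: str, total_length: float):
        music = AudioFileClip(music_path)
        # Repeat the clip often enough to cover the whole video...
        repeats = max(1, math.ceil(total_length / music.duration))
        looped = concatenate_audioclips([music] * repeats)
        # ...then trim once at the end, so the cut lands on the last
        # repetition instead of shortening the only copy before looping.
        return looped.set_duration(total_length)

    if __name__ == "__main__":
        bg = looped_background("musics/example.mp3", 95.0)
        print(bg.duration)  # 95.0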

speak.py

@@ -1,5 +1,5 @@
 import os
+from pydub import AudioSegment, silence

 fakenames = {
     "Alexander": "p230",
@@ -11,16 +11,70 @@ fakenames = {
 voices = ["Alexander", "Benjamin", "Amelia", "Katherine", "Johanne"]

+def remove_blank_moments(file_path, silence_thresh= -50, silence_chunk_len=500):
+    # Load audio file
+    audio = AudioSegment.from_wav(file_path)
+    # Detect non-silent parts
+    nonsilent_data = silence.detect_nonsilent(audio, min_silence_len=silence_chunk_len, silence_thresh=silence_thresh)
+    # Create new audio file
+    final_audio = AudioSegment.empty()
+    # Iterate over non-silent parts and append to the final_audio with 0.5 seconds before and after each segment
+    for idx, (start_i, end_i) in enumerate(nonsilent_data):
+        start_i = max(0, start_i - 500) # 0.5 seconds before
+        end_i += 500 # 0.5 seconds after
+        segment = audio[start_i:end_i]
+        # Only append silence after the first segment
+        if idx > 0:
+            final_audio += AudioSegment.silent(duration=500)
+        final_audio += segment
+    # Save the result
+    if not os.path.exists(os.path.abspath(os.path.join(os.getcwd(), "temp"))):
+        os.mkdir(os.path.abspath(os.path.join(os.getcwd(), "temp")))
+    tempfile_path = os.path.abspath(os.path.join(os.getcwd(), "temp", "temp.wav"))
+    final_audio.export(tempfile_path, format="wav")
+    os.remove(file_path)
+    os.rename(tempfile_path, file_path)
+
+def optimize_string_groups(strings):
+    optimized_groups = []
+    current_group = []
+    current_length = 0
+    for string in strings:
+        string_length = len(string) + len(current_group) # Account for spaces between strings
+        if current_length + string_length <= 100:
+            current_group.append(string)
+            current_length += string_length
+        else:
+            optimized_groups.append(' '.join(current_group)) # Join strings with spaces
+            current_group = [string]
+            current_length = len(string)
+    if current_group:
+        optimized_groups.append(' '.join(current_group))
+    return optimized_groups
+
 class VoiceGenerator:
     def __init__(self, mode="Bark", speaker=""):
         self.mode = mode
         self.speaker = speaker
         if mode == "Bark":
             os.environ["XDG_CACHE_HOME"] = os.path.join(os.getcwd(), "bark_cache")
-            from bark import preload_models, generation
+            from bark import preload_models
+            print("Loading Bark voice generator")
             preload_models()
-            self.speaker = "v2/en_speaker_6"
+            #self.speaker = os.path.abspath(os.path.join(os.getcwd(), "audio_prompts", "en_male_professional_reader.npz"))
+            self.speaker = os.path.join(os.getcwd(), "audio_prompts", "en_narrator_light_bg.npz")
+            print(f"Generating voice for Bark with speaker {self.speaker}")
         else:
             from TTS.api import TTS
             model = "tts_models/en/vctk/vits"
@@ -43,20 +97,27 @@ class VoiceGenerator:
import numpy as np import numpy as np
import nltk import nltk
sentences = nltk.sent_tokenize(text) sentences = nltk.sent_tokenize(text)
sentences = optimize_string_groups(sentences)
print(sentences)
pieces = [] pieces = []
silence = np.zeros(int(0.25 * SAMPLE_RATE)) # quarter second of silence silence = np.zeros(int(0.25 * SAMPLE_RATE)) # quarter second of silence
for sentence in sentences: for sentence in sentences:
audio_array = generate_audio(sentence, history_prompt=self.speaker) if not sentence == "":
pieces += [audio_array, silence.copy()] audio_array = generate_audio(sentence, history_prompt=self.speaker)
pieces += [audio_array, silence.copy()]
audio_array = np.concatenate(pieces) audio_array = np.concatenate(pieces)
soundfile.write(path, audio_array, SAMPLE_RATE, format="WAV", subtype="PCM_16") soundfile.write(path, audio_array, SAMPLE_RATE, format="WAV", subtype="PCM_16")
rate, data = wavread(path) '''
reduced_noise = nr.reduce_noise(y=data, sr=rate) remove silence
os.remove(path) '''
wavwrite(path, rate, reduced_noise) remove_blank_moments(path)
else: else:
self.tts.tts_to_file(text=text, file_path=path, speaker=self.speaker, speed=1, emotion="Happy") self.tts.tts_to_file(text=text, file_path=path, speaker=self.speaker, speed=1, emotion="Happy")
if __name__ == "__main__": if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.INFO)
print("Testing voice generator")
generator = VoiceGenerator() generator = VoiceGenerator()
generator.generate_voice("test/test_r.wav", "Hello there!") print("Loaded voice generator")
generator.generate_voice("test/teste_r.wav", "This is a test. I like the words python, django and flask. Betty bought a bit of butter but the butter was bitter. So she bought some better butter to make the bitter butter better.") # generator.generate_voice("test/test_r.wav", "Hello there!")
generator.generate_voice("test/tast_timbernerslee.wav", "But his greatest claim to fame is undoubtedly his invention of the World Wide Web back in 1989. Can you imagine a world without the internet? [Laughs] No, thank you!")

View File

@@ -28,6 +28,11 @@ Answer without anything else, just with the 2 textes. Answer with text1 on the f
 Here is the title of the video: [TITLE]
 Here is the description of the video: [DESCRIPTION]'''

+# TODO: make jpg qith 90% quality default when generating the image to avoid having to convert it later

 async def rand_gradient(image):
     randr = random.SystemRandom().randint(1, 20)
     randg = random.SystemRandom().randint(1, 20)
@@ -110,11 +115,20 @@ async def generate_image(path, text1, text2):
     drawtext2.text((imgtext2.size[0]//8*2.5, imgtext2.size[1]//5*2), text2def, font=font2, fill=(textcolor2[0], textcolor2[1], textcolor2[2]))
     imgtext2 = imgtext2.rotate(5, expand=True)
     #paste the textes on the image
-    img.paste(bcg, (0, 0), bcg)
+    bcg = bcg.convert('RGBA')
+    #also set the bcg size to the image size
+    bcg = bcg.resize((1920, 1080))
+    img.paste(bcg, (0, 0), bcg) # TODO: make it work with standard pngs (non rgba)
     img.paste(imgtext1, (0, 0-img.size[1]//8), imgtext1)
     if len(text1def.split("\n")) > 2: #if the text is too long, put the second text on the third line
         img.paste(imgtext2, (0, img.size[1]//8), imgtext2)
     else:
         img.paste(imgtext2, (0, 0), imgtext2)
-    img.save(path + "/miniature.png")
-    return path + "/miniature.png"
+    #disable the alpha channel
+    img = img.convert('RGB')
+    img_path = os.path.abspath(os.path.join(path, "thumbnail.jpg"))
+    for quality in range(100, 0, -1):
+        img.save(img_path, quality=quality)
+        if os.path.getsize(img_path) < 2000000:
+            break
+    return img_path
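
The new saving loop replaces the single miniature.png write with a JPEG that is re-encoded at decreasing quality until the file drops under 2,000,000 bytes, presumably to stay inside YouTube's 2 MB limit for custom thumbnails. A standalone sketch of the same idea with Pillow; the paths, the helper name, and the coarser quality step are illustrative, not the project's code:

    import os
    from PIL import Image

    MAX_BYTES = 2_000_000  # YouTube's custom-thumbnail size limit (2 MB)

    def save_under_limit(img: Image.Image, out_path: str, max_bytes: int = MAX_BYTES) -> str:
        img = img.convert("RGB")            # JPEG has no alpha channel
        for quality in range(95, 0, -5):    # larger steps than the diff's -1
            img.save(out_path, "JPEG", quality=quality)
            if os.path.getsize(out_path) < max_bytes:
                break
        return out_path

    if __name__ == "__main__":
        thumb = Image.open("thumbnail_source.png")
        print(save_under_limit(thumb, "thumbnail.jpg"))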

main.py

@@ -11,15 +11,15 @@ from utils.openaicaller import openai
 logging.basicConfig(level=logging.INFO)

 async def main():
-    printm("Loading...")
-    await asyncio.sleep(1)
-    clear_screen()
+    #printm("Loading...")
+    #await asyncio.sleep(1)
+    #clear_screen()
     printm(loadingmessage)
-    await asyncio.sleep(4)
-    clear_screen()
-    await asyncio.sleep(1)
+    #await asyncio.sleep(4)
+    #clear_screen()
+    await asyncio.sleep(0.5)
     printm("Welcome in FABLE, the Film and Artistic Bot for Lively Entertainment!")
-    await asyncio.sleep(1)
+    await asyncio.sleep(0.5)
     printm(f"This program will generate for you complete {bcolors.FAIL}{bcolors.BOLD}YouTube{bcolors.ENDC} videos, as well as uploading them to YouTube.")
     if not os.path.exists('env.yaml'):
         printm("It looks like you don't have an OpenAI API key yet. Please paste it here:")
@@ -57,9 +57,17 @@ async def main():
     await channel.load(channel_name)
     printm("Now, let's create a video!")
     printm("Here are all the ideas you have:")
+    printm("0. Generate new ideas")
     for i, idea in enumerate(channel.ideas):
         printm(f"{i+1}. {idea['title']}")
     index = input("Which idea do you want to create a video for : ")
+    if index == "0":
+        printm("Generating new ideas...")
+        await channel.generate_ideas()
+        printm("Here are your new ideas:")
+        for i, idea in enumerate(channel.ideas):
+            printm(f"{i+1}. {idea['title']}")
+        index = input("Which idea do you want to create a video for : ")
     idea = channel.ideas[int(index)-1]
     video = await channel.generate_video(idea)
     printm("Done!")

View File

@@ -1,3 +1,3 @@
-Lost In Thought by Ghostrifter bit.ly/ghostrifter-yt
+Lost In Thought by Ghostrifter
 Creative Commons — Attribution-NoDerivs 3.0 Unported — CC BY-ND 3.0
-Music promoted by https://www.chosic.com/free-music/all/
+Music promoted by chosic

View File

@@ -1,4 +1,3 @@
-When I Was A Boy by Tokyo Music Walker | https://soundcloud.com/user-356546060
-Music promoted by https://www.chosic.com/free-music/all/
+When I Was A Boy by Tokyo Music Walker
+Music promoted by free-stock-music
 Creative Commons CC BY 3.0
-https://creativecommons.org/licenses/by/3.0/

View File

@@ -1,4 +1,3 @@
-Sin and Sensitivity (Rendition of Bachs "Air") by Aila Scott • Johann Sebastian Bach | https://ailascott.com
-Music promoted by https://www.free-stock-music.com
+Sin and Sensitivity (Rendition of Bachs "Air") by Aila Scott • Johann Sebastian Bach
+Music promoted by free-stock-music
 Creative Commons / Attribution 4.0 International (CC BY 4.0)
-https://creativecommons.org/licenses/by/4.0/

View File

@@ -41,7 +41,10 @@ VALID_PRIVACY_STATUSES = ('public', 'private', 'unlisted')
 async def get_authenticated_service(credentialsPath="", force_refresh=False):
     CLIENT_SECRETS_FILE = ""
     try:
-        CLIENT_SECRETS_FILE=os.path.join(credentialsPath, "client_secret.json")
+        if os.path.exists(os.path.join(credentialsPath, "client_secret.json")):
+            CLIENT_SECRETS_FILE=os.path.join(credentialsPath, "client_secret.json")
+        else:
+            raise FileNotFoundError("No client_secret.json file found in the specified path !")
     except:
         listdir = os.listdir(credentialsPath)
         for file in listdir:
@@ -146,25 +149,16 @@ async def upload_video(path, title, description, category, keywords, privacyStat
         'keywords': keywords,
         'privacyStatus': privacyStatus
     }
-    refresh = False
-    while True:
-        try:
-            youtube = await get_authenticated_service(credentials_path, force_refresh=refresh)
-            videoid = await initialize_upload(youtube, options)
-            await upload_thumbnail(videoid, path + "/miniature.png", credentials_path, youtube)
-            return videoid
-        except HttpError as e:
-            print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))
-            #escape the loop
-            break
-        except:
-            #refresh the token
-            if not refresh:
-                refresh = True
-            else:
-                #escape the loop
-                break
+    youtube = await get_authenticated_service(credentials_path, force_refresh=False)
+    print("Uploading video...")
+    try:
+        videoid = await initialize_upload(youtube, options)
+    except:
+        youtube = await get_authenticated_service(credentials_path, force_refresh=True)
+        videoid = await initialize_upload(youtube, options)
+    thumb_path = os.path.abspath(os.path.join(path, "thumbnail.jpg"))
+    await upload_thumbnail(videoid, thumb_path, credentials_path, youtube)
+    return videoid

 async def upload_thumbnail(video_id, file, credentials_path="", youtube=None):
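
upload_video now tries the upload once with the cached credentials and, only if that raises, re-authenticates with force_refresh=True and tries a single second time, instead of the old open-ended while True loop. A hedged sketch of that pattern as a reusable helper; with_refresh_retry, get_service and do_upload are illustrative stand-ins for the project's get_authenticated_service and initialize_upload, not real APIs:

    from typing import Awaitable, Callable, TypeVar

    T = TypeVar("T")

    async def with_refresh_retry(
        get_service: Callable[[bool], Awaitable[object]],
        do_upload: Callable[[object], Awaitable[T]],
    ) -> T:
        service = await get_service(False)   # cached credentials first
        try:
            return await do_upload(service)
        except Exception:
            # A failure (typically an expired token) triggers exactly one
            # re-authentication with force_refresh=True, then a final attempt.
            service = await get_service(True)
            return await do_upload(service)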

wiki_downloader.py

@@ -13,7 +13,7 @@ def download_image(query, download_path):
     driver = uc.Chrome(options=options)
     try:
-        driver.get(f"https://www.google.com/search?site=&tbm=isch&source=hp&biw=1873&bih=990&tbs=isz:l&q=site:wikipedia.org+{query.replace(' ', '+')}")
+        driver.get(f"https://www.google.com/search?site=&tbm=isch&source=hp&biw=1873&bih=99&q=site:wikipedia.org+{query.replace(' ', '+')}")
         time.sleep(2)
         tos = driver.find_elements(By.CLASS_NAME, "VfPpkd-vQzf8d")
@@ -21,11 +21,10 @@ def download_image(query, download_path):
             if to.text.lower() == "tout refuser":
                 to.click()
                 break
-        time.sleep(10)
+        time.sleep(1)
         image = driver.find_element(By.CLASS_NAME, "rg_i")
         image.click()
-        time.sleep(2)
+        time.sleep(5)
         image = driver.find_element(By.CLASS_NAME, "r48jcc").get_attribute("src") or ""
         image_content = None
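
The hunk ends just after the full-size image's src attribute is read, so the step that actually writes the file to download_path is not shown in this view. For orientation only, a generic sketch of fetching an image URL to disk with requests; this is not the project's continuation of download_image, and the function name is made up:

    import requests

    def fetch_image(url: str, download_path: str, timeout: float = 30.0) -> None:
        # Google's preview sometimes exposes a data: URL before the full-size
        # image has loaded; treat anything that is not http(s) as a failure so
        # the caller's retry loop (see montage.py above) can have another go.
        if not url.startswith("http"):
            raise ValueError("expected an http(s) image URL, got: " + url[:40])
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()
        with open(download_path, "wb") as f:
            f.write(resp.content)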