fix(generators/ideas.py): fix typo in variable name 'existing_ideas'

feat(generators/montage.py): add check to skip slide if it already exists
feat(generators/montage.py): add support for DEEPL_ACCESS_KEY and UNSPLASH_ACCESS_KEY environment variables
feat(generators/speak.py): add support for Johanne voice
feat(generators/speak.py): add emotion parameter to generate_voice function
feat(generators/uploader.py): add success message and authorization prompt message to run_local_server method
fix(main.py): check if credits is None before writing to meta.txt file
feat(prompts/marp.md): change theme to gaia and add lead and invert classes
Paillat committed on 2023-05-25 21:47:11 +02:00
commit 25f578f48c (parent 32c14a01ca)
6 changed files with 90 additions and 58 deletions

generators/ideas.py

@@ -20,7 +20,10 @@ async def generate_ideas(path, subject):
    except:
        ides_json = []
        ideas = "There are no existing ideas."
-    prmpt = prmpt.replace('[existing ideas]', ideas)
+    existing_ideas = ""
+    for idea in ides_json:
+        existing_ideas += f"{idea['title']}\n"
+    prmpt = prmpt.replace('[existing ideas]', existing_ideas)
     print(prmpt)
     response = await openai.ChatCompletion.acreate(
         model="gpt-3.5-turbo",

generators/montage.py

@@ -9,12 +9,12 @@ from generators.speak import generate_voice, voices
 from moviepy.video.VideoClip import ImageClip
 from moviepy.editor import VideoFileClip, concatenate_videoclips, CompositeAudioClip, concatenate_audioclips
 from moviepy.audio.io.AudioFileClip import AudioFileClip
-from moviepy.audio.fx.all import volumex, audio_fadein, audio_fadeout
+from moviepy.audio.fx.all import volumex, audio_fadein, audio_fadeout # type: ignore
 from dotenv import load_dotenv
 load_dotenv()
-unsplash_access = os.getenv("UNSPLASH_ACCESS_KEY")
+unsplash_access = os.getenv("UNSPLASH_ACCESS_KEY") or "UNSPLASH_ACCESS_KEY"
 unsplash_url = "https://api.unsplash.com/photos/random/?client_id=" + unsplash_access + "&query="
-deepl_access = os.getenv("DEEPL_ACCESS_KEY")
+deepl_access = os.getenv("DEEPL_ACCESS_KEY") or "DEEPL_ACCESS_KEY"
 translator = deepl.Translator(deepl_access)
 def prepare(path):
@@ -36,6 +36,9 @@ def prepare(path):
         if not os.path.exists(audio_path):
             generate_voice(audio_path, script[i]['spoken'], choosen_voice)
         if "image" in script[i]:
+            if os.path.exists(path + "/slides/assets/slide" + str(i) + ".md"):
+                #skip this slide
+                continue
             if not os.path.exists(path + "/slides/assets"):
                 os.mkdir(path + "/slides/assets")
             url= unsplash_url + script[i]['image']
@@ -48,16 +51,29 @@ def prepare(path):
with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
f.write(content)
elif "markdown" in script[i]:
if os.path.exists(path + "/slides/slide" + str(i) + ".md"):
#skip this slide
continue
with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
f.write(marp + "\n\n" + script[i]['markdown'])
elif "huge" in script[i]:
#use fit
if os.path.exists(path + "/slides/slide" + str(i) + ".md"):
#skip this slide
continue
with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
f.write(marp + "\n\n# <!-- fit --> " + script[i]['huge'])
else:
pass
if os.path.exists(path + "/slides/slide" + str(i) + ".md"):
#skip this slide
continue
with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
f.write(marp + "\n\n") # blank slide
for i in range(len(script)):
marrkdown_path = "./" + path + "/slides/slide" + str(i) + ".md"
if os.path.exists(f"./{path}/slides/slide{i}.png"):
#skip this slide
continue
command = f"marp.exe {marrkdown_path} -o {path}/slides/slide{i}.png --allow-local-files"
os.system(command)
return script
@@ -79,42 +95,45 @@ def subs(length, total, text, srt, index):
     return srt
 def mount(path, script):
-    num_slides = len(os.listdir(path + "/audio"))
-    clips = []
-    srt = pysrt.SubRipFile()
-    total_length = 0
-    for i in range(num_slides):
-        audio = AudioFileClip(path + "/audio/audio" + str(i) + ".mp3")
-        complete_audio = CompositeAudioClip([
-            AudioFileClip("silence.mp3").set_duration(1),
-            audio,
-            AudioFileClip("silence.mp3").set_duration(1)
-        ])
-        length = complete_audio.duration
-        total_length += length
-        srt = subs(length, total_length, script[i]['spoken'], srt, i)
-        slide = ImageClip(path + "/slides/slide" + str(i) + ".png").set_duration(length)
-        slide = slide.set_audio(complete_audio)
-        clips.append(slide)
-    randmusic = random.choice(os.listdir("musics"))
-    while randmusic.endswith(".txt"): randmusic = random.choice(os.listdir("musics"))
-    randpath = "musics/" + randmusic
-    music = AudioFileClip(randpath).set_duration(total_length)
-    music = audio_fadein(music, 20)
-    music = audio_fadeout(music, 20)
-    music = volumex(music, 0.2)
-    musics = []
-    if music.duration < total_length:
-        for i in range(int(total_length / music.duration)):
-            musics.append(music)
-        music = concatenate_audioclips(musics)
-    final_clip = concatenate_videoclips(clips, method="compose")
-    existing_audio = final_clip.audio
-    final_audio = CompositeAudioClip([existing_audio, music])
-    final_clip = final_clip.set_audio(final_audio)
-    final_clip.write_videofile(path + "/montage.mp4", fps=60, codec="nvenc")
-    srt.save(path + "/montage.srt")
-    with open (randpath.split(".")[0] + ".txt", 'r', encoding='utf-8') as f:
-        music_credit = f.read()
-        f.close()
-    return music_credit
+    if not os.path.exists(path + "/montage.mp4"):
+        num_slides = len(os.listdir(path + "/audio"))
+        clips = []
+        srt = pysrt.SubRipFile()
+        total_length = 0
+        for i in range(num_slides):
+            audio = AudioFileClip(path + "/audio/audio" + str(i) + ".mp3")
+            complete_audio = CompositeAudioClip([
+                AudioFileClip("silence.mp3").set_duration(1),
+                audio,
+                AudioFileClip("silence.mp3").set_duration(1)
+            ])
+            length = complete_audio.duration
+            total_length += length
+            srt = subs(length, total_length, script[i]['spoken'], srt, i)
+            slide = ImageClip(path + "/slides/slide" + str(i) + ".png").set_duration(length)
+            slide = slide.set_audio(complete_audio)
+            clips.append(slide)
+        randmusic = random.choice(os.listdir("musics"))
+        while randmusic.endswith(".txt"): randmusic = random.choice(os.listdir("musics"))
+        randpath = "musics/" + randmusic
+        music = AudioFileClip(randpath).set_duration(total_length)
+        music = audio_fadein(music, 20)
+        music = audio_fadeout(music, 20)
+        music = volumex(music, 0.2)
+        musics = []
+        if music.duration < total_length:
+            for i in range(int(total_length / music.duration)):
+                musics.append(music)
+            music = concatenate_audioclips(musics)
+        final_clip = concatenate_videoclips(clips, method="compose")
+        existing_audio = final_clip.audio
+        final_audio = CompositeAudioClip([existing_audio, music])
+        final_clip = final_clip.set_audio(final_audio)
+        final_clip.write_videofile(path + "/montage.mp4", fps=60, codec="nvenc")
+        srt.save(path + "/montage.srt")
+        with open (randpath.split(".")[0] + ".txt", 'r', encoding='utf-8') as f:
+            music_credit = f.read()
+            f.close()
+        return music_credit
+    else:
+        return None
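The os.path.exists checks added throughout prepare() and mount() all follow one pattern: an artifact (slide markdown, rendered PNG, final montage) is regenerated only when its output file is missing, so an interrupted run can be restarted without redoing finished work. A minimal sketch of the pattern, with a hypothetical render callback:

import os

def render_if_missing(out_path, render):
    # Skip work whose output already exists, so a rerun resumes where it stopped.
    if os.path.exists(out_path):
        return
    render(out_path)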

generators/speak.py

@@ -8,17 +8,23 @@ fakenames = {
"Alexander": "p230",
"Benjamin": "p240",
"Amelia": "p270",
"Katherine": "p273"
"Katherine": "p273",
"Johanne": "p347",
}
voices = ["Alexander", "Benjamin", "Amelia", "Katherine"]
voices = ["Alexander", "Benjamin", "Amelia", "Katherine", "Johanne"]
# Init TTS
def generate_voice(path, text, speaker="Alexander"):
try:
tts = TTS(model_best_multi, gpu=True)
except:
tts = TTS(model_best_multi, gpu=False)
model = model_best_multi
speaker = fakenames[speaker] if speaker in fakenames else speaker
tts.tts_to_file(text=text, file_path=path, speaker=speaker, speed=1)
print(f"Generating voice for {model} with speaker {speaker}")
try:
tts = TTS(model, gpu=True)
except:
tts = TTS(model, gpu=False)
tts.tts_to_file(text=text, file_path=path, speaker=speaker, speed=1, emotion="Happy")
if __name__ == "__main__":
generate_voice("test/test.mp3", "This is a test. I like the words python, django and flask. Betty bought a bit of butter but the butter was bitter. So she bought some better butter to make the bitter butter better.")

generators/uploader.py

@@ -26,7 +26,7 @@ RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, client.NotConnected,
 RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
-CLIENT_SECRETS_FILE = 'env/client_secret.json'
+CLIENT_SECRETS_FILE = ''
 #SCOPES = ['https://www.googleapis.com/auth/youtube.upload', 'https://www.googleapis.com/upload/youtube/v3/thumbnails/set', 'https://www.googleapis.com/auth/youtube.force-ssl']
 SCOPES = ['https://www.googleapis.com/auth/youtube']
@@ -38,6 +38,7 @@ VALID_PRIVACY_STATUSES = ('public', 'private', 'unlisted')
 # Authorize the request and store authorization credentials.
 def get_authenticated_service(credentialsPath=""):
+    CLIENT_SECRETS_FILE=f'{credentialsPath}/client_secret.json'
     if os.path.exists(f'{credentialsPath}/credentials.json'):
         with open(f'{credentialsPath}/credentials.json') as json_file:
             data = json.load(json_file)
@@ -52,7 +53,7 @@ def get_authenticated_service(credentialsPath=""):
     else:
         flow = InstalledAppFlow.from_client_secrets_file(
             CLIENT_SECRETS_FILE, SCOPES)
-        credentials = flow.run_local_server()
+        credentials = flow.run_local_server(success_message="Heyy, yippie, you're authenticated ! You can close this window now !", authorization_prompt_message="Please authorize this app to upload videos on your YouTube account !")
     with open(f'{credentialsPath}/credentials.json', 'w') as outfile:
         outfile.write(credentials.to_json())
     return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)
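Both messages are standard keyword arguments of google_auth_oauthlib's InstalledAppFlow.run_local_server, which starts a throwaway local web server to receive the OAuth redirect. A minimal sketch outside the uploader (the secrets path here is hypothetical):

from google_auth_oauthlib.flow import InstalledAppFlow

# Hypothetical client secrets location; the scope matches SCOPES above.
flow = InstalledAppFlow.from_client_secrets_file("env/client_secret.json", ['https://www.googleapis.com/auth/youtube'])
credentials = flow.run_local_server(
    authorization_prompt_message="Please authorize this app to upload videos on your YouTube account !",
    success_message="Heyy, yippie, you're authenticated ! You can close this window now !",
)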

main.py

@@ -53,9 +53,10 @@ async def main():
     script = prepare(path)
     credits = mount(path, script)
     description = f"{idea['description']}\n\nMusic credits: {credits}"
-    with open(path + "/meta.txt", 'w', encoding='utf-8') as f:
-        f.write(description)
-        f.close()
+    if credits != None:
+        with open(path + "/meta.txt", 'w', encoding='utf-8') as f:
+            f.write(description)
+            f.close()
     generate_miniature(path, title=idea['title'], description=idea['description'])
     upload_video(path, idea['title'], description, 28, "", "private", subjectdirpath)
     print(f"Your video is ready! You can find it in {path}.")

prompts/marp.md

@@ -1,6 +1,8 @@
 ---
 marp: true
-theme: default
-class: invert
+theme: gaia
+class:
+  - lead
+  - invert
 backgroundImage: url(https://images.unsplash.com/photo-1651604454911-fdfb0edde727)
 ---