Initial commit

This commit is contained in:
Paillat
2023-05-15 10:11:04 +02:00
commit 5410752853
24 changed files with 742 additions and 0 deletions

25
generators/ideas.py Normal file
View File

@@ -0,0 +1,25 @@
import openai
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# Channel topic driving idea generation; presumably set in .env — if missing,
# the .replace() below raises TypeError (TODO confirm intended fail-fast).
subject = os.getenv("SUBJECT")
# Load the idea-generation prompt template once at import time.
# (The redundant f.close() was removed: 'with' already closes the file.)
with open('prompts/ideas.txt') as f:
    prompt = f.read().replace('[subject]', subject)
async def generate_ideas():
    """Ask the chat model for new video ideas, excluding existing ones.

    Reads the previously generated ideas from ideas/ideas.json (raw text,
    substituted into the module-level prompt template) and returns the raw
    text content of the model's reply.
    """
    # 'with' closes the file automatically; the old explicit f.close() inside
    # the with-block was redundant and has been removed.
    with open('ideas/ideas.json', 'r', encoding='utf-8') as f:
        existing_ideas = f.read()
    request_prompt = prompt.replace('[existing ideas]', existing_ideas)
    print(request_prompt)
    response = await openai.ChatCompletion.acreate(
        model="gpt-3.5-turbo",
        messages=[
            {"role": "user", "content": request_prompt},
        ],
    )
    return response['choices'][0]['message']['content']

112
generators/miniature.py Normal file
View File

@@ -0,0 +1,112 @@
import openai
import os
from PIL import Image, ImageDraw, ImageFont
import random
from dotenv import load_dotenv
from PIL import Image
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
'''
Putpose of this file is to generate a miniature of the video.
It has a function that takes a path, title, and description and generates a miniature.
It uses pillow to generate the image, and openai to generate text1 and text2.
text 1 is a short text max 2 words to put on the top of the image.
text 2 is a 3 word text to put in the middle of the image.
The function returns the path of the image.
First open bcg.png. Then create a new image and add a random gradient to it from top to bottom.
then put the png on top of the gradient.
Then add text1 and text2 to the image.
'''
# Prompt template sent to the chat model by generate_miniature().
# [TITLE] and [DESCRIPTION] are substituted before sending; the model is
# instructed to answer with exactly two lines (text1, then text2).
prompt = '''Generate 2 short textes OF MAX 2-4 WORDS each to put on the top of the miniature of the video. Here are some examples:
For the title "Python Exception Handling" the text1 could be "No more crashes!" and the text2 could be "Easy!"
The second text is often shorter than the first one.
Answer without anything else, just with the 2 textes. Answer with text1 on the first line and text2 on the second line. Nothing else.
Here is the title of the video: [TITLE]
Here is the description of the video: [DESCRIPTION]'''
def rand_gradient(image):
    """Fill *image* with a random linear gradient and sample two reference colours.

    The colour at pixel (i, j) is (i//randr, j//randg, i//randb) for random
    divisors in [1, 20], giving a dark diagonal gradient.  Two colours are
    sampled at fixed fractions of the image — (w//5, h//5) and (w//5, h//2) —
    which the caller inverts to obtain contrasting text colours.

    Returns (image, textcolor1, textcolor2), colours as [r, g, b] lists.
    """
    rng = random.SystemRandom()
    randr = rng.randint(1, 20)
    randg = rng.randint(1, 20)
    randb = rng.randint(1, 20)
    width, height = image.size
    # The sample positions are loop invariants: compute their colours directly
    # instead of testing every pixel (this also avoids the UnboundLocalError
    # the old per-pixel check hit on a zero-sized image).
    position1 = (width // 5, height // 5)
    position2 = (width // 5, height // 2)
    textcolor1 = [position1[0] // randr, position1[1] // randg, position1[0] // randb]
    textcolor2 = [position2[0] // randr, position2[1] // randg, position2[0] // randb]
    for i in range(width):
        for j in range(height):
            image.putpixel((i, j), (i // randr, j // randg, i // randb))
    return image, textcolor1, textcolor2
def generate_miniature(path, title, description):
    """Generate thumbnail captions via the chat model, then render the thumbnail.

    *title*/*description* are substituted into the module-level prompt; the
    model is expected to answer with text1 on line 1 and text2 on line 2.

    Returns the path of the generated image (path + "/miniature.png").
    """
    prmpt = prompt.replace("[TITLE]", title).replace("[DESCRIPTION]", description)
    response = openai.ChatCompletion.create(
        model="gpt-4",
        messages=[
            {"role": "user", "content": prmpt},
        ],
    )
    # First line is text1, second line is text2 (per the prompt contract).
    content = response['choices'][0]['message']['content']
    lines = content.split("\n")
    text1, text2 = lines[0], lines[1]
    # Bug fix: propagate the image path instead of discarding it — the module
    # docstring promises the path is returned.
    return generate_image(path, text1, text2)
def _wrap_words(text, max_chars=7):
    """Greedy word wrap: start a new line once the current line would exceed
    *max_chars*, but never split a single word (long words overflow)."""
    wrapped = ""
    for word in text.split(" "):
        if len(wrapped.split("\n")[-1]) + len(word) > max_chars:
            wrapped += "\n"
        wrapped += word + " "
    return wrapped


def _fit_font(wrapped, base_size=200, max_chars=7):
    """Return the title font, shrunk 10pt per character that the longest
    wrapped line exceeds *max_chars* (clamped so the size stays positive)."""
    longest = max(len(line) for line in wrapped.split("\n"))
    size = base_size
    if longest > max_chars:
        # Clamp: the old unclamped formula went <= 0 for very long words and
        # crashed ImageFont.truetype.
        size = max(base_size - (longest - max_chars) * 10, 10)
    return ImageFont.truetype("./Sigmar-Regular.ttf", size)


def generate_image(path, text1, text2):
    """Render the thumbnail: random gradient background, bcg.png overlay, and
    two rotated caption layers.  Saves and returns path + "/miniature.png"."""
    bcg = Image.open("bcg.png")
    img = Image.new('RGBA', (1920, 1080))
    img, textcolor1, textcolor2 = rand_gradient(img)
    wrapped1 = _wrap_words(text1)
    wrapped2 = _wrap_words(text2)
    # Font size is computed from the wrapped text *before* strip(), matching
    # the original line-length accounting (trailing spaces count).
    font1 = _fit_font(wrapped1)
    font2 = _fit_font(wrapped2)
    text1def = wrapped1.upper().strip()
    text2def = wrapped2.upper().strip()
    # Invert the sampled gradient colours so the text contrasts with it.
    textcolor1 = [255 - textcolor1[0], 255 - textcolor1[1], 255 - textcolor1[2]]
    textcolor2 = [255 - textcolor2[0], 255 - textcolor2[1], 255 - textcolor2[2]]
    imgtext1 = Image.new('RGBA', (1920, 1080))
    imgtext2 = Image.new('RGBA', (1920, 1080))
    drawtext1 = ImageDraw.Draw(imgtext1)
    drawtext1.text((imgtext1.size[0] // 8 * 2, 0), text1def, font=font1,
                   fill=(textcolor1[0], textcolor1[1], textcolor1[2]))
    imgtext1 = imgtext1.rotate(-5, expand=True)
    drawtext2 = ImageDraw.Draw(imgtext2)
    drawtext2.text((imgtext2.size[0] // 8 * 2.5, imgtext2.size[1] // 5 * 2), text2def,
                   font=font2, fill=(textcolor2[0], textcolor2[1], textcolor2[2]))
    imgtext2 = imgtext2.rotate(5, expand=True)
    # Compose: bcg.png over the gradient, then the rotated caption layers.
    img.paste(bcg, (0, 0), bcg)
    img.paste(imgtext1, (0, 0 - img.size[1] // 8), imgtext1)
    if len(text1def.split("\n")) > 2:
        # text1 spans three or more lines: push text2 lower to avoid overlap.
        img.paste(imgtext2, (0, img.size[1] // 8), imgtext2)
    else:
        img.paste(imgtext2, (0, 0), imgtext2)
    img.save(path + "/miniature.png")
    return path + "/miniature.png"
if __name__ == "__main__":
    # Manual smoke test; guarded so importing this module no longer renders
    # an image as a side effect.
    generate_image("test", "Master python loops", "Effortlessly")

128
generators/montage.py Normal file
View File

@@ -0,0 +1,128 @@
import json
import os
import requests
import pysrt
import deepl
import random
from generators.speak import generate_voice, voices
from moviepy.video.VideoClip import ImageClip
from moviepy.editor import VideoFileClip, concatenate_videoclips, CompositeAudioClip, concatenate_audioclips
from moviepy.audio.io.AudioFileClip import AudioFileClip
from moviepy.audio.fx.all import volumex, audio_fadein, audio_fadeout
from dotenv import load_dotenv
load_dotenv()
# NOTE(review): os.getenv returns None when the key is missing, which makes
# the string concatenation / deepl.Translator call below raise — consider
# failing fast with a clear error message.
unsplash_access = os.getenv("UNSPLASH_ACCESS_KEY")
# Unsplash "random photo" endpoint; the search query is appended per slide.
unsplash_url = "https://api.unsplash.com/photos/random/?client_id=" + unsplash_access + "&query="
deepl_access = os.getenv("DEEPL_ACCESS_KEY")
# Shared DeepL client used by translate() for the French subtitle track.
translator = deepl.Translator(deepl_access)
def prepare(path):
    """Build per-slide audio and slide images for the video at *path*.

    Reads <path>/script.json (a list of slide dicts with a 'spoken' field and
    optionally 'image', 'markdown' or 'huge'), synthesises one mp3 per slide,
    writes one marp markdown file per slide, then renders each markdown file
    to PNG with marp.exe.  Returns the parsed script.
    """
    with open(path + "/script.json", 'r', encoding='utf-8') as f:
        script = json.load(f)
    if not os.path.exists(path + "/slides"):
        os.mkdir(path + "/slides")
    if not os.path.exists(path + "/audio"):
        os.mkdir(path + "/audio")
    # Shared marp front matter prepended to every slide.
    with open("prompts/marp.md", 'r', encoding='utf-8') as f:
        marp = f.read()
    # Pick one narrator for the whole video.  Bug fix: previously the voice
    # was only chosen when the audio directory was freshly created, so a
    # partially generated audio directory crashed with a NameError below.
    choosen_voice = random.choice(voices)
    for i in range(len(script)):
        audio_path = path + "/audio/audio" + str(i) + ".mp3"
        if not os.path.exists(audio_path):
            generate_voice(audio_path, script[i]['spoken'], choosen_voice)
        if "image" in script[i]:
            if not os.path.exists(path + "/slides/assets"):
                os.mkdir(path + "/slides/assets")
            # Resolve the Unsplash random-photo endpoint to the raw image URL,
            # then download the image itself.
            url = unsplash_url + script[i]['image']
            r = requests.get(url)
            real_url = r.json()['urls']['raw']
            with open(path + "/slides/assets/slide" + str(i) + ".jpg", 'wb') as f:
                f.write(requests.get(real_url).content)
            content = marp + f"\n\n![bg 70%](assets/slide{i}.jpg)"
            with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
                f.write(content)
        elif "markdown" in script[i]:
            with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
                f.write(marp + "\n\n" + script[i]['markdown'])
        elif "huge" in script[i]:
            # '<!-- fit -->' makes marp scale the heading to fill the slide.
            with open(path + "/slides/slide" + str(i) + ".md", 'w', encoding='utf-8') as f:
                f.write(marp + "\n\n# <!-- fit --> " + script[i]['huge'])
    # Render every slide markdown file to PNG via the marp CLI.
    for i in range(len(script)):
        markdown_path = "./" + path + "/slides/slide" + str(i) + ".md"
        command = f"marp.exe {markdown_path} -o {path}/slides/slide{i}.png --allow-local-files"
        os.system(command)
    return script
def convert_seconds_to_time_string(seconds):
    """Format a float number of seconds as an SRT timestamp "HH:MM:SS,mmm"."""
    whole = int(seconds)
    millis = int((seconds - whole) * 1000)
    hours, remainder = divmod(whole, 3600)
    minutes, secs = divmod(remainder, 60)
    return f"{hours:02}:{minutes:02}:{secs:02},{millis:03}"
def subs(length, total, text, srt, index):
    """Append one subtitle entry covering [total - length, total] seconds to *srt*.

    Timestamps are rendered as SRT "HH:MM:SS,mmm" strings.  Returns *srt*.
    """
    begin = convert_seconds_to_time_string(total - length)
    end = convert_seconds_to_time_string(total)
    srt.append(pysrt.SubRipItem(index=index, start=begin, end=end, text=text))
    return srt
def translate(target, text):
    """Translate *text* into the *target* DeepL language code (e.g. "FR")."""
    return translator.translate_text(text, target_lang=target).text
def mount(path, script):
    """Assemble the final video from the slides and audio produced by prepare().

    Builds one ImageClip per slide (duration = its narration padded with 1s of
    silence on each side), writes English and French SRT subtitle tracks, mixes
    in a random background music file from musics/, renders <path>/montage.mp4,
    and returns the music credit text read from the track's companion .txt file.
    """
    num_slides = len(os.listdir(path + "/audio"))
    clips = []
    srt = pysrt.SubRipFile()
    srt_fr = pysrt.SubRipFile()
    total_length = 0
    for i in range(num_slides):
        audio = AudioFileClip(path + "/audio/audio" + str(i) + ".mp3")
        # NOTE(review): CompositeAudioClip layers clips rather than playing
        # them back to back — confirm this actually pads the narration with
        # leading/trailing silence as intended.
        complete_audio = CompositeAudioClip([
            AudioFileClip("silence.mp3").set_duration(1),
            audio,
            AudioFileClip("silence.mp3").set_duration(1)
        ])
        length = complete_audio.duration
        total_length += length
        # Subtitles span [total_length - length, total_length].
        srt = subs(length, total_length, script[i]['spoken'], srt, i)
        srt_fr = subs(length, total_length, translate("FR", script[i]['spoken']), srt_fr, i)
        slide = ImageClip(path + "/slides/slide" + str(i) + ".png").set_duration(length)
        slide = slide.set_audio(complete_audio)
        clips.append(slide)
    # Pick a random music file, skipping the .txt credit files next to them.
    randmusic = random.choice(os.listdir("musics"))
    while randmusic.endswith(".txt"): randmusic = random.choice(os.listdir("musics"))
    randpath = "musics/" + randmusic
    music = AudioFileClip(randpath).set_duration(total_length)
    music = audio_fadein(music, 20)
    music = audio_fadeout(music, 20)
    music = volumex(music, 0.2)
    musics = []
    # NOTE(review): set_duration(total_length) above makes music.duration equal
    # total_length, so this looping branch looks unreachable — verify whether
    # it was meant to loop a *short* track before the duration was forced.
    if music.duration < total_length:
        for i in range(int(total_length / music.duration)):
            musics.append(music)
        music = concatenate_audioclips(musics)
    final_clip = concatenate_videoclips(clips, method="compose")
    # Mix the background music under the narration already attached to the clips.
    existing_audio = final_clip.audio
    final_audio = CompositeAudioClip([existing_audio, music])
    final_clip = final_clip.set_audio(final_audio)
    # NOTE(review): "nvenc" requires an NVIDIA GPU build of ffmpeg — confirm.
    final_clip.write_videofile(path + "/montage.mp4", fps=60, codec="nvenc")
    srt.save(path + "/montage.srt")
    srt_fr.save(path + "/montage_fr.srt")
    # Companion .txt file holds the attribution text for the chosen track.
    with open (randpath.split(".")[0] + ".txt", 'r', encoding='utf-8') as f:
        music_credit = f.read()
    f.close()  # redundant: 'with' already closed the file
    return music_credit

22
generators/script.py Normal file
View File

@@ -0,0 +1,22 @@
import os
import json
import asyncio
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
async def generate_script(title, description):
    """Ask the chat model to write the video script for *title*/*description*.

    Substitutes both values into the prompts/script.txt template and returns
    the raw text of the model's reply.
    """
    # 'with' closes the file automatically; the old explicit f.close() inside
    # the with-block was redundant.  encoding matches the rest of the project.
    with open('prompts/script.txt', encoding='utf-8') as f:
        prompt = f.read()
    prompt = prompt.replace("[title]", title).replace("[description]", description)
    response = await openai.ChatCompletion.acreate(
        model="gpt-4",
        messages=[
            {"role": "user", "content": prompt}
        ],
    )
    return response['choices'][0]['message']['content']

23
generators/speak.py Normal file
View File

@@ -0,0 +1,23 @@
from TTS.api import TTS
import os
# Running a multi-speaker and multi-lingual model
# List available 🐸TTS models and choose the first one
# Multi-speaker English VITS model trained on the VCTK corpus.
model_best_multi = "tts_models/en/vctk/vits"
# Friendly narrator names mapped to VCTK speaker ids.
fakenames = {
    "Alexander": "p230",
    "Benjamin": "p240",
    "Amelia": "p270",
    "Katherine": "p273"
}
# Public list of narrator names callers may pass to generate_voice().
voices = ["Alexander", "Benjamin", "Amelia", "Katherine"]
# Lazily initialised shared TTS engine: loading the model is expensive, so do
# it once on first use instead of on every generate_voice() call.
_tts_engine = None


def generate_voice(path, text, speaker="Alexander"):
    """Synthesise *text* to an audio file at *path* using the given narrator.

    *speaker* may be a friendly name from `voices` (mapped via `fakenames`)
    or a raw VCTK speaker id, which is passed through unchanged.
    """
    global _tts_engine
    if _tts_engine is None:
        _tts_engine = TTS(model_best_multi, gpu=True)
    speaker_id = fakenames.get(speaker, speaker)
    _tts_engine.tts_to_file(text=text, file_path=path, speaker=speaker_id, speed=1)

137
generators/uploader.py Normal file
View File

@@ -0,0 +1,137 @@
#!/usr/bin/python
'''Uploads a video to YouTube.'''
from http import client
import httplib2
import os
import random
import time
import json
import google.oauth2.credentials
import google_auth_oauthlib.flow
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from google_auth_oauthlib.flow import InstalledAppFlow
# Disable httplib2's internal retries; resumable_upload() handles retrying.
httplib2.RETRIES = 1
# Maximum number of retry attempts before giving up on an upload.
MAX_RETRIES = 10
# Transient exceptions for which the upload chunk is retried.
RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, client.NotConnected,
    client.IncompleteRead, client.ImproperConnectionState,
    client.CannotSendRequest, client.CannotSendHeader,
    client.ResponseNotReady, client.BadStatusLine)
# Server-side HTTP status codes for which the upload chunk is retried.
RETRIABLE_STATUS_CODES = [500, 502, 503, 504]
CLIENT_SECRETS_FILE = 'env/client_secret.json'
# NOTE(review): the second SCOPES entry is an HTTP method + endpoint URL, not
# a valid OAuth scope; thumbnail uploads should already be covered by
# youtube.force-ssl — confirm and drop it (cached credentials would need
# re-authorisation after changing scopes).
SCOPES = ['https://www.googleapis.com/auth/youtube.upload', 'POST https://www.googleapis.com/upload/youtube/v3/thumbnails/set', 'https://www.googleapis.com/auth/youtube.force-ssl']
API_SERVICE_NAME = 'youtube'
API_VERSION = 'v3'
VALID_PRIVACY_STATUSES = ('public', 'private', 'unlisted')
# Authorize the request and store authorization credentials.
def get_authenticated_service():
    """Return an authenticated YouTube Data API client.

    Reuses cached credentials from env/credentials.json when present;
    otherwise runs the installed-app OAuth flow and caches the result.
    """
    cache_file = 'env/credentials.json'
    if os.path.exists(cache_file):
        with open(cache_file) as json_file:
            data = json.load(json_file)
        credentials = google.oauth2.credentials.Credentials(
            token=data['token'],
            refresh_token=data['refresh_token'],
            token_uri=data['token_uri'],
            client_id=data['client_id'],
            client_secret=data['client_secret'],
            scopes=data['scopes']
        )
    else:
        # First run: interactive browser-based consent, then cache the result.
        flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)
        credentials = flow.run_local_server()
        with open(cache_file, 'w') as outfile:
            outfile.write(credentials.to_json())
    return build(API_SERVICE_NAME, API_VERSION, credentials=credentials)
def initialize_upload(youtube, options):
    """Create the video resource and start a resumable upload.

    *options* is a dict with keys: file, title, description, category,
    keywords (comma-separated, may be empty) and privacyStatus.
    """
    keywords = options['keywords']
    tags = keywords.split(',') if keywords else None
    body = {
        'snippet': {
            'title': options['title'],
            'description': options['description'],
            'tags': tags,
            'categoryId': options['category']
        },
        'status': {
            'privacyStatus': options['privacyStatus']
        }
    }
    # Call the API's videos.insert method to create and upload the video.
    media = MediaFileUpload(options['file'], chunksize=-1, resumable=True)
    insert_request = youtube.videos().insert(
        part=','.join(body.keys()),
        body=body,
        media_body=media
    )
    resumable_upload(insert_request)
def resumable_upload(request):
    """Drive *request* (a resumable videos.insert) to completion with backoff.

    Retries on RETRIABLE_STATUS_CODES / RETRIABLE_EXCEPTIONS up to MAX_RETRIES
    times with randomised exponential sleep; exits the process on a
    non-retriable HTTP error re-raise path or an unexpected response.
    """
    response = None
    error = None
    retry = 0
    while response is None:
        try:
            print('Uploading file...')
            status, response = request.next_chunk()
            if response is not None:
                if 'id' in response:
                    print('Video id "%s" was successfully uploaded.' %
                          response['id'])
                else:
                    exit('The upload failed with an unexpected response: %s' % response)
        except HttpError as e:
            if e.resp.status in RETRIABLE_STATUS_CODES:
                error = 'A retriable HTTP error %d occurred:\n%s' % (e.resp.status,
                                                                     e.content)
            else:
                raise
        except RETRIABLE_EXCEPTIONS as e:
            error = 'A retriable error occurred: %s' % e
        if error is not None:
            print(error)
            retry += 1
            if retry > MAX_RETRIES:
                exit('No longer attempting to retry.')
            max_sleep = 2 ** retry
            sleep_seconds = random.random() * max_sleep
            print('Sleeping %f seconds and then retrying...' % sleep_seconds)
            time.sleep(sleep_seconds)
            # Bug fix: clear the error after handling it, so a later successful
            # chunk is not miscounted as another failed retry (the old code
            # kept printing/sleeping/incrementing on every iteration once any
            # transient error had occurred).
            error = None
if __name__ == '__main__':
    # Smoke-test upload: a local file with hard-coded metadata.
    sample_options = dict(
        file='./test.mp4',
        title='Test Title',
        description='Test Description',
        category=22,
        keywords='test, video',
        privacyStatus='private'
    )
    youtube = get_authenticated_service()
    try:
        initialize_upload(youtube, sample_options)
    except HttpError as e:
        print('An HTTP error %d occurred:\n%s' % (e.resp.status, e.content))