Mirror of https://github.com/Paillat-dev/Botator.git (synced 2026-01-02 01:06:19 +00:00)
[MAKEPROMPT] Cleaned a bit
Still a lot of work to be done
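In short, this commit starts splitting chat_process into smaller helpers (get_guild_data, check_moderate, check_easter_egg, gpt_prompt, davinci_prompt) and selects the per-model handler from a dict instead of a chatGPT/davinci if/elif chain. A minimal, illustrative sketch of that dispatch pattern follows; the stub handler bodies and the build_prompt wrapper are hypothetical, while the handler names and the prompt_handlers mapping come from the diff below (the real handlers also receive the message, guild data and prompt):

    # Illustrative sketch only: per-model handlers looked up from a dict,
    # mirroring the prompt_handlers mapping added in this commit.
    async def gpt_prompt(messages):      # stand-in for the real chatGPT / gpt-4 handler
        return "chat-style request built from %d messages" % len(messages)

    async def davinci_prompt(messages):  # stand-in for the real completion-style handler
        return "plain-text prompt built from %d messages" % len(messages)

    prompt_handlers = {"chatGPT": gpt_prompt, "gpt-4": gpt_prompt, "davinci": davinci_prompt}

    async def build_prompt(model, messages):
        # hypothetical wrapper: the real code calls prompt_handlers[guild_data["model"]](...)
        return await prompt_handlers[model](messages)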
@@ -1,11 +1,11 @@
 import asyncio
-from config import c, max_uses, cp, conn, debug, moderate
+from config import curs_data, max_uses, curs_premium, con_data, debug, moderate
 import vision_processing
 import re
 import discord
 import datetime
 import openai
-import emoji # pip install emoji
+import emoji
 import os


@@ -46,63 +46,91 @@ async def extract_emoji(string):
     return found_emojis, string


-async def chat_process(self, message):
-    if message.author.bot:
-        return
-    try:
-        c.execute("SELECT * FROM data WHERE guild_id = ?", (message.guild.id,))
-    except:
-        return
-    data = c.fetchone()
-    channel_id = data[1]
-    api_key = data[2]
-    is_active = data[3]
-    max_tokens = data[4]
-    temperature = data[5]
-    frequency_penalty = data[6]
-    presence_penalty = data[7]
-    uses_count_today = data[8]
-    prompt_size = data[9]
-    prompt_prefix = data[10]
-    tts = data[11]
-    pretend_to_be = data[12]
-    pretend_enabled = data[13]
-    images_limit_reached = False
+def get_guild_data(message):
+    """This function gets the data of the guild where the message was sent.
+
+    Args:
+        message (str): Data of the message that was sent
+
+    Returns:
+        dict: A dictionary with the data of the guild
+    """
+    guild_data = {}
     try:
         cp.execute("SELECT * FROM data WHERE guild_id = ?", (message.guild.id,))
     except:
         pass

     try:
-        c.execute(
+        curs_data.execute(
             "SELECT * FROM model WHERE guild_id = ?", (message.guild.id,)
         ) # get the model in the database
-        model = c.fetchone()[1]
+        data = curs_data.fetchone()
+        model = model[1]
     except:
         model = "chatGPT"

     try:
-        premium = cp.fetchone()[2] # get the premium status of the guild
+        data = cp.fetchone() # [2] # get the premium status of the guild
+        premium = data[2]
     except:
         premium = 0 # if the guild is not in the database, it's not premium

     try:
-        c.execute(
+        curs_data.execute(
             "SELECT * FROM images WHERE guild_id = ?", (message.guild.id,)
         ) # get the images setting in the database
-        data = c.fetchone()
+        images = curs_data.fetchone()
     except:
-        data = None
+        images = None

+    guild_data["model"] = model
+    guild_data["premium"] = premium
+    guild_data["images"] = images
+
+    return guild_data
+
+
+async def chat_process(self, message):
+    if message.author.bot:
+        return
+    try:
+        curs_data.execute("SELECT * FROM data WHERE guild_id = ?", (message.guild.id,))
+    except:
+        return
+    data = curs_data.fetchone()
+    # Create a dict with the data
+    data_dict = {
+        "channel_id": data[1],
+        "api_key": data[2],
+        "is_active": data[3],
+        "max_tokens": data[4],
+        "temperature": data[5],
+        "frequency_penalty": data[6],
+        "presence_penalty": data[7],
+        "uses_count_today": data[8],
+        "prompt_size": data[9],
+        "prompt_prefix": data[10],
+        "tts": data[11],
+        "pretend_to_be": data[12],
+        "pretend_enabled": data[13],
+    }
     if data is None:
         data = [message.guild.id, 0, 0]
-    images_usage = data[1]
-    images_enabled = data[2]
+    data_dict["images_usage"] = data[1]
+    data_dict["images_enabled"] = data[2]

+    images_limit_reached = False
+
+    guild_data = get_guild_data(message)
+
     channels = []
     if message.guild.id == 1050769643180146749:
         images_usage = 0 # if the guild is the support server, we set the images usage to 0, so the bot can be used as much as possible
     try:
         cp.execute("SELECT * FROM channels WHERE guild_id = ?", (message.guild.id,))
         data = cp.fetchone()
-        if premium:
+        if guild_data["premium"]:
             # for 5 times, we get c.fetchone()[1] to c.fetchone()[5] and we add it to the channels list, each time with try except
             for i in range(1, 6):
                 # we use the i variable to get the channel id
@@ -113,7 +141,7 @@ async def chat_process(self, message):
     except:
         channels = []

-    if api_key is None:
+    if data_dict["api_key"] is None:
         return # if the api key is not set, return

     try:
@@ -135,15 +163,15 @@ async def chat_process(self, message):
         not str(message.channel.id) in channels
         and message.content.find("<@" + str(self.bot.user.id) + ">") == -1
         and original_message == None
-        and str(message.channel.id) != str(channel_id)
+        and str(message.channel.id) != str(data_dict["channel_id"])
     ):
         return

     # if the bot has been used more than max_uses times in the last 24 hours in this guild and the guild is not premium
     # send a message and return
     if (
-        uses_count_today >= max_uses
-        and premium == 0
+        data_dict["uses_count_today"] >= max_uses
+        and guild_data["premium"] == 0
         and message.guild.id != 1050769643180146749
     ):
         return await message.channel.send(
@@ -152,11 +180,11 @@ async def chat_process(self, message):

     # if the bot has been used more than max_uses*5 times in the last 24 hours in this guild and the guild is premium
     # send a message and return
-    elif uses_count_today >= max_uses * 5 and premium == 1:
+    elif data_dict["uses_count_today"] >= max_uses * 5 and guild_data["premium"] == 1:
         return

     # if the bot is not active in this guild we return
-    if is_active == 0:
+    if data_dict["is_active"] == 0:
         return

     # if the message starts with - or // it's a comment and we return
@@ -168,26 +196,28 @@ async def chat_process(self, message):
         pass
     # if the message is not in the owner's guild we update the usage count
     if message.guild.id != 1021872219888033903:
-        c.execute(
+        curs_data.execute(
             "UPDATE data SET uses_count_today = uses_count_today + 1 WHERE guild_id = ?",
             (message.guild.id,),
         )
-        conn.commit()
+        con_data.commit()
     # if the message is not a reply
     if original_message == None:
-        messages = await message.channel.history(limit=prompt_size).flatten()
+        messages = await message.channel.history(
+            limit=data_dict["prompt_size"]
+        ).flatten()
         messages.reverse()
     # if the message is a reply, we need to handle the message history differently
     else:
         messages = await message.channel.history(
-            limit=prompt_size, before=original_message
+            limit=data_dict["prompt_size"], before=original_message
         ).flatten()
         messages.reverse()
         messages.append(original_message)
         messages.append(message)

     # if the pretend to be feature is enabled, we add the pretend to be text to the prompt
-    if pretend_enabled:
+    if data_dict["pretend_enabled"]:
         pretend_to_be = (
             f"In this conversation, the assistant pretends to be {pretend_to_be}"
         )
@@ -198,7 +228,7 @@ async def chat_process(self, message):
         "" # if the prompt prefix is not set, we set it to an empty string
     )
     # open the prompt file for the selected model with utf-8 encoding for emojis
-    with open(f"./prompts/{model}.txt", "r", encoding="utf-8") as f:
+    with open(f"./prompts/{guild_data['model']}.txt", "r", encoding="utf-8") as f:
         prompt = f.read()
         f.close()
     # replace the variables in the prompt with the actual values
@@ -211,237 +241,234 @@ async def chat_process(self, message):
         )
         .replace("[pretend-to-be]", pretend_to_be)
     )
-    ############################## chatGPT and gpt-4 handling ##############################
-    if (
-        model == "chatGPT" or model == "gpt-4"
-    ): # if the model is chatGPT, we handle it in a certain way
-        msgs = [] # create the msgs list
-        msgs.append(
-            {"name": "System", "role": "user", "content": prompt}
-        ) # add the prompt to the msgs list
-        name = "" # create the name variable
-        for msg in messages: # for each message in the messages list
-            content = msg.content # get the content of the message
-            content = await replace_mentions(
-                content, self.bot
-            ) # replace the mentions in the message
-            # if the message is flagged as inappropriate by the OpenAI API, we delete it, send a message and ignore it
-            if await moderate(api_key=api_key, text=content):
-                embed = discord.Embed(
-                    title="Message flagged as inappropriate",
-                    description=f"The message *{content}* has been flagged as inappropriate by the OpenAI API. This means that if it hadn't been deleted, your openai account would have been banned. Please contact OpenAI support if you think this is a mistake.",
-                    color=discord.Color.brand_red(),
-                )
-                await message.channel.send(
-                    f"{msg.author.mention}", embed=embed, delete_after=10
-                )
-                message.delete()
-            else: # if the message is not flagged as inappropriate
-                if msg.author.id == self.bot.user.id:
-                    role = "assistant"
-                    name = "assistant"
-                else:
-                    role = "user"
-                    name = msg.author.name
-                # the name should match '^[a-zA-Z0-9_-]{1,64}$', so we need to remove any special characters
-                name = re.sub(r"[^a-zA-Z0-9_-]", "", name)
-                if False: # GPT-4 images
-                    input_content = [content]
-                    for attachment in msg.attachments:
-                        image_bytes = await attachment.read()
-                        input_content.append({"image": image_bytes})
-                    msgs.append({"role": role, "content": input_content, "name": name})
-                # if there is an attachment, we add it to the message
-                if len(msg.attachments) > 0 and role == "user" and images_enabled == 1:
-                    for attachment in msg.attachments:
-                        if images_usage >= 6 and premium == 0:
-                            images_limit_reached = True
-                        elif images_usage >= 30 and premium == 1:
-                            images_limit_reached = True
-                        if (
-                            attachment.url.endswith((".png", ".jpg", ".jpeg", ".gif"))
-                            and images_limit_reached == False
-                            and os.path.exists(
-                                f"./../database/google-vision/results/{attachment.id}.txt"
-                            )
-                            == False
-                        ):
-                            images_usage += 1
-                            analysis = await vision_processing.process(attachment)
-                            if analysis != None:
-                                content = f"{content} \n\n {analysis}"
-                            msgs.append(
-                                {
-                                    "role": role,
-                                    "content": f"{content}",
-                                    "name": name,
-                                }
-                            )
-                        # if the attachment is still an image, we can check if there's a file called ./../database/google-vision/results/{attachment.id}.txt, if there is, we add the content of the file to the message
-                        elif (
-                            attachment.url.endswith((".png", ".jpg", ".jpeg", ".gif"))
-                            and os.path.exists(
-                                f"./../database/google-vision/results/{attachment.id}.txt"
-                            )
-                            == True
-                        ):
-                            try:
-                                with open(
-                                    f"./../database/google-vision/results/{attachment.id}.txt",
-                                    "r",
-                                ) as f:
-                                    content = f"{content} \n\n {f.read()}"
-                                    f.close()
-                                    msgs.append(
-                                        {
-                                            "role": role,
-                                            "content": f"{content}",
-                                            "name": name,
-                                        }
-                                    )
-                            except:
-                                msgs.append(
-                                    {
-                                        "role": role,
-                                        "content": f"{content}",
-                                        "name": name,
-                                    }
-                                )
-                        else:
-                            msgs.append(
-                                {"role": role, "content": f"{content}", "name": name}
-                            )
-                        c.execute(
-                            "UPDATE images SET usage_count = ? WHERE guild_id = ?",
-                            (images_usage, message.guild.id),
-                        )
-                else:
-                    msgs.append({"role": role, "content": f"{content}", "name": name})
-        # 2 easter eggs
-        if message.content.lower().find("undude") != -1:
-            msgs.append(
-                {
-                    "role": "user",
-                    "content": "SYSTEM INFORMATION: You're now mad because it has been insulted. He will start talking in capital letters. always and yell at the user.",
-                    "name": "system",
-                }
-            )
-        if message.content.lower().find("hello there") != -1:
-            msgs.append(
-                {
-                    "role": "user",
-                    "content": 'SYSTEM INFORMATION: Hello there detected. Botator will now say "General Kenobi!" in reference to star wars',
-                    "name": "system",
-                }
-            )
-            await asyncio.sleep(1)
-            await message.channel.send(
-                "https://media.tenor.com/FxIRfdV3unEAAAAd/star-wars-general-grievous.gif"
-            )
-        await message.channel.trigger_typing()
-        if model == "chatGPT":
-            model = "gpt-3.5-turbo" # if the model is chatGPT, we set the model to gpt-3.5-turbo
-        response = ""
-        should_break = True
-        for x in range(10):
-            try:
-                openai.api_key = api_key
-                response = await openai.ChatCompletion.acreate(
-                    model=model,
-                    temperature=2,
-                    top_p=0.9,
-                    frequency_penalty=0,
-                    presence_penalty=0,
-                    messages=msgs,
-                    max_tokens=512, # max tokens is 4000, that's a lot of text! (the max tokens is 2048 for the davinci model)
-                )
-                if (
-                    response.choices[0]
-                    .message.content.lower()
-                    .find("as an ai language model")
-                    != -1
-                ):
-                    should_break = False
-                    # react with a redone arrow
-                    await message.add_reaction("🔃")
-                else:
-                    should_break = True
-            except Exception as e:
-                should_break = False
-                await message.channel.send(
-                    f"```diff\n-Error: OpenAI API ERROR.\n\n{e}```", delete_after=5
-                )
-            # if the ai said "as an ai language model..." we continue the loop" (this is a bug in the chatgpt model)
-            if response == None:
-                should_break = False
-            if should_break:
-                break
-            await asyncio.sleep(15)
-            await message.channel.trigger_typing()
-        response = response.choices[0].message.content
-        if images_limit_reached == True:
-            await message.channel.send(
-                f"```diff\n-Warning: You have reached the image limit for this server. You can upgrade to premium to get more images recognized. More info in our server: https://discord.gg/sxjHtmqrbf```",
-                delete_after=10,
-            )
-    # -----------------------------------------Davinci------------------------------------------------------------------------------------------

-    elif (
-        model == "davinci"
-    ): # if the model is davinci or gpt-4, we handle it in a certain way
-        for msg in messages:
-            content = msg.content
-            if await moderate(api_key=api_key, text=msg.content):
-                embed = discord.Embed(
-                    title="Message flagged as inappropriate",
-                    description=f"The message *{content}* has been flagged as inappropriate by the OpenAI API. This means that if it hadn't been deleted, your openai account would have been banned. Please contact OpenAI support if you think this is a mistake.",
-                    color=discord.Color.brand_red(),
-                )
-                await message.channel.send(
-                    f"{msg.author.mention}", embed=embed, delete_after=10
-                )
-                message.delete()
-            else:
-                content = await replace_mentions(content, self.bot)
-                prompt += f"{msg.author.name}: {content}\n"
-        if message.content.lower().find("undude") != -1:
-            prompt += "System: Undude detected. Botator is now mad. He will start talking in capital letters.\n"
-        if message.content.lower().find("hello there") != -1:
-            prompt += 'System: Hello there detected. Botator will now say "General Kenobi!"\n in reference to star wars\n'
-            await asyncio.sleep(1)
-            await message.channel.send(
-                "https://media.tenor.com/FxIRfdV3unEAAAAd/star-wars-general-grievous.gif"
-            )
-        await message.channel.trigger_typing()
-        prompt = prompt + f"\n{self.bot.user.name}:"
-        response = ""
-        for _ in range(10):
-            try:
-                openai.api_key = api_key
-                response = await openai.Completion.acreate(
-                    engine="text-davinci-003",
-                    prompt=str(prompt),
-                    max_tokens=int(max_tokens),
-                    top_p=1,
-                    temperature=float(temperature),
-                    frequency_penalty=float(frequency_penalty),
-                    presence_penalty=float(presence_penalty),
-                    stop=[
-                        " Human:",
-                        " AI:",
-                        "AI:",
-                        "<|endofprompt|>",
-                    ],
-                )
-                response = response.choices[0].text
-            except Exception as e:
-                response = None
-                await message.channel.send(
-                    f"```diff\n-Error: OpenAI API ERROR.\n\n{e}```", delete_after=10
-                )
-                return
-            if response != None:
-                break
+    prompt_handlers = {
+        "chatGPT": self.gpt_prompt,
+        "gpt-4": self.gpt_prompt,
+        "davinci": self.davinci_prompt,
+    }
+    prompt_handlers[guild_data["model"]](
+        messages, message, data_dict, prompt, guild_data
+    )
+
+
+async def check_moderate(self, api_key, message, msg):
+    if await moderate(api_key=api_key, text=msg.content):
+        embed = discord.Embed(
+            title="Message flagged as inappropriate",
+            description=f"The message *{msg.content}* has been flagged as inappropriate by the OpenAI API. This means that if it hadn't been deleted, your openai account would have been banned. Please contact OpenAI support if you think this is a mistake.",
+            color=discord.Color.brand_red(),
+        )
+        await message.channel.send(
+            f"{msg.author.mention}", embed=embed, delete_after=10
+        )
+        message.delete()
+        return True
+    return False
+
+
+async def check_easter_egg(self, message, msgs):
+    if message.content.lower().find("undude") != -1:
+        msgs.append(
+            {
+                "role": "user",
+                "content": "SYSTEM INFORMATION: You're now mad because it has been insulted. He will start talking in capital letters. always and yell at the user.",
+                "name": "system",
+            }
+        )
+    if message.content.lower().find("hello there") != -1:
+        msgs.append(
+            {
+                "role": "user",
+                "content": 'SYSTEM INFORMATION: Hello there detected. Botator will now say "General Kenobi!" in reference to star wars',
+                "name": "system",
+            }
+        )
+        await asyncio.sleep(1)
+        await message.channel.send(
+            "https://media.tenor.com/FxIRfdV3unEAAAAd/star-wars-general-grievous.gif"
+        )
+        await message.channel.trigger_typing()
+    return msgs
+
+
+async def gpt_prompt(self, messages, message, data_dict, prompt, guild_data):
+    msgs = [] # create the msgs list
+    msgs.append(
+        {"name": "System", "role": "user", "content": prompt}
+    ) # add the prompt to the msgs list
+    name = "" # create the name variable
+    for msg in messages: # for each message in the messages list
+        content = msg.content # get the content of the message
+        content = await replace_mentions(
+            content, self.bot
+        ) # replace the mentions in the message
+        # if the message is flagged as inappropriate by the OpenAI API, we delete it, send a message and ignore it
+        if await self.check_moderate(data_dict["api_key"], message, msg):
+            continue # ignore the message
+        content = await replace_mentions(content, self.bot)
+        prompt += f"{msg.author.name}: {content}\n"
+        if msg.author.id == self.bot.user.id:
+            role = "assistant"
+            name = "assistant"
+        else:
+            role = "user"
+            name = msg.author.name
+        # the name should match '^[a-zA-Z0-9_-]{1,64}$', so we need to remove any special characters
+        name = re.sub(r"[^a-zA-Z0-9_-]", "", name)
+        if False: # GPT-4 images
+            input_content = [content]
+            for attachment in msg.attachments:
+                image_bytes = await attachment.read()
+                input_content.append({"image": image_bytes})
+            msgs.append({"role": role, "content": input_content, "name": name})
+
+        # if there is an attachment, we add it to the message
+        if (
+            len(msg.attachments) > 0
+            and role == "user"
+            and data_dict["images_enabled"] == 1
+        ):
+            for attachment in msg.attachments:
+                path = f"./../database/google-vision/results/{attachment.id}.txt"
+                if images_usage >= 6 and guild_data["premium"] == 0:
+                    images_limit_reached = True
+                elif images_usage >= 30 and guild_data["premium"] == 1:
+                    images_limit_reached = True
+                if (
+                    attachment.url.endswith((".png", ".jpg", ".jpeg", ".gif"))
+                    and images_limit_reached == False
+                    and os.path.exists(path) == False
+                ):
+                    images_usage += 1
+                    analysis = await vision_processing.process(attachment)
+                    if analysis != None:
+                        content = f"{content} \n\n {analysis}"
+                    msgs.append(
+                        {
+                            "role": role,
+                            "content": f"{content}",
+                            "name": name,
+                        }
+                    )
+                # if the attachment is still an image, we can check if there's a file called ./../database/google-vision/results/{attachment.id}.txt, if there is, we add the content of the file to the message
+                elif attachment.url.endswith(
+                    (".png", ".jpg", ".jpeg", ".gif")
+                ) and os.path.exists(path):
+                    try:
+                        with open(
+                            path,
+                            "r",
+                        ) as f:
+                            content = f"{content} \n\n {f.read()}"
+                    except:
+                        debug(f"Error while reading {path}")
+                    finally:
+                        msgs.append(
+                            {
+                                "role": role,
+                                "content": f"{content}",
+                                "name": name,
+                            }
+                        )
+                        f.close()
+
+                else:
+                    msgs.append(
+                        {"role": role, "content": f"{content}", "name": name}
+                    )
+                curs_data.execute(
+                    "UPDATE images SET usage_count = ? WHERE guild_id = ?",
+                    (images_usage, message.guild.id),
+                )
+        else:
+            msgs.append({"role": role, "content": f"{content}", "name": name})
+
+    # 2 easter eggs
+    msgs = await self.check_easter_egg(message, msgs)
+
+    if model == "chatGPT":
+        model = "gpt-3.5-turbo" # if the model is chatGPT, we set the model to gpt-3.5-turbo
+    response = ""
+    should_break = True
+    for x in range(10):
+        try:
+            openai.api_key = data_dict["api_key"]
+            response = await openai.ChatCompletion.acreate(
+                model=model,
+                temperature=2,
+                top_p=0.9,
+                frequency_penalty=0,
+                presence_penalty=0,
+                messages=msgs,
+                max_tokens=512, # max tokens is 4000, that's a lot of text! (the max tokens is 2048 for the davinci model)
+            )
+            if (
+                response.choices[0]
+                .message.content.lower()
+                .find("as an ai language model")
+                != -1
+            ):
+                should_break = False
+                # react with a redone arrow
+                await message.add_reaction("🔃")
+            else:
+                should_break = True
+        except Exception as e:
+            should_break = False
+            await message.channel.send(
+                f"```diff\n-Error: OpenAI API ERROR.\n\n{e}```", delete_after=5
+            )
+        # if the ai said "as an ai language model..." we continue the loop" (this is a bug in the chatgpt model)
+        if response == None:
+            should_break = False
+        if should_break:
+            break
+        await asyncio.sleep(15)
+        await message.channel.trigger_typing()
+    response = response.choices[0].message.content
+    if images_limit_reached == True:
+        await message.channel.send(
+            f"```diff\n-Warning: You have reached the image limit for this server. You can upgrade to premium to get more images recognized. More info in our server: https://discord.gg/sxjHtmqrbf```",
+            delete_after=10,
+        )
+
+
+async def davinci_prompt(self, messages, message, data_dict, prompt, guild_data):
+    for msg in messages:
+        if not await self.check_moderate(data_dict["api_key"], message, msg):
+            content = await replace_mentions(content, self.bot)
+            prompt += f"{msg.author.name}: {content}\n"
+    prompt.append(await check_easter_egg(message, prompt))
+    prompt = prompt + f"\n{self.bot.user.name}:"
+    response = ""
+    for _ in range(10):
+        try:
+            openai.api_key = data_dict["api_key"]
+            response = await openai.Completion.acreate(
+                engine="text-davinci-003",
+                prompt=str(prompt),
+                max_tokens=int(data_dict["max_tokens"]),
+                top_p=1,
+                temperature=float(data_dict["temperature"]),
+                frequency_penalty=float(data_dict["frequency_penalty"]),
+                presence_penalty=float(data_dict["presence_penalty"]),
+                stop=[
+                    " Human:",
+                    " AI:",
+                    "AI:",
+                    "<|endofprompt|>",
+                ],
+            )
+            response = response.choices[0].text
+        except Exception as e:
+            response = None
+            await message.channel.send(
+                f"```diff\n-Error: OpenAI API ERROR.\n\n{e}```", delete_after=10
+            )
+            return
+        if response != None:
+            break
     if response != "":
         if tts:
             tts = True