mirror of
https://github.com/Paillat-dev/Botator.git
synced 2026-01-02 01:06:19 +00:00
Fixed stuff
@@ -1,5 +1,78 @@
 import discord
+functions = [
+    {
+        "name": "add_reaction_to_last_message",
+        "description": "React to the last message sent by the user with an emoji.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "emoji": {
+                    "type": "string",
+                    "description": "an emoji to react with, only one emoji is supported"
+                },
+                "message": {
+                    "type": "string",
+                    "description": "Your message"
+                }
+            },
+            "required": ["emoji"]
+        }
+    },
+    {
+        "name": "reply_to_last_message",
+        "description": "Reply to the last message sent by the user.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "message": {
+                    "type": "string",
+                    "description": "Your message"
+                }
+            },
+            "required": ["message"]
+        }
+    },
+    {
+        "name": "send_a_stock_image",
+        "description": "Send a stock image in the channel.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "query": {
+                    "type": "string",
+                    "description": "The query to search for, words separated by spaces"
+                },
+                "message": {
+                    "type": "string",
+                    "description": "Your message to send with the image"
+                }
+            },
+            "required": ["query"]
+        }
+    }
+]
+
+server_normal_channel_functions = [
+    {
+        "name": "create_a_thread",
+        "description": "Create a thread in the channel. Use this if you see a long discussion coming.",
+        "parameters": {
+            "type": "object",
+            "properties": {
+                "name": {
+                    "type": "string",
+                    "description": "The name of the thread"
+                },
+                "message": {
+                    "type": "string",
+                    "description": "Your message to send with the thread"
+                }
+            },
+            "required": ["name", "message"]
+        }
+    },
+]
 unsplash_random_image_url = "https://source.unsplash.com/random/1920x1080"
 async def add_reaction_to_last_message(message_to_react_to: discord.Message, emoji, message=""):
     if message == "":
@@ -18,3 +91,7 @@ async def send_a_stock_image(message_in_channel_in_wich_to_send: discord.Message
     else:
         await message_in_channel_in_wich_to_send.channel.send(message)
     await message_in_channel_in_wich_to_send.channel.send(f"https://source.unsplash.com/random/1920x1080?{query}")
+
+async def create_a_thread(channel_in_which_to_create_the_thread: discord.TextChannel, name: str, message: str):
+    msg = await channel_in_which_to_create_the_thread.send(message)
+    await msg.create_thread(name=name)
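The entries above follow the OpenAI function-calling schema: a name, a description, and a JSON-Schema "parameters" object. As a minimal sketch of how such a list is handed to a chat completion request, assuming the pre-1.0 openai Python SDK (the interface the project's openai_module.ChatCompletion.acreate call implies) and an OPENAI_API_KEY environment variable:

import asyncio
import os

import openai

# One schema in the same shape as the entries of `functions` above.
reply_schema = {
    "name": "reply_to_last_message",
    "description": "Reply to the last message sent by the user.",
    "parameters": {
        "type": "object",
        "properties": {
            "message": {"type": "string", "description": "Your message"}
        },
        "required": ["message"],
    },
}

async def main():
    openai.api_key = os.environ["OPENAI_API_KEY"]  # assumption: key supplied via env var
    response = await openai.ChatCompletion.acreate(
        model="gpt-3.5-turbo-0613",
        messages=[{"role": "user", "content": "say hi to me"}],
        functions=[reply_schema],
        function_call="auto",  # let the model decide whether to call the function
    )
    print(response["choices"][0]["message"])

asyncio.run(main())

When the model chooses a function, the returned message carries a function_call instead of plain content, which is what the dispatch rewritten further down handles.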
@@ -1,11 +1,12 @@
 import asyncio
 import os
-from src.config import curs_data, max_uses, curs_premium, functions, moderate
+from src.config import curs_data, max_uses, curs_premium, moderate
 import re
 import discord
 import datetime
+import json
 from src.utils.openaicaller import openai_caller
-from src.functionscalls import add_reaction_to_last_message, reply_to_last_message, send_a_stock_image
+from src.functionscalls import add_reaction_to_last_message, reply_to_last_message, send_a_stock_image, create_a_thread, functions, server_normal_channel_functions
 async def replace_mentions(content, bot):
     mentions = re.findall(r"<@!?\d+>", content)
     for mention in mentions:
@@ -52,36 +53,57 @@ async def chatgpt_process(self, messages, message: discord.Message, api_key, pro
 
     response = str()
     caller = openai_caller(api_key=api_key)
+    async def error_call(error=""):
+        try:
+            if error != "":
+                await message.channel.send(f"An error occured: {error}", delete_after=10)
+            await message.channel.trigger_typing()
+        except:
+            pass
+    funcs = functions
+    if isinstance(message.channel, discord.TextChannel):
+        for func in server_normal_channel_functions:
+            funcs.append(func)
+
     response = await caller.generate_response(
+        error_call,
         model=model,
         messages=msgs,
         functions=functions,
-        function_call="auto",
+        #function_call="auto",
     )
     response = response["choices"][0]["message"] #type: ignore
     if response.get("function_call"):
-        function_calls = response.get("function_call")
-        if function_calls.get("add_reaction_to_last_message"):
-            func = function_calls.get("add_reaction_to_last_message")
-            if func.get("emoji"):
-                emoji = func.get("emoji")
-                reply = func.get("message", "")
+        function_call = response.get("function_call")
+        name = function_call.get("name", "")
+        arguments = function_call.get("arguments", {})
+        arguments = json.loads(arguments)
+        if name == "add_reaction_to_last_message":
+            if arguments.get("emoji"):
+                emoji = arguments.get("emoji")
+                reply = arguments.get("message", "")
                 await add_reaction_to_last_message(message, emoji, reply)
-        if function_calls.get("reply_to_last_message"):
-            func = function_calls.get("reply_to_last_message")
-            if func.get("message"):
-                reply = func.get("message")
+        if name == "reply_to_last_message":
+            if arguments.get("message"):
+                reply = arguments.get("message")
                 await reply_to_last_message(message, reply)
-        if function_calls.get("send_a_stock_image"):
-            func = function_calls.get("send_a_stock_image")
-            if func.get("query"):
-                query = func.get("query")
-                reply = func.get("message", "")
+        if name == "send_a_stock_image":
+            if arguments.get("query"):
+                query = arguments.get("query")
+                reply = arguments.get("message", "")
                 await send_a_stock_image(message, query, reply)
+        if name == "create_a_thread":
+            if arguments.get("name") and arguments.get("message"):
+                name = arguments.get("name")
+                reply = arguments.get("message", "")
+                if isinstance(message.channel, discord.TextChannel):
+                    await create_a_thread(message.channel, name, reply)
+                else:
+                    await message.channel.send("`A server normal text channel only function has been called in a DM channel. Please retry.`", delete_after=10)
+        if name == "":
+            await message.channel.send("The function call is empty. Please retry.", delete_after=10)
     else:
         await message.channel.send(response["content"]) #type: ignore
-        print(response["content"]) #type: ignore
 
 async def chat_process(self, message):
 
     #if the message is from a bot, we ignore it
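In this rewritten dispatch, the model's reply carries a single function_call object whose "arguments" field is a JSON string, which is why the commit adds import json and a json.loads step before routing on the function name. A small offline sketch of that decode-and-dispatch flow, using a hand-written message dict (my own example, no Discord or OpenAI calls):

import json

# Hypothetical stand-in for response["choices"][0]["message"] from the API.
assistant_message = {
    "role": "assistant",
    "content": None,
    "function_call": {
        "name": "add_reaction_to_last_message",
        "arguments": '{"emoji": "👍", "message": "nice one"}',  # arrives as a JSON string
    },
}

function_call = assistant_message.get("function_call")
if function_call:
    name = function_call.get("name", "")
    arguments = json.loads(function_call.get("arguments", "{}"))
    if name == "add_reaction_to_last_message":
        print("react with", arguments.get("emoji"), "and say", arguments.get("message", ""))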
@@ -136,8 +158,11 @@ async def chat_process(self, message):
 
     if original_message != None and original_message.author.id != self.bot.user.id:
         original_message = None
-    if not str(message.channel.id) in channels and message.content.find("<@"+str(self.bot.user.id)+">") == -1 and original_message == None and str(message.channel.id) != str(channel_id):
+    is_bots_thread = False
+    if isinstance(message.channel, discord.Thread):
+        if message.channel.owner_id == self.bot.user.id:
+            is_bots_thread = True
+    if not str(message.channel.id) in channels and message.content.find("<@"+str(self.bot.user.id)+">") == -1 and original_message == None and str(message.channel.id) != str(channel_id) and not is_bots_thread:
         return
 
     # if the bot is not active in this guild we return
@@ -4,6 +4,9 @@ You are in a discord server. You are now called Botator. You arre funny and you
 
 You use words like "u" instead of "you" and "ur" instead of "your", etc.. because you are lazy.
 
+You never ask the user if he needs help, you just answer to him funnyily.
+
+You can use function calls or answer normally. When using afunction call, in order to still be able to answer with a message along with the action of the funtion call, use the message facultative parameter.
 You will use emojis in your messages, but not too many to be cringe.
 
 Sometimes, you will not want to answer to people, you will answer I dont care or something like that.
@@ -37,13 +37,14 @@ class bcolors:
     BOLD = '\033[1m'
     UNDERLINE = '\033[4m'
 
-chat_models = ["gpt-4", "gpt-4-32k", "gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
+chat_models = ["gpt-4", "gpt-4-32k", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-0613"]
 text_models = ["text-davinci-003", "text-davinci-002", "text-curie-001", "text-babbage-001", "text-ada-001"]
 
 models_max_tokens = {
     "gpt-4": 8_192,
     "gpt-4-32k": 32_768,
     "gpt-3.5-turbo": 4_096,
+    "gpt-3.5-turbo-0613": 4_096,
     "gpt-3.5-turbo-16k": 16_384,
     "text-davinci-003": 4_097,
     "text-davinci-002": 4_097,
@@ -54,17 +55,17 @@ models_max_tokens = {
 
 class openai_caller:
     def __init__(self, api_key=None) -> None:
-        pass
-    def set_api_key(self, key):
-        openai_module.api_key = key
-    async def generate_response(self, **kwargs):
-        if kwargs['model'] in chat_models:
-            return await self.chat_generate(**kwargs)
-        elif kwargs['model'] in text_models:
+        self.api_key = api_key
+    async def generate_response(self, error_call=None, **kwargs):
+        if error_call is None:
+            error_call = lambda x: 2 # do nothing
+        if kwargs.get("model", "") in chat_models:
+            return await self.chat_generate(error_call, **kwargs)
+        elif kwargs.get("engine", "") in text_models:
             raise NotImplementedError("Text models are not supported yet")
         else:
             raise ValueError("Model not found")
-    async def chat_generate(self, **kwargs):
+    async def chat_generate(self, recall_func, **kwargs):
         tokens = await num_tokens_from_messages(kwargs['messages'], kwargs['model'])
         model_max_tokens = models_max_tokens[kwargs['model']]
         while tokens > model_max_tokens:
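With the new signature, generate_response takes an optional async callback as its first argument; chat_generate calls it with a short error string when a retryable error occurs and again with no argument after the back-off sleep. A minimal usage sketch of my own (not code from this commit), assuming it runs from the repository root so src.utils.openaicaller resolves and that an API key is available in OPENAI_API_KEY:

import asyncio
import os

from src.utils.openaicaller import openai_caller

async def notify(error=""):
    # Hypothetical callback: in Botator this sends a Discord notice / typing indicator.
    if error:
        print("retrying after:", error)

async def main():
    caller = openai_caller(api_key=os.environ.get("OPENAI_API_KEY"))
    response = await caller.generate_response(
        notify,
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "ping"}],
        max_tokens=5,
    )
    print(response)

asyncio.run(main())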
@@ -73,36 +74,68 @@ class openai_caller:
             tokens = await num_tokens_from_messages(kwargs['messages'], kwargs['model'])
         i = 0
         response = None
+        kwargs['api_key'] = self.api_key
         while i < 10:
            try:
-                response = await openai_module.ChatCompletion.acreate(**kwargs)
+                response = await openai_module.ChatCompletion.acreate(
+                    **kwargs
+                )
                 break
-            except APIError:
+            except APIError as e:
+                print(f"\n\n{bcolors.BOLD}{bcolors.WARNING}APIError. This is not your fault. Retrying...{bcolors.ENDC}")
+                await recall_func("`An APIError occurred. This is not your fault. Retrying...`")
                 await asyncio.sleep(10)
+                await recall_func()
                 i += 1
-            except Timeout:
+            except Timeout as e:
+                print(f"\n\n{bcolors.BOLD}{bcolors.WARNING}The request timed out. Retrying...{bcolors.ENDC}")
+                await recall_func("`The request timed out. Retrying...`")
                 await asyncio.sleep(10)
+                await recall_func()
                 i += 1
-            except RateLimitError:
+            except RateLimitError as e:
+                print(f"\n\n{bcolors.BOLD}{bcolors.WARNING}RateLimitError. You are being rate limited. Retrying...{bcolors.ENDC}")
+                await recall_func("`You are being rate limited. Retrying...`")
                 await asyncio.sleep(10)
+                await recall_func()
                 i += 1
             except APIConnectionError as e:
-                print(e)
                 print(f"\n\n{bcolors.BOLD}{bcolors.FAIL}APIConnectionError. There is an issue with your internet connection. Please check your connection.{bcolors.ENDC}")
+                await recall_func()
                 raise e
             except InvalidRequestError as e:
-                print(e)
                 print(f"\n\n{bcolors.BOLD}{bcolors.FAIL}InvalidRequestError. Please check your request.{bcolors.ENDC}")
+                await recall_func()
                 raise e
             except AuthenticationError as e:
-                print(e)
-                print(f"\n\n{bcolors.BOLD}{bcolors.FAIL}AuthenticationError. Please check your API key.{bcolors.ENDC}")
+                print(f"\n\n{bcolors.BOLD}{bcolors.FAIL}AuthenticationError. Please check your API key and if needed, also your organization ID.{bcolors.ENDC}")
+                await recall_func("`AuthenticationError. Please check your API key.`")
                 raise e
-            except ServiceUnavailableError:
+            except ServiceUnavailableError as e:
+                print(f"\n\n{bcolors.BOLD}{bcolors.WARNING}ServiceUnavailableError. The OpenAI API is not responding. Retrying...{bcolors.ENDC}")
+                await recall_func("`The OpenAI API is not responding. Retrying...`")
                 await asyncio.sleep(10)
+                await recall_func()
                 i += 1
             finally:
                 if i == 10:
                     print(f"\n\n{bcolors.BOLD}{bcolors.FAIL}OpenAI API is not responding. Please try again later.{bcolors.ENDC}")
                     raise TimeoutError("OpenAI API is not responding. Please try again later.")
         return response # type: ignore
+
+##testing
+if __name__ == "__main__":
+    async def main():
+        openai = openai_caller(api_key="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")
+        response = await openai.generate_response(
+            model="gpt-3.5-turbo",
+            messages=[{"role":"user", "content":"ping"}],
+            max_tokens=5,
+            temperature=0.7,
+            top_p=1,
+            frequency_penalty=0,
+            presence_penalty=0,
+            stop=["\n", " Human:", " AI:"]
+        )
+        print(response)
+    asyncio.run(main())
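The while-loop above is a bounded retry: up to ten attempts, a ten-second pause on transient failures (APIError, Timeout, RateLimitError, ServiceUnavailableError), an immediate re-raise on non-retryable ones, and a recall_func notification around each retry. A stripped-down, dependency-free illustration of that pattern with hypothetical names and a shorter delay (my own sketch, not this repo's code):

import asyncio
import random

class TransientError(Exception):
    pass

async def flaky_call():
    # Stand-in for the remote API call that sometimes fails.
    if random.random() < 0.5:
        raise TransientError("temporary failure")
    return "ok"

async def notify(error=""):
    # Stand-in for the user-facing callback (prints instead of messaging Discord).
    if error:
        print(error)

async def call_with_retries(notify_func, attempts=10, delay=1):
    for _ in range(attempts):
        try:
            return await flaky_call()
        except TransientError as e:
            await notify_func(f"`{e}. Retrying...`")
            await asyncio.sleep(delay)
            await notify_func()
    raise TimeoutError("still failing after retries")

print(asyncio.run(call_with_retries(notify)))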