Mirror of https://github.com/Paillat-dev/Botator.git
✨ feat(server.ts): change port variable case from lowercase port to uppercase PORT to improve semantics
✨ feat(server.ts): add support for process.env.PORT environment variable to be able to run the app on a configurable port
🐛 fix(main.py): remove duplicate cog addition in main.py
✨ feat(main.py): add cogs.Help(bot) to the list of cogs in main.py
🐛 fix(main.py): remove redundant import statements in main.py
✨ feat(main.py): add on_guild_remove event handler in main.py
✨ feat(main.py): add on_guild_join event handler in main.py
✨ feat(main.py): add support for discord.Intents in main.py
✨ feat(main.py): add intents.message_content = True in main.py
✨ feat(main.py): add intents.default() in main.py
✨ feat(main.py): add discord.Bot(intents=intents, help_command=None) in main.py
✨ feat(main.py): add import statements in main.py
✨ feat(main.py): add from src.config import debug, discord_token in main.py
✨ feat(main.py): add import discord in main.py
✨ feat(main.py): add import src.config in main.py
✨ feat(main.py): add import src.cogs in main.py
✨ feat(main.py): add import src.cogs.chat in main.py
✨ feat(main.py): add import src.cogs.manage_chat in main.py
✨ feat(main.py): add import src.cogs.moderation in main.py
✨ feat(main.py): add import src.cogs.channelSetup in main.py
✨ feat(main.py): add import src.cogs.help in main.py
✨ feat(main.py): add import src.cogs.Chat in main.py
✨ feat(main.py): add import src.cogs.ManageChat in main.py
✨ feat(main.py): add import src.cogs.Moderation in main.py
✨ feat(main.py): add import src.cogs.ChannelSetup in main.py
✨ feat(main.py): add import src.cogs.Help in main.py
✨ feat(main.py): add import src.cogs.chat in main.py
✨ feat(main.py): add import src.cogs.manage_chat in main.py
✨ feat(main.py): add import src.cogs.moderation in main.py
✨ feat(main.py): add

@@ -7,6 +7,7 @@ for character in characters.reverseMatchingDict.keys():
     with open(
         f"src/chatUtils/prompts/{character}/chat.txt", "r", encoding="utf-8"
     ) as f:
+        promts[character] = {}
         promts[character]["chat"] = f.read()
 
     with open(
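
Note: the loop this hunk touches preloads one template file per character and prompt type into the promts dict (spelling as in the source). A minimal self-contained sketch of that pattern, with a stand-in character table, since characters.reverseMatchingDict is defined outside this diff and the text.txt filename is an assumption:

reverseMatchingDict = {"botator": "Botator"}  # hypothetical stand-in for characters.reverseMatchingDict

promts: dict[str, dict[str, str]] = {}  # (sic) spelling matches the source
for character in reverseMatchingDict.keys():
    promts[character] = {}
    for kind in ("chat", "text"):  # assumed chat.txt / text.txt per character
        with open(
            f"src/chatUtils/prompts/{character}/{kind}.txt", "r", encoding="utf-8"
        ) as f:
            promts[character][kind] = f.read()
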
@@ -19,23 +20,26 @@ def createPrompt(
     messages: list[dict],
     model: str,
     character: str,
-    type: str,
+    modeltype: str,
     guildName: str,
     channelName: str,
-) -> str:
+) -> str | list[dict]:
     """
     Creates a prompt from the messages list
     """
-    if type == "chat":
-        prompt = (
-            createChatPrompt(messages, model, character)
-            .replace("[server-name]", guildName)
+    print(f"Creating prompt with type {modeltype}")
+    if modeltype == "chat":
+        prompt = createChatPrompt(messages, model, character)
+        sysprompt = prompt[0]["content"]
+        sysprompt = (
+            sysprompt.replace("[server-name]", guildName)
             .replace("[channel-name]", channelName)
             .replace(
                 "[datetime]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S")
             )
         )
-    elif type == "text":
+        prompt[0]["content"] = sysprompt
+    elif modeltype == "text":
         prompt = (
             createTextPrompt(messages, model, character)
             .replace("[server-name]", guildName)
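
Note: the rewritten branch presumably reflects createChatPrompt now returning a list of message dicts rather than a string (hence the new str | list[dict] return type), so the placeholder substitution has to target the system message instead of the whole prompt. A runnable sketch of that substitution with illustrative values:

import datetime

# Illustrative values; in the diff these come from createChatPrompt() and the
# guild/channel the message was sent in.
prompt = [
    {"role": "system", "content": "You are on [server-name] in [channel-name] at [datetime]."},
    {"role": "user", "content": "hello"},
]
sysprompt = prompt[0]["content"]
sysprompt = (
    sysprompt.replace("[server-name]", "My Server")
    .replace("[channel-name]", "general")
    .replace("[datetime]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S"))
)
prompt[0]["content"] = sysprompt  # only the system message is rewritten
print(prompt)
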
@@ -56,11 +60,10 @@ def createTextPrompt(messages: list[dict], model: str, character: str) -> str:
     global promts
     prompt = promts[character]["text"]
     for message in messages:
-        if message.name == "assistant":
-            message.name = character
+        if message["name"] == "assistant":
+            message["name"] = character
         prompt += f"{message['name']}: {message['content']} <|endofmessage|>\n"
     prompt += f"{character}:"
-
     return prompt
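
Note: the fix above matters because each message is a plain dict, so attribute access like message.name raises AttributeError; subscripting is required. A self-contained run of the corrected loop, with a hypothetical template and character key:

character = "botator"  # hypothetical character key
template = "A chat transcript follows.\n"  # stand-in for promts[character]["text"]
messages = [
    {"name": "alice", "content": "hi there"},
    {"name": "assistant", "content": "hello!"},
]

prompt = template
for message in messages:
    if message["name"] == "assistant":
        message["name"] = character  # the bot speaks as its character
    prompt += f"{message['name']}: {message['content']} <|endofmessage|>\n"
prompt += f"{character}:"  # trailing cue so the completion model answers in character
print(prompt)
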
@@ -1,2 +1,19 @@
-async def llama(prompt):
-    pass
+import os
+
+from dotenv import load_dotenv
+from src.utils.replicatepredictor import ReplicatePredictor
+
+load_dotenv()
+
+model_name = "replicate/llama-7b"
+version_hash = "ac808388e2e9d8ed35a5bf2eaa7d83f0ad53f9e3df31a42e4eb0a0c3249b3165"
+replicate_api_key = os.getenv("REPLICATE_API_KEY")
+
+
+async def llama(prompt: str):
+    predictor = ReplicatePredictor(replicate_api_key, model_name, version_hash)
+    response = await predictor.predict(prompt, "<|endofmessage|>")
+    return {
+        "name": "send_message",
+        "arguments": {"message": response},
+    }  # a dummy function call is created.
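
Note: the dict returned by llama() mirrors the function-call shape openaiChat() produces in the next hunk, so callers can dispatch on "name"/"arguments" without knowing which backend answered. A usage sketch, assuming the package is importable and REPLICATE_API_KEY is set; the transcript string is illustrative:

import asyncio

from src.chatUtils.requesters.llama import llama  # import path as in the last hunk

async def main():
    result = await llama("alice: hi <|endofmessage|>\nbotator:")
    if result["name"] == "send_message":  # always true for this backend
        print(result["arguments"]["message"])

asyncio.run(main())
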
@@ -1,5 +1,25 @@
 import orjson
 from src.utils.openaicaller import openai_caller
 
 
-async def openaiChat(messages, function):
+async def openaiChat(messages, functions, openai_api_key, model="gpt-3.5-turbo"):
+    caller = openai_caller()
+    response = await caller.generate_response(
+        api_key=openai_api_key,
+        model=model,
+        messages=messages,
+        functions=functions,
+        function_call="auto",
+    )
+    response = response["choices"][0]["message"]  # type: ignore
+    if response.get("function_call", False):
+        function_call = response["function_call"]
+        return {
+            "name": function_call["name"],
+            "arguments": orjson.loads(function_call["arguments"]),
+        }
+    else:
+        return {
+            "name": "send_message",
+            "arguments": {"message": response["content"]},
+        }
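
Note: functions is expected in the OpenAI function-calling schema, and the JSON-encoded arguments string that comes back is decoded with orjson. A hedged sketch of one schema entry; the send_message definition here is illustrative, not necessarily what Botator registers:

functions = [
    {
        "name": "send_message",
        "description": "Send a chat message to the current channel",
        "parameters": {
            "type": "object",  # standard JSON Schema, as the functions API expects
            "properties": {
                "message": {"type": "string", "description": "The message text"},
            },
            "required": ["message"],
        },
    }
]

Because plain replies are wrapped in a synthetic send_message call in the else branch, downstream code sees one uniform shape whether or not the model actually called a function.
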
@@ -5,14 +5,30 @@ from src.chatUtils.requesters.llama import llama
 from src.chatUtils.requesters.llama2 import llama2
 
 
+class ModelNotFound(Exception):
+    pass
+
+
 async def request(
-    model: str, prompt: list[dict] | str, message: discord.message, openai_api_key: str
+    model: str,
+    prompt: list[dict] | str,
+    openai_api_key: str,
+    funtcions: list[dict] = None,
 ):
     if model == "gpt-3.5-turbo":
-        return await openaiChat(messages=prompt, openai_api_key=openai_api_key)
+        return await openaiChat(
+            messages=prompt,
+            openai_api_key=openai_api_key,
+            functions=funtcions,
+            model=model,
+        )
     elif model == "text-davinci-003":
-        return await openaiText(prompt=prompt, openai_api_key=openai_api_key)
-    elif model == "text-llama-2":
-        return await llama2(prompt=prompt)
+        # return await openaiText(prompt=prompt, openai_api_key=openai_api_key)
+        raise NotImplementedError("This model is not supported yet")
+    elif model == "text-llama":
+        return await llama(prompt=prompt)
+    elif model == "text-llama2":
+        # return await llama2(prompt=prompt)
+        raise NotImplementedError("This model is not supported yet")
     else:
         raise ModelNotFound(f"Model {model} not found")
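
Note: a usage sketch for the reworked dispatcher; chat models take a message list while the text backends take a flat string. The module path, API key, and empty schema list are placeholders, and the parameter really is spelled funtcions in this commit:

import asyncio

from src.chatUtils.requesters.requester import request  # hypothetical module path

async def main():
    reply = await request(
        model="gpt-3.5-turbo",
        prompt=[{"role": "user", "content": "hi"}],
        openai_api_key="sk-...",  # placeholder key
        funtcions=[],  # (sic) spelling from the diff
    )
    print(reply["name"], reply["arguments"])

asyncio.run(main())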