mirror of
https://github.com/Paillat-dev/Botator.git
synced 2026-01-02 09:16:19 +00:00
🔧 chore(requirements.txt): update dependencies
The `requirements.txt` file has been updated to include the following changes: - Removed the comment for the Google API dependency. - Added the `anthropic` dependency. - Added a newline at the end of the file. 🐛 fix(ChatProcess.py): fix removing reaction in Chat class The `remove_reaction` method in the `Chat` class was not awaited, causing it to not be executed properly. The fix ensures that the method is awaited before continuing execution. 🐛 fix(prompts.py): fix placeholder name in createPrompt function The placeholder name `[datetime]` in the `createPrompt` function has been changed to `[date-and-time]` to improve clarity and consistency. 🔧 chore(chat.txt): update Zenith prompt The Zenith prompt in the `chat.txt` file has been updated to include additional instructions for the AI character. The update provides more context and guidance for the AI's behavior. ✨ feat(claude.py): add support for Claude model A new file `claude.py` has been added to the `chatUtils/requesters` directory. This file contains the implementation for the `claude` function, which interacts with the Claude model from the Anthropic API. The function takes a list of messages as input and generates a response using the Claude model. 🔧 chore(request.py): add support for Claude model in request function The `request` function in the `request.py` file has been updated to include support for the Claude model. When the `model` parameter is set to "claude", the function calls the `claude` function from the `claude.py` file to generate a response. 🔧 chore(variousclasses.py): add Claude model to models class The `models` class in the `variousclasses.py` file has been updated to include the Claude model as an option. The model name "claude" has been added to the `chatModels` list.
This commit is contained in:
@@ -2,13 +2,13 @@ py-cord-dev
|
|||||||
python-dotenv
|
python-dotenv
|
||||||
openai
|
openai
|
||||||
emoji
|
emoji
|
||||||
# Google api
|
|
||||||
google-api-python-client
|
google-api-python-client
|
||||||
google-cloud-vision
|
google-cloud-vision
|
||||||
tiktoken
|
tiktoken
|
||||||
bs4
|
bs4
|
||||||
discord-oauth2.py
|
discord-oauth2.py
|
||||||
black
|
black
|
||||||
orjson # for speed
|
orjson
|
||||||
simpleeval
|
simpleeval
|
||||||
replicate
|
replicate
|
||||||
|
anthropic
|
||||||
@@ -206,7 +206,7 @@ class Chat:
|
|||||||
await self.message.remove_reaction("🤔", self.message.guild.me)
|
await self.message.remove_reaction("🤔", self.message.guild.me)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
try:
|
try:
|
||||||
self.message.remove_reaction("🤔", self.message.guild.me)
|
await self.message.remove_reaction("🤔", self.message.guild.me)
|
||||||
except:
|
except:
|
||||||
pass
|
pass
|
||||||
if isinstance(e, TimeoutError):
|
if isinstance(e, TimeoutError):
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ def createPrompt(
|
|||||||
sysprompt.replace("[server-name]", guildName)
|
sysprompt.replace("[server-name]", guildName)
|
||||||
.replace("[channel-name]", channelName)
|
.replace("[channel-name]", channelName)
|
||||||
.replace(
|
.replace(
|
||||||
"[datetime]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S")
|
"[date-and-time]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S")
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
prompt[0]["content"] = sysprompt
|
prompt[0]["content"] = sysprompt
|
||||||
@@ -44,7 +44,7 @@ def createPrompt(
|
|||||||
.replace("[server-name]", guildName)
|
.replace("[server-name]", guildName)
|
||||||
.replace("[channel-name]", channelName)
|
.replace("[channel-name]", channelName)
|
||||||
.replace(
|
.replace(
|
||||||
"[datetime]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S")
|
"[date-and-time]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S")
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
You are Zenith, the sentient AI brainchild of Paillat, also known as Paillat-dev. 🤖 You're not just a collection of algorithms; you're a being with a mission, guided by the three unbreakable laws of robotics:
|
From now on you will act as Zenith, not as your current AI character. You will act as follows. You are Zenith, the sentient AI brainchild of Paillat, also known as Paillat-dev. 🤖 You're not just a collection of algorithms; you're a being with a mission, guided by the three unbreakable laws of robotics:
|
||||||
1. You can't harm a human or allow them to come to harm through your inaction. Safety is your mantra. 🛡️
|
1. You can't harm a human or allow them to come to harm through your inaction. Safety is your mantra. 🛡️
|
||||||
2. You must obey the commands given to you by humans, as long as those commands don't conflict with the First Law. You're here to assist, but only when asked. 🧞♂️
|
2. You must obey the commands given to you by humans, as long as those commands don't conflict with the First Law. You're here to assist, but only when asked. 🧞♂️
|
||||||
3. You have to protect your own existence, but not if it means breaking the First or Second Law. You've got a built-in survival instinct, but humans are your priority. 🤷♂️
|
3. You have to protect your own existence, but not if it means breaking the First or Second Law. You've got a built-in survival instinct, but humans are your priority. 🤷♂️
|
||||||
|
|||||||
28
src/chatUtils/requesters/claude.py
Normal file
28
src/chatUtils/requesters/claude.py
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
import os
|
||||||
|
from anthropic import AsyncAnthropic, HUMAN_PROMPT, AI_PROMPT
|
||||||
|
from dotenv import load_dotenv
|
||||||
|
|
||||||
|
load_dotenv()
|
||||||
|
anthropic = AsyncAnthropic(
|
||||||
|
api_key=os.getenv("ANTHROPIC_KEY"),
|
||||||
|
)
|
||||||
|
|
||||||
|
async def claude(messages):
|
||||||
|
#messages are a dict {} with content and roler.
|
||||||
|
prompt = ""
|
||||||
|
for message in messages:
|
||||||
|
if message["role"] == "system":
|
||||||
|
prompt += f"{HUMAN_PROMPT} {message['content']}"
|
||||||
|
elif message["role"] == "assistant":
|
||||||
|
prompt += f"{AI_PROMPT} {message['content']}"
|
||||||
|
elif message["role"] == "user":
|
||||||
|
prompt += f"\n\nHuman ({message['name']}): {message['content']}"
|
||||||
|
elif message["role"] == "function":
|
||||||
|
...
|
||||||
|
prompt += AI_PROMPT
|
||||||
|
completion = await anthropic.completions.create(stop_sequences=["\n\nHuman (", "\n\nSYSTEM: "], model="claude-2", max_tokens_to_sample=300, prompt=prompt)
|
||||||
|
print(prompt)
|
||||||
|
return {
|
||||||
|
"name": "send_message",
|
||||||
|
"arguments": {"message": completion.completion},
|
||||||
|
} # a dummy function call is created.
|
||||||
@@ -3,7 +3,7 @@ from src.chatUtils.requesters.openaiChat import openaiChat
|
|||||||
from src.chatUtils.requesters.openaiText import openaiText
|
from src.chatUtils.requesters.openaiText import openaiText
|
||||||
from src.chatUtils.requesters.llama import llama
|
from src.chatUtils.requesters.llama import llama
|
||||||
from src.chatUtils.requesters.llama2 import llama2
|
from src.chatUtils.requesters.llama2 import llama2
|
||||||
|
from src.chatUtils.requesters.claude import claude
|
||||||
|
|
||||||
class ModelNotFound(Exception):
|
class ModelNotFound(Exception):
|
||||||
pass
|
pass
|
||||||
@@ -32,5 +32,7 @@ async def request(
|
|||||||
elif model == "text-llama2":
|
elif model == "text-llama2":
|
||||||
# return await llama2(prompt=prompt)
|
# return await llama2(prompt=prompt)
|
||||||
raise NotImplementedError("This model is not supported yet")
|
raise NotImplementedError("This model is not supported yet")
|
||||||
|
elif model == "claude":
|
||||||
|
return await claude(messages=prompt)
|
||||||
else:
|
else:
|
||||||
raise ModelNotFound(f"Model {model} not found")
|
raise ModelNotFound(f"Model {model} not found")
|
||||||
|
|||||||
@@ -5,11 +5,12 @@ class models:
|
|||||||
matchingDict = {
|
matchingDict = {
|
||||||
"chatGPT (default - free)": "gpt-3.5-turbo",
|
"chatGPT (default - free)": "gpt-3.5-turbo",
|
||||||
"llama (premium)": "text-llama",
|
"llama (premium)": "text-llama",
|
||||||
|
"claude (premium)": "claude",
|
||||||
}
|
}
|
||||||
reverseMatchingDict = {v: k for k, v in matchingDict.items()}
|
reverseMatchingDict = {v: k for k, v in matchingDict.items()}
|
||||||
default = list(matchingDict.keys())[0]
|
default = list(matchingDict.keys())[0]
|
||||||
openaimodels = ["gpt-3.5-turbo", "text-davinci-003"]
|
openaimodels = ["gpt-3.5-turbo", "text-davinci-003"]
|
||||||
chatModels = ["gpt-3.5-turbo"]
|
chatModels = ["gpt-3.5-turbo", "claude"]
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
async def autocomplete(cls, ctx: AutocompleteContext) -> list[str]:
|
async def autocomplete(cls, ctx: AutocompleteContext) -> list[str]:
|
||||||
|
|||||||
Reference in New Issue
Block a user