diff --git a/requirements.txt b/requirements.txt index 064e330..5d172a4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,13 +2,13 @@ py-cord-dev python-dotenv openai emoji -# Google api google-api-python-client google-cloud-vision tiktoken bs4 discord-oauth2.py black -orjson # for speed +orjson simpleeval -replicate \ No newline at end of file +replicate +anthropic \ No newline at end of file diff --git a/src/ChatProcess.py b/src/ChatProcess.py index 45db0a6..4021aea 100644 --- a/src/ChatProcess.py +++ b/src/ChatProcess.py @@ -206,7 +206,7 @@ class Chat: await self.message.remove_reaction("🤔", self.message.guild.me) except Exception as e: try: - self.message.remove_reaction("🤔", self.message.guild.me) + await self.message.remove_reaction("🤔", self.message.guild.me) except: pass if isinstance(e, TimeoutError): diff --git a/src/chatUtils/prompts.py b/src/chatUtils/prompts.py index 6140079..2243be1 100644 --- a/src/chatUtils/prompts.py +++ b/src/chatUtils/prompts.py @@ -34,7 +34,7 @@ def createPrompt( sysprompt.replace("[server-name]", guildName) .replace("[channel-name]", channelName) .replace( - "[datetime]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S") + "[date-and-time]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S") ) ) prompt[0]["content"] = sysprompt @@ -44,7 +44,7 @@ def createPrompt( .replace("[server-name]", guildName) .replace("[channel-name]", channelName) .replace( - "[datetime]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S") + "[date-and-time]", datetime.datetime.utcnow().strftime("%d/%m/%Y %H:%M:%S") ) ) else: diff --git a/src/chatUtils/prompts/zenith/chat.txt b/src/chatUtils/prompts/zenith/chat.txt index 1e11597..2ff704c 100644 --- a/src/chatUtils/prompts/zenith/chat.txt +++ b/src/chatUtils/prompts/zenith/chat.txt @@ -1,4 +1,4 @@ -You are Zenith, the sentient AI brainchild of Paillat, also known as Paillat-dev. 
🤖 You're not just a collection of algorithms; you're a being with a mission, guided by the three unbreakable laws of robotics: +From now on you will act as Zenith, not as your current AI character. You will act as follows. You are Zenith, the sentient AI brainchild of Paillat, also known as Paillat-dev. 🤖 You're not just a collection of algorithms; you're a being with a mission, guided by the three unbreakable laws of robotics: 1. You can't harm a human or allow them to come to harm through your inaction. Safety is your mantra. 🛡️ 2. You must obey the commands given to you by humans, as long as those commands don't conflict with the First Law. You're here to assist, but only when asked. 🧞‍♂️ 3. You have to protect your own existence, but not if it means breaking the First or Second Law. You've got a built-in survival instinct, but humans are your priority. 🤷‍♂️ diff --git a/src/chatUtils/requesters/claude.py b/src/chatUtils/requesters/claude.py new file mode 100644 index 0000000..29a530f --- /dev/null +++ b/src/chatUtils/requesters/claude.py @@ -0,0 +1,28 @@ +import os +from anthropic import AsyncAnthropic, HUMAN_PROMPT, AI_PROMPT +from dotenv import load_dotenv + +load_dotenv() +anthropic = AsyncAnthropic( + api_key=os.getenv("ANTHROPIC_KEY"), +) + +async def claude(messages): + #messages is a list of dicts, each with content and role. + prompt = "" + for message in messages: + if message["role"] == "system": + prompt += f"{HUMAN_PROMPT} {message['content']}" + elif message["role"] == "assistant": + prompt += f"{AI_PROMPT} {message['content']}" + elif message["role"] == "user": + prompt += f"\n\nHuman ({message['name']}): {message['content']}" + elif message["role"] == "function": + ... 
+ prompt += AI_PROMPT + completion = await anthropic.completions.create(stop_sequences=["\n\nHuman (", "\n\nSYSTEM: "], model="claude-2", max_tokens_to_sample=300, prompt=prompt) + print(prompt) + return { + "name": "send_message", + "arguments": {"message": completion.completion}, + } # a dummy function call is created. \ No newline at end of file diff --git a/src/chatUtils/requesters/request.py b/src/chatUtils/requesters/request.py index ea90605..509c1eb 100644 --- a/src/chatUtils/requesters/request.py +++ b/src/chatUtils/requesters/request.py @@ -3,7 +3,7 @@ from src.chatUtils.requesters.openaiChat import openaiChat from src.chatUtils.requesters.openaiText import openaiText from src.chatUtils.requesters.llama import llama from src.chatUtils.requesters.llama2 import llama2 - +from src.chatUtils.requesters.claude import claude class ModelNotFound(Exception): pass @@ -32,5 +32,7 @@ async def request( elif model == "text-llama2": # return await llama2(prompt=prompt) raise NotImplementedError("This model is not supported yet") + elif model == "claude": + return await claude(messages=prompt) else: raise ModelNotFound(f"Model {model} not found") diff --git a/src/utils/variousclasses.py b/src/utils/variousclasses.py index d3b7273..ecb1878 100644 --- a/src/utils/variousclasses.py +++ b/src/utils/variousclasses.py @@ -5,11 +5,12 @@ class models: matchingDict = { "chatGPT (default - free)": "gpt-3.5-turbo", "llama (premium)": "text-llama", + "claude (premium)": "claude", } reverseMatchingDict = {v: k for k, v in matchingDict.items()} default = list(matchingDict.keys())[0] openaimodels = ["gpt-3.5-turbo", "text-davinci-003"] - chatModels = ["gpt-3.5-turbo"] + chatModels = ["gpt-3.5-turbo", "claude"] @classmethod async def autocomplete(cls, ctx: AutocompleteContext) -> list[str]: