diff --git a/src/ChatProcess.py b/src/ChatProcess.py
index d60b8f1..6b38199 100644
--- a/src/ChatProcess.py
+++ b/src/ChatProcess.py
@@ -6,12 +6,11 @@
 import datetime
 import json
 from src.utils.misc import moderate
-from src.utils.variousclasses import models
+from src.utils.variousclasses import models, characters
 from src.guild import Guild
 from src.chatUtils.Chat import fetch_messages_history, is_ignorable
 from src.chatUtils.prompts import createPrompt
 from src.functionscalls import call_function, server_normal_channel_functions, functions
-from src.config import debug
 from src.chatUtils.requesters.request import request
 
 
@@ -157,6 +156,7 @@ class Chat:
             prompt=self.prompt,
             openai_api_key=self.openai_api_key,
             funtcions=funcs,
+            custom_temp=characters.custom_temp.get(self.character, 1.2),
         )
 
     async def processResponse(self):
diff --git a/src/chatUtils/requesters/openaiChat.py b/src/chatUtils/requesters/openaiChat.py
index ebdbbf8..de945f9 100644
--- a/src/chatUtils/requesters/openaiChat.py
+++ b/src/chatUtils/requesters/openaiChat.py
@@ -2,12 +2,14 @@
 import orjson
 from src.utils.openaicaller import openai_caller
 
-async def openaiChat(messages, functions, openai_api_key, model="gpt-3.5-turbo"):
+async def openaiChat(
+    messages, functions, openai_api_key, model="gpt-3.5-turbo", temperature=1.2
+):
     caller = openai_caller()
     response = await caller.generate_response(
         api_key=openai_api_key,
         model=model,
-        temperature=1.3,
+        temperature=temperature,
         messages=messages,
         functions=functions,
         function_call="auto",
diff --git a/src/chatUtils/requesters/request.py b/src/chatUtils/requesters/request.py
index 058a785..ea90605 100644
--- a/src/chatUtils/requesters/request.py
+++ b/src/chatUtils/requesters/request.py
@@ -14,6 +14,7 @@ async def request(
     prompt: list[dict] | str,
     openai_api_key: str,
     funtcions: list[dict] = None,
+    custom_temp: float = 1.2,
 ):
     if model == "gpt-3.5-turbo":
         return await openaiChat(
@@ -21,6 +22,7 @@
             messages=prompt,
             openai_api_key=openai_api_key,
             functions=funtcions,
             model=model,
+            temperature=custom_temp,
         )
     elif model == "text-davinci-003":
         # return await openaiText(prompt=prompt, openai_api_key=openai_api_key)
diff --git a/src/utils/openaicaller.py b/src/utils/openaicaller.py
index 11fd9cb..f332262 100644
--- a/src/utils/openaicaller.py
+++ b/src/utils/openaicaller.py
@@ -147,21 +147,21 @@
                 await recall_func(
                     "`An APIError occurred. This is not your fault, it is OpenAI's fault. We apologize for the inconvenience. Retrying...`"
                 )
-                await asyncio.sleep(10)
+                await asyncio.sleep(5)
                 i += 1
             except Timeout as e:
                 print(
                     f"\n\n{bcolors.BOLD}{bcolors.WARNING}The request timed out. Retrying...{bcolors.ENDC}"
                 )
                 await recall_func("`The request timed out. Retrying...`")
-                await asyncio.sleep(10)
+                await asyncio.sleep(5)
                 i += 1
             except RateLimitError as e:
                 print(
                     f"\n\n{bcolors.BOLD}{bcolors.WARNING}RateLimitError. You are being rate limited. Retrying...{bcolors.ENDC}"
                 )
                 await recall_func("`You are being rate limited. Retrying...`")
-                await asyncio.sleep(10)
+                await asyncio.sleep(5)
                 i += 1
             except APIConnectionError as e:
                 print(
@@ -185,7 +185,7 @@
                     f"\n\n{bcolors.BOLD}{bcolors.WARNING}ServiceUnavailableError. The OpenAI API is not responding. Retrying...{bcolors.ENDC}"
                 )
                 await recall_func("`The OpenAI API is not responding. Retrying...`")
-                await asyncio.sleep(10)
+                await asyncio.sleep(5)
                 await recall_func()
                 i += 1
             finally:
diff --git a/src/utils/variousclasses.py b/src/utils/variousclasses.py
index 57087db..e776380 100644
--- a/src/utils/variousclasses.py
+++ b/src/utils/variousclasses.py
@@ -24,6 +24,11 @@ class characters:
         "Botator roleplay (premium)": "botator-roleplay",
         "Zenith - Asimov's Laws (premium)": "zenith",
     }
+    custom_temp = {
+        "zenith": 1.8,
+        "botator-roleplay": 1.8,
+    }
+
     reverseMatchingDict = {v: k for k, v in matchingDict.items()}
     default = list(matchingDict.keys())[0]