🐛 fix(ChatProcess.py): import the missing 'characters' class from src.utils.variousclasses to fix a NameError

✨ feat(ChatProcess.py): add support for a custom temperature per character, taken from the 'characters' class, to improve chat responses
🐛 fix(openaiChat.py): add a 'temperature' parameter to the 'openaiChat' function so responses can be generated with a custom temperature
🐛 fix(request.py): add a 'custom_temp' parameter to the 'request' function to pass the custom temperature through to 'openaiChat'
🐛 fix(openaicaller.py): reduce the sleep between API call retries from 10 seconds to 5 seconds to improve responsiveness
✨ feat(variousclasses.py): add a 'custom_temp' dictionary storing the custom temperature value for each character to improve chat responses
2023-09-04 12:19:08 +02:00
parent c434483c22
commit 0c182d82c2
5 changed files with 17 additions and 8 deletions
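Taken together, these changes thread a per-character sampling temperature from the characters class through request() into openaiChat(). The sketch below shows that flow in simplified form; the real functions also take messages, functions, and the OpenAI API key, so these signatures are trimmed for illustration only.

import asyncio

async def openaiChat(model="gpt-3.5-turbo", temperature=1.2):
    # temperature is now a parameter instead of the hard-coded 1.3
    return {"model": model, "temperature": temperature}

async def request(model="gpt-3.5-turbo", custom_temp=1.2):
    # request() forwards its custom_temp to openaiChat() as temperature
    if model == "gpt-3.5-turbo":
        return await openaiChat(model=model, temperature=custom_temp)

async def main():
    # ChatProcess.py obtains this value per character from characters.custom_temp
    print(await request(custom_temp=1.8))

asyncio.run(main())  # prints {'model': 'gpt-3.5-turbo', 'temperature': 1.8}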

ChatProcess.py

@@ -6,12 +6,11 @@ import datetime
 import json
 from src.utils.misc import moderate
-from src.utils.variousclasses import models
+from src.utils.variousclasses import models, characters
 from src.guild import Guild
 from src.chatUtils.Chat import fetch_messages_history, is_ignorable
 from src.chatUtils.prompts import createPrompt
 from src.functionscalls import call_function, server_normal_channel_functions, functions
 from src.config import debug
 from src.chatUtils.requesters.request import request
@@ -157,6 +156,7 @@ class Chat:
             prompt=self.prompt,
             openai_api_key=self.openai_api_key,
             funtcions=funcs,
+            custom_temp=characters.custom_temp.get(self.character, 1.2),
         )
     async def processResponse(self):

openaiChat.py

@@ -2,12 +2,14 @@ import orjson
 from src.utils.openaicaller import openai_caller
-async def openaiChat(messages, functions, openai_api_key, model="gpt-3.5-turbo"):
+async def openaiChat(
+    messages, functions, openai_api_key, model="gpt-3.5-turbo", temperature=1.2
+):
     caller = openai_caller()
     response = await caller.generate_response(
         api_key=openai_api_key,
         model=model,
-        temperature=1.3,
+        temperature=temperature,
         messages=messages,
         functions=functions,
         function_call="auto",

request.py

@@ -14,6 +14,7 @@ async def request(
     prompt: list[dict] | str,
     openai_api_key: str,
     funtcions: list[dict] = None,
+    custom_temp: float = 1.2,
 ):
     if model == "gpt-3.5-turbo":
         return await openaiChat(
@@ -21,6 +22,7 @@ async def request(
             openai_api_key=openai_api_key,
             functions=funtcions,
             model=model,
+            temperature=custom_temp,
         )
     elif model == "text-davinci-003":
         # return await openaiText(prompt=prompt, openai_api_key=openai_api_key)

openaicaller.py

@@ -147,21 +147,21 @@ class openai_caller:
                 await recall_func(
                     "`An APIError occurred. This is not your fault, it is OpenAI's fault. We apologize for the inconvenience. Retrying...`"
                 )
-                await asyncio.sleep(10)
+                await asyncio.sleep(5)
                 i += 1
             except Timeout as e:
                 print(
                     f"\n\n{bcolors.BOLD}{bcolors.WARNING}The request timed out. Retrying...{bcolors.ENDC}"
                 )
                 await recall_func("`The request timed out. Retrying...`")
-                await asyncio.sleep(10)
+                await asyncio.sleep(5)
                 i += 1
             except RateLimitError as e:
                 print(
                     f"\n\n{bcolors.BOLD}{bcolors.WARNING}RateLimitError. You are being rate limited. Retrying...{bcolors.ENDC}"
                 )
                 await recall_func("`You are being rate limited. Retrying...`")
-                await asyncio.sleep(10)
+                await asyncio.sleep(5)
                 i += 1
             except APIConnectionError as e:
                 print(
@@ -185,7 +185,7 @@ class openai_caller:
f"\n\n{bcolors.BOLD}{bcolors.WARNING}ServiceUnavailableError. The OpenAI API is not responding. Retrying...{bcolors.ENDC}"
)
await recall_func("`The OpenAI API is not responding. Retrying...`")
await asyncio.sleep(10)
await asyncio.sleep(5)
await recall_func()
i += 1
finally:
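Halving the sleep only shortens the pause between retries; the retry loop around it is unchanged. As a rough, simplified sketch of that pattern (a stand-in, not the actual openai_caller, which handles several OpenAI-specific exceptions and reports progress through recall_func):

import asyncio

async def call_with_retries(make_call, max_retries=5):
    i = 0
    while i < max_retries:
        try:
            return await make_call()
        except (TimeoutError, ConnectionError):
            # transient failure: wait 5 s (previously 10 s) and try again
            await asyncio.sleep(5)
            i += 1
    raise RuntimeError("API call kept failing after retries")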

variousclasses.py

@@ -24,6 +24,11 @@ class characters:
"Botator roleplay (premium)": "botator-roleplay",
"Zenith - Asimov's Laws (premium)": "zenith",
}
custom_temp = {
"zenith": 1.8,
"botator-roleplay": 1.8,
}
reverseMatchingDict = {v: k for k, v in matchingDict.items()}
default = list(matchingDict.keys())[0]
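With only zenith and botator-roleplay listed, every other character falls through to the 1.2 default that ChatProcess.py passes to dict.get. A small illustration ("some-other-character" is a hypothetical key, not a real entry):

custom_temp = {"zenith": 1.8, "botator-roleplay": 1.8}

# same lookup as characters.custom_temp.get(self.character, 1.2) in ChatProcess.py
print(custom_temp.get("zenith", 1.2))                # 1.8 (explicit override)
print(custom_temp.get("some-other-character", 1.2))  # 1.2 (default fallback)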