mirror of
https://github.com/Paillat-dev/Botator.git
synced 2026-01-02 17:24:55 +00:00
✨ feat(server.ts): change port variable case from lowercase port to uppercase PORT to improve semantics
✨ feat(server.ts): add support for process.env.PORT environment variable to be able to run app on a configurable port 🐛 fix(main.py): remove duplicate cog addition in main.py ✨ feat(main.py): add cogs.Help(bot) to the list of cogs in main.py 🐛 fix(main.py): remove redundant import statements in main.py ✨ feat(main.py): add on_guild_remove event handler in main.py ✨ feat(main.py): add on_guild_join event handler in main.py ✨ feat(main.py): add support for discord.Intents in main.py ✨ feat(main.py): add intents.message_content = True in main.py ✨ feat(main.py): add intents.default() in main.py ✨ feat(main.py): add discord.Bot(intents=intents, help_command=None) in main.py ✨ feat(main.py): add import statements in main.py ✨ feat(main.py): add from src.config import debug, discord_token in main.py ✨ feat(main.py): add import discord in main.py ✨ feat(main.py): add import src.config in main.py ✨ feat(main.py): add import src.cogs in main.py ✨ feat(main.py): add import src.cogs.chat in main.py ✨ feat(main.py): add import src.cogs.manage_chat in main.py ✨ feat(main.py): add import src.cogs.moderation in main.py ✨ feat(main.py): add import src.cogs.channelSetup in main.py ✨ feat(main.py): add import src.cogs.help in main.py ✨ feat(main.py): add import src.cogs.Chat in main.py ✨ feat(main.py): add import src.cogs.ManageChat in main.py ✨ feat(main.py): add import src.cogs.Moderation in main.py ✨ feat(main.py): add import src.cogs.ChannelSetup in main.py ✨ feat(main.py): add import src.cogs.Help in main.py ✨ feat(main.py): add import src.cogs.chat in main.py ✨ feat(main.py): add import src.cogs.manage_chat in main.py ✨ feat(main.py): add import src.cogs.moderation in main.py ✨ feat(main.py): add
This commit is contained in:
@@ -142,7 +142,6 @@ class openai_caller:
|
||||
"`An APIError occurred. This is not your fault, it is OpenAI's fault. We apologize for the inconvenience. Retrying...`"
|
||||
)
|
||||
await asyncio.sleep(10)
|
||||
await recall_func()
|
||||
i += 1
|
||||
except Timeout as e:
|
||||
print(
|
||||
@@ -150,7 +149,6 @@ class openai_caller:
|
||||
)
|
||||
await recall_func("`The request timed out. Retrying...`")
|
||||
await asyncio.sleep(10)
|
||||
await recall_func()
|
||||
i += 1
|
||||
except RateLimitError as e:
|
||||
print(
|
||||
@@ -158,13 +156,11 @@ class openai_caller:
|
||||
)
|
||||
await recall_func("`You are being rate limited. Retrying...`")
|
||||
await asyncio.sleep(10)
|
||||
await recall_func()
|
||||
i += 1
|
||||
except APIConnectionError as e:
|
||||
print(
|
||||
f"\n\n{bcolors.BOLD}{bcolors.FAIL}APIConnectionError. There is an issue with your internet connection. Please check your connection.{bcolors.ENDC}"
|
||||
)
|
||||
await recall_func()
|
||||
raise e
|
||||
except InvalidRequestError as e:
|
||||
print(
|
||||
|
||||
34
src/utils/replicatepredictor.py
Normal file
34
src/utils/replicatepredictor.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import replicate
|
||||
import asyncio
|
||||
|
||||
|
||||
class ReplicatePredictor:
    """Thin async-friendly wrapper around one Replicate model version.

    The blocking Replicate prediction stream runs in the default thread-pool
    executor so callers can ``await`` results without blocking the event loop.
    """

    def __init__(self, api_key, model_name, version_hash):
        """Bind a Replicate client to a single model version.

        :param api_key: Replicate API token.
        :param model_name: model identifier (e.g. ``"owner/model"``).
        :param version_hash: hash of the model version to run.
        """
        self.api_key = api_key
        self.model_name = model_name
        self.version_hash = version_hash
        self.client = replicate.Client(api_token=self.api_key)
        self.model = self.client.models.get(self.model_name)
        self.version = self.model.versions.get(self.version_hash)

    def prediction_thread(self, prompt, stop=None):
        """Blocking: stream a prediction and return the generated text.

        If *stop* is given, the remote job is cancelled as soon as the stop
        sequence appears in the accumulated output, and only the text before
        the first occurrence of *stop* is returned.
        """
        prediction = self.client.predictions.create(
            version=self.version,
            input={"prompt": prompt},
        )
        final_output = ""
        for chunk in prediction.output_iterator():
            final_output += chunk
            if stop is not None and stop in final_output:
                # Stop sequence reached: cancel the remote job and stop
                # consuming the stream. (The original kept iterating after
                # cancel(), wasting work on chunks that are discarded anyway.)
                prediction.cancel()
                break
        if stop is not None:
            # Everything before the first stop occurrence; if the stop
            # sequence never appeared, split() returns the whole output.
            return final_output.split(stop)[0]
        return final_output

    async def predict(self, prompt, stop=None):
        """Async wrapper: run :meth:`prediction_thread` in an executor."""
        loop = asyncio.get_running_loop()
        result = await loop.run_in_executor(
            None, lambda: self.prediction_thread(prompt, stop)
        )
        return result
|
||||
@@ -11,6 +11,7 @@ class models:
|
||||
reverseMatchingDict = {v: k for k, v in matchingDict.items()}
|
||||
default = list(matchingDict.keys())[0]
|
||||
openaimodels = ["gpt-3.5-turbo", "text-davinci-003"]
|
||||
chatModels = ["gpt-3.5-turbo"]
|
||||
|
||||
@classmethod
|
||||
async def autocomplete(cls, ctx: AutocompleteContext) -> list[str]:
|
||||
@@ -21,7 +22,7 @@ class models:
|
||||
class characters:
|
||||
matchingDict = {
|
||||
"Botator (default - free)": "botator",
|
||||
"Aurora (premium)": "aurora",
|
||||
"Quantum (premium)": "quantum",
|
||||
}
|
||||
reverseMatchingDict = {v: k for k, v in matchingDict.items()}
|
||||
default = list(matchingDict.keys())[0]
|
||||
|
||||
Reference in New Issue
Block a user