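"""OpenAI chat-completions implementation of the BaseLLMEngine interface."""
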
import openai
import gradio as gr
import orjson
from .BaseLLMEngine import BaseLLMEngine
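

# Chat model identifiers exposed in the UI dropdown.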
OPENAI_POSSIBLE_MODELS = [
    "gpt-3.5-turbo-0125",
    "gpt-4-turbo-preview",
]

class OpenaiLLMEngine(BaseLLMEngine):
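    """LLM engine backed by the OpenAI Chat Completions API.

    Assumes credentials are configured out of band (e.g. the OPENAI_API_KEY
    environment variable), since the module-level openai client is used.
    """
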
    num_options = 1
    name = "OpenAI"
    description = "OpenAI language model engine."

    def __init__(self, options: list) -> None:
        # options[0] is the model name chosen in the UI dropdown.
        self.model = options[0]
        super().__init__()

    def generate(
        self,
        system_prompt: str,
        chat_prompt: str,
        max_tokens: int = 512,
        temperature: float = 1.0,
        json_mode: bool = False,
        top_p: float = 1.0,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
    ) -> str | dict:
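        """Run one chat completion against the configured model.

        With json_mode=True the API is asked for a JSON object, which is
        parsed with orjson and returned as a dict; otherwise the raw
        message text is returned.
        """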
        response = openai.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": chat_prompt},
            ],
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            frequency_penalty=frequency_penalty,
            presence_penalty=presence_penalty,
            # NOT_GIVEN is the SDK's public sentinel for "omit this field";
            # openai._types.NOT_GIVEN works too but relies on a private module.
            response_format={"type": "json_object"} if json_mode else openai.NOT_GIVEN,
        )
        content = response.choices[0].message.content
        return orjson.loads(content) if json_mode else content

    @classmethod
    def get_options(cls) -> list:
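        """Return the Gradio input components used to configure this engine."""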
        return [
            gr.Dropdown(
                label="Model",
                choices=OPENAI_POSSIBLE_MODELS,
                max_choices=1,
                value=OPENAI_POSSIBLE_MODELS[0],
            )
        ]
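
# Minimal usage sketch (an assumption, not part of this module: it presumes
# OPENAI_API_KEY is set in the environment and that BaseLLMEngine needs no
# extra constructor arguments):
#
#     engine = OpenaiLLMEngine(["gpt-3.5-turbo-0125"])
#     reply = engine.generate(
#         system_prompt="You are a helpful assistant.",
#         chat_prompt="Say hello in one sentence.",
#     )
#     print(reply)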