feat(GenerationContext.py): add new file GenerationContext.py to handle the context of generation engines

feat(OpenaiLLMEngine.py): add orjson library for JSON serialization and deserialization, and implement the generate method to make an API call to the OpenAI chat completions endpoint

feat(__init__.py): import OpenaiLLMEngine in LLMEngine package

feat(BaseScriptEngine.py): add time_script method to the BaseScriptEngine class

feat(CustomScriptEngine.py): add new file CustomScriptEngine.py to handle custom script generation, implement generate method to return the provided script, and add get_options method to provide a textbox for the prompt input

feat(__init__.py): import CustomScriptEngine in ScriptEngine package

feat(__init__.py): import LLMEngine package and add OpenaiLLMEngine to the ENGINES dictionary

refactor(gradio_ui.py): change equal_height attribute of Row to False to allow different heights for input blocks
This commit is contained in:
2024-02-15 11:23:36 +01:00
parent 0594458865
commit 9f88e6d069
8 changed files with 74 additions and 5 deletions

View File

@@ -1,5 +1,6 @@
import openai
import gradio as gr
import orjson
from abc import ABC, abstractmethod
@@ -15,10 +16,29 @@ class OpenaiLLMEngine(BaseLLMEngine):
# Engine identifier used to label this engine.
name = "OpenAI"
# Short human-readable summary of what this engine is.
description = "OpenAI language model engine."
def __init__(self, options: list) -> None:
    """Initialize the engine from the UI-provided options list.

    Args:
        options: Option values collected from get_options(); the first
            entry is the model identifier to use for API calls.
    """
    # Only the first option is consumed here — presumably the value of
    # the "Model" dropdown; verify against the caller.
    selected_model = options[0]
    self.model = selected_model
    super().__init__()
def generate(self, system_prompt: str, chat_prompt: str, max_tokens: int = 512, temperature: float = 1.0, json_mode: bool = False, top_p: float = 1, frequency_penalty: float = 0, presence_penalty: float = 0) -> str:
    """Call the OpenAI chat completions endpoint and return the reply.

    Args:
        system_prompt: Content sent as the system message.
        chat_prompt: Content sent as the user message.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature.
        json_mode: When True, request a JSON-object response and return
            it parsed with orjson instead of as raw text.
        top_p: Nucleus-sampling probability mass.
        frequency_penalty: Frequency penalty passed to the API.
        presence_penalty: Presence penalty passed to the API.

    Returns:
        The assistant message content as a str, or — when json_mode is
        True — the orjson-parsed object (NOTE(review): this widens the
        declared ``-> str`` return type; callers relying on json_mode
        receive a dict/list).
    """
    response = openai.chat.completions.create(
        model=self.model,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": chat_prompt},
        ],
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        presence_penalty=presence_penalty,
        # Use the public NOT_GIVEN sentinel (exported by openai>=1.0)
        # rather than reaching into the private openai._types module.
        response_format={"type": "json_object"} if json_mode else openai.NOT_GIVEN,
    )
    # Fetch the content once instead of walking the attribute chain twice.
    content = response.choices[0].message.content
    return orjson.loads(content) if json_mode else content
def get_options(self) -> list:
@classmethod
def get_options(cls) -> list:
return [
gr.Dropdown(
label="Model",

View File

@@ -1 +1,2 @@
from .BaseLLMEngine import BaseLLMEngine
from .BaseLLMEngine import BaseLLMEngine
from .OpenaiLLMEngine import OpenaiLLMEngine