Mirror of https://github.com/Paillat-dev/viralfactory.git, synced 2026-01-02 09:16:19 +00:00
Fix a commit error
@@ -1,43 +1,65 @@
-import anthropic
+import openai
 import gradio as gr
+import orjson

 from abc import ABC, abstractmethod

 from .BaseLLMEngine import BaseLLMEngine

-# Assuming these are the models supported by Anthropics that you wish to include
-ANTHROPIC_POSSIBLE_MODELS = [
-    "claude-2.1",
-    # Add more models as needed
+OPENAI_POSSIBLE_MODELS = [  # These shall be the openai models supporting force_json
+    "gpt-3.5-turbo-0125",
+    "gpt-4-turbo-preview",
 ]


-class AnthropicsLLMEngine(BaseLLMEngine):
+class OpenaiLLMEngine(BaseLLMEngine):
     num_options = 1
-    name = "Anthropics"
-    description = "Anthropics language model engine."
+    name = "OpenAI"
+    description = "OpenAI language model engine."

     def __init__(self, options: list) -> None:
         self.model = options[0]
-        self.client = anthropic.Anthropic(api_key="YourAnthropicAPIKeyHere")  # Ensure API key is securely managed
         super().__init__()

-    def generate(self, system_prompt: str, chat_prompt: str, max_tokens: int = 1024, temperature: float = 1.0, json_mode: bool = False, top_p: float = 1, frequency_penalty: float = 0, presence_penalty: float = 0) -> str | dict:
-        # Note: Adjust the parameters as per Anthropics API capabilities
-        message = self.client.messages.create(
-            max_tokens=max_tokens,
+    def generate(
+        self,
+        system_prompt: str,
+        chat_prompt: str,
+        max_tokens: int = 512,
+        temperature: float = 1.0,
+        json_mode: bool = False,
+        top_p: float = 1,
+        frequency_penalty: float = 0,
+        presence_penalty: float = 0,
+    ) -> str | dict:
+        response = openai.chat.completions.create(
+            model=self.model,
             messages=[
                 {"role": "system", "content": system_prompt},
                 {"role": "user", "content": chat_prompt},
             ],
-            model=self.model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+            frequency_penalty=frequency_penalty,
+            presence_penalty=presence_penalty,
+            response_format={"type": "json_object"}
+            if json_mode
+            else openai._types.NOT_GIVEN,
         )
-        return message.content
+        return (
+            response.choices[0].message.content
+            if not json_mode
+            else orjson.loads(response.choices[0].message.content)
+        )

     @classmethod
     def get_options(cls) -> list:
         return [
             gr.Dropdown(
                 label="Model",
                 choices=ANTHROPIC_POSSIBLE_MODELS,
+                choices=OPENAI_POSSIBLE_MODELS,
                 max_choices=1,
-                value=ANTHROPIC_POSSIBLE_MODELS[0]
+                value=OPENAI_POSSIBLE_MODELS[0],
             )
         ]
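For readers skimming the change, a minimal usage sketch of the reworked engine (not part of the commit): it assumes OPENAI_API_KEY is exported so the module-level openai client can authenticate, that OpenaiLLMEngine is already imported from wherever the repo keeps its engines, and that BaseLLMEngine needs nothing beyond what the diff shows. Prompts and the model choice are illustrative only.

# Hypothetical usage sketch; all names outside the diff are assumptions.
engine = OpenaiLLMEngine(options=["gpt-3.5-turbo-0125"])  # options[0] selects the model

# Plain-text generation: generate() returns the raw message content as a str.
text = engine.generate(
    system_prompt="You are a concise assistant.",
    chat_prompt="Summarize the plot of Hamlet in one sentence.",
)

# JSON mode: response_format={"type": "json_object"} is passed to the API and the
# reply is parsed with orjson, so a dict comes back instead of a str.
data = engine.generate(
    system_prompt="Reply with a JSON object containing a 'title' key.",
    chat_prompt="Propose a title for a short video about space travel.",
    json_mode=True,
)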