From 53b627f4ed1d8454f4191a57b2c1bf2f0173961f Mon Sep 17 00:00:00 2001 From: Paillat Date: Thu, 15 Feb 2024 17:53:18 +0100 Subject: [PATCH] Add anthropic llm engine --- src/engines/LLMEngine/AnthropicLLMEngine.py | 34 +++++++++++++-------- src/engines/LLMEngine/__init__.py | 3 +- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/engines/LLMEngine/AnthropicLLMEngine.py b/src/engines/LLMEngine/AnthropicLLMEngine.py index 5182bad..3b5878d 100644 --- a/src/engines/LLMEngine/AnthropicLLMEngine.py +++ b/src/engines/LLMEngine/AnthropicLLMEngine.py @@ -4,24 +4,34 @@ import orjson from .BaseLLMEngine import BaseLLMEngine -# Assuming these are the models supported by Anthropics that you wish to include ANTHROPIC_POSSIBLE_MODELS = [ "claude-2.1", - # Add more models as needed ] -class AnthropicsLLMEngine(BaseLLMEngine): + +class AnthropicLLMEngine(BaseLLMEngine): num_options = 1 - name = "Anthropics" - description = "Anthropics language model engine." + name = "Anthropic" + description = "Anthropic language model engine." 
def __init__(self, options: list) -> None: self.model = options[0] - self.client = anthropic.Anthropic(api_key="YourAnthropicAPIKeyHere") # Ensure API key is securely managed + self.client = anthropic.Anthropic( + api_key="YourAnthropicAPIKeyHere" + ) # Ensure API key is securely managed super().__init__() - def generate(self, system_prompt: str, chat_prompt: str, max_tokens: int = 1024, temperature: float = 1.0, json_mode: bool = False, top_p: float = 1, frequency_penalty: float = 0, presence_penalty: float = 0) -> str | dict: - # Note: Adjust the parameters as per Anthropics API capabilities + def generate( + self, + system_prompt: str, + chat_prompt: str, + max_tokens: int = 1024, + temperature: float = 1.0, + json_mode: bool = False, + top_p: float = 1, + frequency_penalty: float = 0, + presence_penalty: float = 0, + ) -> str | dict: prompt = f"""{anthropic.HUMAN_PROMPT} {system_prompt} {anthropic.HUMAN_PROMPT} {chat_prompt} {anthropic.AI_PROMPT}""" if json_mode: # anthopic does not officially support JSON mode, but we can bias the output towards a JSON-like format @@ -37,10 +47,10 @@ class AnthropicsLLMEngine(BaseLLMEngine): content = response.completion if json_mode: - #we add back the opening curly brace wich is not included in the response since it is in the prompt + # we add back the opening curly brace which is not included in the response since it is in the prompt content = "{" + content - #we remove everything after the last closing curly brace - content = content[:content.rfind("}") + 1] + # we remove everything after the last closing curly brace + content = content[: content.rfind("}") + 1] return orjson.loads(content) else: return content @@ -52,6 +62,6 @@ class AnthropicsLLMEngine(BaseLLMEngine): label="Model", choices=ANTHROPIC_POSSIBLE_MODELS, max_choices=1, - value=ANTHROPIC_POSSIBLE_MODELS[0] + value=ANTHROPIC_POSSIBLE_MODELS[0], ) ] diff --git a/src/engines/LLMEngine/__init__.py b/src/engines/LLMEngine/__init__.py index 6438acc..ba44606 100644
--- a/src/engines/LLMEngine/__init__.py +++ b/src/engines/LLMEngine/__init__.py @@ -1,2 +1,3 @@ from .BaseLLMEngine import BaseLLMEngine -from .OpenaiLLMEngine import OpenaiLLMEngine \ No newline at end of file +from .OpenaiLLMEngine import OpenaiLLMEngine +from .AnthropicLLMEngine import AnthropicLLMEngine