Format with black

2023-07-18 17:51:13 +02:00
parent 4b192f779e
commit 12f13ca6c4
14 changed files with 424 additions and 241 deletions


@@ -1,12 +1,13 @@
-'''
+"""
 This file's purpose is to count the number of tokens used by a list of messages.
 It is used to check if the token limit of the model is reached.
 Reference: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb
-'''
+"""
 import tiktoken
 async def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
     """Returns the number of tokens used by a list of messages."""
     try:
@@ -16,13 +17,17 @@ async def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
         encoding = tiktoken.get_encoding("cl100k_base")
     if model.startswith("gpt-3.5-turbo"):
-        tokens_per_message = 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        tokens_per_message = (
+            4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
+        )
         tokens_per_name = -1 # if there's a name, the role is omitted
     elif model.startswith("gpt-4"):
         tokens_per_message = 3
         tokens_per_name = 1
     else:
-        raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
+        raise NotImplementedError(
+            f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
+        )
     num_tokens = 0
     for message in messages:
         num_tokens += tokens_per_message
@@ -31,4 +36,4 @@ async def num_tokens_from_messages(messages, model="gpt-3.5-turbo"):
             if key == "name":
                 num_tokens += tokens_per_name
     num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
-    return num_tokens
+    return num_tokens
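
For context, a minimal usage sketch of the reformatted helper might look like the code below. It assumes num_tokens_from_messages from this file is in scope; the 4096-token context window and the 500-token headroom are illustrative assumptions, not part of this commit.

# Usage sketch (hypothetical): count tokens for a ChatML-style message list
# and warn when close to an assumed 4096-token context window.
import asyncio


async def check_limit():
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "How many tokens does this conversation use?"},
    ]
    used = await num_tokens_from_messages(messages, model="gpt-3.5-turbo")
    print(f"{used} tokens used by the message list")
    # Leave some headroom for the completion before the assumed limit is hit.
    if used > 4096 - 500:
        print("Warning: approaching the model's token limit")


asyncio.run(check_limit())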