value="The higher the temperature, the more likely the model will take risks. Conversely, a lower temperature will make the model more conservative. The default value is 0.9",
inline=False,
)
embed.add_field(
name="max_tokens",
value="The maximum number of tokens to generate. Higher values will result in more coherent text, but will take longer to complete. (default: 50). **Lower values will result in somentimes cutting off the end of the answer, but will be faster.**",
inline=False,
)
embed.add_field(
name="frequency_penalty",
value="The higher the frequency penalty, the more new words the model will introduce (default: 0.0)",
inline=False,
)
embed.add_field(
name="presence_penalty",
value="The higher the presence penalty, the more new words the model will introduce (default: 0.0)",
inline=False,
)
embed.add_field(
name="prompt_size",
value="The number of messages to use as a prompt (default: 5). The more messages, the more coherent the text will be, but the more it will take to generate and the more it will cost.",