Added moderation

This commit is contained in:
Paillat
2023-02-02 21:44:40 +01:00
parent 7236d1c682
commit 948f2c43a2
9 changed files with 146 additions and 10 deletions

1
.gitignore vendored
View File

@@ -161,6 +161,7 @@ cython_debug/
key.txt
data.db
database
premium-key.txt
premium.db
guildscount.py

View File

@@ -1,5 +1,5 @@
# Botator
Botator is a discord bot that binds [@openai](https://github.com/openai) 's gpt3 AI with [@discord](https://github.com/discord). You will be able to take the conversation with the AI into a specific channel that you created, or by pinging/replying to a bot's message.
Botator is a discord bot that binds [@openai](https://github.com/openai) 's gpt3 AI with [@discord](https://github.com/discord). You will be able to take the conversation with the AI into a specific channel that you created, or by pinging/replying to a bot's message. It can also moderate the chat with AI.
![discord com_channels_1021872219888033903_1046119234033434734](https://user-images.githubusercontent.com/75439456/204105583-2abb2d77-9404-4558-bd3e-c1a70b939758.png)
# Adding the bot to your discord server
@@ -46,8 +46,13 @@ You can always disable the bot by doing **/disable** and delete your api key fro
*/help* - Show this command list
*/moderation* - Set the AI moderation settings
*/get_toxicity* - Get the toxicity that the AI would have given to a given message
# Support me
You can support me by getting Botator premium, or donating [here](https://www.buymeacoffee.com/paillat). More information about Botator premium below:
### Why?
At the beginning, Botator was just a project between friends, but now many people are using it, so we need something to pay for our servers. Premium is also a way to support us and our work.
### Is this mandatory?
@@ -76,7 +81,7 @@ After that you will normally be able to access some new channels in our discord
- [ ] When chatgpt API is released, add that api instead of davinci-003
- [ ] Publish a GOOD docker image on dockerhub and add some more instructions about how to selfhost
- [ ] Add a log and updates channel option and a way for devs to send messages to that channel on all servers.
- [ ] Add moderation.
- [x] Add moderation.
- [ ] Add TOKENS warnings (when setting the bot up, people don't understand that it uses their tokens)
- [ ] Add a /continue command - you know
- [x] Add DateHour in prompts

View File

@@ -9,13 +9,14 @@ from config import debug, conn, c # import the debug function and the database c
#add the message content intent to the bot, aka discord.Intents.default() and discord.Intents.message_content
intents = discord.Intents.default()
intents.message_content = True
import apsw # pip install apsw. ApSW is a Python interface to SQLite 3
bot = discord.Bot(intents=intents, help_command=None) # create the bot
bot.add_cog(cogs.Setup(bot))
bot.add_cog(cogs.Settings(bot))
bot.add_cog(cogs.Help(bot))
bot.add_cog(cogs.Chat(bot))
bot.add_cog(cogs.ManageChat(bot))
bot.add_cog(cogs.Moderation(bot))
@bot.event
async def on_ready():
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="your messages to answer you"))

View File

@@ -3,3 +3,4 @@ from cogs.settings import Settings
from cogs.help import Help
from cogs.chat import Chat
from cogs.manage_chat import ManageChat
from cogs.moderation import Moderation

View File

@@ -21,6 +21,10 @@ class Help (discord.Cog) :
embed.add_field(name="/delete", value="Delete all your data from our server", inline=False)
embed.add_field(name="/cancel", value="Cancel the last message sent by the bot", inline=False)
embed.add_field(name="/default", value="Set the advanced settings to their default values", inline=False)
embed.add_field(name="/say", value="Say a message", inline=False)
embed.add_field(name="/redo", value="Redo the last message sent by the bot", inline=False)
embed.add_field(name="/moderation", value="Setup the AI auto-moderation", inline=False)
embed.add_field(name="/get_toxicity", value="Get the toxicity that the AI would have given to a given message", inline=False)
embed.add_field(name="/help", value="Show this message", inline=False)
#add a footer
embed.set_footer(text="Made by @Paillat#7777")

69
code/cogs/moderation.py Normal file
View File

@@ -0,0 +1,69 @@
import discord
from discord import default_permissions
import os
from config import debug, c, conn
import openai
import requests
import toxicity as tox #this is a file called toxicity.py, which contains the toxicity function that allows you to check if a message is toxic or not (it uses the perspective api)
class Moderation(discord.Cog):
    """AI auto-moderation cog.

    Every guild message is scored with the Perspective API (via
    ``toxicity.get_toxicity``); messages scoring >= 0.40 are deleted and
    logged, borderline ones (0.37-0.40) are flagged to the moderators.
    Per-guild settings live in the ``moderation`` table with columns
    (guild_id, logs_channel_id, is_enabled, mod_role_id).
    """

    def __init__(self, bot: discord.Bot) -> None:
        super().__init__()
        self.bot = bot

    @discord.slash_command(name="moderation", description="Enable or disable AI moderation & set the rules")
    # Fix: the keyword was misspelled ``reqired=True`` in the original option.
    @discord.option(name="enable", description="Enable or disable AI moderation", required=True)
    @discord.option(name="log_channel", description="The channel where the moderation logs will be sent", required=True)
    @discord.option(name="moderator_role", description="The role of the moderators", required=True)
    # Only server administrators can change moderation settings.
    @default_permissions(administrator=True)
    async def moderation(self, ctx: discord.ApplicationContext, enable: bool, log_channel: discord.TextChannel, moderator_role: discord.Role):
        """Create or update this guild's moderation settings and confirm ephemerally."""
        try:
            c.execute("SELECT * FROM moderation WHERE guild_id = ?", (str(ctx.guild.id),))
            data = c.fetchone()
        except Exception:  # e.g. table missing — treat as "no settings yet"
            data = None
        if data is None:
            c.execute("INSERT INTO moderation VALUES (?, ?, ?, ?)", (str(ctx.guild.id), str(log_channel.id), enable, str(moderator_role.id)))
        else:
            # Fix: also persist the (possibly changed) moderator role; the
            # original UPDATE silently dropped it.
            c.execute("UPDATE moderation SET logs_channel_id = ?, is_enabled = ?, mod_role_id = ? WHERE guild_id = ?", (str(log_channel.id), enable, str(moderator_role.id), str(ctx.guild.id)))
        conn.commit()
        await ctx.respond("Successfully updated moderation settings for this server", ephemeral=True)

    @discord.Cog.listener()
    async def on_message(self, message: discord.Message):
        """Score each incoming guild message and delete/flag it when toxic."""
        if message.author == self.bot.user:
            return
        if message.guild is None:  # DMs have no guild settings — never moderate
            return
        try:
            c.execute("SELECT * FROM moderation WHERE guild_id = ?", (str(message.guild.id),))
        except Exception:  # table missing — moderation never set up
            return
        data = c.fetchone()
        if data is None:
            # Fix: guild never ran /moderation; the original crashed here
            # indexing into None (data[1]).
            return
        if not data[2]:  # is_enabled flag — checked before any other work
            return
        moderator_role = message.guild.get_role(int(data[3]))
        # Moderators and administrators are exempt from auto-moderation.
        if moderator_role in message.author.roles:
            return
        if message.author.guild_permissions.administrator:
            return
        channel = self.bot.get_channel(int(data[1]))  # moderation log channel
        content = message.content
        message_toxicity = tox.get_toxicity(content)
        if message_toxicity >= 0.40:
            # Toxic enough to delete outright, notify the author briefly,
            # and log the full details to the moderation channel.
            await message.delete()
            embed = discord.Embed(title="Message deleted", description=f"{message.author.mention} Your message was deleted because it was too toxic. Please keep this server safe and friendly. If you think this was a mistake, please contact a moderator.", color=discord.Color.red())
            await message.channel.send(f"{message.author.mention}", embed=embed, delete_after=15)
            formatted_message_sent_date = message.created_at.strftime("%d/%m/%Y %H:%M:%S")
            embed = discord.Embed(title="Message deleted", description=f"The message \n***{content}***\n of {message.author.mention} sent in {message.channel.mention} on date **{formatted_message_sent_date}** was deleted because it was too toxic. The toxicity score was of **{message_toxicity}**", color=discord.Color.red())
            await channel.send(embed=embed)
        elif 0.37 < message_toxicity < 0.40:
            # Borderline: keep the message but alert the moderators.
            embed = discord.Embed(title="Possible toxic message", description=f"A possible [toxic message: **{content}**]({message.jump_url}) was sent by {message.author.mention} in {message.channel.mention}. Please check it out.", color=discord.Color.orange())
            await channel.send(embed=embed)
            # Mark the flagged message with an orange circle reaction.
            await message.add_reaction("🟠")
            # Ping the moderator role under the message itself.
            await message.reply(f"Hey {moderator_role.mention}, this message might be toxic. Please check it out.", mention_author=False, delete_after=15)

    @discord.slash_command(name="get_toxicity", description="Get the toxicity of a message")
    @discord.option(name="message", description="The message you want to check", required=True)
    @default_permissions(administrator=True)
    async def get_toxicity(self, ctx: discord.ApplicationContext, message: str):
        """Report the toxicity score the AI would assign to *message*."""
        toxicity = tox.get_toxicity(message)
        await ctx.respond(f"The toxicity of the message **{message}** is **{toxicity}**")

View File

@@ -1,17 +1,22 @@
"""Shared configuration: logging, environment loading, and SQLite schema."""
import logging
import sqlite3
import os

from dotenv import load_dotenv

load_dotenv()
# Perspective API key consumed by toxicity.py for AI moderation scoring.
perspective_api_key = os.getenv("PERSPECTIVE_API_KEY")
# Daily usage cap — consumed by code outside this view.
max_uses: int = 400
logging.basicConfig(level=logging.INFO)


def debug(message):
    """Log *message* at INFO level (legacy helper name kept for callers)."""
    logging.info(message)


# Main database: per-guild bot settings and moderation settings.
conn = sqlite3.connect('../database/data.db')
c = conn.cursor()
# Premium database: premium users and their extra channels.
connp = sqlite3.connect('../database/premium.db')
cp = connp.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS data (guild_id text, channel_id text, api_key text, is_active boolean, max_tokens integer, temperature real, frequency_penalty real, presence_penalty real, uses_count_today integer, prompt_size integer, prompt_prefix text, tts boolean, pretend_to_be text, pretend_enabled boolean)''')
# Fix: the obsolete moderation schema (active/rules/disabled_*) was created
# first; since CREATE TABLE IF NOT EXISTS is a no-op on an existing table,
# the newer CREATE below never took effect and the moderation cog read the
# wrong columns. The stale CREATE is removed. NOTE(review): databases created
# by the old code still carry the old schema — a one-off DROP/migration may
# be needed for those deployments.
c.execute('''CREATE TABLE IF NOT EXISTS moderation (guild_id text, logs_channel_id text, is_enabled boolean, mod_role_id text)''')
cp.execute('''CREATE TABLE IF NOT EXISTS data (user_id text, guild_id text, premium boolean)''')
cp.execute('''CREATE TABLE IF NOT EXISTS channels (guild_id text, channel0 text, channel1 text, channel2 text, channel3 text, channel4 text)''')

View File

@@ -19,7 +19,8 @@ async def process(self, message):
#c.execute("SELECT * FROM data WHERE guild_id = ?", (message.guild.id,))
#we get all the data from the database into different variables (guild_id text, channel_id text, api_key text, is_active boolean, max_tokens integer, temperature real, frequency_penalty real, presence_penalty real, uses_count_today integer, prompt_size integer, prompt_prefix text, tts boolean, pretend_to_be text, pretend_enabled boolean)
try: c.execute("SELECT * FROM data WHERE guild_id = ?", (message.guild.id,))
except: return
except:
return
channel = message.channel.id
data = c.fetchone()
guild_id = data[0]

49
code/toxicity.py Normal file
View File

@@ -0,0 +1,49 @@
import json
import re

from googleapiclient import discovery

from config import perspective_api_key

# Perspective API client (the "commentanalyzer" service) used to score
# message toxicity. static_discovery=False makes the client fetch the
# discovery document from discoveryServiceUrl at runtime.
client = discovery.build(
    "commentanalyzer",
    "v1alpha1",
    developerKey=perspective_api_key,
    discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
    static_discovery=False,
)

# Request template reused by get_toxicity(); the comment text is filled in
# on every call.
analyze_request = {
    # The text to analyze (set per call).
    'comment': {'text': ''},
    # Only the TOXICITY attribute is requested.
    'requestedAttributes': {'TOXICITY': {}},
    # Analyze text in English, French and Italian.
    'languages': ['en', 'fr', 'it'],
    # Ask Google not to store the data, for privacy / GDPR reasons
    # (https://gdpr-info.eu/).
    'doNotStore': 'true'
}
def get_toxicity(message: str):
#we first remove all kind of markdown from the message to avoid exploits
message = re.sub(r'\*([^*]+)\*', r'\1', message)
message = re.sub(r'\_([^_]+)\_', r'\1', message)
message = re.sub(r'\*\*([^*]+)\*\*', r'\1', message)
message = re.sub(r'\_\_([^_]+)\_\_', r'\1', message)
message = re.sub(r'\|\|([^|]+)\|\|', r'\1', message)
message = re.sub(r'\~([^~]+)\~', r'\1', message)
message = re.sub(r'\~\~([^~]+)\~\~', r'\1', message)
message = re.sub(r'\`([^`]+)\`', r'\1', message)
message = re.sub(r'\`\`\`([^`]+)\`\`\`', r'\1', message)
analyze_request['comment']['text'] = message
response = client.comments().analyze(body=analyze_request).execute()
return float(response['attributeScores']['TOXICITY']['summaryScore']['value'])
#test part
def test():
    """Manual smoke test: print toxicity scores for sample messages in
    English, French and Italian, plus markdown-stripping cases.

    Hits the Perspective API over the network — run by hand only (see the
    commented-out call below this function).
    """
    print(get_toxicity("Hello world"))
    print(get_toxicity("You are a stupid bot I hate you!!!"))
    print(get_toxicity("Je suis un bot stupide, je vous déteste !!!"))
    print(get_toxicity("Ciao, come state?"))
    print(get_toxicity("Siete tutti degli scemi"))
    print(get_toxicity("Siete tutti degli stupidi"))
    print(get_toxicity("Je n'aime pas les gens stupides"))
    # markdown removal test
    print(get_toxicity("You are all stupid"))
    print(get_toxicity("You are all *s*t*u*p*i*d"))
    # Fix: this case was print(print("*** you")), which printed the literal
    # string and then None instead of scoring the message.
    print(get_toxicity("*** you"))
#uncomment the following line to test the code
#test()