mirror of
https://github.com/Paillat-dev/Botator.git
Added moderation
@@ -9,13 +9,14 @@ from config import debug, conn, c # import the debug function and the database c
#enable the message content intent (discord.Intents.default() plus intents.message_content)
intents = discord.Intents.default()
intents.message_content = True
import apsw # pip install apsw. APSW is a Python interface to SQLite 3
bot = discord.Bot(intents=intents, help_command=None) # create the bot
bot.add_cog(cogs.Setup(bot))
bot.add_cog(cogs.Settings(bot))
bot.add_cog(cogs.Help(bot))
bot.add_cog(cogs.Chat(bot))
bot.add_cog(cogs.ManageChat(bot))
bot.add_cog(cogs.Moderation(bot))

@bot.event
async def on_ready():
    await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name="your messages to answer you"))

@@ -2,4 +2,5 @@ from cogs.setup import Setup
from cogs.settings import Settings
from cogs.help import Help
from cogs.chat import Chat
from cogs.manage_chat import ManageChat
from cogs.moderation import Moderation
@@ -20,7 +20,11 @@ class Help (discord.Cog) :
        embed.add_field(name="/add|remove_channel", value="Add or remove a channel from the list of channels where the bot will answer. Only available on premium guilds", inline=False)
        embed.add_field(name="/delete", value="Delete all your data from our server", inline=False)
        embed.add_field(name="/cancel", value="Cancel the last message sent by the bot", inline=False)
        embed.add_field(name="/default", value="Set the advanced settings to their default values", inline=False)
        embed.add_field(name="/say", value="Say a message", inline=False)
        embed.add_field(name="/redo", value="Redo the last message sent by the bot", inline=False)
        embed.add_field(name="/moderation", value="Set up the AI auto-moderation", inline=False)
        embed.add_field(name="/get_toxicity", value="Get the toxicity score that the AI would give to a given message", inline=False)
        embed.add_field(name="/help", value="Show this message", inline=False)
        #add a footer
        embed.set_footer(text="Made by @Paillat#7777")

code/cogs/moderation.py (new file, 69 lines)
@@ -0,0 +1,69 @@
import discord
from discord import default_permissions
import os
from config import debug, c, conn
import openai
import requests
import toxicity as tox # toxicity.py wraps the Perspective API call that scores how toxic a message is

class Moderation(discord.Cog):
    def __init__(self, bot: discord.Bot) -> None:
        super().__init__()
        self.bot = bot

    @discord.slash_command(name="moderation", description="Enable or disable AI moderation & set the rules")
    @discord.option(name="enable", description="Enable or disable AI moderation", required=True)
    @discord.option(name="log_channel", description="The channel where the moderation logs will be sent", required=True)
    @discord.option(name="moderator_role", description="The role of the moderators", required=True)
    #default permissions are set to administrator, so only server administrators can use this command
    @default_permissions(administrator=True)
    async def moderation(self, ctx: discord.ApplicationContext, enable: bool, log_channel: discord.TextChannel, moderator_role: discord.Role):
        try:
            data = c.execute("SELECT * FROM moderation WHERE guild_id = ?", (str(ctx.guild.id),))
            data = c.fetchone()
        except: data = None
        if data is None:
            c.execute("INSERT INTO moderation VALUES (?, ?, ?, ?)", (str(ctx.guild.id), str(log_channel.id), enable, str(moderator_role.id)))
            conn.commit()
        else:
            c.execute("UPDATE moderation SET logs_channel_id = ?, is_enabled = ? WHERE guild_id = ?", (str(log_channel.id), enable, str(ctx.guild.id)))
            conn.commit()
        await ctx.respond("Successfully updated moderation settings for this server", ephemeral=True)

    @discord.Cog.listener()
    async def on_message(self, message: discord.Message):
        if message.author == self.bot.user: return
        try: c.execute("SELECT * FROM moderation WHERE guild_id = ?", (str(message.guild.id),))
        except: return
        data = c.fetchone()
        channel = self.bot.get_channel(int(data[1]))
        is_enabled = data[2]
        moderator_role = message.guild.get_role(int(data[3]))
        if moderator_role in message.author.roles: return #moderators are exempt from auto-moderation
        if message.author.guild_permissions.administrator: return #administrators are exempt as well
        if not is_enabled: return
        content = message.content
        message_toxicity = tox.get_toxicity(content)
        if message_toxicity >= 0.40:
            await message.delete()
            embed = discord.Embed(title="Message deleted", description=f"{message.author.mention} Your message was deleted because it was too toxic. Please keep this server safe and friendly. If you think this was a mistake, please contact a moderator.", color=discord.Color.red())
            await message.channel.send(f"{message.author.mention}", embed=embed, delete_after=15)
            formatted_message_sent_date = message.created_at.strftime("%d/%m/%Y %H:%M:%S")
            embed = discord.Embed(title="Message deleted", description=f"The message \n***{content}***\n of {message.author.mention} sent in {message.channel.mention} on **{formatted_message_sent_date}** was deleted because it was too toxic. The toxicity score was **{message_toxicity}**", color=discord.Color.red())
            await channel.send(embed=embed)
        elif 0.37 < message_toxicity < 0.40: #the message is close to the deletion threshold, so we only send a warning
            embed = discord.Embed(title="Possible toxic message", description=f"A possible [toxic message: **{content}**]({message.jump_url}) was sent by {message.author.mention} in {message.channel.mention}. Please check it out.", color=discord.Color.orange())
            await channel.send(embed=embed)
            #we also react to the message with an orange circle emoji
            await message.add_reaction("🟠")
            #and reply to the message with a ping to the moderators
            moderator_role = message.guild.get_role(int(data[3]))
            await message.reply(f"Hey {moderator_role.mention}, this message might be toxic. Please check it out.", mention_author=False, delete_after=15)
        else:
            #the message is not toxic, so we don't do anything
            pass

    @discord.slash_command(name="get_toxicity", description="Get the toxicity of a message")
    @discord.option(name="message", description="The message you want to check", required=True)
    @default_permissions(administrator=True)
    async def get_toxicity(self, ctx: discord.ApplicationContext, message: str):
        toxicity = tox.get_toxicity(message)
        await ctx.respond(f"The toxicity of the message **{message}** is **{toxicity}**")

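The listener above reads the moderation row positionally (data[1] for the log channel, data[2] for the enabled flag, data[3] for the moderator role), which is easy to mix up if the schema changes. A minimal sketch of a hypothetical helper that names those columns, matching the moderation table defined in config.py further down (guild_id, logs_channel_id, is_enabled, mod_role_id); the ModerationSettings and fetch_moderation_settings names are illustrative, not part of the commit:

```python
from typing import NamedTuple, Optional

class ModerationSettings(NamedTuple):
    guild_id: str
    logs_channel_id: str
    is_enabled: bool
    mod_role_id: str

def fetch_moderation_settings(cursor, guild_id: int) -> Optional[ModerationSettings]:
    # one row per guild; returns None when moderation was never configured
    cursor.execute(
        "SELECT guild_id, logs_channel_id, is_enabled, mod_role_id FROM moderation WHERE guild_id = ?",
        (str(guild_id),),
    )
    row = cursor.fetchone()
    return ModerationSettings(*row) if row else None
```

With such a helper the listener could write `settings.logs_channel_id` instead of `data[1]`, and the None case doubles as the "not configured" check.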
@@ -1,17 +1,22 @@
import logging
import sqlite3
from dotenv import load_dotenv
import os
load_dotenv()
perspective_api_key = os.getenv("PERSPECTIVE_API_KEY")
max_uses: int = 400
logging.basicConfig(level=logging.INFO)

def debug(message):
    logging.info(message)

conn = sqlite3.connect('../database/data.db')
c = conn.cursor()
connp = sqlite3.connect('../database/premium.db')
cp = connp.cursor()

c.execute('''CREATE TABLE IF NOT EXISTS data (guild_id text, channel_id text, api_key text, is_active boolean, max_tokens integer, temperature real, frequency_penalty real, presence_penalty real, uses_count_today integer, prompt_size integer, prompt_prefix text, tts boolean, pretend_to_be text, pretend_enabled boolean)''')
c.execute('''CREATE TABLE IF NOT EXISTS moderation (guild_id text, active boolean, rules text, disabled_channels text, disabled_roles text)''')
#we delete the moderation table because we are going to use a different database for that
#c.execute('''DROP TABLE IF EXISTS moderation''')
c.execute('''CREATE TABLE IF NOT EXISTS moderation (guild_id text, logs_channel_id text, is_enabled boolean, mod_role_id text)''')
cp.execute('''CREATE TABLE IF NOT EXISTS data (user_id text, guild_id text, premium boolean)''')
cp.execute('''CREATE TABLE IF NOT EXISTS channels (guild_id text, channel0 text, channel1 text, channel2 text, channel3 text, channel4 text)''')

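The /moderation command in moderation.py does a SELECT followed by either an INSERT or an UPDATE. If the moderation table declared guild_id as a primary key (which the schema above does not, so this is an assumption), SQLite's UPSERT syntax (available since SQLite 3.24) could collapse that into one statement. A minimal sketch under that assumption; the upsert_moderation name is illustrative:

```python
import sqlite3

conn = sqlite3.connect('../database/data.db')
c = conn.cursor()

# assumed schema: guild_id is a PRIMARY KEY, unlike the table created above
c.execute('''CREATE TABLE IF NOT EXISTS moderation
             (guild_id text PRIMARY KEY, logs_channel_id text, is_enabled boolean, mod_role_id text)''')

def upsert_moderation(guild_id: str, logs_channel_id: str, is_enabled: bool, mod_role_id: str) -> None:
    # insert a new row, or update the existing row for this guild, in a single statement
    c.execute(
        """INSERT INTO moderation (guild_id, logs_channel_id, is_enabled, mod_role_id)
           VALUES (?, ?, ?, ?)
           ON CONFLICT(guild_id) DO UPDATE SET
               logs_channel_id = excluded.logs_channel_id,
               is_enabled = excluded.is_enabled,
               mod_role_id = excluded.mod_role_id""",
        (guild_id, logs_channel_id, is_enabled, mod_role_id),
    )
    conn.commit()
```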
@@ -19,10 +19,11 @@ async def process(self, message):
        #c.execute("SELECT * FROM data WHERE guild_id = ?", (message.guild.id,))
        #we get all the data from the database into separate variables (guild_id text, channel_id text, api_key text, is_active boolean, max_tokens integer, temperature real, frequency_penalty real, presence_penalty real, uses_count_today integer, prompt_size integer, prompt_prefix text, tts boolean, pretend_to_be text, pretend_enabled boolean)
        try: c.execute("SELECT * FROM data WHERE guild_id = ?", (message.guild.id,))
        except:
            return
        channel = message.channel.id
        data = c.fetchone()
        guild_id = data[0]
        channel_id = data[1]
        api_key = data[2]
        is_active = data[3]
@@ -40,7 +41,7 @@ async def process(self, message):
        try: premium = cp.fetchone()[2]
        except: premium = 0
        channels = []
        try:
            cp.execute("SELECT * FROM channels WHERE guild_id = ?", (message.guild.id,))
            if premium: channels = cp.fetchone()[1:]
        except: channels = []

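The channels table created in config.py flattens up to five premium channels into the columns channel0 through channel4, and the code above slices the fetched row with [1:], keeping any empty columns. A hypothetical helper that turns such a row into a clean list of channel ids (the function name is illustrative):

```python
from typing import Optional, Sequence

def premium_channel_ids(row: Optional[Sequence]) -> list:
    # row layout: (guild_id, channel0, channel1, channel2, channel3, channel4)
    if row is None:
        return []
    # drop columns that are NULL or empty strings
    return [channel_id for channel_id in row[1:] if channel_id]
```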
code/toxicity.py (new file, 49 lines)
@@ -0,0 +1,49 @@
from googleapiclient import discovery
from config import perspective_api_key
import json
import re

client = discovery.build("commentanalyzer",
    "v1alpha1",
    developerKey=perspective_api_key,
    discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1",
    static_discovery=False,
)

analyze_request = {
    'comment': {'text': ''}, # the text to analyze
    'requestedAttributes': {'TOXICITY': {}}, # requested attributes
    #we analyze the text in English, French & Italian
    'languages': ['en', 'fr', 'it'],
    'doNotStore': 'true' # we don't want Google to store the data, for privacy reasons and the GDPR (General Data Protection Regulation, https://gdpr-info.eu/)
}

def get_toxicity(message: str):
    #we first remove all kinds of Markdown formatting from the message to avoid exploits
    message = re.sub(r'\*([^*]+)\*', r'\1', message)
    message = re.sub(r'\_([^_]+)\_', r'\1', message)
    message = re.sub(r'\*\*([^*]+)\*\*', r'\1', message)
    message = re.sub(r'\_\_([^_]+)\_\_', r'\1', message)
    message = re.sub(r'\|\|([^|]+)\|\|', r'\1', message)
    message = re.sub(r'\~([^~]+)\~', r'\1', message)
    message = re.sub(r'\~\~([^~]+)\~\~', r'\1', message)
    message = re.sub(r'\`([^`]+)\`', r'\1', message)
    message = re.sub(r'\`\`\`([^`]+)\`\`\`', r'\1', message)
    analyze_request['comment']['text'] = message
    response = client.comments().analyze(body=analyze_request).execute()
    return float(response['attributeScores']['TOXICITY']['summaryScore']['value'])

#test part
def test():
    print(get_toxicity("Hello world"))
    print(get_toxicity("You are a stupid bot I hate you!!!"))
    print(get_toxicity("Je suis un bot stupide, je vous déteste !!!")) # French: "I am a stupid bot, I hate you!!!"
    print(get_toxicity("Ciao, come state?")) # Italian: "Hi, how are you all?"
    print(get_toxicity("Siete tutti degli scemi")) # Italian: "You are all fools"
    print(get_toxicity("Siete tutti degli stupidi")) # Italian: "You are all stupid"
    print(get_toxicity("Je n'aime pas les gens stupides")) # French: "I don't like stupid people"
    #markdown removal test
    print(get_toxicity("You are all stupid"))
    print(get_toxicity("You are all *s*t*u*p*i*d"))
    print(get_toxicity("*** you"))

#uncomment the following line to test the code
#test()
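get_toxicity issues one Perspective API request per message with no error handling, so if the API is unreachable or the quota is exhausted, googleapiclient raises an HttpError and the on_message listener in moderation.py would fail on that message. A minimal defensive wrapper, assuming the caller prefers to treat an unavailable score as neutral; the wrapper name and the 0.0 fallback are assumptions, not part of the commit:

```python
from googleapiclient.errors import HttpError

def get_toxicity_or_zero(message: str) -> float:
    # fall back to a neutral score when the Perspective API call fails,
    # so a single outage does not break message handling
    try:
        return get_toxicity(message)
    except HttpError:
        return 0.0
```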