Added some moderation features
@@ -13,18 +13,62 @@ class Moderation (discord.Cog):
@discord.option(name="enable", description="Enable or disable AI moderation", required=True)
@discord.option(name="log_channel", description="The channel where the moderation logs will be sent", required=True)
@discord.option(name="moderator_role", description="The role of the moderators", required=True)
#the types of toxicity are 'requestedAttributes': {'TOXICITY': {}, 'SEVERE_TOXICITY': {}, 'IDENTITY_ATTACK': {}, 'INSULT': {}, 'PROFANITY': {}, 'THREAT': {}, 'SEXUALLY_EXPLICIT': {}, 'FLIRTATION': {}, 'OBSCENE': {}, 'SPAM': {}},
@discord.option(name="toxicity", description="The toxicity threshold", required=False)
@discord.option(name="severe_toxicity", description="The severe toxicity threshold", required=False)
@discord.option(name="identity_attack", description="The identity attack threshold", required=False)
@discord.option(name="insult", description="The insult threshold", required=False)
@discord.option(name="profanity", description="The profanity threshold", required=False)
@discord.option(name="threat", description="The threat threshold", required=False)
@discord.option(name="sexually_explicit", description="The sexually explicit threshold", required=False)
@discord.option(name="flirtation", description="The flirtation threshold", required=False)
@discord.option(name="obscene", description="The obscene threshold", required=False)
@discord.option(name="spam", description="The spam threshold", required=False)
#we set the default permissions to the administrator permission, so only the server administrators can use this command
@default_permissions(administrator=True)
async def moderation(self, ctx: discord.ApplicationContext, enable: bool, log_channel: discord.TextChannel, moderator_role: discord.Role):
async def moderation(self, ctx: discord.ApplicationContext, enable: bool, log_channel: discord.TextChannel, moderator_role: discord.Role, toxicity: float = None, severe_toxicity: float = None, identity_attack: float = None, insult: float = None, profanity: float = None, threat: float = None, sexually_explicit: float = None, flirtation: float = None, obscene: float = None, spam: float = None):
try:
data = c.execute("SELECT * FROM moderation WHERE guild_id = ?", (str(ctx.guild.id),))
data = c.fetchone()
except: data = None
if data is None:
c.execute("INSERT INTO moderation VALUES (?, ?, ?, ?)", (str(ctx.guild.id), str(log_channel.id), enable, str(moderator_role.id)))
#first we check if any of the values is none. If it's none, we set it to 0.40
if toxicity is None: toxicity = 0.40
if severe_toxicity is None: severe_toxicity = 0.40
if identity_attack is None: identity_attack = 0.40
if insult is None: insult = 0.40
if profanity is None: profanity = 0.40
if threat is None: threat = 0.40
if sexually_explicit is None: sexually_explicit = 0.40
if flirtation is None: flirtation = 0.40
if obscene is None: obscene = 0.40
if spam is None: spam = 0.40
c.execute("INSERT INTO moderation VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", (str(ctx.guild.id), str(log_channel.id), enable, str(moderator_role.id), toxicity, severe_toxicity, identity_attack, insult, profanity, threat, sexually_explicit, flirtation, obscene, spam))
conn.commit()
await ctx.respond(content="Moderation has been enabled!", ephemeral=True)
else:
c.execute("UPDATE moderation SET logs_channel_id = ?, is_enabled = ? WHERE guild_id = ?", (str(log_channel.id), enable, str(ctx.guild.id)))
#for each value we check if it's none. If it's none and there's no value in the database, we set it to 0.40, otherwise we set it to the value in the database
if toxicity is None and data[4] is not None: toxicity = data[4]
elif toxicity is None and data[4] is None: toxicity = 0.40
if severe_toxicity is None and data[5] is not None: severe_toxicity = data[5]
elif severe_toxicity is None and data[5] is None: severe_toxicity = 0.40
if identity_attack is None and data[6] is not None: identity_attack = data[6]
elif identity_attack is None and data[6] is None: identity_attack = 0.40
if insult is None and data[7] is not None: insult = data[7]
elif insult is None and data[7] is None: insult = 0.40
if profanity is None and data[8] is not None: profanity = data[8]
elif profanity is None and data[8] is None: profanity = 0.40
if threat is None and data[9] is not None: threat = data[9]
elif threat is None and data[9] is None: threat = 0.40
if sexually_explicit is None and data[10] is not None: sexually_explicit = data[10]
elif sexually_explicit is None and data[10] is None: sexually_explicit = 0.40
if flirtation is None and data[11] is not None: flirtation = data[11]
elif flirtation is None and data[11] is None: flirtation = 0.40
if obscene is None and data[12] is not None: obscene = data[12]
elif obscene is None and data[12] is None: obscene = 0.40
if spam is None and data[13] is not None: spam = data[13]
elif spam is None and data[13] is None: spam = 0.40
c.execute("UPDATE moderation SET logs_channel_id = ?, is_enabled = ?, mod_role_id = ?, toxicity = ?, severe_toxicity = ?, identity_attack = ?, insult = ?, profanity = ?, threat = ?, sexually_explicit = ?, flirtation = ?, obscene = ?, spam = ? WHERE guild_id = ?", (str(log_channel.id), enable, str(moderator_role.id), toxicity, severe_toxicity, identity_attack, insult, profanity, threat, sexually_explicit, flirtation, obscene, spam, str(ctx.guild.id)))
conn.commit()
await ctx.respond("Successfully updated moderation settings for this server", ephemeral=True)
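For readers following the positional reads `data[4]` through `data[13]` used by this command and by the checks further down, here is a minimal sketch of what the `moderation` table could look like, inferred from the column names in the UPDATE statement and the value order in the INSERT. The actual CREATE TABLE lives elsewhere in the repo; the column types and the database path below are assumptions.

# Sketch only: an assumed schema for the `moderation` table, inferred from this commit.
# Column order matters because the cog reads the thresholds positionally as data[4]..data[13].
import sqlite3

conn = sqlite3.connect("database.db")  # hypothetical path; the real bot opens its own connection
c = conn.cursor()
c.execute(
    """
    CREATE TABLE IF NOT EXISTS moderation (
        guild_id TEXT PRIMARY KEY,   -- data[0]
        logs_channel_id TEXT,        -- data[1]
        is_enabled BOOLEAN,          -- data[2]
        mod_role_id TEXT,            -- data[3]
        toxicity REAL,               -- data[4]
        severe_toxicity REAL,        -- data[5]
        identity_attack REAL,        -- data[6]
        insult REAL,                 -- data[7]
        profanity REAL,              -- data[8]
        threat REAL,                 -- data[9]
        sexually_explicit REAL,      -- data[10]
        flirtation REAL,             -- data[11]
        obscene REAL,                -- data[12]
        spam REAL                    -- data[13]
    )
    """
)
conn.commit()

A dict of defaults keyed by attribute name could replace the ten separate None checks above, but the repetition mirrors what the commit actually does.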
@@ -44,28 +88,58 @@ class Moderation (discord.Cog):
if not is_enabled: return
content = message.content
message_toxicity = tox.get_toxicity(content)
if message_toxicity >= 0.40:
reasons_to_delete = []
reasons_to_suspicous = []
for i in message_toxicity:
if i >= float(data[message_toxicity.index(i)+4]): reasons_to_delete.append(tox.toxicity_names[message_toxicity.index(i)])
for i in message_toxicity:
if float(data[message_toxicity.index(i)+4]-0.1) <= i < float(data[message_toxicity.index(i)+4]): reasons_to_suspicous.append(tox.toxicity_names[message_toxicity.index(i)])
if len(reasons_to_delete) > 0:
embed = discord.Embed(title="Message deleted", description=f"Your message was deleted because it was too toxic. The following reasons were found: **{'**, **'.join(reasons_to_delete)}**", color=discord.Color.red())
await message.reply(f"{message.author.mention}", embed=embed, delete_after=15)
await message.delete()
embed = discord.Embed(title="Message deleted", description=f"{message.author.mention} Your message was deleted because it was too toxic. Please keep this server safe and friendly. If you think this was a mistake, please contact a moderator.", color=discord.Color.red())
await message.channel.send(f"{message.author.mention}", embed=embed, delete_after=15)
formatted_message_sent_date = message.created_at.strftime("%d/%m/%Y %H:%M:%S")
embed = discord.Embed(title="Message deleted", description=f"The message \n***{content}***\n of {message.author.mention} sent in {message.channel.mention} on date **{formatted_message_sent_date}** was deleted because it was too toxic. The toxicity score was **{message_toxicity*100}%**", color=discord.Color.red())
embed = discord.Embed(title="Message deleted", description=f"**{message.author}**'s message ***{content}*** was deleted because it was too toxic. The following reasons were found:", color=discord.Color.red())
for i in reasons_to_delete:
toxicity_value = message_toxicity[tox.toxicity_names.index(i)]
embed.add_field(name=i, value=f"Found toxicity value: **{toxicity_value*100}%**", inline=False)
await channel.send(embed=embed)
elif 0.37 < message_toxicity < 0.40: #if the message is not toxic, but it is close to being toxic, we send a warning
embed = discord.Embed(title="Possible toxic message", description=f"A possible [toxic message: **{content}**]({message.jump_url}) was sent by {message.author.mention} in {message.channel.mention}. Please check it out.", color=discord.Color.orange())
elif len(reasons_to_suspicous) > 0:
await message.reply(f"{moderator_role.mention} This message might be toxic. The following reasons were found: **{'**, **'.join(reasons_to_suspicous)}**", delete_after=15, mention_author=False)
embed = discord.Embed(title="Message suspicious", description=f"**{message.author}**'s message [***{content}***]({message.jump_url}) might be toxic. The following reasons were found:", color=discord.Color.orange())
for i in reasons_to_suspicous:
toxicity_value = message_toxicity[tox.toxicity_names.index(i)]
embed.add_field(name=i, value=f"Found toxicity value: **{toxicity_value*100}%**", inline=False)
await channel.send(embed=embed)
#we also react with an orange circle emoji to the message
#we add an orange circle reaction to the message so the moderators can easily find it
await message.add_reaction("🟠")
#we reply to the message with a ping to the moderators
moderator_role = message.guild.get_role(int(data[3]))
await message.reply(f"Hey {moderator_role.mention}, this message might be toxic. Please check it out.", mention_author=False, delete_after=15)
else:
#the message is not toxic, so we don't do anything
pass
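The `tox.get_toxicity` helper is not part of this diff. Judging by the 'requestedAttributes' comment in the first hunk and the way scores are matched against `tox.toxicity_names` by index, it plausibly wraps Google's Perspective API and returns one score per attribute in a fixed order. A rough sketch under those assumptions follows; the endpoint and response shape are the standard Perspective `comments:analyze` REST call, while the helper name, key handling, and return format are guesses about the repo's tox module.

# Sketch, not the repo's actual tox module. Assumes a Perspective API key and that
# get_toxicity returns scores in the same order as toxicity_names, which is how
# the moderation cog indexes them.
import requests

PERSPECTIVE_API_KEY = "..."  # hypothetical; the real bot loads its key elsewhere
toxicity_names = ["TOXICITY", "SEVERE_TOXICITY", "IDENTITY_ATTACK", "INSULT",
                  "PROFANITY", "THREAT", "SEXUALLY_EXPLICIT", "FLIRTATION",
                  "OBSCENE", "SPAM"]

def get_toxicity(text: str) -> list[float]:
    # Request one summary score per attribute and return them in toxicity_names order.
    body = {
        "comment": {"text": text},
        "requestedAttributes": {name: {} for name in toxicity_names},
    }
    r = requests.post(
        "https://commentanalyzer.googleapis.com/v1alpha1/comments:analyze",
        params={"key": PERSPECTIVE_API_KEY},
        json=body,
        timeout=10,
    )
    r.raise_for_status()
    scores = r.json()["attributeScores"]
    return [scores[name]["summaryScore"]["value"] for name in toxicity_names]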
@discord.slash_command(name="get_toxicity", description="Get the toxicity of a message")
@discord.option(name="message", description="The message you want to check", required=True)
@default_permissions(administrator=True)
async def get_toxicity(self, ctx: discord.ApplicationContext, message: str):
toxicity = tox.get_toxicity(message)
await ctx.respond(f"The toxicity of the message **{message}** is **{toxicity*100}%**")
response = tox.get_toxicity(message)
# try: toxicity, severe_toxicity, identity_attack, insult, profanity, threat, sexually_explicit, flirtation, obscene, spam = response
# except: toxicity, severe_toxicity, identity_attack, insult, profanity, threat = response
would_have_been_deleted = []
would_have_been_suspicous = []
c.execute("SELECT * FROM moderation WHERE guild_id = ?", (str(ctx.guild.id),))
data = c.fetchone()
for i in response:
if i >= float(data[response.index(i)+4]):
would_have_been_deleted.append(tox.toxicity_names[response.index(i)])
elif i >= float(data[response.index(i)+4])-0.1:
would_have_been_suspicous.append(tox.toxicity_names[response.index(i)])
if would_have_been_deleted !=[]: embed = discord.Embed(title="Toxicity", description=f"Here are the different toxicity scores of the message\n***{message}***", color=discord.Color.red())
elif would_have_been_suspicous !=[] and would_have_been_deleted ==[]: embed = discord.Embed(title="Toxicity", description=f"Here are the different toxicity scores of the message\n***{message}***", color=discord.Color.orange())
else: embed = discord.Embed(title="Toxicity", description=f"Here are the different toxicity scores of the message\n***{message}***", color=discord.Color.green())
for i in response: embed.add_field(name=tox.toxicity_names[response.index(i)], value=f"{str(float(i)*100)}%", inline=False)
if would_have_been_deleted != []: embed.add_field(name="Would have been deleted", value=f"Yes, the message would have been deleted because of the following toxicity scores: **{'**, **'.join(would_have_been_deleted)}**", inline=False)
if would_have_been_suspicous != [] and would_have_been_deleted == []: embed.add_field(name="Would have been marked as suspicious", value=f"Yes, the message would have been marked as suspicious because of the following toxicity scores: {', '.join(would_have_been_suspicous)}", inline=False)
await ctx.respond(embed=embed)
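The on_message handler and /get_toxicity above apply the same rule twice: a score at or above its per-guild threshold flags the message for deletion, and a score within 0.1 below the threshold marks it as suspicious. A small helper like the hypothetical one below (not in the repo) could express that rule once, reading the thresholds positionally from the database row as the code above does.

# Hypothetical refactor sketch: classify the per-attribute scores against the
# thresholds stored in the guild's moderation row (thresholds start at column 4).
def classify_scores(scores: list[float], row: tuple) -> tuple[list[str], list[str]]:
    to_delete, suspicious = [], []
    for idx, score in enumerate(scores):
        threshold = float(row[idx + 4])
        if score >= threshold:
            to_delete.append(toxicity_names[idx])   # assumes the toxicity_names list from tox
        elif score >= threshold - 0.1:
            suspicious.append(toxicity_names[idx])
    return to_delete, suspicious

# Usage, mirroring on_message: reasons_to_delete, reasons_to_suspicous = classify_scores(message_toxicity, data)

Iterating with enumerate also sidesteps the list.index(i) lookups in the original, which can pick the wrong attribute when two scores happen to be equal.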
@discord.slash_command(name="moderation_help", description="Get help with the moderation AI")
async def moderation_help(self, ctx: discord.ApplicationContext):
embed = discord.Embed(title="Moderation AI help", description="Here is a list of all the moderation commands", color=discord.Color.blurple())
for definition in tox.toxicity_definitions:
embed.add_field(name=tox.toxicity_names[tox.toxicity_definitions.index(definition)], value=definition, inline=False)
await ctx.respond(embed=embed, ephemeral=True)