refactor(moderation.py): remove commented out code in on_message() method

This commit is contained in:
Paillat
2023-04-08 17:49:33 +02:00
parent 9465e33295
commit 048905d682

View File

@@ -82,96 +82,13 @@ class Moderation(discord.Cog):
ephemeral=True,
)
if enable == False:
curs_data.execute(
"DELETE FROM moderation WHERE guild_id = ?", (str(ctx.guild.id),)
)
con_data.commit()
await ctx.respond("Moderation disabled!", ephemeral=True)
return
# Moderation has been moved to a new bot..
# @discord.Cog.listener()
# async def on_message(self, message: discord.Message):
# if message.author == self.bot.user:
# return
# try:
# curs_data.execute(
# "SELECT * FROM moderation WHERE guild_id = ?", (str(message.guild.id),)
# )
# except:
# return
# data = curs_data.fetchone()
# if data is None:
# return
# channel = self.bot.get_channel(int(data[1]))
# is_enabled = data[2]
# moderator_role = message.guild.get_role(int(data[3]))
# # we also do that with the manage_messages permission, so the moderators can't be moderated
# if message.author.guild_permissions.manage_messages:
# return # if the user is a moderator, we don't want to moderate him because he is allowed to say whatever he wants because he is just like a dictator
# if message.author.guild_permissions.administrator:
# return # if the user is an administrator, we don't want to moderate him because he is allowed to say whatever he wants because he is a DICTATOR
# if not is_enabled:
# return
# content = message.content
# message_toxicity = tox.get_toxicity(content)
# reasons_to_delete = []
# reasons_to_suspicous = []
# for i in message_toxicity:
# if i >= float(data[message_toxicity.index(i) + 4]):
# reasons_to_delete.append(tox.toxicity_names[message_toxicity.index(i)])
# for i in message_toxicity:
# if (
# float(data[message_toxicity.index(i) + 4] - 0.1)
# <= i
# < float(data[message_toxicity.index(i) + 4])
# ):
# reasons_to_suspicous.append(
# tox.toxicity_names[message_toxicity.index(i)]
# )
# if len(reasons_to_delete) > 0:
# embed = discord.Embed(
# title="Message deleted",
# description=f"Your message was deleted because it was too toxic. The following reasons were found: **{'**, **'.join(reasons_to_delete)}**",
# color=discord.Color.red(),
# )
# await message.reply(
# f"{message.author.mention}", embed=embed, delete_after=15
# )
# await message.delete()
# embed = discord.Embed(
# title="Message deleted",
# description=f"**{message.author}**'s message ***{content}*** was deleted because it was too toxic. The following reasons were found:",
# color=discord.Color.red(),
# )
# for i in reasons_to_delete:
# toxicity_value = message_toxicity[tox.toxicity_names.index(i)]
# embed.add_field(
# name=i,
# value=f"Found toxicity value: **{toxicity_value*100}%**",
# inline=False,
# )
# await channel.send(embed=embed)
# elif len(reasons_to_suspicous) > 0:
# await message.reply(
# f"{moderator_role.mention} This message might be toxic. The following reasons were found: **{'**, **'.join(reasons_to_suspicous)}**",
# delete_after=15,
# mention_author=False,
# )
# embed = discord.Embed(
# title="Message suspicious",
# description=f"**{message.author}**'s message [***{content}***]({message.jump_url}) might be toxic. The following reasons were found:",
# color=discord.Color.orange(),
# )
# for i in reasons_to_suspicous:
# toxicity_value = message_toxicity[tox.toxicity_names.index(i)]
# embed.add_field(
# name=i,
# value=f"Found toxicity value: **{toxicity_value*100}%**",
# inline=False,
# )
# await channel.send(embed=embed)
# # we add a reaction to the message so the moderators can easily find it orange circle emoji
# await message.add_reaction("🟠")
@discord.slash_command(
name="get_toxicity", description="Get the toxicity of a message"
)