From 8cc99181c3971001e3d7e2a32fc301cddfbef3d4 Mon Sep 17 00:00:00 2001
From: Alexis LEBEL
Date: Sat, 1 Apr 2023 10:17:51 +0200
Subject: [PATCH] [VISION] Safed vision processing, not mandatory

---
 code/vision_processing.py | 95 ++++++++++++++++++++-------------------
 1 file changed, 48 insertions(+), 47 deletions(-)

diff --git a/code/vision_processing.py b/code/vision_processing.py
index 3f99303..8f0fcf0 100644
--- a/code/vision_processing.py
+++ b/code/vision_processing.py
@@ -15,52 +15,53 @@ except:
 
 
 async def process(attachment):
-    if not os.path.exists("./../database/google-vision"):
-        debug("Google Vision API is not setup, please run /setup")
-        return
-    debug("Processing image...")
-    image = vision.Image()
-    image.source.image_uri = attachment.url
-    labels = client.label_detection(image=image)
-    texts = client.text_detection(image=image)
-    objects = client.object_localization(image=image)
-    labels = labels.label_annotations
-    texts = texts.text_annotations
-    objects = objects.localized_object_annotations
-    # we take the first 4 labels and the first 4 objects
-    labels = labels[:2]
-    objects = objects[:7]
-    final = "<image!\n"
-    if len(labels) > 0:
-        final += "Labels:\n"
-        for label in labels:
-            final += label.description + ", "
-        final = final[:-2] + "\n"
-    if len(texts) > 0:
-        final += "Text:\n"
-        try:
-            final += (
-                texts[0].description + "\n"
-            )  # we take the first text, wich is the whole text in reality
-        except:
-            pass
-    if len(objects) > 0:
-        final += "Objects:\n"
-        for obj in objects:
-            final += obj.name + ", "
-        final = final[:-2] + "\n"
-    final += "!image>"
-    # we store the result in a file called attachment.key.txt in the folder ./../database/google-vision/results
-    # we create the folder if it doesn't exist
-    if not os.path.exists("./../database/google-vision/results"):
-        os.mkdir("./../database/google-vision/results")
-    # we create the file
-    with open(
-        f"./../database/google-vision/results/{attachment.id}.txt",
-        "w",
-        encoding="utf-8",
-    ) as f:
-        f.write(final)
-        f.close()
+    try:
+        debug("Processing image...")
+        image = vision.Image()
+        image.source.image_uri = attachment.url
+        labels = client.label_detection(image=image)
+        texts = client.text_detection(image=image)
+        objects = client.object_localization(image=image)
+        labels = labels.label_annotations
+        texts = texts.text_annotations
+        objects = objects.localized_object_annotations
+        # we take the first 4 labels and the first 4 objects
+        labels = labels[:2]
+        objects = objects[:7]
+        final = "<image!\n"
+        if len(labels) > 0:
+            final += "Labels:\n"
+            for label in labels:
+                final += label.description + ", "
+            final = final[:-2] + "\n"
+        if len(texts) > 0:
+            final += "Text:\n"
+            try:
+                final += (
+                    texts[0].description + "\n"
+                )  # we take the first text, wich is the whole text in reality
+            except:
+                pass
+        if len(objects) > 0:
+            final += "Objects:\n"
+            for obj in objects:
+                final += obj.name + ", "
+            final = final[:-2] + "\n"
+        final += "!image>"
+        # we store the result in a file called attachment.key.txt in the folder ./../database/google-vision/results
+        # we create the folder if it doesn't exist
+        if not os.path.exists("./../database/google-vision/results"):
+            os.mkdir("./../database/google-vision/results")
+        # we create the file
+        with open(
+            f"./../database/google-vision/results/{attachment.id}.txt",
+            "w",
+            encoding="utf-8",
+        ) as f:
+            f.write(final)
+            f.close()
 
-    return final
+        return final
+
+    except Exception as e:
+        debug("Error while processing image: " + str(e))
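
With this patch applied, process() no longer raises when a Vision API call fails: it logs the error via debug() and implicitly returns None, so image analysis becomes optional for callers. A minimal sketch of how a caller might rely on that, assuming a discord.py-style bot and that code/vision_processing.py is importable as vision_processing; the bot wiring and names below are illustrative and not part of the patch:

    # Illustrative sketch only: assumes discord.py and that vision_processing.process()
    # returns None (after logging) whenever Vision processing fails.
    import discord
    from discord.ext import commands

    import vision_processing  # the module patched above

    intents = discord.Intents.default()
    intents.message_content = True
    bot = commands.Bot(command_prefix="!", intents=intents)

    @bot.event
    async def on_message(message: discord.Message):
        if message.author.bot:
            return
        for attachment in message.attachments:
            description = await vision_processing.process(attachment)
            if description:  # vision output is optional; skip silently if processing failed
                await message.channel.send(description)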