import os
import sys

# Make the local artemis package (under imageprocessing/artemis) importable
# before importing it.
sys.path.append(os.path.join(os.getcwd(), "imageprocessing", "artemis"))

import artemis
from .color_detection import get_colors
from .clip_object_recognition import get_objects_in_image
from .emotion_detection import get_all_emotions_in_image

import clip
import torch

# CLIP inference is pinned to the CPU; swap in
# "cuda" if torch.cuda.is_available() else "cpu" to use a GPU when available.
clip_device = "cpu"
clip_model, clip_preprocess = clip.load("ViT-B/32", clip_device)

# The image-to-emotion model is loaded once at import time and kept on the CPU.
emo_device = "cpu"
img2emo_model = torch.load(
    os.path.join(os.getcwd(), "imageprocessing", "img2emo.pt"),
    map_location=emo_device,
)


def extract_all_information_from_image(
    image_filepath: os.PathLike,
) -> dict:
    """Extracts objects (and probabilities), colors, and emotion from the image.

    Parameters
    ----------
    image_filepath : os.PathLike
        Path to the image

    Returns
    -------
    dict
        Dictionary with the objects, colors, and emotion from the image
    """

    # Colors detected in the image.
    colors = get_colors(image_filepath)

    # Object labels with their probabilities, as scored by CLIP.
    objects_and_probs = get_objects_in_image(
        image_filepath=image_filepath,
        model=clip_model,
        preprocess=clip_preprocess,
        device=clip_device,
    )

    # Emotion predicted for the image; the second return value is unused here.
    emotion, _ = get_all_emotions_in_image(
        filepath=image_filepath,
        model=img2emo_model,
    )

    result = {
        "colors_list": colors,
        "objects_and_probs": objects_and_probs,
        "emotion": emotion,
    }
    return result
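

# Minimal usage sketch of the module's single entry point. The image path
# below is hypothetical and only for illustration; point it at any real image
# reachable from the working directory.
if __name__ == "__main__":
    example_image = os.path.join(os.getcwd(), "imageprocessing", "example.jpg")
    info = extract_all_information_from_image(example_image)
    print("emotion:", info["emotion"])
    print("colors:", info["colors_list"])
    print("objects:", info["objects_and_probs"])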