import torch
from torch import nn
from .vision_encoder import VisionEncoder
from .configuration_moondream import MoondreamConfig
from transformers import PreTrainedModel
import re

from .modeling_phi import PhiForCausalLM
from .configuration_moondream import PhiConfig

class Moondream(PreTrainedModel):
    config_class = MoondreamConfig

    def __init__(self, config):
        super().__init__(config)
        self.vision_encoder = VisionEncoder()

        # phi_config arrives as a plain dict when the config has been
        # deserialized from JSON; rebuild a PhiConfig in that case.
        if isinstance(config.phi_config, dict):
            phi_config = PhiConfig(**config.phi_config)
        else:
            phi_config = config.phi_config
        self.text_model = PhiForCausalLM(phi_config)

    @property
    def device(self):
        return self.text_model.device

    def encode_image(self, image):
        # Project the image into the text model's embedding space.
        return self.vision_encoder(image)

    def input_embeds(self, prompt, image_embeds, tokenizer):
        """Build the input embedding sequence, splicing the image embeddings
        between <image> and </image> marker tokens in the prompt."""

        def _tokenize(txt):
            return tokenizer(
                txt, return_tensors="pt", add_special_tokens=False
            ).input_ids.to(self.device)

        text_emb = self.text_model.get_input_embeddings()

        # Add BOS token
        embeds = []
        embeds = []
        embeds.append(
            text_emb(torch.tensor([[tokenizer.bos_token_id]], device=self.device))
        )

        if "<image>" not in prompt:
            embeds.append(text_emb(_tokenize(prompt)))
        else:
            # Splice the image embeddings between the <image> and </image>
            # marker tokens; exactly one placeholder is supported per prompt.
            assert prompt.count("<image>") == 1
            before, after = prompt.split("<image>")
            embeds.append(text_emb(_tokenize(f"{before}<image>")))
            embeds.append(image_embeds.to(self.device))
            embeds.append(text_emb(_tokenize(f"</image>{after}")))

        return torch.cat(embeds, dim=1)

    def generate(
        self,
        image_embeds,
        prompt,
        tokenizer,
        eos_text="<END>",
        max_new_tokens=128,
        **kwargs,
    ):
        # Fast-tokenizer path: indexing the BatchEncoding with [0] yields an
        # Encoding object whose .ids are the raw token ids for the EOS text.
        eos_tokens = tokenizer(eos_text, add_special_tokens=False)[0].ids

        generate_config = {
            "eos_token_id": eos_tokens,
            "bos_token_id": tokenizer.bos_token_id,
            "pad_token_id": tokenizer.eos_token_id,
            "max_new_tokens": max_new_tokens,
            **kwargs,
        }

        with torch.no_grad():
            inputs_embeds = self.input_embeds(prompt, image_embeds, tokenizer)
            output_ids = self.text_model.generate(
                inputs_embeds=inputs_embeds, **generate_config
            )

        return tokenizer.batch_decode(output_ids, skip_special_tokens=True)

    def answer_question(
        self,
        image_embeds,
        question,
        tokenizer,
        chat_history="",
        result_queue=None,
        **kwargs,
    ):
        prompt = f"<image>\n\n{chat_history}Question: {question}\n\nAnswer: "
        answer = self.generate(
            image_embeds,
            prompt,
            eos_text="<END>",
            tokenizer=tokenizer,
            max_new_tokens=256,
            **kwargs,
        )[0]
        # Strip a trailing "END" or "<" left behind when generation stops
        # partway through emitting the "<END>" marker.
        cleaned_answer = re.sub("<$", "", re.sub("END$", "", answer)).strip()

        # Use the result_queue to pass the result if it is provided
        if result_queue:
            result_queue.put(cleaned_answer)
        else:
            return cleaned_answer
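

# Usage sketch (illustrative, not part of the upstream module): run this from
# a separate script, since this file uses relative imports. The checkpoint id
# "vikhyatk/moondream1", the image path, the module import path, and the use
# of AutoTokenizer are assumptions; substitute whatever tokenizer and weights
# match this architecture.
#
#   from PIL import Image
#   from transformers import AutoTokenizer
#   from moondream import Moondream  # assumed import path
#
#   model = Moondream.from_pretrained("vikhyatk/moondream1")
#   tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream1")
#   image_embeds = model.encode_image(Image.open("photo.jpg"))  # hypothetical file
#   print(model.answer_question(image_embeds, "What is in this photo?", tokenizer))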