import spaces  # Hugging Face Spaces package providing the @spaces.GPU decorator

#######################
'''
Name: Phine Inference
License: MIT
'''
#######################


##### Dependencies

""" IMPORTANT: Uncomment the following line if you are in a Colab/Notebook environment """

#!pip install gradio einops accelerate bitsandbytes transformers

#####

import gradio as gr
import transformers
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

def cut_text_after_last_token(text, token):
    """Return the text after the last occurrence of `token`, or None if `token` is absent."""
    last_occurrence = text.rfind(token)
    if last_occurrence != -1:
        return text[last_occurrence + len(token):].strip()
    return None

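# Example: cut_text_after_last_token("<|prompt|>hi\n<|response|>hello", "<|response|>")
# returns "hello"; it is used below to keep only the model's final reply.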

class _SentinelTokenStoppingCriteria(transformers.StoppingCriteria):
    """Stops generation once the sentinel token sequence appears anywhere
    in the newly generated tokens (i.e. after `starting_idx`)."""

    def __init__(self, sentinel_token_ids: torch.LongTensor,
                 starting_idx: int):
        transformers.StoppingCriteria.__init__(self)
        self.sentinel_token_ids = sentinel_token_ids
        self.starting_idx = starting_idx

    def __call__(self, input_ids: torch.LongTensor,
                 _scores: torch.FloatTensor, **kwargs) -> bool:
        # **kwargs absorbs extra arguments passed by newer transformers versions.
        for sample in input_ids:
            # Ignore the prompt tokens; only scan what the model has generated.
            trimmed_sample = sample[self.starting_idx:]

            if trimmed_sample.shape[-1] < self.sentinel_token_ids.shape[-1]:
                continue

            # Slide a window of sentinel length over the generated tokens.
            for window in trimmed_sample.unfold(
                    0, self.sentinel_token_ids.shape[-1], 1):
                if torch.all(torch.eq(self.sentinel_token_ids, window)):
                    return True
        return False


model_path = 'freecs/phine-2-v0'

device = "cuda"

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, load_in_4bit=False, torch_dtype=torch.float16).to(device)  # remove .to(device) if loading in 4-bit/8-bit
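# Alternative: a 4-bit quantized load to cut VRAM usage (a sketch using
# bitsandbytes; quantized weights are placed on the GPU automatically, so no
# .to(device) call is needed, and output quality may degrade slightly):
#
# from transformers import BitsAndBytesConfig
# quant_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
# model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True, quantization_config=quant_config)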

sys_message = "You are an AI assistant named Phine developed by FreeCS.org. You are polite and smart."  # system message prepended to every prompt
@spaces.GPU
def phine(message, history, temperature, top_p, top_k, repetition_penalty):
    # gr.ChatInterface passes `history` as a list of [user, assistant] pairs;
    # replay them in the model's prompt format.
    context = ""
    for user_turn, bot_turn in history:
        context += f"\n<|prompt|>{user_turn}\n"
        context += f"<|response|>{bot_turn}"

    prompt = f"\n<|system|>{sys_message}{context}\n<|prompt|>{message}<|endoftext|>\n<|response|>"
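    # The assembled prompt has this shape:
    #
    #   <|system|>{sys_message}
    #   <|prompt|>{earlier user turn}
    #   <|response|>{earlier model reply}
    #   <|prompt|>{current message}<|endoftext|>
    #   <|response|>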
    tokenized = tokenizer(prompt, return_tensors="pt").to(device)


    # Stop generating as soon as the model emits <|endoftext|> after the prompt.
    stopping_criteria_list = transformers.StoppingCriteriaList([
        _SentinelTokenStoppingCriteria(
            sentinel_token_ids=tokenizer(
                "<|endoftext|>",
                add_special_tokens=False,
                return_tensors="pt",
            ).input_ids.to(device),
            starting_idx=tokenized.input_ids.shape[-1])
    ])

    output_ids = model.generate(**tokenized,
                                stopping_criteria=stopping_criteria_list,
                                do_sample=True,
                                max_length=2048,  # total length, prompt tokens included
                                temperature=temperature,
                                top_p=top_p,
                                top_k=top_k,
                                repetition_penalty=repetition_penalty)

    # Keep special tokens so the <|response|> marker survives for splitting.
    completion = tokenizer.decode(output_ids[0], skip_special_tokens=False)
    res = cut_text_after_last_token(completion, "<|response|>")
    if res is None:
        return ""  # no response marker found; avoid calling .replace() on None
    return res.replace('<|endoftext|>', '')

demo = gr.ChatInterface(phine,
                        additional_inputs=[
                            gr.Slider(0.1, 2.0, label="Temperature", value=0.5),
                            gr.Slider(0.1, 2.0, label="Top P", value=0.9),
                            gr.Slider(1, 500, label="Top K", value=50),
                            gr.Slider(0.1, 2.0, label="Repetition Penalty", value=1.15)
                        ])
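# The sliders are passed positionally to `phine` after (message, history),
# i.e. temperature, top_p, top_k, repetition_penalty in that order.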

if __name__ == "__main__":
    demo.queue().launch(share=True, debug=True)  # set debug=False if it causes problems