Akjava committed on
Commit abed9dd
1 Parent(s): ae274fc
Files changed (2)
  1. app.py +86 -60
  2. requirements.txt +4 -3
app.py CHANGED
@@ -1,71 +1,97 @@
-
- from transformers import AutoTokenizer, AutoModelForCausalLM
-
- # Load the model and tokenizer
- model_name = "Qwen/Qwen1.5-0.5B-Chat"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name)
-
-
- def generate_text(text):
-     # Tokenize the input text, including the attention mask
-     #input_ids = tokenizer(text, return_tensors="pt", padding=True)
-
-     messages = []
-     use_system_prompt = True
-     DEFAULT_SYSTEM_PROMPT = "you are a helpful assistant."
-     if use_system_prompt:
-         messages = [
-             {"role": "system", "content": DEFAULT_SYSTEM_PROMPT}
-         ]
-
-     user_messages = [
-         {"role": "user", "content": text}
-     ]
-     messages += user_messages
-
-     prompt = tokenizer.apply_chat_template(
-         conversation=messages,
-         add_generation_prompt=True,
-         tokenize=False
-     )
-
-     input_datas = tokenizer(
-         prompt,
-         add_special_tokens=True,
-         return_tensors="pt"
-     )
-
-     # Generate text, passing the attention mask
-     generated_ids = model.generate(input_ids=input_datas.input_ids, attention_mask=input_datas.attention_mask, max_length=10000)
-     #generated_ids = model.generate(input_ids=input_ids, max_length=100)
-
-     # Decode the generated tokens
-     generated_text = tokenizer.decode(generated_ids[0][input_datas.input_ids.size(1):], skip_special_tokens=True)
-
-     # Print the generated text
-     #print(generated_text)
-     return generated_text
-
- from flask import Flask, request, jsonify
-
- app = Flask(__name__)
- #app.logger.disabled = True
- #log = logging.getLogger('werkzeug')
- #log.disabled = True
-
- @app.route('/')
- def predict():
-     param_value = request.args.get('param', '')
-     # Add the model inference logic here
-     #output = pipe(messages, **generation_args)
-     #text = (output[0]['generated_text'])
-     #print("hello")
-     #result = {"prediction": "dummy_result"}
-     text = generate_text(param_value)
-     return f"{text}"
-
- if __name__ == '__main__':
-     app.run(host='0.0.0.0', port=7860)
+ import spaces
+ import os
+ import torch
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ import gradio as gr
+
+
+ huggingface_token = os.getenv("HUGGINGFACE_TOKEN")
+ if not huggingface_token:
+     print("HUGGINGFACE_TOKEN is not set; add it as a Space secret if the model requires it")
+     #raise ValueError("HUGGINGFACE_TOKEN environment variable is not set")
+
+ model_id = "Qwen/Qwen1.5-0.5B-Chat"
+
+ device = "auto"  # torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ dtype = torch.bfloat16
+
+ tokenizer = AutoTokenizer.from_pretrained(model_id, token=huggingface_token)
+
+ print(model_id, device, dtype)
+ histories = []
+ #model = None
+
+
+ def call_generate_text(prompt, system_message="You are a helpful assistant."):
+     if prompt == "":
+         print("empty prompt return")
+         return ""
+
+     global histories
+     #global model
+     #if model != None:  # and model.is_cuda:
+     #    print("Model is alive")
+     #else:
+     #    model = AutoModelForCausalLM.from_pretrained(
+     #        model_id, token=huggingface_token, torch_dtype=dtype, device_map=device
+     #    )
+
+     messages = [
+         {"role": "system", "content": system_message},
+     ]
+
+     messages += histories
+
+     user_message = {"role": "user", "content": prompt}
+
+     messages += [user_message]
+
+     try:
+         text = generate_text(messages)
+         histories += [user_message, {"role": "assistant", "content": text}]
+         #model.to("cpu")
+         return text
+     except RuntimeError as e:
+         print(f"An unexpected error occurred: {e}")
+         #model = None
+
+     return ""
+
+ iface = gr.Interface(
+     fn=call_generate_text,
+     inputs=[
+         gr.Textbox(lines=3, label="Input Prompt"),
+         gr.Textbox(lines=2, label="System Message", value="あなたは親切なアシスタントで常に日本語で返答します。"),  # "You are a helpful assistant and always reply in Japanese."
+     ],
+     outputs=gr.Textbox(label="Generated Text"),
+     title=f"{model_id}",
+     description=f"{model_id} CPU",
+ )
+ print("Initialized")
+
+ @spaces.GPU(duration=120)
+ def generate_text(messages):
+     model = AutoModelForCausalLM.from_pretrained(
+         model_id, token=huggingface_token, torch_dtype=dtype, device_map=device
+     )
+
+     # pipeline handles device placement itself, so no explicit .to(device) is needed
+     text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, torch_dtype=dtype, device_map=device)
+     result = text_generator(messages, max_new_tokens=256, do_sample=True, temperature=0.7)
+
+     generated_output = result[0]["generated_text"]
+     if isinstance(generated_output, list):
+         for message in reversed(generated_output):
+             if message.get("role") == "assistant":
+                 content = message.get("content", "No content found.")
+                 return content
+
+         return "No assistant response found."
+     else:
+         return "Unexpected output format."
+
+ if __name__ == "__main__":
+     print("Main")
+     iface.launch()
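
For reference, a minimal local sketch of the new generation path used by generate_text (CPU only, without the @spaces.GPU decorator). The checkpoint matches app.py; passing a message list straight to the text-generation pipeline assumes a recent transformers release with chat-template support.

import torch
from transformers import pipeline

# Same checkpoint as app.py; chat-style (message list) input requires a recent transformers version.
generator = pipeline("text-generation", model="Qwen/Qwen1.5-0.5B-Chat", torch_dtype=torch.float32)
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
out = generator(messages, max_new_tokens=64, do_sample=True, temperature=0.7)
# With message-list input, generated_text holds the whole conversation, ending with the assistant reply.
print(out[0]["generated_text"][-1]["content"])
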
requirements.txt CHANGED
@@ -1,5 +1,6 @@
- llama-cpp-python
- transformers
+ numpy
  torch
+ spaces
  accelerate
- flask
+ transformers
+ bitsandbytes
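
A quick sanity check for the updated dependency set (a minimal sketch; assumes the packages above, plus the gradio provided by the Spaces SDK, are installed via pip install -r requirements.txt):

# Confirm the new dependencies resolve after installation.
import numpy
import torch
import accelerate
import transformers
import bitsandbytes  # may warn on CPU-only machines
import spaces

print(numpy.__version__, torch.__version__, transformers.__version__)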