# saiga-api-lora / app.py

from flask import Flask, request, jsonify
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
import torch

app = Flask(__name__)

MODEL_NAME = "IlyaGusev/saiga2_70b_lora"
DEFAULT_MESSAGE_TEMPLATE = "<s>{role}\n{content}</s>\n"
# System prompt in Russian; in English: "You are Saiga, a Russian-language
# automated assistant. You talk to people and help them."
DEFAULT_SYSTEM_PROMPT = "Ты — Сайга, русскоязычный автоматический ассистент. Ты разговариваешь с людьми и помогаешь им."

class Conversation:
    """Accumulates chat messages and renders them into a single prompt string."""

    def __init__(
        self,
        message_template=DEFAULT_MESSAGE_TEMPLATE,
        system_prompt=DEFAULT_SYSTEM_PROMPT,
        start_token_id=1,
        bot_token_id=9225
    ):
        self.message_template = message_template
        self.start_token_id = start_token_id
        self.bot_token_id = bot_token_id
        # Every conversation starts with the system prompt.
        self.messages = [{
            "role": "system",
            "content": system_prompt
        }]

    def get_start_token_id(self):
        return self.start_token_id

    def get_bot_token_id(self):
        return self.bot_token_id

    def add_user_message(self, message):
        self.messages.append({
            "role": "user",
            "content": message
        })

    def add_bot_message(self, message):
        self.messages.append({
            "role": "bot",
            "content": message
        })

    def get_prompt(self, tokenizer):
        final_text = ""
        for message in self.messages:
            message_text = self.message_template.format(**message)
            final_text += message_text
        # Append the decoded start/bot tokens so the model continues as the bot.
        final_text += tokenizer.decode([self.start_token_id, self.bot_token_id])
        return final_text.strip()
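
# Example (illustrative sketch): building a prompt for a single user message
# with the Conversation class defined above.
#
#   conv = Conversation()
#   conv.add_user_message("Привет!")
#   prompt = conv.get_prompt(tokenizer)
#   # `prompt` contains the system and user turns rendered with
#   # DEFAULT_MESSAGE_TEMPLATE, followed by the decoded start/bot tokens
#   # that cue the model to answer as the bot.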

def generate(model, tokenizer, prompt, generation_config):
    data = tokenizer(prompt, return_tensors="pt")
    data = {k: v.to(model.device) for k, v in data.items()}
    output_ids = model.generate(
        **data,
        generation_config=generation_config
    )[0]
    # Drop the prompt tokens so only the newly generated completion remains.
    output_ids = output_ids[len(data["input_ids"][0]):]
    output = tokenizer.decode(output_ids, skip_special_tokens=True)
    return output.strip()

config = PeftConfig.from_pretrained(MODEL_NAME)

# Use GPU if available, else fall back to CPU.
# Note: load_in_8bit relies on bitsandbytes and requires a CUDA GPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map=device
)
model = PeftModel.from_pretrained(
    model,
    MODEL_NAME,
    torch_dtype=torch.float16
)
model.eval()

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
generation_config = GenerationConfig.from_pretrained(MODEL_NAME)

@app.route('/run_inference', methods=['POST'])
def run_inference():
    try:
        # Expected JSON payload: {"inputs": ["message 1", "message 2", ...]}
        data = request.json
        inputs = data.get('inputs', [])
        conversation = Conversation()
        outputs = []
        for inp in inputs:
            # Each input is appended to the same conversation; the model's
            # replies are not added back as bot messages.
            conversation.add_user_message(inp)
            prompt = conversation.get_prompt(tokenizer)
            output = generate(model, tokenizer, prompt, generation_config)
            outputs.append({'input': inp, 'output': output})
        return jsonify(outputs)
    except Exception as e:
        return jsonify({'error': str(e)}), 500

if __name__ == '__main__':
    app.run(port=7860)
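
# Example request (illustrative; assumes the server is running locally on port 7860):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:7860/run_inference",
#       json={"inputs": ["Привет! Как дела?"]},
#   )
#   print(resp.json())  # -> [{"input": "...", "output": "..."}]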