from transformers import AutoModelForCausalLM, AutoTokenizer
from flask import Flask, request, abort
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError
from linebot.models import MessageEvent, TextMessage, TextSendMessage
import torch

# Initialize the Flask application
app = Flask(__name__)

# LINE Bot settings (obtain these values from the LINE Developers console and replace them)
LINE_CHANNEL_ACCESS_TOKEN = 'YOUR_CHANNEL_ACCESS_TOKEN'
LINE_CHANNEL_SECRET = 'YOUR_CHANNEL_SECRET'

line_bot_api = LineBotApi(LINE_CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(LINE_CHANNEL_SECRET)

# Load the Hugging Face model (the full Hub ID is rinna/youri-7b-chat)
model_name = "rinna/youri-7b-chat"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()


def generate_response(input_text):
    # Feed the user's message to the model and generate a reply
    inputs = tokenizer.encode(input_text, return_tensors='pt')
    with torch.no_grad():
        outputs = model.generate(inputs, max_new_tokens=128, num_return_sequences=1)
    # Decode only the newly generated tokens so the reply does not echo the prompt
    return tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)


@app.route("/callback", methods=['POST'])
def callback():
    # Verify the request signature and dispatch the webhook event to the handler
    signature = request.headers['X-Line-Signature']
    body = request.get_data(as_text=True)
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'OK'


@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    # Receive the text message from LINE and generate a reply with the model
    input_text = event.message.text
    response_message = generate_response(input_text)
    # Send the generated reply back to the LINE user
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text=response_message)
    )


if __name__ == "__main__":
    app.run()
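
As a quick sanity check before wiring up the webhook, the generation step can be exercised on its own. The following is a minimal sketch that assumes the script above is saved as app.py (a hypothetical filename); importing it loads the model but does not start the Flask server, because app.run() is guarded by the __main__ check.

# quick_test.py -- local smoke test of the generation step (assumes the script above is app.py)
from app import generate_response

if __name__ == "__main__":
    # Pass a sample message through the model and print the raw reply text
    print(generate_response("こんにちは、自己紹介をしてください。"))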