uhygfd committed
Commit
6b9bc4c
1 Parent(s): 5c92cb6

Upload 5 files

Files changed (5)
  1. app.py +61 -0
  2. botm.png +0 -0
  3. dialogues.txt +0 -0
  4. requirements.txt +4 -0
  5. user.png +0 -0
app.py ADDED
@@ -0,0 +1,61 @@
+ import gradio as gr
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
+ import random
+ from flask import Flask, request, jsonify
+
+ # Load the model and tokenizer
+ model_name = "ai-forever/rugpt3small_based_on_gpt2"
+ tokenizer = GPT2Tokenizer.from_pretrained(model_name)
+ model = GPT2LMHeadModel.from_pretrained(model_name)
+
+ # Load random phrases from dialogues.txt
+ with open("dialogues.txt", "r", encoding="utf-8") as file:
+     random_phrases = [line.strip() for line in file.readlines() if line.strip()]
+
+ # Response generation function (history is supplied by gr.ChatInterface and not used here)
+ def generate(prompt, history=None):
+     # Prefix the prompt with the [USER] marker so the reply can be cut at the next turn
+     user_input = f"[USER]: {prompt}"
+     inputs = tokenizer.encode(user_input, return_tensors="pt")
+     outputs = model.generate(inputs, max_new_tokens=50, num_return_sequences=1, pad_token_id=tokenizer.eos_token_id)
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     # Drop the echoed prompt and stop generation at the next [USER] line
+     response = response[len(user_input):]
+     if "[USER]" in response:
+         response = response.split("[USER]")[0]
+
+     bot_message = f"[BOT]: {random.choice(random_phrases)}, {response.strip()}"
+     return bot_message
+
+ # Chatbot widget configuration
+ mychatbot = gr.Chatbot(
+     avatar_images=["./user.png", "./botm.png"], bubble_full_width=False, show_label=False, show_copy_button=True, likeable=True
+ )
+
+ # Chat interface for the bot
+ demo = gr.ChatInterface(
+     fn=generate,
+     chatbot=mychatbot,
+     title="🤬НЕАДЕКВАТ🤬",
+     retry_btn=None,
+     undo_btn=None
+ )
+
+ # Flask application
+ app = Flask(__name__)
+
+ @app.route("/generate", methods=["POST"])
+ def generate_api():
+     data = request.json
+     if "prompt" not in data:
+         return jsonify({"error": "На что я должен отвечать, гений?"}), 400
+
+     prompt = data["prompt"]
+     response = generate(prompt)
+     return jsonify({"response": response})
+
+ # Launch the Gradio UI without blocking the process, then start the Flask API
+ if __name__ == "__main__":
+     demo.launch(share=True, prevent_thread_lock=True)
+     app.run(host="0.0.0.0", port=5000)
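
For reference, a minimal sketch of calling the Flask route defined in app.py once it is running locally; it assumes the requests package is installed (not listed in requirements.txt) and the default host/port passed to app.run:

import requests

# POST a prompt to the /generate route and print the bot's reply
resp = requests.post("http://localhost:5000/generate", json={"prompt": "Привет"})
print(resp.json()["response"])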
botm.png ADDED
dialogues.txt ADDED
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ gradio>=4.0,<5  # gr.ChatInterface and the Chatbot avatar/like options used in app.py are not available in the originally pinned 3.22.1
+ transformers==4.29.2
+ torch==1.13.1
+ flask==2.2.3
user.png ADDED