QAway-to committed
Commit b6185eb · 1 parent: 3462c0f

Back to normal app.py v1.1

Files changed (2)
  1. app.py +36 -38
  2. core/interviewer.py +27 -74
app.py CHANGED
@@ -1,58 +1,56 @@
- import asyncio
  import gradio as gr
  from core.utils import generate_first_question
  from core.mbti_analyzer import analyze_mbti
- from core.interviewer import generate_next_question

-
- # ---- Adapter ----
- def analyze_and_ask_sync(user_text, prev_count, user_id="default_user"):
-     """Synchronous adapter for Gradio."""
-     return asyncio.run(analyze_and_ask(user_text, prev_count, user_id))
-
-
- # ---- Async logic ----
- async def analyze_and_ask(user_text, prev_count, user_id="default_user"):
      if not user_text.strip():
-         return "⚠️ Please enter your answer.", "", prev_count

      try:
          n = int(prev_count.split("/")[0]) + 1
      except Exception:
          n = 1
-     counter = f"{n}/16"

      mbti_text = ""
-     for part in analyze_mbti(user_text):
-         mbti_text = part
-
-     next_q_data = await generate_next_question(user_id, user_text)
-     if next_q_data["completed"]:
-         next_question = "Interview finished! All 16 personality categories covered."
-     else:
-         next_question = f"({next_q_data['category']}) {next_q_data['question']}"
-
-     return mbti_text, next_question, counter
-
-
- # ---- Gradio UI ----
- with gr.Blocks(theme=gr.themes.Soft(), title="MBTI Interviewer AI") as demo:
-     gr.Markdown("## 🧠 MBTI Personality Interviewer\nAnalysis and questions across the 16 MBTI categories.")

      with gr.Row():
          with gr.Column(scale=1):
-             inp = gr.Textbox(label="Your answer", placeholder="For example: I enjoy organizing group projects.", lines=4)
-             btn = gr.Button("Submit", variant="primary")
          with gr.Column(scale=1):
              mbti_out = gr.Textbox(label="📊 MBTI Analysis", lines=4)
-             interviewer_out = gr.Textbox(label="💬 Next question", lines=3)
-             progress = gr.Textbox(label="⏳ Progress", value="0/16")

-     # Call the adapter here
-     btn.click(analyze_and_ask_sync, inputs=[inp, progress], outputs=[mbti_out, interviewer_out, progress])

- demo.load(lambda: ("", generate_first_question(), "0/16"),
-           inputs=None, outputs=[mbti_out, interviewer_out, progress])

- if __name__ == "__main__":
-     demo.launch()
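For reference, the removed version bridged Gradio's synchronous callback to the async coroutine with asyncio.run(). A stripped-down, self-contained illustration of that adapter pattern (names here are illustrative, not from the repo):

    # Stripped-down illustration of the removed sync-over-async adapter.
    import asyncio

    async def slow_answer(text):
        await asyncio.sleep(0.1)   # stands in for an awaited model call
        return f"analysis of: {text}"

    def slow_answer_sync(text):
        # asyncio.run() spins up a fresh event loop per call, which is
        # what the removed analyze_and_ask_sync did for Gradio.
        return asyncio.run(slow_answer(text))

    print(slow_answer_sync("I enjoy organizing group projects."))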
 
+ # app.py
  import gradio as gr
  from core.utils import generate_first_question
  from core.mbti_analyzer import analyze_mbti
+ from core.interviewer import generate_question

+ def analyze_and_ask(user_text, prev_count):
+     """Step-by-step streaming generator, without async and without streaming=True."""
      if not user_text.strip():
+         yield "⚠️ Please enter your answer.", "", prev_count
+         return

      try:
          n = int(prev_count.split("/")[0]) + 1
      except Exception:
          n = 1
+     counter = f"{n}/30"

+     # 1️⃣ Step 1: analysis
+     mbti_gen = analyze_mbti(user_text)
      mbti_text = ""
+     for chunk in mbti_gen:
+         mbti_text = chunk
+         yield mbti_text, "💭 Interviewer is thinking...", counter
+
+     # 2️⃣ Step 2: the next question
+     interviewer_gen = generate_question("default_user", user_text)
+     next_q = ""
+     for chunk in interviewer_gen:
+         next_q = chunk
+         yield mbti_text, next_q, counter
+
+ # --------------------------------------------------------------
+ # Gradio interface
+ # --------------------------------------------------------------
+ with gr.Blocks(theme=gr.themes.Soft(), title="MBTI Personality Interviewer") as demo:
+     gr.Markdown("## 🧠 MBTI Personality Interviewer\nIdentify your personality type and get the interviewer's next question.")

      with gr.Row():
          with gr.Column(scale=1):
+             inp = gr.Textbox(
+                 label="Your answer",
+                 placeholder="For example: I enjoy working with people and organizing events.",
+                 lines=4
+             )
+             btn = gr.Button("Analyze and ask a new question", variant="primary")
          with gr.Column(scale=1):
              mbti_out = gr.Textbox(label="📊 MBTI Analysis", lines=4)
+             interviewer_out = gr.Textbox(label="💬 Next question from the interviewer", lines=3)
+             progress = gr.Textbox(label="⏳ Progress", value="0/30")

+     btn.click(analyze_and_ask, inputs=[inp, progress], outputs=[mbti_out, interviewer_out, progress])

+ demo.load(lambda: ("", generate_first_question(), "0/30"), inputs=None, outputs=[mbti_out, interviewer_out, progress])

+ demo.queue(max_size=20).launch(server_name="0.0.0.0", server_port=7860)
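Aside: the new analyze_and_ask is a plain Python generator, so Gradio streams each yielded (mbti_text, next_question, counter) tuple into the three outputs; this relies on the app being launched through demo.queue(). The loop also assumes that core.mbti_analyzer.analyze_mbti (not shown in this commit) is itself a generator that yields progressively longer analysis text. A minimal stand-in illustrating that assumed contract, with a hypothetical body:

    # Hypothetical stand-in for core.mbti_analyzer.analyze_mbti; the real
    # module is not part of this diff. It only illustrates the assumed
    # contract: a generator yielding the running analysis text.
    import time

    def analyze_mbti(user_text):
        partial = ""
        for word in "Tentative read: extraverted, structured, people-oriented.".split():
            partial = (partial + " " + word).strip()
            time.sleep(0.05)  # simulate incremental generation
            yield partial

    # analyze_and_ask re-yields each chunk, so the UI updates live:
    for chunk in analyze_mbti("I enjoy organizing group projects."):
        print(chunk)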
 
core/interviewer.py CHANGED
@@ -1,102 +1,55 @@
  # core/interviewer.py
- import asyncio, random, uuid
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

- # Use Phi-3-mini-instruct
- INTERVIEWER_MODEL = "microsoft/Phi-3-mini-4k-instruct"

  tokenizer = AutoTokenizer.from_pretrained(INTERVIEWER_MODEL)
- model = AutoModelForCausalLM.from_pretrained(
-     INTERVIEWER_MODEL,
-     torch_dtype="auto",
-     device_map="auto"
- )

  llm_pipe = pipeline(
      "text-generation",
      model=model,
      tokenizer=tokenizer,
-     max_new_tokens=80,
      temperature=0.7,
      top_p=0.9,
  )

- # Per-user memory
- user_sessions = {}
-
- # The 16 MBTI categories (can be adapted to your .json files)
- MBTI_CATEGORIES = [
-     "ENFJ", "ENFP", "ENTJ", "ENTP",
-     "ESFJ", "ESFP", "ESTJ", "ESTP",
-     "INFJ", "INFP", "INTJ", "INTP",
-     "ISFJ", "ISFP", "ISTJ", "ISTP"
- ]

  def clean_question(text: str) -> str:
-     """Strips instruction artifacts from a question."""
-     text = text.strip().split("\n")[0]
-     text = text.strip('"').strip("'")
-     for bad in ["user:", "assistant:", "system:", "instruction"]:
          if bad.lower() in text.lower():
              text = text.split(bad)[-1].strip()
      if not text.endswith("?"):
          text += "?"
-     return text
-
- async def generate_next_question(user_id: str, user_text: str = "") -> dict:
-     """
-     Generates the next question for the current category.
-     """
-     session = user_sessions.get(user_id, {
-         "history": [],
-         "category_index": 0,
-         "completed": False
-     })
-
-     # Check whether the categories are exhausted
-     if session["category_index"] >= len(MBTI_CATEGORIES):
-         session["completed"] = True
-         user_sessions[user_id] = session
-         return {
-             "question": None,
-             "category": None,
-             "completed": True
-         }

-     current_category = MBTI_CATEGORIES[session["category_index"]]
-
-     # Build the context (the last two Q/A pairs)
-     history = session["history"][-4:]
-     formatted_history = "\n".join(
-         [f"Q{i//2+1}: {history[i]}\nA{i//2+1}: {history[i+1]}" for i in range(0, len(history), 2)]
-     )

      prompt = (
-         f"You are an MBTI interviewer.\n"
-         f"Current personality category: {current_category}\n\n"
-         f"Previous dialogue:\n{formatted_history}\n\n"
-         f"Now generate one new, open-ended question (starting with What, Why, How, or When) "
-         f"to assess this category.\n"
-         f"Do NOT repeat or rephrase any previous question.\n"
-         f"Return only the question text."
      )

-     # Generate the question
-     loop = asyncio.get_event_loop()
-     raw = await loop.run_in_executor(None, lambda: llm_pipe(prompt)[0]["generated_text"])
      question = clean_question(raw)

-     # Unique question ID
-     q_id = str(uuid.uuid4())[:8]
-
-     # Update the user's session state
-     session["history"].extend([question, user_text])
-     session["category_index"] += 1
-     user_sessions[user_id] = session

-     return {
-         "id": q_id,
-         "question": question,
-         "category": current_category,
-         "completed": False
-     }
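The removed coroutine returned a dict rather than streaming text: a caller had to await it and branch on the completed flag, exactly as the removed analyze_and_ask did. A usage sketch against that old API, valid only on the parent revision (3462c0f):

    # Usage sketch for the removed dict-returning API (parent revision only).
    import asyncio
    from core.interviewer import generate_next_question  # old API

    async def main():
        result = await generate_next_question("default_user", "I prefer quiet evenings.")
        if result["completed"]:
            print("Interview finished! All 16 personality categories covered.")
        else:
            print(f"({result['category']}) {result['question']}")

    asyncio.run(main())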
 
  # core/interviewer.py
+ import asyncio
  from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

+ INTERVIEWER_MODEL = "f3nsmart/TinyLlama-MBTI-Interviewer-LoRA"

  tokenizer = AutoTokenizer.from_pretrained(INTERVIEWER_MODEL)
+ model = AutoModelForCausalLM.from_pretrained(INTERVIEWER_MODEL, torch_dtype="auto", device_map="auto")

  llm_pipe = pipeline(
      "text-generation",
      model=model,
      tokenizer=tokenizer,
+     max_new_tokens=70,
      temperature=0.7,
      top_p=0.9,
  )

+ user_memory = {}

  def clean_question(text: str) -> str:
+     text = text.strip().split("\n")[0].strip('"').strip("'")
+     bad_tokens = ["user:", "assistant:", "instruction", "interviewer", "system:"]
+     for bad in bad_tokens:
          if bad.lower() in text.lower():
              text = text.split(bad)[-1].strip()
      if not text.endswith("?"):
          text += "?"
+     return text if len(text.split()) > 3 else "What do you usually enjoy doing in your free time?"

+ def generate_question(user_id: str, user_text: str):
+     """Question generator (streaming)."""
+     prev_qs = user_memory.get(user_id, [])
+     prev_joined = "; ".join(prev_qs) if prev_qs else "None"

      prompt = (
+         f"The following is an MBTI personality interview.\n"
+         f"User: {user_text}\n"
+         f"Interviewer: ask one new, open-ended question starting with 'What', 'Why', 'How', or 'When'. "
+         f"Avoid repeating or rephrasing previous questions.\n"
+         f"Previous questions: {prev_joined}\n"
+         f"Interviewer:"
      )

+     yield "💭 Interviewer is thinking..."
+     raw = llm_pipe(prompt)[0]["generated_text"]
      question = clean_question(raw)

+     valid_starts = ("What", "Why", "How", "When")
+     if not question.startswith(valid_starts):
+         question = "What motivates you to do the things you enjoy most?"

+     prev_qs.append(question)
+     user_memory[user_id] = prev_qs[-10:]
+     yield question
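Since generate_question is now an ordinary synchronous generator, callers simply iterate over it: the first yield is the placeholder status and the last yield is the cleaned question. A usage sketch, assuming the TinyLlama LoRA model above downloads and loads successfully:

    # Usage sketch for the new streaming interface; assumes the model load
    # at import time succeeds. user_id keys into the module-level user_memory.
    from core.interviewer import generate_question

    question = ""
    for chunk in generate_question("default_user", "I like planning things well ahead."):
        question = chunk  # keep the latest yield
        print(chunk)      # first "💭 Interviewer is thinking...", then the question

    print("Next question:", question)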