QAway-to committed
Commit · 206b8e2
1 Parent(s): 8e845a7

Back to normal app.py v1.3

Files changed:
- app.py +41 -14
- core/interviewer.py +17 -14
- core/mbti_analyzer.py +9 -16
- core/utils.py +2 -2
app.py CHANGED

@@ -2,19 +2,21 @@
 import gradio as gr
 from core.utils import generate_first_question
 from core.mbti_analyzer import analyze_mbti
-from core.interviewer import generate_question
+from core.interviewer import generate_question, session_state
 
 def analyze_and_ask(user_text, prev_count):
-    """
+    """Main generation function (analysis → question)."""
     if not user_text.strip():
         yield "⚠️ Please enter your answer.", "", prev_count
         return
 
+    user_id = "default_user"
     try:
         n = int(prev_count.split("/")[0]) + 1
     except Exception:
         n = 1
-
+
+    counter = f"{n}/8"
 
     # 1️⃣ Step 1: analysis
     mbti_gen = analyze_mbti(user_text)
@@ -23,18 +25,35 @@ def analyze_and_ask(user_text, prev_count):
         mbti_text = chunk
         yield mbti_text, "💭 Interviewer is thinking...", counter
 
-    # 2️⃣ Step 2 —
-
+    # 2️⃣ Step 2: generate the next question
+    q_gen = generate_question(user_id)
     next_q = ""
-
-
-
+    cat = ""
+    for data in q_gen:
+        if isinstance(data, str):
+            next_q = data
+            yield mbti_text, next_q, counter
+        else:
+            cat, next_q = data
+            yield mbti_text, next_q, counter
+
+    # If all categories have been covered, show the final result
+    if next_q.startswith("✅ All"):
+        session = session_state[user_id]
+        yield (
+            f"{mbti_text}\n\nSession completed.",
+            "🎯 All MBTI axes covered.",
+            "8/8"
+        )
 
 # --------------------------------------------------------------
-# Gradio
+# Gradio interface
 # --------------------------------------------------------------
 with gr.Blocks(theme=gr.themes.Soft(), title="MBTI Personality Interviewer") as demo:
-    gr.Markdown(
+    gr.Markdown(
+        "## 🧠 MBTI Personality Interviewer\n"
+        "Identify your personality type and get questions from the different MBTI categories."
+    )
 
     with gr.Row():
         with gr.Column(scale=1):
@@ -46,11 +65,19 @@ with gr.Blocks(theme=gr.themes.Soft(), title="MBTI Personality Interviewer") as
         btn = gr.Button("Analyze and ask a new question", variant="primary")
     with gr.Column(scale=1):
         mbti_out = gr.Textbox(label="📊 MBTI Analysis", lines=4)
-        interviewer_out = gr.Textbox(label="💬 Next question
-        progress = gr.Textbox(label="⏳ Progress", value="0/
+        interviewer_out = gr.Textbox(label="💬 Next question", lines=3)
+        progress = gr.Textbox(label="⏳ Progress", value="0/8")
 
-    btn.click(
+    btn.click(
+        analyze_and_ask,
+        inputs=[inp, progress],
+        outputs=[mbti_out, interviewer_out, progress]
+    )
 
-    demo.load(
+    demo.load(
+        lambda: ("", generate_first_question(), "0/8"),
+        inputs=None,
+        outputs=[mbti_out, interviewer_out, progress]
+    )
 
 demo.queue(max_size=20).launch(server_name="0.0.0.0", server_port=7860)
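Note on the pattern above: analyze_and_ask is a generator, and Gradio streams each yielded (mbti_out, interviewer_out, progress) tuple into the three outputs as it arrives; this needs the queue, which demo.queue(...) enables. A minimal, self-contained sketch of the same mechanism, independent of this repo:

import time
import gradio as gr

def stream_steps(text):
    # Each yield re-renders both outputs; intermediate yields act as status updates.
    yield "⏳ analyzing...", ""
    time.sleep(1)
    yield f"analysis of: {text}", "next question ready"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Answer")
    out1 = gr.Textbox(label="Analysis")
    out2 = gr.Textbox(label="Question")
    gr.Button("Go").click(stream_steps, inputs=inp, outputs=[out1, out2])

demo.queue().launch()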
core/interviewer.py CHANGED

@@ -1,12 +1,13 @@
 # core/interviewer.py
 import random
-import uuid
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 INTERVIEWER_MODEL = "f3nsmart/TinyLlama-MBTI-Interviewer-LoRA"
 
 tokenizer = AutoTokenizer.from_pretrained(INTERVIEWER_MODEL)
-model = AutoModelForCausalLM.from_pretrained(
+model = AutoModelForCausalLM.from_pretrained(
+    INTERVIEWER_MODEL, torch_dtype="auto", device_map="auto"
+)
 
 llm_pipe = pipeline(
     "text-generation",
@@ -17,7 +18,7 @@ llm_pipe = pipeline(
     top_p=0.9,
 )
 
-# MBTI
+# The main MBTI axes
 CATEGORIES = [
     "Introversion", "Extroversion",
     "Sensing", "Intuition",
@@ -25,11 +26,11 @@ CATEGORIES = [
     "Judging", "Perceiving"
 ]
 
-# Memory
+# Per-session memory
 session_state = {}
 
 def init_session(user_id: str):
-    """
+    """Creates a new session."""
     session_state[user_id] = {
         "asked": [],
         "answers": {},
@@ -38,7 +39,7 @@ def init_session(user_id: str):
     }
 
 def select_next_category(user_id: str):
-    """Selects
+    """Selects a category, avoiding repeats."""
     s = session_state[user_id]
     remaining = [c for c in CATEGORIES if c not in s["asked"]]
     if not remaining:
@@ -48,27 +49,29 @@ def select_next_category(user_id: str):
     return next_cat
 
 def build_prompt(category: str):
-    """Builds a prompt
+    """Builds the prompt for TinyLlama."""
     return (
-        f
-        f
-        f
-        f'"next_question": "?"}}'
+        f"Ask one open-ended psychological question about {category}. "
+        f"Do not repeat previous questions. "
+        f"Output only the question text."
     )
 
 def generate_question(user_id: str):
-    """Generates the question
+    """Generates the question for the next MBTI category."""
     if user_id not in session_state:
         init_session(user_id)
 
     category = select_next_category(user_id)
     if not category:
         yield "✅ All 8 categories completed."
-        return
+        return None, None
 
+    yield f"💭 Interviewer is thinking (category: {category})..."
     prompt = build_prompt(category)
     raw = llm_pipe(prompt)[0]["generated_text"]
+
+    # Clean-up: keep the first line and make sure it reads as a question
    question = raw.strip().split("\n")[0]
     if "?" not in question:
         question += "?"
-
+    yield category, question
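One Python subtlety justifies the final yield: a value passed to return inside a generator is not produced by iteration (it only travels inside StopIteration.value), so a plain for loop over generate_question(...) would never see a returned pair; it must be yielded, as above. A small consumer sketch (the user id is arbitrary):

from core.interviewer import generate_question

category, question = "", ""
for item in generate_question("demo_user"):
    if isinstance(item, str):
        print("status:", item)      # progress or completion messages
    else:
        category, question = item   # the final (category, question) pair

print(f"[{category}] {question}")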
core/mbti_analyzer.py CHANGED

@@ -1,30 +1,23 @@
 # core/mbti_analyzer.py
 from transformers import pipeline
-import asyncio
 
 MBTI_MODEL = "f3nsmart/MBTIclassifier"
 mbti_pipe = pipeline("text-classification", model=MBTI_MODEL, return_all_scores=True)
 
-async def analyze_mbti_async(user_text: str):
-    """Asynchronous MBTI analysis."""
-    loop = asyncio.get_event_loop()
-    return await loop.run_in_executor(None, lambda: mbti_pipe(user_text)[0])
-
 def analyze_mbti(user_text: str):
-    """
+    """Step-by-step MBTI analysis (a generator, for streaming)."""
     yield "⏳ Analyzing personality traits..."
-
-
-
-
+    try:
+        res = mbti_pipe(user_text)[0]
+        res_sorted = sorted(res, key=lambda x: x["score"], reverse=True)
+        mbti_text = "\n".join([f"{r['label']} → {r['score']:.3f}" for r in res_sorted[:3]])
+        yield mbti_text
+    except Exception as e:
+        yield f"❌ Error: {e}"
 
 def compute_dominant_axis(results):
     """
-
-        {"label": "Introversion", "score": 0.73},
-        {"label": "Extroversion", "score": 0.27},
-        ...
-    ]
+    Converts the classifier scores into an MBTI code.
     """
     axes = {
         "IE": ("Introversion", "Extroversion"),
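The body of compute_dominant_axis is cut off in this diff. For orientation only, here is one plausible reading of what a function with that docstring and axes table could do; this is an assumed sketch, not the repository's actual implementation:

def compute_dominant_axis_sketch(results):
    # Assumed input shape: [{"label": "Introversion", "score": 0.73}, ...]
    axes = {
        "IE": ("Introversion", "Extroversion"),
        "SN": ("Sensing", "Intuition"),
        "TF": ("Thinking", "Feeling"),
        "JP": ("Judging", "Perceiving"),
    }
    scores = {r["label"]: r["score"] for r in results}
    code = ""
    for pair, (first, second) in axes.items():
        # take the letter of whichever pole scored higher
        code += pair[0] if scores.get(first, 0.0) >= scores.get(second, 0.0) else pair[1]
    return code  # e.g. "INTJ"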
core/utils.py CHANGED

@@ -1,3 +1,3 @@
 # core/utils.py
-def generate_first_question():
-    return "
+def generate_first_question() -> str:
+    return "What do you usually enjoy doing in your free time?"