File size: 3,170 Bytes
a1eaa82
2914725
a1eaa82
2914725
98711d5
a1eaa82
 
2914725
a1eaa82
 
 
 
 
 
6f47e74
a1eaa82
 
98711d5
 
 
a1eaa82
6f47e74
a1eaa82
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2914725
 
a1eaa82
 
 
 
 
 
 
 
 
6f47e74
a1eaa82
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
# app.py
import gradio as gr
from typing import List, Tuple, Dict, Any

# Hugging Face model ID used for both the tokenizer and the seq2seq model.
MODEL_ID = "facebook/blenderbot-400M-distill"
# Lazily-initialized globals; populated on first use by ensure_model_loaded().
model = None
tokenizer = None

def ensure_model_loaded() -> None:
    """Lazily load the Blenderbot tokenizer/model on first use (idempotent)."""
    global model, tokenizer
    if model is not None and tokenizer is not None:
        return  # already initialized
    # Import inside the function so the app can start (and the module can be
    # imported) without transformers until the first chat request arrives.
    from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
    tokenizer = BlenderbotTokenizer.from_pretrained(MODEL_ID)
    model = BlenderbotForConditionalGeneration.from_pretrained(MODEL_ID)

def generate_reply(context: str) -> str:
    """Generate one bot reply for the given conversation transcript.

    Args:
        context: Full "User:/Bot:" dialogue text ending with "Bot:".

    Returns:
        The decoded model reply with special tokens stripped.
    """
    ensure_model_loaded()
    encoded = tokenizer(context, return_tensors="pt")
    output_ids = model.generate(**encoded, max_length=120, no_repeat_ngram_size=2)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

def history_to_context_from_tuples(history: List[Tuple[str, str]]) -> str:
    """Flatten [(user, bot), ...] chat history into a "User:/Bot:" transcript.

    Args:
        history: Pairs of (user message, bot reply).

    Returns:
        Alternating "User: ..." / "Bot: ..." lines, each newline-terminated;
        an empty string for empty history.
    """
    # str.join over a generator avoids quadratic += concatenation on long chats.
    return "".join(f"User: {u}\nBot: {b}\n" for u, b in history)

def history_to_context_from_messages(history: List[Dict[str, str]]) -> str:
    """Flatten role/content message dicts into a "User:/Bot:" transcript.

    Messages look like {"role": "user"/"assistant", "content": "..."}.
    Entries missing a role or content are skipped (as before); any role not
    starting with "user" is rendered as "Bot:".

    Args:
        history: Chat messages in OpenAI-style dict format.

    Returns:
        Newline-terminated "User:/Bot:" lines; empty string for no usable
        messages.
    """
    lines = []
    for msg in history:
        role = msg.get("role", "")
        content = msg.get("content", "")
        if not (role and content):
            continue  # skip malformed/empty entries silently
        prefix = "User" if role.lower().startswith("user") else "Bot"
        lines.append(f"{prefix}: {content}\n")
    # join once instead of building the string with += in the loop
    return "".join(lines)

def chat(state: List[Any], message: str):
    """Handle one user turn: build context, generate a reply, extend state.

    state is the Gradio chat history in either supported format:
      - tuples:   [("hi", "hello"), ...]
      - messages: [{"role": "user", "content": "hi"},
                   {"role": "assistant", "content": "hello"}, ...]

    Returns:
        (state, state) so the same list feeds both gr.State and the Chatbot.
    """
    # Detect the history format to build the generation context.
    if state and isinstance(state[0], dict):
        context = history_to_context_from_messages(state)
    elif state and isinstance(state[0], (list, tuple)):
        context = history_to_context_from_tuples(state)
    else:
        # empty or unknown history -> start fresh
        context = ""

    context += f"User: {message}\nBot:"
    reply = generate_reply(context)

    # Keep tuples format ONLY when state already uses it; otherwise append in
    # messages format. BUG FIX: the original fell back to tuples for an EMPTY
    # state, which breaks gr.Chatbot(type="messages") on the very first turn —
    # messages is now the default for empty/unknown state.
    if state and isinstance(state[0], (list, tuple)):
        state.append((message, reply))
    else:
        state.append({"role": "user", "content": message})
        state.append({"role": "assistant", "content": reply})
    return state, state

# --- UI wiring (flat script; runs at import time) ---
with gr.Blocks() as demo:
    gr.Markdown("## πŸ€– Anuj's Chatbot β€” stable (messages format)")
    # Use the new 'messages' type to avoid deprecation warning
    chatbot = gr.Chatbot(type="messages")
    # Shared conversation history; passed into and returned from chat().
    state = gr.State([])

    with gr.Row():
        msg = gr.Textbox(show_label=False, placeholder="Type a message and press Enter...")

    # Enter in the textbox calls chat(state, msg) and updates both the stored
    # history (gr.State) and the rendered Chatbot from the same return value.
    msg.submit(chat, [state, msg], [state, chatbot])

if __name__ == "__main__":
    # Bind to all interfaces on Gradio's default port 7860 (e.g. for Docker).
    demo.launch(server_name="0.0.0.0", server_port=7860)