Aftrr committed on
Commit
98711d5
·
verified ·
1 Parent(s): 6f47e74

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -22
app.py CHANGED
@@ -1,39 +1,32 @@
1
  import gradio as gr
2
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
3
 
4
- MODEL_ID = "microsoft/DialoGPT-medium" # small chatbot model
 
5
 
6
- # Load model & tokenizer
7
- tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
8
- model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
9
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
10
 
11
  def chat(history, message):
12
- # Convert list of tuples -> plain text context
13
- prompt = ""
14
  for user, bot in history:
15
- prompt += f"User: {user}\nBot: {bot}\n"
16
- prompt += f"User: {message}\nBot:"
17
 
18
- out = generator(prompt, max_length=200, do_sample=True, top_k=50, top_p=0.95, num_return_sequences=1)
19
- reply = out[0]['generated_text']
 
20
 
21
- if "Bot:" in reply:
22
- reply = reply.split("Bot:")[-1].strip()
23
-
24
- # Append new message pair as tuple (user, bot)
25
  history.append((message, reply))
26
  return history, history
27
 
28
  with gr.Blocks() as demo:
29
- gr.Markdown("## 🤖 Chatapt")
30
 
31
  chatbot = gr.Chatbot()
32
  state = gr.State([])
33
 
34
- with gr.Row():
35
- msg = gr.Textbox(show_label=False, placeholder="Type a message and press Enter...")
36
-
37
- msg.submit(chat, [state, msg], [state, chatbot])
38
 
39
- demo.launch()
 
1
  import gradio as gr
2
+ from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
3
 
4
# Small open-domain chatbot model (seq2seq); chosen for quality at modest size.
MODEL_ID = "facebook/blenderbot-400M-distill"

# Load the tokenizer and model once at import time (downloads to the local
# Hugging Face cache on first run) so every chat request reuses them.
tokenizer = BlenderbotTokenizer.from_pretrained(MODEL_ID)
model = BlenderbotForConditionalGeneration.from_pretrained(MODEL_ID)
 
 
9
 
10
def chat(history, message):
    """Generate a bot reply to *message* and append it to the chat history.

    Parameters
    ----------
    history : list[tuple[str, str]]
        Accumulated (user, bot) message pairs, as kept in ``gr.State``.
    message : str
        The new user utterance from the textbox.

    Returns
    -------
    tuple
        ``(history, history)`` — the updated list twice, once for the
        ``gr.State`` output and once for the ``gr.Chatbot`` display.
    """
    # Flatten the (user, bot) pairs into a single prompt string.
    # NOTE(review): Blenderbot is a seq2seq chat model; it was not trained on
    # a "User:/Bot:" transcript format, so passing only the latest message
    # may work better — confirm against the model card.
    context = ""
    for user, bot in history:
        context += f"User: {user}\nBot: {bot}\n"
    context += f"User: {message}\nBot:"

    # truncation=True caps the encoded input at the tokenizer's
    # model_max_length. Without it, a growing history eventually exceeds the
    # model's maximum input length and generate() raises at runtime.
    inputs = tokenizer(context, return_tensors="pt", truncation=True)
    reply_ids = model.generate(**inputs, max_length=120, no_repeat_ngram_size=2)
    reply = tokenizer.decode(reply_ids[0], skip_special_tokens=True)

    history.append((message, reply))
    return history, history
23
 
24
  with gr.Blocks() as demo:
25
+ gr.Markdown("## 🤖 CHatapt")
26
 
27
  chatbot = gr.Chatbot()
28
  state = gr.State([])
29
 
30
+ msg = gr.Textbox(show_label=False, placeholder="Type a message and press Enter...")
 
 
 
31
 
32
+ msg.submit(chat, [state,]()