Update to gr.Chatbot
app.py (CHANGED)
@@ -43,7 +43,6 @@ if torch.cuda.is_available():
 def generate(
     message: str,
     chat_history: list[tuple[str, str]],
-    system_prompt: str = "",
     max_new_tokens: int = 1024,
     temperature: float = 0.6,
     top_p: float = 0.9,
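The `system_prompt` parameter is dropped from `generate`, so callers now control only the sampling settings. Judging by the seven positional arguments `bot` passes further down, the two parameters elided between these hunks (old lines 50-51) are presumably `top_k` and `repetition_penalty`. A minimal sketch of a call against the new signature (keyword names assumed from the slider labels below):

    # Sketch only: generate() appears to yield the cumulative response so far,
    # which is why bot() below simply keeps the last yielded value.
    final = ""
    for final in generate("Hello!", [], max_new_tokens=256, temperature=0.6,
                          top_p=0.9, top_k=50, repetition_penalty=1.2):
        pass
    print(final)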
@@ -52,9 +51,7 @@ def generate(
 ) -> Iterator[str]:
     print("chat history: ", chat_history)
     conversation = []
-
-    system_prompt = SYSTEM_PROMPT
-    conversation.append({"role": "system", "content": system_prompt})
+    conversation.append({"role": "system", "content": SYSTEM_PROMPT})
     for user, assistant in chat_history:
         conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
     conversation.append({"role": "user", "content": message})
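With the hard-coded `SYSTEM_PROMPT`, every conversation is now seeded with the same system turn before the user/assistant history. The diff does not show how `conversation` is consumed; a typical continuation in Transformers chat apps, assuming a tokenizer loaded elsewhere in app.py (the checkpoint below is illustrative, not taken from this diff):

    from transformers import AutoTokenizer

    # Illustrative checkpoint; the actual model used by this Space is not shown here.
    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")

    conversation = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello!"},
    ]
    # Render the role/content list through the model's chat template and
    # append the assistant header so generation continues from there.
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    )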
@@ -96,35 +93,44 @@ examples = [
 
 with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-
     chatbot = gr.Chatbot()
-    msg = gr.Textbox(label="Enter your message")
-
+    msg = gr.Textbox(label="உங்கள் செய்தியை உள்ளிடவும் / Enter your message")
+    submit_btn = gr.Button("சமர்ப்பிக்கவும் / Submit")
+    clear = gr.Button("தெளிவானது / Clear")
 
     def user(user_message, history):
         return "", history + [[user_message, None]]
 
-    def bot(history,
+    def bot(history, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
         user_message = history[-1][0]
         chat_history = [(msg[0], msg[1]) for msg in history[:-1]]
         bot_message = ""
-        for response in generate(user_message, chat_history,
+        for response in generate(user_message, chat_history, max_new_tokens, temperature, top_p, top_k, repetition_penalty):
             bot_message = response
             history[-1][1] = bot_message
             yield history
 
+    gr.Examples(examples=examples, inputs=[msg], label="உதாரணங்கள் / Examples")
+
+    with gr.Accordion("மேம்பட்ட விருப்பங்கள் / Advanced Options", open=False):
+        max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS)
+        temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6)
+        top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9)
+        top_k = gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50)
+        repetition_penalty = gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)
+
+    submit_btn.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+        bot,
+        [chatbot, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
+        chatbot,
+    )
     msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
         bot,
-        [chatbot,
-         gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, step=1, value=DEFAULT_MAX_NEW_TOKENS),
-         gr.Slider(label="Temperature", minimum=0.1, maximum=4.0, step=0.1, value=0.6),
-         gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, step=0.05, value=0.9),
-         gr.Slider(label="Top-k", minimum=1, maximum=1000, step=1, value=50),
-         gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.2)],
+        [chatbot, max_new_tokens, temperature, top_p, top_k, repetition_penalty],
         chatbot,
     )
     clear.click(lambda: None, None, chatbot, queue=False)
-
+
 
     gr.Markdown(LICENSE)
 
 if __name__ == "__main__":