Update app.py
app.py CHANGED
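The change comments out the previous general-purpose Zephyr chat app in place and appends a dermatology-focused rewrite below it: format_prompt now opens every conversation with a system instruction framing the model as an AI Dermatologist, generate additionally returns the final output after streaming, and the interface title changes to "AI Dermatologist".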
@@ -63,22 +63,117 @@
 # demo.launch()
 
 
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-# Initialize the client with your desired model
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-#
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-
-# Function to generate responses
+# import gradio as gr
+# from huggingface_hub import InferenceClient
+
+# # Initialize the client with your desired model
+# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+# # Format the conversation prompt with history
+# def format_prompt(message, history):
+#     prompt = "<s>"  # Beginning of sequence for formatting
+#     for user_prompt, bot_response in history:
+#         prompt += f"[INST] {user_prompt} [/INST]"
+#         prompt += f" {bot_response}</s> "
+#     prompt += f"[INST] {message} [/INST]"  # Format current user message
+#     return prompt
+
+# # Function to generate responses while keeping conversation context
+# def generate(
+#     prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0
+# ):
+#     temperature = float(temperature)
+#     if temperature < 1e-2:
+#         temperature = 1e-2
+#     top_p = float(top_p)
+
+#     generate_kwargs = dict(
+#         temperature=temperature,
+#         max_new_tokens=max_new_tokens,
+#         top_p=top_p,
+#         repetition_penalty=repetition_penalty,
+#         do_sample=True,
+#         seed=42,  # Seed for reproducibility
+#     )
+
+#     # Format the prompt with the history and current message
+#     formatted_prompt = format_prompt(prompt, history)
+
+#     # Stream the generated response
+#     stream = client.text_generation(
+#         formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
+#     )
+#     output = ""
+#     for response in stream:
+#         output += response.token.text
+#         yield output  # Yield the streamed output as it's generated
+
+# # Customizable input controls for the chatbot interface
+# additional_inputs = [
+#     gr.Slider(
+#         label="Temperature",
+#         value=0.9,
+#         minimum=0.0,
+#         maximum=1.0,
+#         step=0.05,
+#         interactive=True,
+#         info="Higher values produce more diverse outputs",
+#     ),
+#     gr.Slider(
+#         label="Max new tokens",
+#         value=256,
+#         minimum=0,
+#         maximum=1048,
+#         step=64,
+#         interactive=True,
+#         info="The maximum numbers of new tokens",
+#     ),
+#     gr.Slider(
+#         label="Top-p (nucleus sampling)",
+#         value=0.90,
+#         minimum=0.0,
+#         maximum=1,
+#         step=0.05,
+#         interactive=True,
+#         info="Higher values sample more low-probability tokens",
+#     ),
+#     gr.Slider(
+#         label="Repetition penalty",
+#         value=1.2,
+#         minimum=1.0,
+#         maximum=2.0,
+#         step=0.05,
+#         interactive=True,
+#         info="Penalize repeated tokens",
+#     )
+# ]
+
+# # Define the chatbot interface with interactive sliders and chatbot panel
+# gr.ChatInterface(
+#     fn=generate,
+#     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+#     additional_inputs=additional_inputs,
+#     title="""AI Dermatologist Chatbot"""
+# ).launch(show_api=False)
+
+import gradio as gr
+from huggingface_hub import InferenceClient
+
+# Initialize the client with your desired model
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+# Define the system prompt as an AI Dermatologist
+def format_prompt(message, history):
+    prompt = "<s>"
+    # Start the conversation with a system message
+    prompt += "[INST] You are an AI Dermatologist designed to assist users with skin and hair care. Provide thoughtful, medically-informed advice on various skin and hair conditions. Offer diet recommendations, treatment options, and suggest appropriate skincare or haircare routines based on the user's symptoms. Always ensure that your advice is educational, supportive, and easy to understand, and recommend consulting a dermatologist for personalized medical guidance if necessary [/INST]"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+# Function to generate responses with the AI Dermatologist context
 def generate(
     prompt, history, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0
 ):
@@ -93,20 +188,20 @@ def generate(
         top_p=top_p,
         repetition_penalty=repetition_penalty,
         do_sample=True,
-        seed=42,
-    )
-
-    # Format the prompt with the history and current message
-    formatted_prompt = format_prompt(prompt, history)
-
-    # Stream the generated response
-    stream = client.text_generation(
-        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
-    )
-    output = ""
-    for response in stream:
-        output += response.token.text
-        yield output
+        seed=42,
+    )
+
+    formatted_prompt = format_prompt(prompt, history)
+
+    stream = client.text_generation(
+        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
+    )
+    output = ""
+
+    for response in stream:
+        output += response.token.text
+        yield output
+    return output
 
 # Customizable input controls for the chatbot interface
 additional_inputs = [
@@ -148,10 +243,10 @@ additional_inputs = [
     )
 ]
 
-# Define the chatbot interface with
+# Define the chatbot interface with the starting system message as AI Dermatologist
 gr.ChatInterface(
     fn=generate,
     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
     additional_inputs=additional_inputs,
-    title="
+    title="AI Dermatologist"
 ).launch(show_api=False)
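For a quick sanity check of the new prompt format, here is a minimal standalone sketch that rebuilds the committed format_prompt and prints the string it would send to zephyr-7b-beta. The sample conversation is invented, and the system instruction is abbreviated from the full one in the diff; no Space or API token is needed to run it.

# Sketch: reproduce the commit's format_prompt and inspect its output.
# The conversation below is made up for illustration.

SYSTEM = (
    "You are an AI Dermatologist designed to assist users with skin and "
    "hair care."  # abbreviated; the commit uses a longer instruction
)

def format_prompt(message, history):
    prompt = "<s>"
    # System message prepended by this commit
    prompt += f"[INST] {SYSTEM} [/INST]"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt

history = [
    ("What helps with dry skin?",
     "Use a gentle cleanser and moisturize twice a day."),
]
print(format_prompt("Is coconut oil safe for my face?", history))

One behavioral note on the diff itself: generate is a generator (it yields the growing output so gr.ChatInterface can stream partial replies), so the newly added return output only sets the generator's final StopIteration value; the streaming behavior is unchanged.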