# NOTE(review): removed Hugging Face Spaces page-scrape artifacts (status lines,
# commit hashes, and a column of line numbers) that were not part of the source
# and prevented the file from parsing as Python.
import gradio as gr
import game
import ai
import bee
def format_game_result(response):
    """Return the model's reply, restyled when it marks the end of a round.

    Wins ("Congratulations") and losses ("Game over") are rewritten into a
    markdown summary naming the answer; any other reply passes through as-is.
    """
    kind = "state" if game.mode == game.MODE_STATES else "country"
    # Log whether this reply reveals the answer (i.e. the round just ended).
    if f"The {kind} was" in response:
        print(f"π DEBUG - Game end detected! {kind.capitalize()} extracted: {game.selected_country}")
    else:
        print("π DEBUG - Regular response (no game end)")
    if "Congratulations" in response:
        return (
            f"π **Congratulations!** You correctly guessed **{game.selected_country}**! "
            f"It took you **{game.guess_number}** guesses. Well done! π\n\n"
            "To play another round, please start a new conversation or reload the page."
        )
    if "Game over" in response:
        return (
            f"π **Game Over!** You've used all 20 questions. "
            f"The {kind} I was thinking of was **{game.selected_country}**. π\n\n"
            "To try again, please start a new conversation or reload the page."
        )
    return response
def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    game_mode_selection,
    hf_token: gr.OAuthToken | None = None,
):
    """
    Run one turn of the 20-questions game against the inference client.

    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference

    Parameters:
        message: the user's latest question/guess.
        history: prior chat turns; empty on the first turn of a session.
        system_message: unused here — the system prompt comes from the `game` module.
        max_tokens / temperature / top_p: sampling settings forwarded to the
            inference call (see BUG FIX note below).
        game_mode_selection: game.MODE_STATES or game.MODE_COUNTRIES.
        hf_token: unused; kept for interface compatibility with Gradio OAuth.

    Returns:
        The (possibly game-end formatted) assistant reply, or an error string.
    """
    # If this is the start of a new conversation (empty history), generate a new country/state
    if not history:
        game.guess_number = 0
        if game_mode_selection == game.MODE_STATES:
            game.current_system = game.get_system_message(game.MODE_STATES)
            print(f"π DEBUG - New session started, selected state: {game.selected_country}")
        else:
            game.current_system = game.get_system_message(game.MODE_COUNTRIES)
            print(f"π DEBUG - New session started, selected country: {game.selected_country}")
    game.guess_number += 1
    # The guess counter is appended to the system prompt so the model knows the turn number.
    messages = [{"role": "system", "content": game.current_system + str(game.guess_number)}]
    messages.append({"role": "user", "content": message})
    # Debug: Calculate approximate input token count
    total_input_chars = sum(len(str(msg.get("content", ""))) for msg in messages)
    estimated_input_tokens = total_input_chars // 4  # Rough approximation: 4 chars per token
    print(f"π DEBUG - Estimated input tokens: {estimated_input_tokens}")
    print(f"π DEBUG - Messages count: {len(messages)}")
    print(f"π DEBUG - Max tokens setting: {max_tokens}")
    # Debug: Show each message type and length
    for i, msg in enumerate(messages):
        role = msg.get("role", "unknown")
        content = str(msg.get("content", ""))
        print(f"π DEBUG - Message {i+1} ({role}): {len(content)} chars")
        if role == "system":
            print(f"π DEBUG - System message preview: ...{content[-100:]}")
        elif role == "user":
            print(f"π DEBUG - User message: {content}")
        elif role == "assistant":
            print(f"π DEBUG - Assistant message: {content[:50]}...")
    response = ""
    output_token_count = 0
    try:
        # BUG FIX: max_tokens/temperature/top_p were accepted by this function
        # (and max_tokens even printed in the debug output above) but never
        # forwarded to the inference call, so the generation settings had no
        # effect. They are now passed through to chat_completion.
        for message_chunk in ai.client.chat_completion(
            messages,
            stream=True,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            response_format={"type": "text"},
        ):
            choices = message_chunk.choices
            token = ""
            if len(choices) and choices[0].delta.content:
                token = choices[0].delta.content
                output_token_count += 1
            response += token
        # Debug: Show output token statistics
        estimated_output_tokens = len(response) // 4  # Rough approximation
        print(f"π DEBUG - Output token chunks received: {output_token_count}")
        print(f"π DEBUG - Estimated output tokens (by chars): {estimated_output_tokens}")
        print(f"π DEBUG - Response length: {len(response)} characters")
        print(f"π DEBUG - Raw response: {response}")
        # Clean the response to remove unwanted artifacts
        response = ai.clean_response(response)
        print(f"π DEBUG - Cleaned response: {response}")
        # Check if this is a game end response and format it nicely
        if "The country was" in response or "The state was" in response:
            print(f"π DEBUG - Game end detected! Location extracted: {game.selected_country}")
            return format_game_result(response)
        elif game.guess_number >= 20:  # >= instead of ==: robust if the counter ever skips past 20
            print(f"π DEBUG - Maximum guesses reached: {game.guess_number}")
            return format_game_result(response)
        else:
            print("π DEBUG - Regular response (no game end)")
            return response
    except Exception as e:
        return f"Error during inference: {str(e)}"
# UI description, varying with deployment mode (local model vs. hosted inference).
description = (
    f"π Running locally with {ai.MODEL_NAME}. Choose your game mode and I'll think of a location for you to guess with 20 yes or no questions!"
    if ai.LOCAL_MODE
    else "Choose your game mode and I'll think of a location for you to guess with 20 yes or no questions!"
)
# Mode-specific example questions; each sample is a one-element row for gr.Dataset.
examples_states = [[q] for q in (
    "Is it on the East Coast?",
    "Is it in the Western United States?",
    "Does it border an ocean?",
    "Does it border Canada?",
    "Is it larger than Texas?",
    "Was it one of the original 13 colonies?",
    "Does it have a major city with over 1 million people?",
    "Is it known for agriculture?",
    "Does it have mountains?",
    "Is it in the Mountain Time Zone?",
)]
examples_countries = [[q] for q in (
    "Is it located in North America?",
    "Is it in the Northern Hemisphere?",
    "Does it border an ocean?",
    "Is the official language English?",
    "Does it have more than 10 million people?",
    "Is it known for producing oil?",
    "Was it ever a British colony?",
    "Is it located on an island?",
    "Does it use the US Dollar as currency?",
    "Is it a landlocked country?",
)]
# Default examples (states mode is the initial dropdown value).
examples = examples_states
# Create wrapper function that handles both local and cloud modes
# Local mode wrapper function
def custom_respond(message, history, game_type_selection, game_mode_selection):
    """Dispatch one chat turn to the handler for the selected game type.

    Geography-bee turns go to bee.respond; everything else runs the
    20-questions respond() with fixed generation settings.
    """
    if game_type_selection != game.TYPE_GEOGRAPHY_BEE:
        # Fixed settings: empty system message, 4000 max tokens, temp 0.3, top_p 0.6.
        return respond(message, history, "", 4000, 0.3, 0.6, game_mode_selection, None)
    return bee.respond(message, history, 4000, game_mode_selection)
def get_examples_for_mode(mode):
    """Pick the example-question list matching the given game mode."""
    return examples_states if mode == game.MODE_STATES else examples_countries
# Build the Gradio UI: chat interface plus mode/type dropdowns and clickable examples.
# NOTE(review): indentation was lost in this copy; nesting below is reconstructed —
# confirm component placement (especially which widgets sit inside the Row) against
# the original file.
with gr.Blocks() as demo:
    gr.Markdown("# Geography AI Tutor")
    gr.Markdown(description)
    # Game type selection at the top
    # Dropdowns are stored on the `game` module so other modules can reference them.
    game.type_dropdown = gr.Dropdown(
        choices=[game.TYPE_GEOGRAPHY_BEE, game.TYPE_TWENTY_QUESTIONS],
        value=game.TYPE_GEOGRAPHY_BEE,
        label="Game Type",
        info="Choose what type of game to play"
    )
    game.mode_dropdown = gr.Dropdown(
        choices=[game.MODE_STATES, game.MODE_COUNTRIES],
        value=game.MODE_STATES,
        label="Game Mode",
        info="Choose what type of location to guess",
        visible=True
    )
    # ChatInterface without built-in examples
    # The two dropdowns are passed as extra args to custom_respond on every turn.
    chatbot = gr.ChatInterface(
        custom_respond,
        type="messages",
        cache_examples=False,
        additional_inputs=[game.type_dropdown, game.mode_dropdown],
    )
    # Add examples separately using Dataset which can be updated
    # Both start hidden; visibility is toggled by the game-type dropdown below.
    with gr.Row():
        examples_header = gr.Markdown("### Example Questions (click to use)", visible=False)
        examples_dataset = gr.Dataset(
            components=[chatbot.textbox],
            samples=examples_states,
            type="index",
            visible=False
        )
    # Update examples when mode changes
    def update_examples(mode):
        # Returning a new Dataset updates the samples shown for the selected mode.
        return gr.Dataset(samples=get_examples_for_mode(mode))
    # Update visibility when game type changes
    def update_examples_visibility(game_type):
        # Examples only make sense for 20 Questions; hide them for the geography bee.
        is_visible = (game_type == game.TYPE_TWENTY_QUESTIONS)
        return (
            gr.Dropdown(visible=True),  # mode_dropdown
            gr.Markdown(visible=is_visible),  # examples_header
            gr.Dataset(visible=is_visible)  # examples_dataset
        )
    # When an example is clicked, populate the textbox
    def load_example(index, mode):
        # `index` comes from the Dataset (type="index"); guard against stale indices.
        examples = get_examples_for_mode(mode)
        if 0 <= index < len(examples):
            return examples[index][0]
        return ""
    # Event wiring: mode change refreshes samples; type change toggles visibility;
    # selecting a sample copies its text into the chat textbox.
    game.mode_dropdown.change(
        fn=update_examples,
        inputs=[game.mode_dropdown],
        outputs=[examples_dataset]
    )
    game.type_dropdown.change(
        fn=update_examples_visibility,
        inputs=[game.type_dropdown],
        outputs=[game.mode_dropdown, examples_header, examples_dataset]
    )
    examples_dataset.select(
        fn=load_example,
        inputs=[examples_dataset, game.mode_dropdown],
        outputs=[chatbot.textbox]
    )
    gr.Markdown(
        """
## Data Sources
- [National Geographic Kids - States](https://kids.nationalgeographic.com/geography/states)
- [Kids World Travel Guide](https://www.kids-world-travel-guide.com)
""")
# Standard script entry point: launch the Gradio app when run directly.
if __name__ == "__main__":
    demo.launch()