# Hugging Face Spaces page header captured during export ("Spaces: Sleeping");
# not part of the application code.
import gradio as gr

import ai
import bee
import game
def format_game_result(response):
    """Turn a raw model reply into a styled end-of-game message when applicable.

    Win and loss replies are rewritten into markdown banners; anything else is
    returned unchanged.
    """
    # The reveal sentence names a "state" or a "country" depending on the mode.
    noun = "state" if game.mode == game.MODE_STATES else "country"
    if f"The {noun} was" in response:
        print(f"π DEBUG - Game end detected! {noun.capitalize()} extracted: {game.selected_country}")
    else:
        print("π DEBUG - Regular response (no game end)")
    if "Congratulations" in response:
        return (
            f"π **Congratulations!** You correctly guessed **{game.selected_country}**! "
            f"It took you **{game.guess_number}** guesses. Well done! π\n\n"
            "To play another round, please start a new conversation or reload the page."
        )
    if "Game over" in response:
        return (
            f"π **Game Over!** You've used all 20 questions. "
            f"The {noun} I was thinking of was **{game.selected_country}**. π\n\n"
            "To try again, please start a new conversation or reload the page."
        )
    return response
def respond(
    message,
    history: list[dict[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    game_mode_selection,
    hf_token: gr.OAuthToken | None = None,
):
    """
    Handle one turn of the 20-questions game: (re)initialize the game when a new
    conversation starts, send the user's question to the inference client, and
    format end-of-game replies.

    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference

    Note: `system_message` and `hf_token` are accepted for gr.ChatInterface
    compatibility but are not used here.
    """
    # If this is the start of a new conversation (empty history), generate a new country/state
    if not history:
        game.guess_number = 0
        if game_mode_selection == game.MODE_STATES:
            game.current_system = game.get_system_message(game.MODE_STATES)
            print(f"π DEBUG - New session started, selected state: {game.selected_country}")
        else:
            game.current_system = game.get_system_message(game.MODE_COUNTRIES)
            print(f"π DEBUG - New session started, selected country: {game.selected_country}")
    game.guess_number += 1
    # NOTE(review): prior chat turns are not resent — only the system prompt
    # (with the guess counter appended) and the latest question. Confirm this
    # is intentional for the game design.
    messages = [{"role": "system", "content": game.current_system + str(game.guess_number)}]
    messages.append({"role": "user", "content": message})
    # Debug: Calculate approximate input token count
    total_input_chars = sum(len(str(msg.get("content", ""))) for msg in messages)
    estimated_input_tokens = total_input_chars // 4  # Rough approximation: 4 chars per token
    print(f"π DEBUG - Estimated input tokens: {estimated_input_tokens}")
    print(f"π DEBUG - Messages count: {len(messages)}")
    print(f"π DEBUG - Max tokens setting: {max_tokens}")
    # Debug: Show each message type and length
    for i, msg in enumerate(messages):
        role = msg.get("role", "unknown")
        content = str(msg.get("content", ""))
        print(f"π DEBUG - Message {i+1} ({role}): {len(content)} chars")
        if role == "system":
            print(f"π DEBUG - System message preview: ...{content[-100:]}")
        elif role == "user":
            print(f"π DEBUG - User message: {content}")
        elif role == "assistant":
            print(f"π DEBUG - Assistant message: {content[:50]}...")
    response = ""
    output_token_count = 0
    try:
        # Bug fix: max_tokens / temperature / top_p were accepted (and even
        # printed above) but never forwarded, so the UI settings had no effect
        # on generation. Forward them to the client.
        for message_chunk in ai.client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
            response_format={"type": "text"},
        ):
            choices = message_chunk.choices
            token = ""
            if len(choices) and choices[0].delta.content:
                token = choices[0].delta.content
                output_token_count += 1
            response += token
        # Debug: Show output token statistics
        estimated_output_tokens = len(response) // 4  # Rough approximation
        print(f"π DEBUG - Output token chunks received: {output_token_count}")
        print(f"π DEBUG - Estimated output tokens (by chars): {estimated_output_tokens}")
        print(f"π DEBUG - Response length: {len(response)} characters")
        print(f"π DEBUG - Raw response: {response}")
        # Clean the response to remove unwanted artifacts
        response = ai.clean_response(response)
        print(f"π DEBUG - Cleaned response: {response}")
        # Check if this is a game end response and format it nicely
        if "The country was" in response or "The state was" in response:
            print(f"π DEBUG - Game end detected! Location extracted: {game.selected_country}")
            return format_game_result(response)
        elif game.guess_number >= 20:  # >= (was ==) so a skipped count still ends the game
            print(f"π DEBUG - Maximum guesses reached: {game.guess_number}")
            return format_game_result(response)
        else:
            print("π DEBUG - Regular response (no game end)")
            return response
    except Exception as e:
        # Boundary handler: surface the failure in the chat instead of crashing the UI.
        return f"Error during inference: {str(e)}"
# Create description based on mode: prepend the model banner only when
# inference runs locally.
_BASE_DESCRIPTION = "Choose your game mode and I'll think of a location for you to guess with 20 yes or no questions!"
description = (
    f"π Running locally with {ai.MODEL_NAME}. " + _BASE_DESCRIPTION
    if ai.LOCAL_MODE
    else _BASE_DESCRIPTION
)
# Mode-specific examples
# Clickable starter questions shown for the "states" game mode.
examples_states = [
    ["Is it on the East Coast?"],
    ["Is it in the Western United States?"],
    ["Does it border an ocean?"],
    ["Does it border Canada?"],
    ["Is it larger than Texas?"],
    ["Was it one of the original 13 colonies?"],
    ["Does it have a major city with over 1 million people?"],
    ["Is it known for agriculture?"],
    ["Does it have mountains?"],
    ["Is it in the Mountain Time Zone?"],
]
# Clickable starter questions shown for the "countries" game mode.
examples_countries = [
    ["Is it located in North America?"],
    ["Is it in the Northern Hemisphere?"],
    ["Does it border an ocean?"],
    ["Is the official language English?"],
    ["Does it have more than 10 million people?"],
    ["Is it known for producing oil?"],
    ["Was it ever a British colony?"],
    ["Is it located on an island?"],
    ["Does it use the US Dollar as currency?"],
    ["Is it a landlocked country?"],
]
# Default examples (will be updated based on mode)
examples = examples_states
# Create wrapper function that handles both local and cloud modes
# Local mode wrapper function
def custom_respond(message, history, game_type_selection, game_mode_selection):
    """Dispatch a chat turn to the Geography Bee or 20-Questions handler.

    Generation settings (4000 max tokens, temperature 0.3, top_p 0.6) are
    fixed here rather than exposed in the UI.
    """
    if game_type_selection != game.TYPE_GEOGRAPHY_BEE:
        return respond(message, history, "", 4000, 0.3, 0.6, game_mode_selection, None)
    return bee.respond(message, history, 4000, game_mode_selection)
def get_examples_for_mode(mode):
    """Return the sample-question list matching the selected game mode."""
    return examples_states if mode == game.MODE_STATES else examples_countries
# UI layout and event wiring for the app.
with gr.Blocks() as demo:
    gr.Markdown("# Geography AI Tutor")
    gr.Markdown(description)
    # Game type selection at the top
    # Components are stored on the `game` module so other modules can reference them.
    game.type_dropdown = gr.Dropdown(
        choices=[game.TYPE_GEOGRAPHY_BEE, game.TYPE_TWENTY_QUESTIONS],
        value=game.TYPE_GEOGRAPHY_BEE,
        label="Game Type",
        info="Choose what type of game to play"
    )
    game.mode_dropdown = gr.Dropdown(
        choices=[game.MODE_STATES, game.MODE_COUNTRIES],
        value=game.MODE_STATES,
        label="Game Mode",
        info="Choose what type of location to guess",
        visible=True
    )
    # ChatInterface without built-in examples
    chatbot = gr.ChatInterface(
        custom_respond,
        type="messages",
        cache_examples=False,
        additional_inputs=[game.type_dropdown, game.mode_dropdown],
    )
    # Add examples separately using Dataset which can be updated
    with gr.Row():
        # Hidden by default; update_examples_visibility shows them for 20-questions.
        examples_header = gr.Markdown("### Example Questions (click to use)", visible=False)
        examples_dataset = gr.Dataset(
            components=[chatbot.textbox],
            samples=examples_states,
            type="index",
            visible=False
        )
    # Update examples when mode changes
    def update_examples(mode):
        """Swap the sample questions to match the selected game mode."""
        return gr.Dataset(samples=get_examples_for_mode(mode))
    # Update visibility when game type changes
    def update_examples_visibility(game_type):
        """Show the example widgets only for the 20-questions game type."""
        is_visible = (game_type == game.TYPE_TWENTY_QUESTIONS)
        return (
            gr.Dropdown(visible=True),  # mode_dropdown
            gr.Markdown(visible=is_visible),  # examples_header
            gr.Dataset(visible=is_visible)  # examples_dataset
        )
    # When an example is clicked, populate the textbox
    def load_example(index, mode):
        """Return the example question at *index* for *mode* ("" if out of range)."""
        examples = get_examples_for_mode(mode)
        if 0 <= index < len(examples):
            return examples[index][0]
        return ""
    game.mode_dropdown.change(
        fn=update_examples,
        inputs=[game.mode_dropdown],
        outputs=[examples_dataset]
    )
    game.type_dropdown.change(
        fn=update_examples_visibility,
        inputs=[game.type_dropdown],
        outputs=[game.mode_dropdown, examples_header, examples_dataset]
    )
    # NOTE(review): with type="index" the Dataset input appears to supply the
    # clicked row's index to load_example — confirm against the installed
    # gradio version's Dataset.select semantics.
    examples_dataset.select(
        fn=load_example,
        inputs=[examples_dataset, game.mode_dropdown],
        outputs=[chatbot.textbox]
    )
    gr.Markdown(
        """
## Data Sources
- [National Geographic Kids - States](https://kids.nationalgeographic.com/geography/states)
- [Kids World Travel Guide](https://www.kids-world-travel-guide.com)
""")
# Start the Gradio server only when this file is run as a script (not on import).
if __name__ == "__main__":
    demo.launch()