Jonathan Bejarano committed on
Commit ba5915f · 1 Parent(s): d310432

Always keep the dropdown because it still determines the question types
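The game-mode dropdown (U.S. states vs. countries) was previously hidden whenever the game type was not Twenty Questions; this commit keeps it visible because both game-type handlers consume its value. A minimal sketch of that dependency, using names that appear in the diff below (the surrounding handler code is an assumption):

    # Sketch: the selected mode picks the system prompt / question set,
    # so the dropdown matters even while the example prompts are hidden.
    if game_mode_selection == game.MODE_STATES:
        system = game.get_system_message(game.MODE_STATES)     # state questions
    else:
        system = game.get_system_message(game.MODE_COUNTRIES)  # country questions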

Files changed (2):
  1. app.py +5 -3
  2. bee.py +97 -0
app.py CHANGED
@@ -1,7 +1,7 @@
 import gradio as gr
 import game
 import ai
-
+import bee
 
 
 
@@ -152,6 +152,8 @@ examples = examples_states
 # Create wrapper function that handles both local and cloud modes
 # Local mode wrapper function
 def custom_respond(message, history, game_mode_selection, game_type_selection):
+    if game_type_selection == game.TYPE_GEOGRAPHY_BEE:
+        return bee.respond(message, history, game_mode_selection)
     return respond(message, history, "", 4000, 0.3, 0.6, game_mode_selection, None)
 
 def get_examples_for_mode(mode):
@@ -180,7 +182,7 @@ with gr.Blocks() as demo:
         value=game.MODE_STATES,
         label="Game Mode",
         info="Choose what type of location to guess",
-        visible=False
+        visible=True
     )
 
     # ChatInterface without built-in examples
@@ -210,7 +212,7 @@ with gr.Blocks() as demo:
     def update_examples_visibility(game_type):
         is_visible = (game_type == game.TYPE_TWENTY_QUESTIONS)
         return (
-            gr.Dropdown(visible=is_visible),  # mode_dropdown
+            gr.Dropdown(visible=True),  # mode_dropdown
             gr.Markdown(visible=is_visible),  # examples_header
             gr.Dataset(visible=is_visible)  # examples_dataset
         )
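For context, update_examples_visibility returns three component updates, so the event hookup elsewhere in app.py must list its outputs in the same order. A hedged sketch of that wiring (the .change call is not part of this diff, and the widget variable names are assumptions taken from the inline comments above):

    # Assumed wiring; output order must match the returned tuple.
    game_type_dropdown.change(
        fn=update_examples_visibility,
        inputs=[game_type_dropdown],
        outputs=[mode_dropdown, examples_header, examples_dataset],
    )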
bee.py ADDED
@@ -0,0 +1,97 @@
+import gradio as gr
+import game
+import ai  # bee uses ai.client and ai.clean_response below
+
+# Parameter order matches the call site in app.py: bee.respond(message, history,
+# game_mode_selection); the remaining knobs default to the values app.py passes
+# to its own respond() ("", 4000, 0.3, 0.6).
+def respond(
+    message,
+    history: list[dict[str, str]],
+    game_mode_selection,
+    system_message="",
+    max_tokens=4000,
+    temperature=0.3,
+    top_p=0.6,
+    hf_token: gr.OAuthToken | None = None,
+):
+    """
+    For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+    """
+    # Imported here rather than at module level to avoid a circular import:
+    # app.py imports bee before format_game_result is defined.
+    from app import format_game_result
+
+    # If this is the start of a new conversation (empty history), generate a new country/state
+    if not history:
+        game.guess_number = 0
+        if game_mode_selection == game.MODE_STATES:
+            game.current_system = game.get_system_message(game.MODE_STATES)
+            print(f"🔍 DEBUG - New session started, selected state: {game.selected_country}")
+        else:
+            game.current_system = game.get_system_message(game.MODE_COUNTRIES)
+            print(f"🔍 DEBUG - New session started, selected country: {game.selected_country}")
+
+    game.guess_number += 1
+    messages = [{"role": "system", "content": game.current_system + str(game.guess_number)}]
+    messages.append({"role": "user", "content": message})
+
+    # Debug: Calculate approximate input token count
+    total_input_chars = sum(len(str(msg.get("content", ""))) for msg in messages)
+    estimated_input_tokens = total_input_chars // 4  # Rough approximation: 4 chars per token
+    print(f"🔍 DEBUG - Estimated input tokens: {estimated_input_tokens}")
+    print(f"🔍 DEBUG - Messages count: {len(messages)}")
+    print(f"🔍 DEBUG - Max tokens setting: {max_tokens}")
+
+    # Debug: Show each message type and length
+    for i, msg in enumerate(messages):
+        role = msg.get("role", "unknown")
+        content = str(msg.get("content", ""))
+        print(f"🔍 DEBUG - Message {i+1} ({role}): {len(content)} chars")
+        if role == "system":
+            print(f"🔍 DEBUG - System message preview: ...{content[-100:]}")
+        elif role == "user":
+            print(f"🔍 DEBUG - User message: {content}")
+        elif role == "assistant":
+            print(f"🔍 DEBUG - Assistant message: {content[:50]}...")
+
+    response = ""
+    output_token_count = 0
+
+    try:
+        for message_chunk in ai.client.chat_completion(
+            messages,
+            stream=True,
+            response_format={"type": "text"},
+        ):
+            choices = message_chunk.choices
+            token = ""
+            if len(choices) and choices[0].delta.content:
+                token = choices[0].delta.content
+                output_token_count += 1
+
+            response += token
+
+        # Debug: Show output token statistics
+        estimated_output_tokens = len(response) // 4  # Rough approximation
+        print(f"🔍 DEBUG - Output token chunks received: {output_token_count}")
+        print(f"🔍 DEBUG - Estimated output tokens (by chars): {estimated_output_tokens}")
+        print(f"🔍 DEBUG - Response length: {len(response)} characters")
+        print(f"🔍 DEBUG - Raw response: {response}")
+
+        # Clean the response to remove unwanted artifacts
+        response = ai.clean_response(response)
+        print(f"🔍 DEBUG - Cleaned response: {response}")
+
+        # Check if this is a game end response and format it nicely
+        if "The country was" in response or "The state was" in response:
+            print(f"🔍 DEBUG - Game end detected! Location extracted: {game.selected_country}")
+            return format_game_result(response)
+        elif game.guess_number == 20:
+            print(f"🔍 DEBUG - Maximum guesses reached: {game.guess_number}")
+            return format_game_result(response)
+        else:
+            print("🔍 DEBUG - Regular response (no game end)")
+            return response
+    except Exception as e:
+        return f"Error during inference: {str(e)}"