Diomedes Git committed
Commit 500f511 · 1 Parent(s): 3f1845a

splitting off formatters from server

.vscode/launch.json ADDED
@@ -0,0 +1,16 @@
+ {
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Debug app.py as module",
+             "type": "debugpy",
+             "request": "launch",
+             "module": "src.gradio.app",
+             "console": "integratedTerminal",
+             "cwd": "${workspaceFolder}",
+             "env": {
+                 "PYTHONPATH": "${workspaceFolder}"
+             }
+         }
+     ]
+ }
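The "module" entry only resolves if the workspace root is on PYTHONPATH, which is what the "env" block arranges. A quick sanity-check sketch (hypothetical helper, not part of the repo; assumes it is run from the workspace root):

    # Checks that "src.gradio.app" is importable the same way the debugger
    # will load it, i.e. with the workspace root on the import path.
    import importlib.util

    try:
        spec = importlib.util.find_spec("src.gradio.app")
        print("debug target resolvable" if spec else "module not found")
    except ModuleNotFoundError as err:  # raised if the src package itself is absent
        print(f"not importable: {err}")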
.vscode/settings.json CHANGED
@@ -1,3 +1,18 @@
  {
-     "python-envs.pythonProjects": []
- }
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Debug app.py",
+             "type": "python",
+             "request": "launch",
+             "module": "src.gradio.app",
+             "program": "${workspaceFolder}/src/gradio/app.py",
+             "console": "integratedTerminal",
+             "cwd": "${workspaceFolder}",
+             "env": {
+                 "PYTHONPATH": "${workspaceFolder}"
+             }
+         }
+     ],
+     "python-envs.pythonProjects": []
+ }
src/characters/neutral_moderator.py ADDED
@@ -0,0 +1,147 @@
+ import os
+ import logging
+ from typing import Optional, Dict, List
+ from dotenv import load_dotenv
+ from groq import Groq
+ from openai import OpenAI
+ from src.characters.base_character import Character
+
+ load_dotenv()
+ logger = logging.getLogger(__name__)
+
+ class Moderator(Character):
+     """A neutral moderator character for summarizing deliberations."""
+
+     name = "Moderator"
+     emoji = "⚖️"
+     color = "#708090"
+     default_location = "Council Chamber"
+     role = "Neutral moderator focused on balanced summarization"
+     tone = "Neutral, balanced, reasonable, objective"
+     delay = 0.5
+
+     def __init__(self, location: Optional[str] = None, provider_config: Optional[Dict] = None):
+         super().__init__(location, provider_config)
+
+         # Simple provider config for moderator (no tools needed)
+         if provider_config is None:
+             provider_config = {
+                 "primary": "groq",
+                 "fallback": ["nebius"],
+                 "models": {
+                     "groq": "qwen/qwen3-32b",
+                     "nebius": "Qwen3-30B-A3B-Instruct-2507"
+                 },
+                 "timeout": 30,
+                 "use_cloud": True
+             }
+
+         self.provider_config = provider_config
+         self.use_cloud = provider_config.get("use_cloud", True)
+
+         if self.use_cloud:
+             self._init_clients()
+         else:
+             self.model = "llama3.1:8b"
+
+     def _init_clients(self) -> None:
+         """Initialize remote provider clients."""
+         self.clients = {}
+
+         api_timeout = self.provider_config.get("timeout", 30)
+
+         if os.getenv("GROQ_API_KEY"):
+             self.clients["groq"] = Groq(
+                 api_key=os.getenv("GROQ_API_KEY"),
+                 timeout=api_timeout
+             )
+
+         if os.getenv("NEBIUS_API_KEY"):
+             self.clients["nebius"] = OpenAI(
+                 api_key=os.getenv("NEBIUS_API_KEY"),
+                 base_url="https://api.tokenfactory.nebius.com/v1",
+                 timeout=api_timeout
+             )
+
+         if not self.clients:
+             logger.warning(f"{self.name}: No LLM provider API keys found, using fallback")
+
+     def get_system_prompt(self) -> str:
+         """Return a simple system prompt for the moderator."""
+         return """You are a neutral moderator. Your role is to summarize discussions in a balanced, objective manner.
+
+ TONE: Neutral, balanced, reasonable, objective
+ PURPOSE: Summarize key points, agreements, and disagreements without taking sides
+ STYLE: Concise, clear, and fair to all perspectives
+
+ Focus on:
+ - Identifying main arguments from each side
+ - Highlighting areas of agreement and disagreement
+ - Maintaining neutrality and balance
+ - Providing clear, accessible summaries"""
+
+     def _call_llm(self, messages: List[Dict], temperature: float = 0.3, max_tokens: int = 300):
+         """Call configured LLM providers with fallback order."""
+         providers = [self.provider_config["primary"]] + self.provider_config.get("fallback", [])
+         last_error = None
+
+         for provider in providers:
+             client = self.clients.get(provider)
+             if not client:
+                 logger.debug("%s: skipping provider %s (not configured)", self.name, provider)
+                 continue
+
+             try:
+                 model = self.provider_config["models"][provider]
+                 response = client.chat.completions.create(
+                     model=model,
+                     messages=messages,
+                     temperature=temperature,
+                     max_tokens=max_tokens
+                 )
+                 logger.info("%s successfully used %s", self.name, provider)
+                 return response, provider
+             except Exception as exc:
+                 last_error = exc
+                 logger.warning("%s: %s failed (%s)", self.name, provider, str(exc)[:100])
+                 continue
+
+         # Fallback to simple response if all providers fail
+         logger.warning(f"All LLM providers failed for {self.name}, using fallback response")
+         return None, None
+
+     async def respond(self, message: str, history: Optional[List[Dict]] = None) -> str:
+         """Generate a response as the moderator."""
+         if self.use_cloud and self.clients:
+             return await self._respond_cloud(message, history)
+         return self._respond_fallback(message)
+
+     async def _respond_cloud(self, message: str, history: Optional[List[Dict]] = None) -> str:
+         """Use configured cloud providers."""
+         messages = [{"role": "system", "content": self.get_system_prompt()}]
+
+         if history:
+             messages.extend(history[-3:])  # Keep recent context
+
+         messages.append({"role": "user", "content": message})
+
+         response, provider = self._call_llm(
+             messages=messages,
+             temperature=0.3,  # Lower temperature for consistency
+             max_tokens=300
+         )
+
+         if response:
+             return response.choices[0].message.content.strip()
+         else:
+             return self._respond_fallback(message)
+
+     def _respond_fallback(self, message: str) -> str:
+         """Simple fallback response when LLM providers are unavailable."""
+         # Basic summarization logic
+         words = message.split()
+         if len(words) > 50:
+             summary = " ".join(words[:47]) + "..."
+             return f"Moderator Summary: {summary}"
+         else:
+             return f"Moderator Summary: {message}"
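For orientation, a minimal usage sketch of the new class (assumptions: the repo root is on PYTHONPATH, at least one of GROQ_API_KEY / NEBIUS_API_KEY is set via .env, and the transcript text is invented for illustration; without any key it falls through to _respond_fallback):

    import asyncio
    from src.characters.neutral_moderator import Moderator

    async def main() -> None:
        # Default provider_config: Groq primary, Nebius fallback (see __init__ above)
        moderator = Moderator()
        summary = await moderator.respond(
            "Corvus argued for nightly data pulls; Magpie pushed back on API quotas."
        )
        print(summary)

    asyncio.run(main())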
src/cluas_mcp/server.py CHANGED
@@ -12,7 +12,7 @@ from src.cluas_mcp.web.trending import get_trends, explore_trend_angles
  from src.cluas_mcp.news.news_search_entrypoint import verify_news
  from src.cluas_mcp.observation.observation_entrypoint import get_bird_sightings, get_weather_patterns, analyze_temporal_patterns
  from src.cluas_mcp.common.check_local_weather import check_local_weather_sync
- from src.cluas_mcp.formatters import (
+ from .formatters import (
      format_bird_sightings,
      format_news_results,
      format_local_weather,
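Since server.py lives inside the src.cluas_mcp package, the relative spelling resolves to the same module as the old absolute one; it just stops depending on the top-level package name. A quick sketch (run from the repo root) confirming the equivalence:

    import importlib

    # Both spellings land on the same module object once the package is importable.
    absolute = importlib.import_module("src.cluas_mcp.formatters")
    relative = importlib.import_module(".formatters", package="src.cluas_mcp")
    print(absolute is relative)  # expected: True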
src/gradio/app.py CHANGED
@@ -4,7 +4,6 @@ import logging
  import asyncio
  import html
  import random
- import re
  import tempfile
  from pathlib import Path
  from typing import Any, Dict, List, Literal, Optional, Tuple
@@ -12,6 +11,7 @@ from src.characters.corvus import Corvus
  from src.characters.magpie import Magpie
  from src.characters.raven import Raven
  from src.characters.crow import Crow
+ from src.characters.neutral_moderator import Moderator
  from src.characters.base_character import Character
  from src.characters.registry import register_instance, get_all_characters, REGISTRY
  from src.gradio.types import BaseMessage, UIMessage, to_llm_history, from_gradio_format
@@ -24,6 +24,7 @@ corvus = Corvus()
  magpie = Magpie()
  raven = Raven()
  crow = Crow()
+ moderator_instance = Moderator()  # always available

  # register them
  register_instance(corvus)
@@ -210,12 +211,12 @@ def _history_text(history: List[str], limit: int = 13) -> str:
      return "\n".join(history[-limit:])


- async def _neutral_summary(history_text: str, moderator: Character = None) -> str:
+ async def _neutral_summary(history_text: str, moderator: Character = moderator_instance) -> str:
      if not history_text.strip():
          return "No discussion to summarize."
      prompt = (
-         "You are the neutral moderator. "
-         "Summarize the key points, agreements, and disagreements succinctly.\n\n"
+         "You are the moderator. "
+         "Summarize the key points, agreements, and disagreements succinctly..\n\n"
          f"TRANSCRIPT:\n{history_text}"
      )
      return await get_character_response(moderator, prompt, [])
@@ -367,13 +368,13 @@ async def deliberate(
      name_map = {char.name.lower(): char for char in CHARACTERS}
      selected = name_map.get(summariser_normalized)
      if not selected:
-         raise ValueError(f"Unknown summariser '{summariser}'. Choose moderator or one of: {', '.join(order_names)}.")
+         selected = moderator_instance  # fallback just in case
      summary_prompt = (
          "Provide a concise synthesis (3 sentences max) from your perspective, referencing the discussion below.\n\n"
          f"{full_history_text}"
      )
      final_summary = await get_character_response(selected, summary_prompt, [])
-     summary_author = summariser.title()
+     summary_author = selected.name

      history_output: List[Any]
      if format == "chat":
@@ -477,7 +478,7 @@ def format_deliberation_html(entries: list | dict) -> str:

      html_parts = ['<div class="deliberation-container">']

-     for entry in entries:
+     for entry in entries:#
          phase = entry.get("phase", "unknown").lower()
          cycle = entry.get("cycle", 0)
          name = entry.get("name", "unknown")
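With moderator_instance as the default argument, _neutral_summary no longer needs an explicit character. A minimal sketch (the transcript is invented; importing src.gradio.app instantiates and registers the characters as a side effect, so it assumes the same PYTHONPATH and API-key setup as above):

    import asyncio
    from src.gradio.app import _neutral_summary

    transcript = "Raven: favour option A.\nCrow: option B is cheaper to run."
    print(asyncio.run(_neutral_summary(transcript)))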