diff --git a/.cursorrules b/.cursorrules deleted file mode 100644 index 8fbe6def025d95d15c47f657eafbbbf0643a5ca5..0000000000000000000000000000000000000000 --- a/.cursorrules +++ /dev/null @@ -1,240 +0,0 @@ -# DeepCritical Project - Cursor Rules - -## Project-Wide Rules - -**Architecture**: Multi-agent research system using Pydantic AI for agent orchestration, supporting iterative and deep research patterns. Uses middleware for state management, budget tracking, and workflow coordination. - -**Type Safety**: ALWAYS use complete type hints. All functions must have parameter and return type annotations. Use `mypy --strict` compliance. Use `TYPE_CHECKING` imports for circular dependencies: `from typing import TYPE_CHECKING; if TYPE_CHECKING: from src.services.embeddings import EmbeddingService` - -**Async Patterns**: ALL I/O operations must be async (`async def`, `await`). Use `asyncio.gather()` for parallel operations. CPU-bound work must use `run_in_executor()`: `loop = asyncio.get_running_loop(); result = await loop.run_in_executor(None, cpu_bound_function, args)`. Never block the event loop. - -**Error Handling**: Use custom exceptions from `src/utils/exceptions.py`: `DeepCriticalError`, `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions: `raise SearchError(...) from e`. Log with structlog: `logger.error("Operation failed", error=str(e), context=value)`. - -**Logging**: Use `structlog` for ALL logging (NOT `print` or `logging`). Import: `import structlog; logger = structlog.get_logger()`. Log with structured data: `logger.info("event", key=value)`. Use appropriate levels: DEBUG, INFO, WARNING, ERROR. - -**Pydantic Models**: All data exchange uses Pydantic models from `src/utils/models.py`. Models are frozen (`model_config = {"frozen": True}`) for immutability. Use `Field()` with descriptions. Validate with `ge=`, `le=`, `min_length=`, `max_length=` constraints. - -**Code Style**: Ruff with 100-char line length. Ignore rules: `PLR0913` (too many arguments), `PLR0912` (too many branches), `PLR0911` (too many returns), `PLR2004` (magic values), `PLW0603` (global statement), `PLC0415` (lazy imports). - -**Docstrings**: Google-style docstrings for all public functions. Include Args, Returns, Raises sections. Use type hints in docstrings only if needed for clarity. - -**Testing**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). Use `respx` for httpx mocking, `pytest-mock` for general mocking. - -**State Management**: Use `ContextVar` in middleware for thread-safe isolation. Never use global mutable state (except singletons via `@lru_cache`). Use `WorkflowState` from `src/middleware/state_machine.py` for workflow state. - -**Citation Validation**: ALWAYS validate references before returning reports. Use `validate_references()` from `src/utils/citation_validator.py`. Remove hallucinated citations. Log warnings for removed citations. - ---- - -## src/agents/ - Agent Implementation Rules - -**Pattern**: All agents use Pydantic AI `Agent` class. Agents have structured output types (Pydantic models) or return strings. Use factory functions in `src/agent_factory/agents.py` for creation. 
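The project-wide rules above (async I/O everywhere, `run_in_executor()` for CPU-bound work, chained custom exceptions, structured logging via structlog) are stated in prose only. The following minimal sketch shows how they combine in practice; `SearchError` and `fetch_and_hash` are illustrative stand-ins, not code taken from `src/utils/exceptions.py` or any other module in the repository.

```python
import asyncio
import hashlib

import structlog

logger = structlog.get_logger()


class SearchError(Exception):
    """Illustrative stand-in for the project's SearchError exception."""


def _expensive_hash(text: str) -> str:
    # CPU-bound placeholder; real code might parse XML or score embeddings here.
    if not text.strip():
        raise ValueError("empty query")
    return hashlib.sha256(text.encode()).hexdigest()


async def fetch_and_hash(query: str) -> str:
    """Run CPU-bound work off the event loop and chain failures into SearchError."""
    loop = asyncio.get_running_loop()
    try:
        return await loop.run_in_executor(None, _expensive_hash, query)
    except ValueError as e:
        # Structured logging plus exception chaining, as the rules above require.
        logger.error("Query processing failed", error=str(e), query=query)
        raise SearchError(f"Could not process query: {query!r}") from e


if __name__ == "__main__":
    print(asyncio.run(fetch_and_hash("tirzepatide weight loss")))
```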
- -**Agent Structure**: -- System prompt as module-level constant (with date injection: `datetime.now().strftime("%Y-%m-%d")`) -- Agent class with `__init__(model: Any | None = None)` -- Main method (e.g., `async def evaluate()`, `async def write_report()`) -- Factory function: `def create_agent_name(model: Any | None = None) -> AgentName` - -**Model Initialization**: Use `get_model()` from `src/agent_factory/judges.py` if no model provided. Support OpenAI/Anthropic/HF Inference via settings. - -**Error Handling**: Return fallback values (e.g., `KnowledgeGapOutput(research_complete=False, outstanding_gaps=[...])`) on failure. Log errors with context. Use retry logic (3 retries) in Pydantic AI Agent initialization. - -**Input Validation**: Validate query/inputs are not empty. Truncate very long inputs with warnings. Handle None values gracefully. - -**Output Types**: Use structured output types from `src/utils/models.py` (e.g., `KnowledgeGapOutput`, `AgentSelectionPlan`, `ReportDraft`). For text output (writer agents), return `str` directly. - -**Agent-Specific Rules**: -- `knowledge_gap.py`: Outputs `KnowledgeGapOutput`. Evaluates research completeness. -- `tool_selector.py`: Outputs `AgentSelectionPlan`. Selects tools (RAG/web/database). -- `writer.py`: Returns markdown string. Includes citations in numbered format. -- `long_writer.py`: Uses `ReportDraft` input/output. Handles section-by-section writing. -- `proofreader.py`: Takes `ReportDraft`, returns polished markdown. -- `thinking.py`: Returns observation string from conversation history. -- `input_parser.py`: Outputs `ParsedQuery` with research mode detection. - ---- - -## src/tools/ - Search Tool Rules - -**Protocol**: All tools implement `SearchTool` protocol from `src/tools/base.py`: `name` property and `async def search(query, max_results) -> list[Evidence]`. - -**Rate Limiting**: Use `@retry` decorator from tenacity: `@retry(stop=stop_after_attempt(3), wait=wait_exponential(...))`. Implement `_rate_limit()` method for APIs with limits. Use shared rate limiters from `src/tools/rate_limiter.py`. - -**Error Handling**: Raise `SearchError` or `RateLimitError` on failures. Handle HTTP errors (429, 500, timeout). Return empty list on non-critical errors (log warning). - -**Query Preprocessing**: Use `preprocess_query()` from `src/tools/query_utils.py` to remove noise and expand synonyms. - -**Evidence Conversion**: Convert API responses to `Evidence` objects with `Citation`. Extract metadata (title, url, date, authors). Set relevance scores (0.0-1.0). Handle missing fields gracefully. - -**Tool-Specific Rules**: -- `pubmed.py`: Use NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Parse XML with `xmltodict`. Handle single vs. multiple articles. -- `clinicaltrials.py`: Use `requests` library (NOT httpx - WAF blocks httpx). Run in thread pool: `await asyncio.to_thread(requests.get, ...)`. Filter: Only interventional studies, active/completed. -- `europepmc.py`: Handle preprint markers: `[PREPRINT - Not peer-reviewed]`. Build URLs from DOI or PMID. -- `rag_tool.py`: Wraps `LlamaIndexRAGService`. Returns Evidence from RAG results. Handles ingestion. -- `search_handler.py`: Orchestrates parallel searches across multiple tools. Uses `asyncio.gather()` with `return_exceptions=True`. Aggregates results into `SearchResult`. - ---- - -## src/middleware/ - Middleware Rules - -**State Management**: Use `ContextVar` for thread-safe isolation. `WorkflowState` uses `ContextVar[WorkflowState | None]`. 
Initialize with `init_workflow_state(embedding_service)`. Access with `get_workflow_state()` (auto-initializes if missing). - -**WorkflowState**: Tracks `evidence: list[Evidence]`, `conversation: Conversation`, `embedding_service: Any`. Methods: `add_evidence()` (deduplicates by URL), `async search_related()` (semantic search). - -**WorkflowManager**: Manages parallel research loops. Methods: `add_loop()`, `run_loops_parallel()`, `update_loop_status()`, `sync_loop_evidence_to_state()`. Uses `asyncio.gather()` for parallel execution. Handles errors per loop (don't fail all if one fails). - -**BudgetTracker**: Tracks tokens, time, iterations per loop and globally. Methods: `create_budget()`, `add_tokens()`, `start_timer()`, `update_timer()`, `increment_iteration()`, `check_budget()`, `can_continue()`. Token estimation: `estimate_tokens(text)` (~4 chars per token), `estimate_llm_call_tokens(prompt, response)`. - -**Models**: All middleware models in `src/utils/models.py`. `IterationData`, `Conversation`, `ResearchLoop`, `BudgetStatus` are used by middleware. - ---- - -## src/orchestrator/ - Orchestration Rules - -**Research Flows**: Two patterns: `IterativeResearchFlow` (single loop) and `DeepResearchFlow` (plan → parallel loops → synthesis). Both support agent chains (`use_graph=False`) and graph execution (`use_graph=True`). - -**IterativeResearchFlow**: Pattern: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete. Uses `KnowledgeGapAgent`, `ToolSelectorAgent`, `ThinkingAgent`, `WriterAgent`, `JudgeHandler`. Tracks iterations, time, budget. - -**DeepResearchFlow**: Pattern: Planner → Parallel iterative loops per section → Synthesizer. Uses `PlannerAgent`, `IterativeResearchFlow` (per section), `LongWriterAgent` or `ProofreaderAgent`. Uses `WorkflowManager` for parallel execution. - -**Graph Orchestrator**: Uses Pydantic AI Graphs (when available) or agent chains (fallback). Routes based on research mode (iterative/deep/auto). Streams `AgentEvent` objects for UI. - -**State Initialization**: Always call `init_workflow_state()` before running flows. Initialize `BudgetTracker` per loop. Use `WorkflowManager` for parallel coordination. - -**Event Streaming**: Yield `AgentEvent` objects during execution. Event types: "started", "search_complete", "judge_complete", "hypothesizing", "synthesizing", "complete", "error". Include iteration numbers and data payloads. - ---- - -## src/services/ - Service Rules - -**EmbeddingService**: Local sentence-transformers (NO API key required). All operations async-safe via `run_in_executor()`. ChromaDB for vector storage. Deduplication threshold: 0.85 (85% similarity = duplicate). - -**LlamaIndexRAGService**: Uses OpenAI embeddings (requires `OPENAI_API_KEY`). Methods: `ingest_evidence()`, `retrieve()`, `query()`. Returns documents with metadata (source, title, url, date, authors). Lazy initialization with graceful fallback. - -**StatisticalAnalyzer**: Generates Python code via LLM. Executes in Modal sandbox (secure, isolated). Library versions pinned in `SANDBOX_LIBRARIES` dict. Returns `AnalysisResult` with verdict (SUPPORTED/REFUTED/INCONCLUSIVE). - -**Singleton Pattern**: Use `@lru_cache(maxsize=1)` for singletons: `@lru_cache(maxsize=1); def get_service() -> Service: return Service()`. Lazy initialization to avoid requiring dependencies at import time. - ---- - -## src/utils/ - Utility Rules - -**Models**: All Pydantic models in `src/utils/models.py`. 
Use frozen models (`model_config = {"frozen": True}`) except where mutation needed. Use `Field()` with descriptions. Validate with constraints. - -**Config**: Settings via Pydantic Settings (`src/utils/config.py`). Load from `.env` automatically. Use `settings` singleton: `from src.utils.config import settings`. Validate API keys with properties: `has_openai_key`, `has_anthropic_key`. - -**Exceptions**: Custom exception hierarchy in `src/utils/exceptions.py`. Base: `DeepCriticalError`. Specific: `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions. - -**LLM Factory**: Centralized LLM model creation in `src/utils/llm_factory.py`. Supports OpenAI, Anthropic, HF Inference. Use `get_model()` or factory functions. Check requirements before initialization. - -**Citation Validator**: Use `validate_references()` from `src/utils/citation_validator.py`. Removes hallucinated citations (URLs not in evidence). Logs warnings. Returns validated report string. - ---- - -## src/orchestrator_factory.py Rules - -**Purpose**: Factory for creating orchestrators. Supports "simple" (legacy) and "advanced" (magentic) modes. Auto-detects mode based on API key availability. - -**Pattern**: Lazy import for optional dependencies (`_get_magentic_orchestrator_class()`). Handles `ImportError` gracefully with clear error messages. - -**Mode Detection**: `_determine_mode()` checks explicit mode or auto-detects: "advanced" if `settings.has_openai_key`, else "simple". Maps "magentic" → "advanced". - -**Function Signature**: `create_orchestrator(search_handler, judge_handler, config, mode) -> Any`. Simple mode requires handlers. Advanced mode uses MagenticOrchestrator. - -**Error Handling**: Raise `ValueError` with clear messages if requirements not met. Log mode selection with structlog. - ---- - -## src/orchestrator_hierarchical.py Rules - -**Purpose**: Hierarchical orchestrator using middleware and sub-teams. Adapts Magentic ChatAgent to SubIterationTeam protocol. - -**Pattern**: Uses `SubIterationMiddleware` with `ResearchTeam` and `LLMSubIterationJudge`. Event-driven via callback queue. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated, but kept for compatibility). - -**Event Streaming**: Uses `asyncio.Queue` for event coordination. Yields `AgentEvent` objects. Handles event callback pattern with `asyncio.wait()`. - -**Error Handling**: Log errors with context. Yield error events. Process remaining events after task completion. - ---- - -## src/orchestrator_magentic.py Rules - -**Purpose**: Magentic-based orchestrator using ChatAgent pattern. Each agent has internal LLM. Manager orchestrates agents. - -**Pattern**: Uses `MagenticBuilder` with participants (searcher, hypothesizer, judge, reporter). Manager uses `OpenAIChatClient`. Workflow built in `_build_workflow()`. - -**Event Processing**: `_process_event()` converts Magentic events to `AgentEvent`. Handles: `MagenticOrchestratorMessageEvent`, `MagenticAgentMessageEvent`, `MagenticFinalResultEvent`, `MagenticAgentDeltaEvent`, `WorkflowOutputEvent`. - -**Text Extraction**: `_extract_text()` defensively extracts text from messages. Priority: `.content` → `.text` → `str(message)`. Handles buggy message objects. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated). - -**Requirements**: Must call `check_magentic_requirements()` in `__init__`. Requires `agent-framework-core` and OpenAI API key. 
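The middleware and service rules above lean on three concrete patterns: `ContextVar` for per-context workflow state, `@lru_cache(maxsize=1)` for lazy singletons, and a rough 4-characters-per-token estimate for budgets. A condensed, assumption-laden sketch of those patterns follows; the real `WorkflowState` and `BudgetTracker` in `src/middleware/` carry more fields and methods.

```python
from contextvars import ContextVar
from dataclasses import dataclass, field
from functools import lru_cache


@dataclass
class WorkflowState:
    """Simplified stand-in; the real class tracks evidence, conversation, embeddings."""

    evidence_urls: list[str] = field(default_factory=list)

    def add_evidence(self, url: str) -> None:
        # Deduplicate by URL, as the middleware rules describe.
        if url not in self.evidence_urls:
            self.evidence_urls.append(url)


_state: ContextVar[WorkflowState | None] = ContextVar("workflow_state", default=None)


def init_workflow_state() -> WorkflowState:
    state = WorkflowState()
    _state.set(state)
    return state


def get_workflow_state() -> WorkflowState:
    # Auto-initialize if missing, mirroring the behaviour described above.
    state = _state.get()
    return state if state is not None else init_workflow_state()


@lru_cache(maxsize=1)
def get_budget_tracker() -> dict[str, int]:
    # Lazy singleton via lru_cache; a dict stands in for the real BudgetTracker.
    return {"tokens_used": 0}


def estimate_tokens(text: str) -> int:
    # Rough heuristic quoted in the rules: roughly 4 characters per token.
    return max(1, len(text) // 4)
```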
- -**Event Types**: Maps agent names to event types: "search" → "search_complete", "judge" → "judge_complete", "hypothes" → "hypothesizing", "report" → "synthesizing". - ---- - -## src/agent_factory/ - Factory Rules - -**Pattern**: Factory functions for creating agents and handlers. Lazy initialization for optional dependencies. Support OpenAI/Anthropic/HF Inference. - -**Judges**: `create_judge_handler()` creates `JudgeHandler` with structured output (`JudgeAssessment`). Supports `MockJudgeHandler`, `HFInferenceJudgeHandler` as fallbacks. - -**Agents**: Factory functions in `agents.py` for all Pydantic AI agents. Pattern: `create_agent_name(model: Any | None = None) -> AgentName`. Use `get_model()` if model not provided. - -**Graph Builder**: `graph_builder.py` contains utilities for building research graphs. Supports iterative and deep research graph construction. - -**Error Handling**: Raise `ConfigurationError` if required API keys missing. Log agent creation. Handle import errors gracefully. - ---- - -## src/prompts/ - Prompt Rules - -**Pattern**: System prompts stored as module-level constants. Include date injection: `datetime.now().strftime("%Y-%m-%d")`. Format evidence with truncation (1500 chars per item). - -**Judge Prompts**: In `judge.py`. Handle empty evidence case separately. Always request structured JSON output. - -**Hypothesis Prompts**: In `hypothesis.py`. Use diverse evidence selection (MMR algorithm). Sentence-aware truncation. - -**Report Prompts**: In `report.py`. Include full citation details. Use diverse evidence selection (n=20). Emphasize citation validation rules. - ---- - -## Testing Rules - -**Structure**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). - -**Mocking**: Use `respx` for httpx mocking. Use `pytest-mock` for general mocking. Mock LLM calls in unit tests (use `MockJudgeHandler`). - -**Fixtures**: Common fixtures in `tests/conftest.py`: `mock_httpx_client`, `mock_llm_response`. - -**Coverage**: Aim for >80% coverage. Test error handling, edge cases, and integration paths. - ---- - -## File-Specific Agent Rules - -**knowledge_gap.py**: Outputs `KnowledgeGapOutput`. System prompt evaluates research completeness. Handles conversation history. Returns fallback on error. - -**writer.py**: Returns markdown string. System prompt includes citation format examples. Validates inputs. Truncates long findings. Retry logic for transient failures. - -**long_writer.py**: Uses `ReportDraft` input/output. Writes sections iteratively. Reformats references (deduplicates, renumbers). Reformats section headings. - -**proofreader.py**: Takes `ReportDraft`, returns polished markdown. Removes duplicates. Adds summary. Preserves references. - -**tool_selector.py**: Outputs `AgentSelectionPlan`. System prompt lists available agents (WebSearchAgent, SiteCrawlerAgent, RAGAgent). Guidelines for when to use each. - -**thinking.py**: Returns observation string. Generates observations from conversation history. Uses query and background context. - -**input_parser.py**: Outputs `ParsedQuery`. Detects research mode (iterative/deep). Extracts entities and research questions. Improves/refines query. 
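The testing rules above call for fast, fully mocked unit tests with `respx` intercepting httpx traffic. Here is a small illustrative test module under those assumptions; the `fetch_title` helper and example.org endpoints are hypothetical, not part of `tests/unit/`.

```python
import asyncio

import httpx
import pytest
import respx


async def fetch_title(url: str) -> str:
    """Hypothetical helper under test; real tools live in src/tools/."""
    async with httpx.AsyncClient() as client:
        resp = await client.get(url)
        resp.raise_for_status()
        return resp.json()["title"]


@respx.mock
def test_fetch_title_uses_mocked_response() -> None:
    # No real network traffic: respx intercepts the httpx transport.
    respx.get("https://example.org/item/1").mock(
        return_value=httpx.Response(200, json={"title": "mocked"})
    )
    assert asyncio.run(fetch_title("https://example.org/item/1")) == "mocked"


@respx.mock
def test_fetch_title_raises_on_http_error() -> None:
    respx.get("https://example.org/item/2").mock(return_value=httpx.Response(500))
    with pytest.raises(httpx.HTTPStatusError):
        asyncio.run(fetch_title("https://example.org/item/2"))
```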
- - - - - - - diff --git a/.env.example b/.env.example index 442ff75d33f92422e78850b3c9d6d49af6f1d6e3..b8061357538326dd7fad717c627cdcfa5c0b3eb9 100644 --- a/.env.example +++ b/.env.example @@ -1,83 +1,63 @@ -# HuggingFace -HF_TOKEN=your_huggingface_token_here +# ============== LLM CONFIGURATION ============== -# OpenAI (optional) -OPENAI_API_KEY=your_openai_key_here +# Provider: "openai", "anthropic", or "huggingface" +LLM_PROVIDER=openai -# Anthropic (optional) -ANTHROPIC_API_KEY=your_anthropic_key_here +# API Keys (at least one required for full LLM analysis) +OPENAI_API_KEY=sk-your-key-here +ANTHROPIC_API_KEY=sk-ant-your-key-here # Model names (optional - sensible defaults set in config.py) -# ANTHROPIC_MODEL=claude-sonnet-4-5-20250929 # OPENAI_MODEL=gpt-5.1 +# ANTHROPIC_MODEL=claude-sonnet-4-5-20250929 +# ============== HUGGINGFACE CONFIGURATION ============== -# ============================================ -# Audio Processing Configuration (TTS) -# ============================================ -# Kokoro TTS Model Configuration -TTS_MODEL=hexgrad/Kokoro-82M -TTS_VOICE=af_heart -TTS_SPEED=1.0 -TTS_GPU=T4 -TTS_TIMEOUT=60 - -# Available TTS Voices: -# American English Female: af_heart, af_bella, af_nicole, af_aoede, af_kore, af_sarah, af_nova, af_sky, af_alloy, af_jessica, af_river -# American English Male: am_michael, am_fenrir, am_puck, am_echo, am_eric, am_liam, am_onyx, am_santa, am_adam - -# Available GPU Types (Modal): -# T4 - Cheapest, good for testing (default) -# A10 - Good balance of cost/performance -# A100 - Fastest, most expensive -# L4 - NVIDIA L4 GPU -# L40S - NVIDIA L40S GPU -# Note: GPU type is set at function definition time. Changes require app restart. - -# ============================================ -# Audio Processing Configuration (STT) -# ============================================ -# Speech-to-Text API Configuration -STT_API_URL=nvidia/canary-1b-v2 -STT_SOURCE_LANG=English -STT_TARGET_LANG=English - -# Available STT Languages: -# English, Bulgarian, Croatian, Czech, Danish, Dutch, Estonian, Finnish, French, German, Greek, Hungarian, Italian, Latvian, Lithuanian, Maltese, Polish, Portuguese, Romanian, Slovak, Slovenian, Spanish, Swedish, Russian, Ukrainian - -# ============================================ -# Audio Feature Flags -# ============================================ -ENABLE_AUDIO_INPUT=true -ENABLE_AUDIO_OUTPUT=true - -# ============================================ -# Image OCR Configuration -# ============================================ -OCR_API_URL=prithivMLmods/Multimodal-OCR3 -ENABLE_IMAGE_INPUT=true - -# ============== EMBEDDINGS ============== - -# OpenAI Embedding Model (used if LLM_PROVIDER is openai and performing RAG/Embeddings) -OPENAI_EMBEDDING_MODEL=text-embedding-3-small - -# Local Embedding Model (used for local/offline embeddings) -LOCAL_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 - -# ============== HUGGINGFACE (FREE TIER) ============== - -# HuggingFace Token - enables Llama 3.1 (best quality free model) +# HuggingFace Token - enables gated models and higher rate limits # Get yours at: https://huggingface.co/settings/tokens -# -# WITHOUT HF_TOKEN: Falls back to ungated models (zephyr-7b-beta) -# WITH HF_TOKEN: Uses Llama 3.1 8B Instruct (requires accepting license) +# +# WITHOUT HF_TOKEN: Falls back to ungated models (zephyr-7b-beta, Qwen2-7B) +# WITH HF_TOKEN: Uses gated models (Llama 3.1, Gemma-2) via inference providers # # For HuggingFace Spaces deployment: # Set this as a "Secret" in Space Settings -> Variables and 
secrets # Users/judges don't need their own token - the Space secret is used # HF_TOKEN=hf_your-token-here +# Alternative: HUGGINGFACE_API_KEY (same as HF_TOKEN) + +# Default HuggingFace model for inference (gated, requires auth) +# Can be overridden in UI dropdown +# Latest reasoning models: Qwen3-Next-80B-A3B-Thinking, Qwen3-Next-80B-A3B-Instruct, Llama-3.3-70B-Instruct +HUGGINGFACE_MODEL=Qwen/Qwen3-Next-80B-A3B-Thinking + +# Fallback models for HuggingFace Inference API (comma-separated) +# Models are tried in order until one succeeds +# Format: model1,model2,model3 +# Latest reasoning models first, then reliable fallbacks +# Reasoning models: Qwen3-Next (thinking/instruct), Llama-3.3-70B, Qwen3-235B +# Fallbacks: Llama-3.1-8B, Zephyr-7B (ungated), Qwen2-7B (ungated) +HF_FALLBACK_MODELS=Qwen/Qwen3-Next-80B-A3B-Thinking,Qwen/Qwen3-Next-80B-A3B-Instruct,meta-llama/Llama-3.3-70B-Instruct,meta-llama/Llama-3.1-8B-Instruct,HuggingFaceH4/zephyr-7b-beta,Qwen/Qwen2-7B-Instruct + +# Override model/provider selection (optional, usually set via UI) +# HF_MODEL=Qwen/Qwen3-Next-80B-A3B-Thinking +# HF_PROVIDER=hyperbolic + +# ============== EMBEDDING CONFIGURATION ============== + +# Embedding Provider: "openai", "local", or "huggingface" +# Default: "local" (no API key required) +EMBEDDING_PROVIDER=local + +# OpenAI Embedding Model (used if EMBEDDING_PROVIDER=openai) +OPENAI_EMBEDDING_MODEL=text-embedding-3-small + +# Local Embedding Model (sentence-transformers, used if EMBEDDING_PROVIDER=local) +# BAAI/bge-small-en-v1.5 is newer, faster, and better than all-MiniLM-L6-v2 +LOCAL_EMBEDDING_MODEL=BAAI/bge-small-en-v1.5 + +# HuggingFace Embedding Model (used if EMBEDDING_PROVIDER=huggingface) +HUGGINGFACE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 # ============== AGENT CONFIGURATION ============== @@ -85,23 +65,60 @@ MAX_ITERATIONS=10 SEARCH_TIMEOUT=30 LOG_LEVEL=INFO -# ============================================ -# Modal Configuration (Required for TTS) -# ============================================ -# Modal credentials are required for TTS (Text-to-Speech) functionality -# Get your credentials from: https://modal.com/ -MODAL_TOKEN_ID=your_modal_token_id_here -MODAL_TOKEN_SECRET=your_modal_token_secret_here +# Graph-based execution (experimental) +# USE_GRAPH_EXECUTION=false + +# Budget & Rate Limiting +# DEFAULT_TOKEN_LIMIT=100000 +# DEFAULT_TIME_LIMIT_MINUTES=10 +# DEFAULT_ITERATIONS_LIMIT=10 + +# ============== WEB SEARCH CONFIGURATION ============== + +# Web Search Provider: "serper", "searchxng", "brave", "tavily", or "duckduckgo" +# Default: "duckduckgo" (no API key required) +WEB_SEARCH_PROVIDER=duckduckgo + +# Serper API Key (for Google search via Serper) +# SERPER_API_KEY=your-serper-key-here + +# SearchXNG Host URL (for self-hosted search) +# SEARCHXNG_HOST=http://localhost:8080 + +# Brave Search API Key +# BRAVE_API_KEY=your-brave-key-here + +# Tavily API Key +# TAVILY_API_KEY=your-tavily-key-here # ============== EXTERNAL SERVICES ============== -# PubMed (optional - higher rate limits) +# PubMed (optional - higher rate limits: 10 req/sec vs 3 req/sec) NCBI_API_KEY=your-ncbi-key-here -# Vector Database (optional - for LlamaIndex RAG) +# Modal (optional - for secure code execution sandbox) +# MODAL_TOKEN_ID=your-modal-token-id +# MODAL_TOKEN_SECRET=your-modal-token-secret + +# ============== VECTOR DATABASE (ChromaDB) ============== + +# ChromaDB storage path CHROMA_DB_PATH=./chroma_db -# Neo4j Knowledge Graph -NEO4J_URI=bolt://localhost:7687 -NEO4J_USER=neo4j 
-NEO4J_PASSWORD=your_neo4j_password_here -NEO4J_DATABASE=your_database_name + +# Persist ChromaDB to disk (default: true) +# CHROMA_DB_PERSIST=true + +# Remote ChromaDB server (optional) +# CHROMA_DB_HOST=localhost +# CHROMA_DB_PORT=8000 + +# ============== RAG SERVICE CONFIGURATION ============== + +# ChromaDB collection name for RAG +# RAG_COLLECTION_NAME=deepcritical_evidence + +# Number of top results to retrieve from RAG +# RAG_SIMILARITY_TOP_K=5 + +# Automatically ingest evidence into RAG +# RAG_AUTO_INGEST=true diff --git a/.github/README.md b/.github/README.md index 8f3727f7e12fb16c26e4cc7bd30f99d7ffcf36b2..a3b61ae53484cce05c72522913bb9d15f7e67c90 100644 --- a/.github/README.md +++ b/.github/README.md @@ -3,7 +3,8 @@ > **You are reading the Github README!** > > - 📚 **Documentation**: See our [technical documentation](https://deepcritical.github.io/GradioDemo/) for detailed information -> - 📖 **Demo README**: Check out the [Demo README](..README.md) for more information > - 🏆 **Demo**: Kindly consider using our [Free Demo](https://hf.co/DataQuests/GradioDemo) +> - 📖 **Demo README**: Check out the [Demo README](..README.md) for setup, configuration, and contribution guidelines +> - 🏆 **Hackathon Submission**: Keep reading below for more information about our MCP Hackathon submission
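The new `.env.example` keys above (LLM/embedding/web-search provider selection, ChromaDB paths, RAG options) are meant to be consumed through Pydantic Settings. Below is a reduced sketch of how such a settings class could look; the project's real class lives in `src/utils/config.py` and may use different field names and defaults.

```python
from pydantic_settings import BaseSettings, SettingsConfigDict


class ExampleSettings(BaseSettings):
    """Reduced sketch; the real settings singleton is defined in src/utils/config.py."""

    model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8", extra="ignore")

    llm_provider: str = "openai"
    embedding_provider: str = "local"
    web_search_provider: str = "duckduckgo"
    huggingface_model: str = "Qwen/Qwen3-Next-80B-A3B-Thinking"
    hf_fallback_models: str = ""
    chroma_db_path: str = "./chroma_db"
    rag_similarity_top_k: int = 5

    @property
    def fallback_model_list(self) -> list[str]:
        # HF_FALLBACK_MODELS is a comma-separated string; split it into an ordered list.
        return [m.strip() for m in self.hf_fallback_models.split(",") if m.strip()]


if __name__ == "__main__":
    settings = ExampleSettings()
    print(settings.llm_provider, settings.fallback_model_list)
```

Environment variable names map onto the lower-case field names case-insensitively, so `LLM_PROVIDER=huggingface` in `.env` populates `llm_provider` without extra wiring.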
@@ -37,7 +38,15 @@ gradio run "src/app.py" Open your browser to `http://localhost:7860`. -### 3. Connect via MCP +### 3. Authentication (Optional) + +**HuggingFace OAuth Login**: +- Click the "Sign in with HuggingFace" button at the top of the app +- Your HuggingFace API token will be automatically used for AI inference +- No need to manually enter API keys when logged in +- OAuth token is used only for the current session and never stored + +### 4. Connect via MCP This application exposes a Model Context Protocol (MCP) server, allowing you to use its search tools directly from Claude Desktop or other MCP clients. diff --git a/.github/scripts/deploy_to_hf_space.py b/.github/scripts/deploy_to_hf_space.py deleted file mode 100644 index 0c5782714054ef8a63628ddf4bc84ffacd8e80ff..0000000000000000000000000000000000000000 --- a/.github/scripts/deploy_to_hf_space.py +++ /dev/null @@ -1,391 +0,0 @@ -"""Deploy repository to Hugging Face Space, excluding unnecessary files.""" - -import os -import shutil -import subprocess -import tempfile -from pathlib import Path - -from huggingface_hub import HfApi - - -def get_excluded_dirs() -> set[str]: - """Get set of directory names to exclude from deployment.""" - return { - "docs", - "dev", - "folder", - "site", - "tests", # Optional - can be included if desired - "examples", # Optional - can be included if desired - ".git", - ".github", - "__pycache__", - ".pytest_cache", - ".mypy_cache", - ".ruff_cache", - ".venv", - "venv", - "env", - "ENV", - "node_modules", - ".cursor", - "reference_repos", - "burner_docs", - "chroma_db", - "logs", - "build", - "dist", - ".eggs", - "htmlcov", - "hf_space", # Exclude the cloned HF Space directory itself - } - - -def get_excluded_files() -> set[str]: - """Get set of file names to exclude from deployment.""" - return { - ".pre-commit-config.yaml", - "mkdocs.yml", - "uv.lock", - "AGENTS.txt", - ".env", - ".env.local", - "*.local", - ".DS_Store", - "Thumbs.db", - "*.log", - ".coverage", - "coverage.xml", - } - - -def should_exclude(path: Path, excluded_dirs: set[str], excluded_files: set[str]) -> bool: - """Check if a path should be excluded from deployment.""" - # Check if any parent directory is excluded - for parent in path.parents: - if parent.name in excluded_dirs: - return True - - # Check if the path itself is a directory that should be excluded - if path.is_dir() and path.name in excluded_dirs: - return True - - # Check if the file name matches excluded patterns - if path.is_file(): - # Check exact match - if path.name in excluded_files: - return True - # Check pattern matches (simple wildcard support) - for pattern in excluded_files: - if "*" in pattern: - # Simple pattern matching (e.g., "*.log") - suffix = pattern.replace("*", "") - if path.name.endswith(suffix): - return True - - return False - - -def deploy_to_hf_space() -> None: - """Deploy repository to Hugging Face Space. - - Supports both user and organization Spaces: - - User Space: username/space-name - - Organization Space: organization-name/space-name - - Works with both classic tokens and fine-grained tokens. 
- """ - # Get configuration from environment variables - hf_token = os.getenv("HF_TOKEN") - hf_username = os.getenv("HF_USERNAME") # Can be username or organization name - space_name = os.getenv("HF_SPACE_NAME") - - # Check which variables are missing and provide helpful error message - missing = [] - if not hf_token: - missing.append("HF_TOKEN (should be in repository secrets)") - if not hf_username: - missing.append("HF_USERNAME (should be in repository variables)") - if not space_name: - missing.append("HF_SPACE_NAME (should be in repository variables)") - - if missing: - raise ValueError( - f"Missing required environment variables: {', '.join(missing)}\n" - f"Please configure:\n" - f" - HF_TOKEN in Settings > Secrets and variables > Actions > Secrets\n" - f" - HF_USERNAME in Settings > Secrets and variables > Actions > Variables\n" - f" - HF_SPACE_NAME in Settings > Secrets and variables > Actions > Variables" - ) - - # HF_USERNAME can be either a username or organization name - # Format: {username|organization}/{space_name} - repo_id = f"{hf_username}/{space_name}" - local_dir = "hf_space" - - print(f"🚀 Deploying to Hugging Face Space: {repo_id}") - - # Initialize HF API - api = HfApi(token=hf_token) - - # Create Space if it doesn't exist - try: - api.repo_info(repo_id=repo_id, repo_type="space", token=hf_token) - print(f"✅ Space exists: {repo_id}") - except Exception: - print(f"⚠️ Space does not exist, creating: {repo_id}") - # Create new repository - # Note: For organizations, repo_id should be "org/space-name" - # For users, repo_id should be "username/space-name" - api.create_repo( - repo_id=repo_id, # Full repo_id including owner - repo_type="space", - space_sdk="gradio", - token=hf_token, - exist_ok=True, - ) - print(f"✅ Created new Space: {repo_id}") - - # Configure Git credential helper for authentication - # This is needed for Git LFS to work properly with fine-grained tokens - print("🔐 Configuring Git credentials...") - - # Use Git credential store to store the token - # This allows Git LFS to authenticate properly - temp_dir = Path(tempfile.gettempdir()) - credential_store = temp_dir / ".git-credentials-hf" - - # Write credentials in the format: https://username:token@huggingface.co - credential_store.write_text( - f"https://{hf_username}:{hf_token}@huggingface.co\n", encoding="utf-8" - ) - try: - credential_store.chmod(0o600) # Secure permissions (Unix only) - except OSError: - # Windows doesn't support chmod, skip - pass - - # Configure Git to use the credential store - subprocess.run( - ["git", "config", "--global", "credential.helper", f"store --file={credential_store}"], - check=True, - capture_output=True, - ) - - # Also set environment variable for Git LFS - os.environ["GIT_CREDENTIAL_HELPER"] = f"store --file={credential_store}" - - # Clone repository using git - # Use the token in the URL for initial clone, but LFS will use credential store - space_url = f"https://{hf_username}:{hf_token}@huggingface.co/spaces/{repo_id}" - - if Path(local_dir).exists(): - print(f"🧹 Removing existing {local_dir} directory...") - shutil.rmtree(local_dir) - - print("📥 Cloning Space repository...") - try: - result = subprocess.run( - ["git", "clone", space_url, local_dir], - check=True, - capture_output=True, - text=True, - ) - print("✅ Cloned Space repository") - - # After clone, configure the remote to use credential helper - # This ensures future operations (like push) use the credential store - os.chdir(local_dir) - subprocess.run( - ["git", "remote", "set-url", "origin", 
f"https://huggingface.co/spaces/{repo_id}"], - check=True, - capture_output=True, - ) - os.chdir("..") - - except subprocess.CalledProcessError as e: - error_msg = e.stderr if e.stderr else e.stdout if e.stdout else "Unknown error" - print(f"❌ Failed to clone Space repository: {error_msg}") - - # Try alternative: clone with LFS skip, then fetch LFS files separately - print("🔄 Trying alternative clone method (skip LFS during clone)...") - try: - env = os.environ.copy() - env["GIT_LFS_SKIP_SMUDGE"] = "1" # Skip LFS during clone - - subprocess.run( - ["git", "clone", space_url, local_dir], - check=True, - capture_output=True, - text=True, - env=env, - ) - print("✅ Cloned Space repository (LFS skipped)") - - # Configure remote - os.chdir(local_dir) - subprocess.run( - ["git", "remote", "set-url", "origin", f"https://huggingface.co/spaces/{repo_id}"], - check=True, - capture_output=True, - ) - - # Try to fetch LFS files with proper authentication - print("📥 Fetching LFS files...") - subprocess.run( - ["git", "lfs", "pull"], - check=False, # Don't fail if LFS pull fails - we'll continue without LFS files - capture_output=True, - text=True, - ) - os.chdir("..") - print("✅ Repository cloned (LFS files may be incomplete, but deployment can continue)") - except subprocess.CalledProcessError as e2: - error_msg2 = e2.stderr if e2.stderr else e2.stdout if e2.stdout else "Unknown error" - print(f"❌ Alternative clone method also failed: {error_msg2}") - raise RuntimeError(f"Git clone failed: {error_msg}") from e - - # Get exclusion sets - excluded_dirs = get_excluded_dirs() - excluded_files = get_excluded_files() - - # Remove all existing files in HF Space (except .git) - print("🧹 Cleaning existing files...") - for item in Path(local_dir).iterdir(): - if item.name == ".git": - continue - if item.is_dir(): - shutil.rmtree(item) - else: - item.unlink() - - # Copy files from repository root - print("📦 Copying files...") - repo_root = Path(".") - files_copied = 0 - dirs_copied = 0 - - for item in repo_root.rglob("*"): - # Skip if in .git directory - if ".git" in item.parts: - continue - - # Skip if in hf_space directory (the cloned Space directory) - if "hf_space" in item.parts: - continue - - # Skip if should be excluded - if should_exclude(item, excluded_dirs, excluded_files): - continue - - # Calculate relative path - try: - rel_path = item.relative_to(repo_root) - except ValueError: - # Item is outside repo root, skip - continue - - # Skip if in excluded directory - if any(part in excluded_dirs for part in rel_path.parts): - continue - - # Destination path - dest_path = Path(local_dir) / rel_path - - # Create parent directories - dest_path.parent.mkdir(parents=True, exist_ok=True) - - # Copy file or directory - if item.is_file(): - shutil.copy2(item, dest_path) - files_copied += 1 - elif item.is_dir(): - # Directory will be created by parent mkdir, but we track it - dirs_copied += 1 - - print(f"✅ Copied {files_copied} files and {dirs_copied} directories") - - # Commit and push changes using git - print("💾 Committing changes...") - - # Change to the Space directory - original_cwd = os.getcwd() - os.chdir(local_dir) - - try: - # Configure git user (required for commit) - subprocess.run( - ["git", "config", "user.name", "github-actions[bot]"], - check=True, - capture_output=True, - ) - subprocess.run( - ["git", "config", "user.email", "github-actions[bot]@users.noreply.github.com"], - check=True, - capture_output=True, - ) - - # Add all files - subprocess.run( - ["git", "add", "."], - check=True, - 
capture_output=True, - ) - - # Check if there are changes to commit - result = subprocess.run( - ["git", "status", "--porcelain"], - check=False, - capture_output=True, - text=True, - ) - - if result.stdout.strip(): - # There are changes, commit and push - subprocess.run( - ["git", "commit", "-m", "Deploy to Hugging Face Space [skip ci]"], - check=True, - capture_output=True, - ) - print("📤 Pushing to Hugging Face Space...") - # Ensure remote URL uses credential helper (not token in URL) - subprocess.run( - ["git", "remote", "set-url", "origin", f"https://huggingface.co/spaces/{repo_id}"], - check=True, - capture_output=True, - ) - subprocess.run( - ["git", "push"], - check=True, - capture_output=True, - ) - print("✅ Deployment complete!") - else: - print("ℹ️ No changes to commit (repository is up to date)") - except subprocess.CalledProcessError as e: - error_msg = e.stderr if e.stderr else (e.stdout if e.stdout else str(e)) - if isinstance(error_msg, bytes): - error_msg = error_msg.decode("utf-8", errors="replace") - if "nothing to commit" in error_msg.lower(): - print("ℹ️ No changes to commit (repository is up to date)") - else: - print(f"⚠️ Error during git operations: {error_msg}") - raise RuntimeError(f"Git operation failed: {error_msg}") from e - finally: - # Return to original directory - os.chdir(original_cwd) - - # Clean up credential store for security - try: - if credential_store.exists(): - credential_store.unlink() - except Exception: - # Ignore cleanup errors - pass - - print(f"🎉 Successfully deployed to: https://huggingface.co/spaces/{repo_id}") - - -if __name__ == "__main__": - deploy_to_hf_space() diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3379c544288b6a62f9b004de196aacfce4d9159f..a5b3e2c4c25c5ef61d52e6795e5bc48900d48f07 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -2,9 +2,9 @@ name: CI on: push: - branches: [main, dev, develop] + branches: [main, dev] pull_request: - branches: [main, dev, develop] + branches: [main, dev] jobs: test: @@ -16,6 +16,11 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "latest" + - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v5 with: @@ -23,105 +28,53 @@ jobs: - name: Install dependencies run: | - python -m pip install --upgrade pip - pip install -e ".[dev]" + uv sync --extra dev - name: Lint with ruff - run: | - ruff check . --exclude tests - ruff format --check . --exclude tests continue-on-error: true + run: | + uv run ruff check . --exclude tests --exclude reference_repos + uv run ruff format --check . 
--exclude tests --exclude reference_repos - name: Type check with mypy - run: | - mypy src continue-on-error: true - - - name: Install embedding dependencies run: | - pip install -e ".[embeddings]" + uv run mypy src --ignore-missing-imports - - name: Run unit tests (excluding OpenAI and embedding providers) + - name: Run unit tests (No OpenAI/Anthropic, HuggingFace only) env: HF_TOKEN: ${{ secrets.HF_TOKEN }} + LLM_PROVIDER: huggingface run: | - pytest tests/unit/ -v -m "not openai and not embedding_provider" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-report=term + uv run pytest tests/unit/ -v -m "not openai and not anthropic and not embedding_provider" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml - name: Run local embeddings tests env: HF_TOKEN: ${{ secrets.HF_TOKEN }} + LLM_PROVIDER: huggingface run: | - pytest tests/ -v -m "local_embeddings" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-report=term --cov-append || true + uv run pytest tests/ -v -m "local_embeddings" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-append || true continue-on-error: true # Allow failures if dependencies not available - name: Run HuggingFace integration tests env: HF_TOKEN: ${{ secrets.HF_TOKEN }} + LLM_PROVIDER: huggingface run: | - pytest tests/integration/ -v -m "huggingface and not embedding_provider" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-report=term --cov-append || true + uv run pytest tests/integration/ -v -m "huggingface and not embedding_provider" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-append || true continue-on-error: true # Allow failures if HF_TOKEN not set - - name: Run non-OpenAI integration tests (excluding embedding providers) + - name: Run non-OpenAI/Anthropic integration tests (excluding embedding providers) env: HF_TOKEN: ${{ secrets.HF_TOKEN }} + LLM_PROVIDER: huggingface run: | - pytest tests/integration/ -v -m "integration and not openai and not embedding_provider" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-report=term --cov-append || true + uv run pytest tests/integration/ -v -m "integration and not openai and not anthropic and not embedding_provider" --tb=short -p no:logfire --cov --cov-branch --cov-report=xml --cov-append || true continue-on-error: true # Allow failures if dependencies not available - name: Upload coverage reports to Codecov uses: codecov/codecov-action@v5 + continue-on-error: true with: token: ${{ secrets.CODECOV_TOKEN }} slug: DeepCritical/GradioDemo - files: ./coverage.xml - fail_ci_if_error: false - continue-on-error: true - - docs: - runs-on: ubuntu-latest - permissions: - contents: write - if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/dev' || github.ref == 'refs/heads/develop') - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - - name: Install uv - uses: astral-sh/setup-uv@v5 - with: - version: "latest" - - - name: Install dependencies - run: | - uv sync --extra dev - - - name: Configure Git - run: | - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - git remote set-url origin https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}.git - - - name: Deploy to GitHub Pages - run: | - # mkdocs gh-deploy automatically creates .nojekyll, but let's verify - 
uv run mkdocs gh-deploy --force --message "Deploy docs [skip ci]" --strict - # Verify .nojekyll was created in gh-pages branch - git fetch origin gh-pages:gh-pages || true - git checkout gh-pages || true - if [ -f .nojekyll ]; then - echo "✓ .nojekyll file exists" - else - echo "⚠ .nojekyll file missing, creating it..." - touch .nojekyll - git add .nojekyll - git commit -m "Add .nojekyll to disable Jekyll [skip ci]" || true - git push origin gh-pages || true - fi - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/deploy-hf-space.yml b/.github/workflows/deploy-hf-space.yml deleted file mode 100644 index e22f89ab05f4f47184e769ff11426d8338285d81..0000000000000000000000000000000000000000 --- a/.github/workflows/deploy-hf-space.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: Deploy to Hugging Face Space - -on: - push: - branches: [main] - workflow_dispatch: # Allow manual triggering - -jobs: - deploy: - runs-on: ubuntu-latest - permissions: - contents: read - # No write permissions needed for GitHub repo (we're pushing to HF Space) - - steps: - - name: Checkout Repository - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: '3.11' - - - name: Install dependencies - run: | - pip install --upgrade pip - pip install huggingface-hub - - - name: Deploy to Hugging Face Space - env: - # Token from secrets (sensitive data) - HF_TOKEN: ${{ secrets.HF_TOKEN }} - # Username/Organization from repository variables (non-sensitive) - HF_USERNAME: ${{ vars.HF_USERNAME }} - # Space name from repository variables (non-sensitive) - HF_SPACE_NAME: ${{ vars.HF_SPACE_NAME }} - run: | - python .github/scripts/deploy_to_hf_space.py - - - name: Verify deployment - if: success() - run: | - echo "✅ Deployment completed successfully!" 
- echo "Space URL: https://huggingface.co/spaces/${{ vars.HF_USERNAME }}/${{ vars.HF_SPACE_NAME }}" - diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000000000000000000000000000000000000..bc0f876a94219ca871c49e4f009bfdcd5e0ab4ff --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,61 @@ +name: Documentation + +on: + push: + branches: + - main + - dev + paths: + - 'docs/**' + - 'mkdocs.yml' + - '.github/workflows/docs.yml' + pull_request: + branches: + - main + - dev + paths: + - 'docs/**' + - 'mkdocs.yml' + - '.github/workflows/docs.yml' + workflow_dispatch: + +permissions: + contents: write + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.11' + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + version: "latest" + + - name: Install dependencies + run: | + uv sync --extra dev + + - name: Build documentation + run: | + uv run mkdocs build --strict + + - name: Deploy to GitHub Pages + if: (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/dev') && github.event_name == 'push' + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + publish_dir: ./site + publish_branch: dev + cname: false + keep_files: true + + + + diff --git a/.gitignore b/.gitignore index 9a982ecba57a76ced6b098ad431436049f052514..73a21b4fb2ddf780de3cb84b706c2635565359df 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,10 @@ +=0.22.0 +=0.22.0, folder/ site/ .cursor/ .ruff_cache/ +docs/contributing/ # Python __pycache__/ *.py[cod] @@ -57,9 +60,6 @@ reference_repos/DeepCritical/ # Keep the README in reference_repos !reference_repos/README.md -# Development directory -dev/ - # OS .DS_Store Thumbs.db @@ -72,13 +72,12 @@ logs/ .pytest_cache/ .mypy_cache/ .coverage +.coverage.* +coverage.xml htmlcov/ -test_output*.txt # Database files chroma_db/ *.sqlite3 - # Trigger rebuild Wed Nov 26 17:51:41 EST 2025 -.env diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8b1184f5dec51a8c5abbab8bcc450b9b431a45e5..66993b5ec97b1bfa659fc9cdc9b3a323372d56ee 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,20 +1,20 @@ repos: - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.4.4 + rev: v0.14.7 # Compatible with ruff>=0.14.6 (matches CI) hooks: - id: ruff - args: [--fix, --exclude, tests] + args: [--fix, --exclude, tests, --exclude, reference_repos] exclude: ^reference_repos/ - id: ruff-format - args: [--exclude, tests] + args: [--exclude, tests, --exclude, reference_repos] exclude: ^reference_repos/ - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.10.0 + rev: v1.18.2 # Matches CI version mypy>=1.18.2 hooks: - id: mypy files: ^src/ - exclude: ^folder|^src/app.py + exclude: ^folder additional_dependencies: - pydantic>=2.7 - pydantic-settings>=2.2 @@ -31,14 +31,9 @@ repos: types: [python] args: [ "run", - "pytest", - "tests/unit/", - "-v", - "-m", - "not openai and not embedding_provider", - "--tb=short", - "-p", - "no:logfire", + "python", + ".pre-commit-hooks/run_pytest_with_sync.py", + "unit", ] pass_filenames: false always_run: true @@ -50,14 +45,9 @@ repos: types: [python] args: [ "run", - "pytest", - "tests/", - "-v", - "-m", - "local_embeddings", - "--tb=short", - "-p", - "no:logfire", + "python", + ".pre-commit-hooks/run_pytest_with_sync.py", + "embeddings", ] pass_filenames: false always_run: true diff --git a/=0.22.0 b/=0.22.0 new file mode 
100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/=0.22.0, b/=0.22.0, new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/AGENTS.txt b/AGENTS.txt deleted file mode 100644 index 24cb3ed4b8ac8cd3c519cbe24b640faaac1217bd..0000000000000000000000000000000000000000 --- a/AGENTS.txt +++ /dev/null @@ -1,236 +0,0 @@ -# DeepCritical Project - Rules - -## Project-Wide Rules - -**Architecture**: Multi-agent research system using Pydantic AI for agent orchestration, supporting iterative and deep research patterns. Uses middleware for state management, budget tracking, and workflow coordination. - -**Type Safety**: ALWAYS use complete type hints. All functions must have parameter and return type annotations. Use `mypy --strict` compliance. Use `TYPE_CHECKING` imports for circular dependencies: `from typing import TYPE_CHECKING; if TYPE_CHECKING: from src.services.embeddings import EmbeddingService` - -**Async Patterns**: ALL I/O operations must be async (`async def`, `await`). Use `asyncio.gather()` for parallel operations. CPU-bound work must use `run_in_executor()`: `loop = asyncio.get_running_loop(); result = await loop.run_in_executor(None, cpu_bound_function, args)`. Never block the event loop. - -**Error Handling**: Use custom exceptions from `src/utils/exceptions.py`: `DeepCriticalError`, `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions: `raise SearchError(...) from e`. Log with structlog: `logger.error("Operation failed", error=str(e), context=value)`. - -**Logging**: Use `structlog` for ALL logging (NOT `print` or `logging`). Import: `import structlog; logger = structlog.get_logger()`. Log with structured data: `logger.info("event", key=value)`. Use appropriate levels: DEBUG, INFO, WARNING, ERROR. - -**Pydantic Models**: All data exchange uses Pydantic models from `src/utils/models.py`. Models are frozen (`model_config = {"frozen": True}`) for immutability. Use `Field()` with descriptions. Validate with `ge=`, `le=`, `min_length=`, `max_length=` constraints. - -**Code Style**: Ruff with 100-char line length. Ignore rules: `PLR0913` (too many arguments), `PLR0912` (too many branches), `PLR0911` (too many returns), `PLR2004` (magic values), `PLW0603` (global statement), `PLC0415` (lazy imports). - -**Docstrings**: Google-style docstrings for all public functions. Include Args, Returns, Raises sections. Use type hints in docstrings only if needed for clarity. - -**Testing**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). Use `respx` for httpx mocking, `pytest-mock` for general mocking. - -**State Management**: Use `ContextVar` in middleware for thread-safe isolation. Never use global mutable state (except singletons via `@lru_cache`). Use `WorkflowState` from `src/middleware/state_machine.py` for workflow state. - -**Citation Validation**: ALWAYS validate references before returning reports. Use `validate_references()` from `src/utils/citation_validator.py`. Remove hallucinated citations. Log warnings for removed citations. - ---- - -## src/agents/ - Agent Implementation Rules - -**Pattern**: All agents use Pydantic AI `Agent` class. Agents have structured output types (Pydantic models) or return strings. Use factory functions in `src/agent_factory/agents.py` for creation. 
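The AGENTS.txt rules being removed here mirror the `.cursorrules` content above, including the frozen Pydantic model convention (`model_config = {"frozen": True}`, `Field()` descriptions, `ge=`/`le=` constraints). A minimal sketch of a model written to that convention; `EvidenceExample` is illustrative and is not the `Evidence` model from `src/utils/models.py`.

```python
from pydantic import BaseModel, Field


class EvidenceExample(BaseModel):
    """Illustrative frozen model; the real Evidence model lives in src/utils/models.py."""

    model_config = {"frozen": True}

    title: str = Field(..., min_length=1, description="Title of the source document")
    url: str = Field(..., description="Canonical URL of the source")
    relevance: float = Field(0.5, ge=0.0, le=1.0, description="Relevance score in [0, 1]")
```

Assigning to a field of a frozen instance raises a `ValidationError`, which is what keeps evidence objects immutable as they move between agents.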
- -**Agent Structure**: -- System prompt as module-level constant (with date injection: `datetime.now().strftime("%Y-%m-%d")`) -- Agent class with `__init__(model: Any | None = None)` -- Main method (e.g., `async def evaluate()`, `async def write_report()`) -- Factory function: `def create_agent_name(model: Any | None = None) -> AgentName` - -**Model Initialization**: Use `get_model()` from `src/agent_factory/judges.py` if no model provided. Support OpenAI/Anthropic/HF Inference via settings. - -**Error Handling**: Return fallback values (e.g., `KnowledgeGapOutput(research_complete=False, outstanding_gaps=[...])`) on failure. Log errors with context. Use retry logic (3 retries) in Pydantic AI Agent initialization. - -**Input Validation**: Validate query/inputs are not empty. Truncate very long inputs with warnings. Handle None values gracefully. - -**Output Types**: Use structured output types from `src/utils/models.py` (e.g., `KnowledgeGapOutput`, `AgentSelectionPlan`, `ReportDraft`). For text output (writer agents), return `str` directly. - -**Agent-Specific Rules**: -- `knowledge_gap.py`: Outputs `KnowledgeGapOutput`. Evaluates research completeness. -- `tool_selector.py`: Outputs `AgentSelectionPlan`. Selects tools (RAG/web/database). -- `writer.py`: Returns markdown string. Includes citations in numbered format. -- `long_writer.py`: Uses `ReportDraft` input/output. Handles section-by-section writing. -- `proofreader.py`: Takes `ReportDraft`, returns polished markdown. -- `thinking.py`: Returns observation string from conversation history. -- `input_parser.py`: Outputs `ParsedQuery` with research mode detection. - ---- - -## src/tools/ - Search Tool Rules - -**Protocol**: All tools implement `SearchTool` protocol from `src/tools/base.py`: `name` property and `async def search(query, max_results) -> list[Evidence]`. - -**Rate Limiting**: Use `@retry` decorator from tenacity: `@retry(stop=stop_after_attempt(3), wait=wait_exponential(...))`. Implement `_rate_limit()` method for APIs with limits. Use shared rate limiters from `src/tools/rate_limiter.py`. - -**Error Handling**: Raise `SearchError` or `RateLimitError` on failures. Handle HTTP errors (429, 500, timeout). Return empty list on non-critical errors (log warning). - -**Query Preprocessing**: Use `preprocess_query()` from `src/tools/query_utils.py` to remove noise and expand synonyms. - -**Evidence Conversion**: Convert API responses to `Evidence` objects with `Citation`. Extract metadata (title, url, date, authors). Set relevance scores (0.0-1.0). Handle missing fields gracefully. - -**Tool-Specific Rules**: -- `pubmed.py`: Use NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Parse XML with `xmltodict`. Handle single vs. multiple articles. -- `clinicaltrials.py`: Use `requests` library (NOT httpx - WAF blocks httpx). Run in thread pool: `await asyncio.to_thread(requests.get, ...)`. Filter: Only interventional studies, active/completed. -- `europepmc.py`: Handle preprint markers: `[PREPRINT - Not peer-reviewed]`. Build URLs from DOI or PMID. -- `rag_tool.py`: Wraps `LlamaIndexRAGService`. Returns Evidence from RAG results. Handles ingestion. -- `search_handler.py`: Orchestrates parallel searches across multiple tools. Uses `asyncio.gather()` with `return_exceptions=True`. Aggregates results into `SearchResult`. - ---- - -## src/middleware/ - Middleware Rules - -**State Management**: Use `ContextVar` for thread-safe isolation. `WorkflowState` uses `ContextVar[WorkflowState | None]`. 
Initialize with `init_workflow_state(embedding_service)`. Access with `get_workflow_state()` (auto-initializes if missing). - -**WorkflowState**: Tracks `evidence: list[Evidence]`, `conversation: Conversation`, `embedding_service: Any`. Methods: `add_evidence()` (deduplicates by URL), `async search_related()` (semantic search). - -**WorkflowManager**: Manages parallel research loops. Methods: `add_loop()`, `run_loops_parallel()`, `update_loop_status()`, `sync_loop_evidence_to_state()`. Uses `asyncio.gather()` for parallel execution. Handles errors per loop (don't fail all if one fails). - -**BudgetTracker**: Tracks tokens, time, iterations per loop and globally. Methods: `create_budget()`, `add_tokens()`, `start_timer()`, `update_timer()`, `increment_iteration()`, `check_budget()`, `can_continue()`. Token estimation: `estimate_tokens(text)` (~4 chars per token), `estimate_llm_call_tokens(prompt, response)`. - -**Models**: All middleware models in `src/utils/models.py`. `IterationData`, `Conversation`, `ResearchLoop`, `BudgetStatus` are used by middleware. - ---- - -## src/orchestrator/ - Orchestration Rules - -**Research Flows**: Two patterns: `IterativeResearchFlow` (single loop) and `DeepResearchFlow` (plan → parallel loops → synthesis). Both support agent chains (`use_graph=False`) and graph execution (`use_graph=True`). - -**IterativeResearchFlow**: Pattern: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete. Uses `KnowledgeGapAgent`, `ToolSelectorAgent`, `ThinkingAgent`, `WriterAgent`, `JudgeHandler`. Tracks iterations, time, budget. - -**DeepResearchFlow**: Pattern: Planner → Parallel iterative loops per section → Synthesizer. Uses `PlannerAgent`, `IterativeResearchFlow` (per section), `LongWriterAgent` or `ProofreaderAgent`. Uses `WorkflowManager` for parallel execution. - -**Graph Orchestrator**: Uses Pydantic AI Graphs (when available) or agent chains (fallback). Routes based on research mode (iterative/deep/auto). Streams `AgentEvent` objects for UI. - -**State Initialization**: Always call `init_workflow_state()` before running flows. Initialize `BudgetTracker` per loop. Use `WorkflowManager` for parallel coordination. - -**Event Streaming**: Yield `AgentEvent` objects during execution. Event types: "started", "search_complete", "judge_complete", "hypothesizing", "synthesizing", "complete", "error". Include iteration numbers and data payloads. - ---- - -## src/services/ - Service Rules - -**EmbeddingService**: Local sentence-transformers (NO API key required). All operations async-safe via `run_in_executor()`. ChromaDB for vector storage. Deduplication threshold: 0.85 (85% similarity = duplicate). - -**LlamaIndexRAGService**: Uses OpenAI embeddings (requires `OPENAI_API_KEY`). Methods: `ingest_evidence()`, `retrieve()`, `query()`. Returns documents with metadata (source, title, url, date, authors). Lazy initialization with graceful fallback. - -**StatisticalAnalyzer**: Generates Python code via LLM. Executes in Modal sandbox (secure, isolated). Library versions pinned in `SANDBOX_LIBRARIES` dict. Returns `AnalysisResult` with verdict (SUPPORTED/REFUTED/INCONCLUSIVE). - -**Singleton Pattern**: Use `@lru_cache(maxsize=1)` for singletons: `@lru_cache(maxsize=1); def get_service() -> Service: return Service()`. Lazy initialization to avoid requiring dependencies at import time. - ---- - -## src/utils/ - Utility Rules - -**Models**: All Pydantic models in `src/utils/models.py`. 
Use frozen models (`model_config = {"frozen": True}`) except where mutation needed. Use `Field()` with descriptions. Validate with constraints. - -**Config**: Settings via Pydantic Settings (`src/utils/config.py`). Load from `.env` automatically. Use `settings` singleton: `from src.utils.config import settings`. Validate API keys with properties: `has_openai_key`, `has_anthropic_key`. - -**Exceptions**: Custom exception hierarchy in `src/utils/exceptions.py`. Base: `DeepCriticalError`. Specific: `SearchError`, `RateLimitError`, `JudgeError`, `ConfigurationError`. Always chain exceptions. - -**LLM Factory**: Centralized LLM model creation in `src/utils/llm_factory.py`. Supports OpenAI, Anthropic, HF Inference. Use `get_model()` or factory functions. Check requirements before initialization. - -**Citation Validator**: Use `validate_references()` from `src/utils/citation_validator.py`. Removes hallucinated citations (URLs not in evidence). Logs warnings. Returns validated report string. - ---- - -## src/orchestrator_factory.py Rules - -**Purpose**: Factory for creating orchestrators. Supports "simple" (legacy) and "advanced" (magentic) modes. Auto-detects mode based on API key availability. - -**Pattern**: Lazy import for optional dependencies (`_get_magentic_orchestrator_class()`). Handles `ImportError` gracefully with clear error messages. - -**Mode Detection**: `_determine_mode()` checks explicit mode or auto-detects: "advanced" if `settings.has_openai_key`, else "simple". Maps "magentic" → "advanced". - -**Function Signature**: `create_orchestrator(search_handler, judge_handler, config, mode) -> Any`. Simple mode requires handlers. Advanced mode uses MagenticOrchestrator. - -**Error Handling**: Raise `ValueError` with clear messages if requirements not met. Log mode selection with structlog. - ---- - -## src/orchestrator_hierarchical.py Rules - -**Purpose**: Hierarchical orchestrator using middleware and sub-teams. Adapts Magentic ChatAgent to SubIterationTeam protocol. - -**Pattern**: Uses `SubIterationMiddleware` with `ResearchTeam` and `LLMSubIterationJudge`. Event-driven via callback queue. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated, but kept for compatibility). - -**Event Streaming**: Uses `asyncio.Queue` for event coordination. Yields `AgentEvent` objects. Handles event callback pattern with `asyncio.wait()`. - -**Error Handling**: Log errors with context. Yield error events. Process remaining events after task completion. - ---- - -## src/orchestrator_magentic.py Rules - -**Purpose**: Magentic-based orchestrator using ChatAgent pattern. Each agent has internal LLM. Manager orchestrates agents. - -**Pattern**: Uses `MagenticBuilder` with participants (searcher, hypothesizer, judge, reporter). Manager uses `OpenAIChatClient`. Workflow built in `_build_workflow()`. - -**Event Processing**: `_process_event()` converts Magentic events to `AgentEvent`. Handles: `MagenticOrchestratorMessageEvent`, `MagenticAgentMessageEvent`, `MagenticFinalResultEvent`, `MagenticAgentDeltaEvent`, `WorkflowOutputEvent`. - -**Text Extraction**: `_extract_text()` defensively extracts text from messages. Priority: `.content` → `.text` → `str(message)`. Handles buggy message objects. - -**State Initialization**: Initialize embedding service with graceful fallback. Use `init_magentic_state()` (deprecated). - -**Requirements**: Must call `check_magentic_requirements()` in `__init__`. Requires `agent-framework-core` and OpenAI API key. 
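A minimal sketch of the defensive extraction order described above (`.content`, then `.text`, then `str(message)`); the helper body is illustrative rather than the exact source implementation:

```python
from typing import Any


def _extract_text(message: Any) -> str:
    """Best-effort text extraction from a message object (sketch)."""
    content = getattr(message, "content", None)
    if isinstance(content, str) and content.strip():
        return content
    text = getattr(message, "text", None)
    if isinstance(text, str) and text.strip():
        return text
    # Fall back to the string form for buggy or unexpected message objects.
    return str(message)
```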
- -**Event Types**: Maps agent names to event types: "search" → "search_complete", "judge" → "judge_complete", "hypothes" → "hypothesizing", "report" → "synthesizing". - ---- - -## src/agent_factory/ - Factory Rules - -**Pattern**: Factory functions for creating agents and handlers. Lazy initialization for optional dependencies. Support OpenAI/Anthropic/HF Inference. - -**Judges**: `create_judge_handler()` creates `JudgeHandler` with structured output (`JudgeAssessment`). Supports `MockJudgeHandler`, `HFInferenceJudgeHandler` as fallbacks. - -**Agents**: Factory functions in `agents.py` for all Pydantic AI agents. Pattern: `create_agent_name(model: Any | None = None) -> AgentName`. Use `get_model()` if model not provided. - -**Graph Builder**: `graph_builder.py` contains utilities for building research graphs. Supports iterative and deep research graph construction. - -**Error Handling**: Raise `ConfigurationError` if required API keys missing. Log agent creation. Handle import errors gracefully. - ---- - -## src/prompts/ - Prompt Rules - -**Pattern**: System prompts stored as module-level constants. Include date injection: `datetime.now().strftime("%Y-%m-%d")`. Format evidence with truncation (1500 chars per item). - -**Judge Prompts**: In `judge.py`. Handle empty evidence case separately. Always request structured JSON output. - -**Hypothesis Prompts**: In `hypothesis.py`. Use diverse evidence selection (MMR algorithm). Sentence-aware truncation. - -**Report Prompts**: In `report.py`. Include full citation details. Use diverse evidence selection (n=20). Emphasize citation validation rules. - ---- - -## Testing Rules - -**Structure**: Unit tests in `tests/unit/` (mocked, fast). Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`). - -**Mocking**: Use `respx` for httpx mocking. Use `pytest-mock` for general mocking. Mock LLM calls in unit tests (use `MockJudgeHandler`). - -**Fixtures**: Common fixtures in `tests/conftest.py`: `mock_httpx_client`, `mock_llm_response`. - -**Coverage**: Aim for >80% coverage. Test error handling, edge cases, and integration paths. - ---- - -## File-Specific Agent Rules - -**knowledge_gap.py**: Outputs `KnowledgeGapOutput`. System prompt evaluates research completeness. Handles conversation history. Returns fallback on error. - -**writer.py**: Returns markdown string. System prompt includes citation format examples. Validates inputs. Truncates long findings. Retry logic for transient failures. - -**long_writer.py**: Uses `ReportDraft` input/output. Writes sections iteratively. Reformats references (deduplicates, renumbers). Reformats section headings. - -**proofreader.py**: Takes `ReportDraft`, returns polished markdown. Removes duplicates. Adds summary. Preserves references. - -**tool_selector.py**: Outputs `AgentSelectionPlan`. System prompt lists available agents (WebSearchAgent, SiteCrawlerAgent, RAGAgent). Guidelines for when to use each. - -**thinking.py**: Returns observation string. Generates observations from conversation history. Uses query and background context. - -**input_parser.py**: Outputs `ParsedQuery`. Detects research mode (iterative/deep). Extracts entities and research questions. Improves/refines query. - - - diff --git a/LICENSE.md b/LICENSE.md deleted file mode 100644 index a1f9be9c2733fb22fc43dfc4e5f23c62dbfb02ad..0000000000000000000000000000000000000000 --- a/LICENSE.md +++ /dev/null @@ -1,25 +0,0 @@ -# License - -DeepCritical is licensed under the MIT License. 
- -## MIT License - -Copyright (c) 2024 DeepCritical Team - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..185a214d84f7fd56284179c298c26790e1f938c8 --- /dev/null +++ b/Makefile @@ -0,0 +1,51 @@ +.PHONY: install test lint format typecheck check clean all cov cov-html + +# Default target +all: check + +install: + uv sync --all-extras + uv run pre-commit install + +test: + uv run pytest tests/unit/ -v -m "not openai" -p no:logfire + +test-hf: + uv run pytest tests/ -v -m "huggingface" -p no:logfire + +test-all: + uv run pytest tests/ -v -p no:logfire + +# Coverage aliases +cov: test-cov +test-cov: + uv run pytest --cov=src --cov-report=term-missing -m "not openai" -p no:logfire + +cov-html: + uv run pytest --cov=src --cov-report=html -p no:logfire + @echo "Coverage report: open htmlcov/index.html" + +lint: + uv run ruff check src tests + +format: + uv run ruff format src tests + +typecheck: + uv run mypy src + +check: lint typecheck test-cov + @echo "All checks passed!" + +docs-build: + uv run mkdocs build + +docs-serve: + uv run mkdocs serve + +docs-clean: + rm -rf site/ + +clean: + rm -rf .pytest_cache .mypy_cache .ruff_cache __pycache__ .coverage htmlcov + find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true diff --git a/README.md b/README.md index a4c0e90eae994942af1b1ea0f5cdfe967a29b8f5..e93d2a54d71e1550b16fd16b573b9a6d80e78cf7 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ --- -title: The DETERMINATOR +title: Critical Deep Research emoji: 🐉 colorFrom: red colorTo: yellow @@ -10,54 +10,114 @@ app_file: src/app.py hf_oauth: true hf_oauth_expiration_minutes: 480 hf_oauth_scopes: - # Required for HuggingFace Inference API (includes all third-party providers) - # This scope grants access to: - # - HuggingFace's own Inference API - # - Third-party inference providers (nebius, together, scaleway, hyperbolic, novita, nscale, sambanova, ovh, fireworks, etc.)
- # - All models available through the Inference Providers API - - inference-api - # Optional: Uncomment if you need to access user's billing information - # - read-billing + - inference-api pinned: true license: mit tags: - mcp-in-action-track-enterprise - mcp-hackathon - - deep-research + - drug-repurposing - biomedical-ai - pydantic-ai - llamaindex - modal - - building-mcp-track-enterprise - - building-mcp-track-consumer - - mcp-in-action-track-enterprise - - mcp-in-action-track-consumer - - building-mcp-track-modal - - building-mcp-track-blaxel - - building-mcp-track-llama-index - - building-mcp-track-HUGGINGFACE --- > [!IMPORTANT] > **You are reading the Gradio Demo README!** > -> - 📚 **Documentation**: See our [technical documentation](https://deepcritical.github.io/GradioDemo/) for detailed information -> - 📖 **Complete README**: Check out the [Github README](.github/README.md) for setup, configuration, and contribution guidelines -> - ⚠️**This README is for our Gradio Demo Only !** +> - 📚 **Documentation**: See our [technical documentation](deepcritical.github.io/GradioDemo/) for detailed information +> - 📖 **Complete README**: Check out the [full README](.github/README.md) for setup, configuration, and contribution guidelines +> - 🏆 **Hackathon Submission**: Keep reading below for more information about our MCP Hackathon submission
-[![GitHub](https://img.shields.io/github/stars/DeepCritical/GradioDemo?style=for-the-badge&logo=github&logoColor=white&label=GitHub&labelColor=181717&color=181717)](https://github.com/DeepCritical/GradioDemo) -[![Documentation](https://img.shields.io/badge/Docs-0080FF?style=for-the-badge&logo=readthedocs&logoColor=white&labelColor=0080FF&color=0080FF)](deepcritical.github.io/GradioDemo/) -[![Demo](https://img.shields.io/badge/Demo-FFD21E?style=for-the-badge&logo=huggingface&logoColor=white&labelColor=FFD21E&color=FFD21E)](https://huggingface.co/spaces/DataQuests/DeepCritical) +[![GitHub](https://img.shields.io/github/stars/DeepCritical/GradioDemo?style=for-the-badge&logo=github&logoColor=white&label=🐙%20GitHub&labelColor=181717&color=181717)](https://github.com/DeepCritical/GradioDemo) +[![Documentation](https://img.shields.io/badge/📚%20Docs-0080FF?style=for-the-badge&logo=readthedocs&logoColor=white&labelColor=0080FF&color=0080FF)](deepcritical.github.io/GradioDemo/) +[![Demo](https://img.shields.io/badge/🚀%20Demo-FFD21E?style=for-the-badge&logo=huggingface&logoColor=white&labelColor=FFD21E&color=FFD21E)](https://huggingface.co/spaces/DataQuests/DeepCritical) [![codecov](https://codecov.io/gh/DeepCritical/GradioDemo/graph/badge.svg?token=B1f05RCGpz)](https://codecov.io/gh/DeepCritical/GradioDemo) [![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP)
-# The DETERMINATOR +# DeepCritical ## About -The DETERMINATOR is a powerful generalist deep research agent system that stops at nothing until finding precise answers to complex questions. It uses iterative search-and-judge loops to comprehensively investigate any research question from any domain. +The [Deep Critical Gradio Hackathon Team](#team) met online in the Alzheimer's Critical Literature Review Group in the Hugging Science initiative. We're building the agent framework we want to use for AI-assisted research to [turn the vast amounts of clinical data into cures](https://github.com/DeepCritical/GradioDemo). + +For this hackathon we're proposing a simple yet powerful Deep Research Agent that iteratively searches for the answer until it finds it, using general-purpose web search and special-purpose technical retrievers. + +## Deep Critical in the Media + +- Social Media Posts about Deep Critical: + - + - + - + - + - + - + - + +## Important information + +- **[readme](.github/README.md)**: Configure, deploy, contribute, and learn more here. +- **[docs](deepcritical.github.io/GradioDemo/)**: Want to know how all this works? Read our detailed technical documentation here. +- **[demo](https://huggingface.co/spaces/DataQuests/DeepCritical)**: Try our demo on Hugging Face. +- **[team](#team)**: Join us, or follow us! +- **[video]**: See our demo video + +## Future Developments + +- [ ] Apply Deep Research Systems To Generate Short Form Video (up to 5 minutes) +- [ ] Visualize Pydantic Graphs as Loading Screens in the UI +- [ ] Improve Data Science with more Complex Graph Agents +- [ ] Create Deep Critical Drug Repurposing / Discovery Demo +- [ ] Create Deep Critical Literature Review +- [ ] Create Deep Critical Hypothesis Generator +- [ ] Create PyPI Package + +## Completed + +- [x] **Multi-Source Search**: PubMed, ClinicalTrials.gov, bioRxiv/medRxiv +- [x] **MCP Integration**: Use our tools from Claude Desktop or any MCP client +- [x] **HuggingFace OAuth**: Sign in with HuggingFace +- [x] **Modal Sandbox**: Secure execution of AI-generated statistical code +- [x] **LlamaIndex RAG**: Semantic search and evidence synthesis +- [x] **HuggingfaceInference**: +- [x] **HuggingfaceMCP Custom Config To Use Community Tools**: +- [x] **Strongly Typed Composable Graphs**: +- [x] **Specialized Research Teams of Agents**: + + + +### Team + +- ZJ +- MarioAderman +- Josephrp + + +## Acknowledgements + +- McSwaggins +- Magentic +- Hugging Face +- Gradio +- DeepCritical +- Sponsors +- Microsoft +- Pydantic +- LlamaIndex +- Anthropic/MCP +- List of Tool Makers + + +## Links + +[![GitHub](https://img.shields.io/github/stars/DeepCritical/GradioDemo?style=for-the-badge&logo=github&logoColor=white&label=🐙%20GitHub&labelColor=181717&color=181717)](https://github.com/DeepCritical/GradioDemo) +[![Documentation](https://img.shields.io/badge/📚%20Docs-0080FF?style=for-the-badge&logo=readthedocs&logoColor=white&labelColor=0080FF&color=0080FF)](deepcritical.github.io/GradioDemo/) +[![Demo](https://img.shields.io/badge/🚀%20Demo-FFD21E?style=for-the-badge&logo=huggingface&logoColor=white&labelColor=FFD21E&color=FFD21E)](https://huggingface.co/spaces/DataQuests/DeepCritical) +[![codecov](https://codecov.io/gh/DeepCritical/GradioDemo/graph/badge.svg?token=B1f05RCGpz)](https://codecov.io/gh/DeepCritical/GradioDemo) +[![Join us on Discord](https://img.shields.io/discord/1109943800132010065?label=Discord&logo=discord&style=flat-square)](https://discord.gg/qdfnvSPcqP) \ No newline at end of file diff --git 
a/deployments/README.md b/deployments/README.md deleted file mode 100644 index 3a4f4a4d8d7a3cccf6beacd56ce135117f3ad07a..0000000000000000000000000000000000000000 --- a/deployments/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Deployments - -This directory contains infrastructure deployment scripts for DeepCritical services. - -## Modal Deployments - -### TTS Service (`modal_tts.py`) - -Deploys the Kokoro TTS (Text-to-Speech) function to Modal's GPU infrastructure. - -**Deploy:** -```bash -modal deploy deployments/modal_tts.py -``` - -**Features:** -- Kokoro 82M TTS model -- GPU-accelerated (T4) -- Voice options: af_heart, af_bella, am_michael, etc. -- Configurable speech speed - -**Requirements:** -- Modal account and credentials (`MODAL_TOKEN_ID`, `MODAL_TOKEN_SECRET` in `.env`) -- GPU quota on Modal - -**After Deployment:** -The function will be available at: -- App: `deepcritical-tts` -- Function: `kokoro_tts_function` - -The main application (`src/services/tts_modal.py`) will call this deployed function. - ---- - -## Adding New Deployments - -When adding new deployment scripts: - -1. Create a new file: `deployments/.py` -2. Use Modal's app pattern: - ```python - import modal - app = modal.App("deepcritical-") - ``` -3. Document in this README -4. Test deployment: `modal deploy deployments/.py` diff --git a/deployments/modal_tts.py b/deployments/modal_tts.py deleted file mode 100644 index 9987a339f6b89eb63cd512eb594dd6a6d488f42a..0000000000000000000000000000000000000000 --- a/deployments/modal_tts.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Deploy Kokoro TTS function to Modal. - -This script deploys the TTS function to Modal so it can be called -from the main DeepCritical application. - -Usage: - modal deploy deploy_modal_tts.py - -After deployment, the function will be available at: - App: deepcritical-tts - Function: kokoro_tts_function -""" - -import modal -import numpy as np - -# Create Modal app -app = modal.App("deepcritical-tts") - -# Define Kokoro TTS dependencies -KOKORO_DEPENDENCIES = [ - "torch>=2.0.0", - "transformers>=4.30.0", - "numpy<2.0", -] - -# Create Modal image with Kokoro -tts_image = ( - modal.Image.debian_slim(python_version="3.11") - .apt_install("git") # Install git first for pip install from github - .pip_install(*KOKORO_DEPENDENCIES) - .pip_install("git+https://github.com/hexgrad/kokoro.git") -) - - -@app.function( - image=tts_image, - gpu="T4", - timeout=60, -) -def kokoro_tts_function(text: str, voice: str, speed: float) -> tuple[int, np.ndarray]: - """Modal GPU function for Kokoro TTS. - - This function runs on Modal's GPU infrastructure. 
- Based on: https://huggingface.co/spaces/hexgrad/Kokoro-TTS - - Args: - text: Text to synthesize - voice: Voice ID (e.g., af_heart, af_bella, am_michael) - speed: Speech speed multiplier (0.5-2.0) - - Returns: - Tuple of (sample_rate, audio_array) - """ - import numpy as np - - try: - import torch - from kokoro import KModel, KPipeline - - # Initialize model (cached on GPU) - model = KModel().to("cuda").eval() - pipeline = KPipeline(lang_code=voice[0]) - pack = pipeline.load_voice(voice) - - # Generate audio - accumulate all chunks - audio_chunks = [] - for _, ps, _ in pipeline(text, voice, speed): - ref_s = pack[len(ps) - 1] - audio = model(ps, ref_s, speed) - audio_chunks.append(audio.numpy()) - - # Concatenate all audio chunks - if audio_chunks: - full_audio = np.concatenate(audio_chunks) - return (24000, full_audio) - - # If no audio generated, return empty - return (24000, np.zeros(1, dtype=np.float32)) - - except ImportError as e: - raise RuntimeError( - f"Kokoro not installed: {e}. " - "Install with: pip install git+https://github.com/hexgrad/kokoro.git" - ) from e - except Exception as e: - raise RuntimeError(f"TTS synthesis failed: {e}") from e - - -# Optional: Add a test entrypoint -@app.local_entrypoint() -def test(): - """Test the TTS function.""" - print("Testing Modal TTS function...") - sample_rate, audio = kokoro_tts_function.remote("Hello, this is a test.", "af_heart", 1.0) - print(f"Generated audio: {sample_rate}Hz, shape={audio.shape}") - print("✓ TTS function works!") diff --git a/dev/Makefile b/dev/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..185a214d84f7fd56284179c298c26790e1f938c8 --- /dev/null +++ b/dev/Makefile @@ -0,0 +1,51 @@ +.PHONY: install test lint format typecheck check clean all cov cov-html + +# Default target +all: check + +install: + uv sync --all-extras + uv run pre-commit install + +test: + uv run pytest tests/unit/ -v -m "not openai" -p no:logfire + +test-hf: + uv run pytest tests/ -v -m "huggingface" -p no:logfire + +test-all: + uv run pytest tests/ -v -p no:logfire + +# Coverage aliases +cov: test-cov +test-cov: + uv run pytest --cov=src --cov-report=term-missing -m "not openai" -p no:logfire + +cov-html: + uv run pytest --cov=src --cov-report=html -p no:logfire + @echo "Coverage report: open htmlcov/index.html" + +lint: + uv run ruff check src tests + +format: + uv run ruff format src tests + +typecheck: + uv run mypy src + +check: lint typecheck test-cov + @echo "All checks passed!" + +docs-build: + uv run mkdocs build + +docs-serve: + uv run mkdocs serve + +docs-clean: + rm -rf site/ + +clean: + rm -rf .pytest_cache .mypy_cache .ruff_cache __pycache__ .coverage htmlcov + find . -type d -name "__pycache__" -exec rm -rf {} + 2>/dev/null || true diff --git a/docs/api/agents.md b/docs/api/agents.md index c89e5b66d7c52f59197d3707c139999c0c12babe..8f0fa38939da25884c2dfef878ca84f94c7762fb 100644 --- a/docs/api/agents.md +++ b/docs/api/agents.md @@ -12,19 +12,27 @@ This page documents the API for DeepCritical agents. #### `evaluate` - -[KnowledgeGapAgent.evaluate](../src/agents/knowledge_gap.py) start_line:66 end_line:74 - +```python +async def evaluate( + self, + query: str, + background_context: str, + conversation_history: Conversation, + iteration: int, + time_elapsed_minutes: float, + max_time_minutes: float +) -> KnowledgeGapOutput +``` Evaluates research completeness and identifies outstanding knowledge gaps. 
**Parameters**: - `query`: Research query string -- `background_context`: Background context for the query (default: "") -- `conversation_history`: History of actions, findings, and thoughts as string (default: "") -- `iteration`: Current iteration number (default: 0) -- `time_elapsed_minutes`: Elapsed time in minutes (default: 0.0) -- `max_time_minutes`: Maximum time limit in minutes (default: 10) +- `background_context`: Background context for the query +- `conversation_history`: Conversation history with previous iterations +- `iteration`: Current iteration number +- `time_elapsed_minutes`: Elapsed time in minutes +- `max_time_minutes`: Maximum time limit in minutes **Returns**: `KnowledgeGapOutput` with: - `research_complete`: Boolean indicating if research is complete @@ -40,17 +48,21 @@ Evaluates research completeness and identifies outstanding knowledge gaps. #### `select_tools` - -[ToolSelectorAgent.select_tools](../src/agents/tool_selector.py) start_line:78 end_line:84 - +```python +async def select_tools( + self, + query: str, + knowledge_gaps: list[str], + available_tools: list[str] +) -> AgentSelectionPlan +``` -Selects tools for addressing a knowledge gap. +Selects tools for addressing knowledge gaps. **Parameters**: -- `gap`: The knowledge gap to address - `query`: Research query string -- `background_context`: Optional background context (default: "") -- `conversation_history`: History of actions, findings, and thoughts as string (default: "") +- `knowledge_gaps`: List of knowledge gaps to address +- `available_tools`: List of available tool names **Returns**: `AgentSelectionPlan` with list of `AgentTask` objects. @@ -64,17 +76,23 @@ Selects tools for addressing a knowledge gap. #### `write_report` - -[WriterAgent.write_report](../src/agents/writer.py) start_line:67 end_line:73 - +```python +async def write_report( + self, + query: str, + findings: str, + output_length: str = "medium", + output_instructions: str | None = None +) -> str +``` Generates a markdown report from research findings. **Parameters**: - `query`: Research query string - `findings`: Research findings to include in report -- `output_length`: Optional description of desired output length (default: "") -- `output_instructions`: Optional additional instructions for report generation (default: "") +- `output_length`: Desired output length ("short", "medium", "long") +- `output_instructions`: Additional instructions for report generation **Returns**: Markdown string with numbered citations. @@ -88,25 +106,36 @@ Generates a markdown report from research findings. #### `write_next_section` - -[LongWriterAgent.write_next_section](../src/agents/long_writer.py) start_line:94 end_line:100 - +```python +async def write_next_section( + self, + query: str, + draft: ReportDraft, + section_title: str, + section_content: str +) -> LongWriterOutput +``` Writes the next section of a long-form report. **Parameters**: -- `original_query`: The original research query -- `report_draft`: Current report draft as string (all sections written so far) -- `next_section_title`: Title of the section to write -- `next_section_draft`: Draft content for the next section +- `query`: Research query string +- `draft`: Current report draft +- `section_title`: Title of the section to write +- `section_content`: Content/guidance for the section -**Returns**: `LongWriterOutput` with formatted section and references. +**Returns**: `LongWriterOutput` with updated draft. 
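A minimal usage sketch for the section-by-section flow documented above, assuming the signatures on this page; the factory import, the `ReportDraft` fields, and the `draft` attribute on `LongWriterOutput` are taken from these docs and may differ from the actual source:

```python
from src.agent_factory.agents import create_long_writer_agent
from src.utils.models import ReportDraft


async def draft_sections(query: str, outline: list[tuple[str, str]]) -> ReportDraft:
    """Write a long-form report one section at a time (illustrative sketch)."""
    agent = create_long_writer_agent()
    draft = ReportDraft(title=query)  # sections/references default to empty lists
    for section_title, guidance in outline:
        result = await agent.write_next_section(
            query=query,
            draft=draft,
            section_title=section_title,
            section_content=guidance,
        )
        draft = result.draft  # assumed field name for the updated ReportDraft
    return draft
```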
#### `write_report` - -[LongWriterAgent.write_report](../src/agents/long_writer.py) start_line:263 end_line:268 - +```python +async def write_report( + self, + query: str, + report_title: str, + report_draft: ReportDraft +) -> str +``` Generates final report from draft. @@ -127,9 +156,14 @@ Generates final report from draft. #### `proofread` - -[ProofreaderAgent.proofread](../src/agents/proofreader.py) start_line:72 end_line:76 - +```python +async def proofread( + self, + query: str, + report_title: str, + report_draft: ReportDraft +) -> str +``` Proofreads and polishes a report draft. @@ -150,17 +184,21 @@ Proofreads and polishes a report draft. #### `generate_observations` - -[ThinkingAgent.generate_observations](../src/agents/thinking.py) start_line:70 end_line:76 - +```python +async def generate_observations( + self, + query: str, + background_context: str, + conversation_history: Conversation +) -> str +``` Generates observations from conversation history. **Parameters**: - `query`: Research query string -- `background_context`: Optional background context (default: "") -- `conversation_history`: History of actions, findings, and thoughts as string (default: "") -- `iteration`: Current iteration number (default: 1) +- `background_context`: Background context +- `conversation_history`: Conversation history **Returns**: Observation string. @@ -172,11 +210,14 @@ Generates observations from conversation history. ### Methods -#### `parse` +#### `parse_query` - -[InputParserAgent.parse](../src/agents/input_parser.py) start_line:82 end_line:82 - +```python +async def parse_query( + self, + query: str +) -> ParsedQuery +``` Parses and improves a user query. @@ -194,13 +235,18 @@ Parses and improves a user query. All agents have factory functions in `src.agent_factory.agents`: - -[Factory Functions](../src/agent_factory/agents.py) start_line:30 end_line:50 - +```python +def create_knowledge_gap_agent(model: Any | None = None) -> KnowledgeGapAgent +def create_tool_selector_agent(model: Any | None = None) -> ToolSelectorAgent +def create_writer_agent(model: Any | None = None) -> WriterAgent +def create_long_writer_agent(model: Any | None = None) -> LongWriterAgent +def create_proofreader_agent(model: Any | None = None) -> ProofreaderAgent +def create_thinking_agent(model: Any | None = None) -> ThinkingAgent +def create_input_parser_agent(model: Any | None = None) -> InputParserAgent +``` **Parameters**: - `model`: Optional Pydantic AI model. If None, uses `get_model()` from settings. -- `oauth_token`: Optional OAuth token from HuggingFace login (takes priority over env vars) **Returns**: Agent instance. @@ -209,3 +255,12 @@ All agents have factory functions in `src.agent_factory.agents`: - [Architecture - Agents](../architecture/agents.md) - Architecture overview - [Models API](models.md) - Data models used by agents + + + + + + + + + diff --git a/docs/api/models.md b/docs/api/models.md index d9513147a503f9dfaf8c9aab2bc312ed4d672a3d..f226647a52dc2d324877ce12e9311feffb8df591 100644 --- a/docs/api/models.md +++ b/docs/api/models.md @@ -8,14 +8,18 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Represents evidence from search results. 
- -[Evidence Model](../src/utils/models.py) start_line:33 end_line:44 - +```python +class Evidence(BaseModel): + citation: Citation + content: str + relevance_score: float = Field(ge=0.0, le=1.0) + metadata: dict[str, Any] = Field(default_factory=dict) +``` **Fields**: - `citation`: Citation information (title, URL, date, authors) - `content`: Evidence text content -- `relevance`: Relevance score (0.0-1.0) +- `relevance_score`: Relevance score (0.0-1.0) - `metadata`: Additional metadata dictionary ## Citation @@ -24,15 +28,18 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Citation information for evidence. - -[Citation Model](../src/utils/models.py) start_line:12 end_line:30 - +```python +class Citation(BaseModel): + title: str + url: str + date: str | None = None + authors: list[str] = Field(default_factory=list) +``` **Fields**: -- `source`: Source name (e.g., "pubmed", "clinicaltrials", "europepmc", "web", "rag") - `title`: Article/trial title - `url`: Source URL -- `date`: Publication date (YYYY-MM-DD or "Unknown") +- `date`: Publication date (optional) - `authors`: List of authors (optional) ## KnowledgeGapOutput @@ -41,9 +48,11 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Output from knowledge gap evaluation. - -[KnowledgeGapOutput Model](../src/utils/models.py) start_line:494 end_line:504 - +```python +class KnowledgeGapOutput(BaseModel): + research_complete: bool + outstanding_gaps: list[str] = Field(default_factory=list) +``` **Fields**: - `research_complete`: Boolean indicating if research is complete @@ -55,9 +64,10 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Plan for tool/agent selection. - -[AgentSelectionPlan Model](../src/utils/models.py) start_line:521 end_line:526 - +```python +class AgentSelectionPlan(BaseModel): + tasks: list[AgentTask] = Field(default_factory=list) +``` **Fields**: - `tasks`: List of agent tasks to execute @@ -68,15 +78,17 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Individual agent task. - -[AgentTask Model](../src/utils/models.py) start_line:507 end_line:518 - +```python +class AgentTask(BaseModel): + agent_name: str + query: str + context: dict[str, Any] = Field(default_factory=dict) +``` **Fields**: -- `gap`: The knowledge gap being addressed (optional) -- `agent`: Name of agent to use -- `query`: The specific query for the agent -- `entity_website`: The website of the entity being researched, if known (optional) +- `agent_name`: Name of agent to use +- `query`: Task query +- `context`: Additional context dictionary ## ReportDraft @@ -84,12 +96,17 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Draft structure for long-form reports. - -[ReportDraft Model](../src/utils/models.py) start_line:538 end_line:545 - +```python +class ReportDraft(BaseModel): + title: str + sections: list[ReportSection] = Field(default_factory=list) + references: list[Citation] = Field(default_factory=list) +``` **Fields**: +- `title`: Report title - `sections`: List of report sections +- `references`: List of citations ## ReportSection @@ -97,13 +114,17 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Individual section in a report draft. 
- -[ReportDraftSection Model](../src/utils/models.py) start_line:529 end_line:535 - +```python +class ReportSection(BaseModel): + title: str + content: str + order: int +``` **Fields**: -- `section_title`: The title of the section -- `section_content`: The content of the section +- `title`: Section title +- `content`: Section content +- `order`: Section order number ## ParsedQuery @@ -111,9 +132,14 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Parsed and improved query. - -[ParsedQuery Model](../src/utils/models.py) start_line:557 end_line:572 - +```python +class ParsedQuery(BaseModel): + original_query: str + improved_query: str + research_mode: Literal["iterative", "deep"] + key_entities: list[str] = Field(default_factory=list) + research_questions: list[str] = Field(default_factory=list) +``` **Fields**: - `original_query`: Original query string @@ -128,12 +154,13 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Conversation history with iterations. - -[Conversation Model](../src/utils/models.py) start_line:331 end_line:337 - +```python +class Conversation(BaseModel): + iterations: list[IterationData] = Field(default_factory=list) +``` **Fields**: -- `history`: List of iteration data +- `iterations`: List of iteration data ## IterationData @@ -141,15 +168,23 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Data for a single iteration. - -[IterationData Model](../src/utils/models.py) start_line:315 end_line:328 - +```python +class IterationData(BaseModel): + iteration: int + observations: str | None = None + knowledge_gaps: list[str] = Field(default_factory=list) + tool_calls: list[dict[str, Any]] = Field(default_factory=list) + findings: str | None = None + thoughts: str | None = None +``` **Fields**: -- `gap`: The gap addressed in the iteration -- `tool_calls`: The tool calls made -- `findings`: The findings collected from tool calls -- `thought`: The thinking done to reflect on the success of the iteration and next steps +- `iteration`: Iteration number +- `observations`: Generated observations +- `knowledge_gaps`: Identified knowledge gaps +- `tool_calls`: Tool calls made +- `findings`: Findings from tools +- `thoughts`: Agent thoughts ## AgentEvent @@ -157,9 +192,12 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Event emitted during research execution. - -[AgentEvent Model](../src/utils/models.py) start_line:104 end_line:125 - +```python +class AgentEvent(BaseModel): + type: str + iteration: int | None = None + data: dict[str, Any] = Field(default_factory=dict) +``` **Fields**: - `type`: Event type (e.g., "started", "search_complete", "complete") @@ -172,20 +210,35 @@ This page documents the Pydantic models used throughout DeepCritical. **Purpose**: Current budget status. 
- -[BudgetStatus Model](../src/middleware/budget_tracker.py) start_line:15 end_line:25 - +```python +class BudgetStatus(BaseModel): + tokens_used: int + tokens_limit: int + time_elapsed_seconds: float + time_limit_seconds: float + iterations: int + iterations_limit: int +``` **Fields**: -- `tokens_used`: Total tokens used -- `tokens_limit`: Token budget limit -- `time_elapsed_seconds`: Time elapsed in seconds -- `time_limit_seconds`: Time budget limit (default: 600.0 seconds / 10 minutes) -- `iterations`: Number of iterations completed -- `iterations_limit`: Maximum iterations (default: 10) -- `iteration_tokens`: Tokens used per iteration (iteration number -> token count) +- `tokens_used`: Tokens used so far +- `tokens_limit`: Token limit +- `time_elapsed_seconds`: Elapsed time in seconds +- `time_limit_seconds`: Time limit in seconds +- `iterations`: Current iteration count +- `iterations_limit`: Iteration limit ## See Also - [Architecture - Agents](../architecture/agents.md) - How models are used - [Configuration](../configuration/index.md) - Model configuration + + + + + + + + + + diff --git a/docs/api/orchestrators.md b/docs/api/orchestrators.md index ec1874536e9809064e3a59a220a6aaa5e6d4eb37..27c52249fc18fbcdb893036cdfcb4472e5d2f99e 100644 --- a/docs/api/orchestrators.md +++ b/docs/api/orchestrators.md @@ -12,24 +12,33 @@ This page documents the API for DeepCritical orchestrators. #### `run` - -[IterativeResearchFlow.run](../src/orchestrator/research_flow.py) start_line:134 end_line:140 - +```python +async def run( + self, + query: str, + background_context: str = "", + max_iterations: int | None = None, + max_time_minutes: float | None = None, + token_budget: int | None = None +) -> AsyncGenerator[AgentEvent, None] +``` Runs iterative research flow. **Parameters**: - `query`: Research query string - `background_context`: Background context (default: "") -- `output_length`: Optional description of desired output length (default: "") -- `output_instructions`: Optional additional instructions for report generation (default: "") -- `message_history`: Optional user conversation history in Pydantic AI `ModelMessage` format (default: None) - -**Returns**: Final report string. - -**Note**: The `message_history` parameter enables multi-turn conversations by providing context from previous interactions. - -**Note**: `max_iterations`, `max_time_minutes`, and `token_budget` are constructor parameters, not `run()` parameters. +- `max_iterations`: Maximum iterations (default: from settings) +- `max_time_minutes`: Maximum time in minutes (default: from settings) +- `token_budget`: Token budget (default: from settings) + +**Yields**: `AgentEvent` objects for: +- `started`: Research started +- `search_complete`: Search completed +- `judge_complete`: Evidence evaluation completed +- `synthesizing`: Generating report +- `complete`: Research completed +- `error`: Error occurred ## DeepResearchFlow @@ -41,21 +50,33 @@ Runs iterative research flow. #### `run` - -[DeepResearchFlow.run](../src/orchestrator/research_flow.py) start_line:778 end_line:778 - +```python +async def run( + self, + query: str, + background_context: str = "", + max_iterations_per_section: int | None = None, + max_time_minutes: float | None = None, + token_budget: int | None = None +) -> AsyncGenerator[AgentEvent, None] +``` Runs deep research flow. 
**Parameters**: - `query`: Research query string -- `message_history`: Optional user conversation history in Pydantic AI `ModelMessage` format (default: None) - -**Returns**: Final report string. - -**Note**: The `message_history` parameter enables multi-turn conversations by providing context from previous interactions. - -**Note**: `max_iterations_per_section`, `max_time_minutes`, and `token_budget` are constructor parameters, not `run()` parameters. +- `background_context`: Background context (default: "") +- `max_iterations_per_section`: Maximum iterations per section (default: from settings) +- `max_time_minutes`: Maximum time in minutes (default: from settings) +- `token_budget`: Token budget (default: from settings) + +**Yields**: `AgentEvent` objects for: +- `started`: Research started +- `planning`: Creating research plan +- `looping`: Running parallel research loops +- `synthesizing`: Synthesizing results +- `complete`: Research completed +- `error`: Error occurred ## GraphOrchestrator @@ -67,22 +88,24 @@ Runs deep research flow. #### `run` - -[GraphOrchestrator.run](../src/orchestrator/graph_orchestrator.py) start_line:177 end_line:177 - +```python +async def run( + self, + query: str, + research_mode: str = "auto", + use_graph: bool = True +) -> AsyncGenerator[AgentEvent, None] +``` Runs graph-based research orchestration. **Parameters**: - `query`: Research query string -- `message_history`: Optional user conversation history in Pydantic AI `ModelMessage` format (default: None) +- `research_mode`: Research mode ("iterative", "deep", or "auto") +- `use_graph`: Whether to use graph execution (default: True) **Yields**: `AgentEvent` objects during graph execution. -**Note**: -- `research_mode` and `use_graph` are constructor parameters, not `run()` parameters. -- The `message_history` parameter enables multi-turn conversations by providing context from previous interactions. Message history is stored in `GraphExecutionContext` and passed to agents during execution. - ## Orchestrator Factory **Module**: `src.orchestrator_factory` @@ -93,18 +116,22 @@ Runs graph-based research orchestration. #### `create_orchestrator` - -[create_orchestrator](../src/orchestrator_factory.py) start_line:44 end_line:50 - +```python +def create_orchestrator( + search_handler: SearchHandlerProtocol, + judge_handler: JudgeHandlerProtocol, + config: dict[str, Any], + mode: str | None = None +) -> Any +``` Creates an orchestrator instance. **Parameters**: -- `search_handler`: Search handler protocol implementation (optional, required for simple mode) -- `judge_handler`: Judge handler protocol implementation (optional, required for simple mode) -- `config`: Configuration object (optional) -- `mode`: Orchestrator mode ("simple", "advanced", "magentic", "iterative", "deep", "auto", or None for auto-detect) -- `oauth_token`: Optional OAuth token from HuggingFace login (takes priority over env vars) +- `search_handler`: Search handler protocol implementation +- `judge_handler`: Judge handler protocol implementation +- `config`: Configuration dictionary +- `mode`: Orchestrator mode ("simple", "advanced", "magentic", or None for auto-detect) **Returns**: Orchestrator instance. @@ -126,19 +153,24 @@ Creates an orchestrator instance. #### `run` - -[MagenticOrchestrator.run](../src/orchestrator_magentic.py) start_line:101 end_line:101 - +```python +async def run( + self, + query: str, + max_rounds: int = 15, + max_stalls: int = 3 +) -> AsyncGenerator[AgentEvent, None] +``` Runs Magentic orchestration. 
**Parameters**: - `query`: Research query string +- `max_rounds`: Maximum rounds (default: 15) +- `max_stalls`: Maximum stalls before reset (default: 3) **Yields**: `AgentEvent` objects converted from Magentic events. -**Note**: `max_rounds` and `max_stalls` are constructor parameters, not `run()` parameters. - **Requirements**: - `agent-framework-core` package - OpenAI API key @@ -146,4 +178,14 @@ Runs Magentic orchestration. ## See Also - [Architecture - Orchestrators](../architecture/orchestrators.md) - Architecture overview -- [Graph Orchestration](../architecture/graph_orchestration.md) - Graph execution details +- [Graph Orchestration](../architecture/graph-orchestration.md) - Graph execution details + + + + + + + + + + diff --git a/docs/api/services.md b/docs/api/services.md index 52647e795fa543d8c3a782a07c8d4dbe28b5fbf8..30edfc557afb8872d4262c5cdb4ebb2e149f46af 100644 --- a/docs/api/services.md +++ b/docs/api/services.md @@ -12,9 +12,9 @@ This page documents the API for DeepCritical services. #### `embed` - -[EmbeddingService.embed](../src/services/embeddings.py) start_line:55 end_line:55 - +```python +async def embed(self, text: str) -> list[float] +``` Generates embedding for a text string. @@ -68,60 +68,6 @@ Finds duplicate texts based on similarity threshold. **Returns**: List of (index1, index2) tuples for duplicate pairs. -#### `add_evidence` - -```python -async def add_evidence( - self, - evidence_id: str, - content: str, - metadata: dict[str, Any] -) -> None -``` - -Adds evidence to vector store for semantic search. - -**Parameters**: -- `evidence_id`: Unique identifier for the evidence -- `content`: Evidence text content -- `metadata`: Additional metadata dictionary - -#### `search_similar` - -```python -async def search_similar( - self, - query: str, - n_results: int = 5 -) -> list[dict[str, Any]] -``` - -Finds semantically similar evidence. - -**Parameters**: -- `query`: Search query string -- `n_results`: Number of results to return (default: 5) - -**Returns**: List of dictionaries with `id`, `content`, `metadata`, and `distance` keys. - -#### `deduplicate` - -```python -async def deduplicate( - self, - new_evidence: list[Evidence], - threshold: float = 0.9 -) -> list[Evidence] -``` - -Removes semantically duplicate evidence. - -**Parameters**: -- `new_evidence`: List of evidence items to deduplicate -- `threshold`: Similarity threshold (default: 0.9, where 0.9 = 90% similar is duplicate) - -**Returns**: List of unique evidence items (not already in vector store). - ### Factory Function #### `get_embedding_service` @@ -143,97 +89,63 @@ Returns singleton EmbeddingService instance. #### `ingest_evidence` - -[LlamaIndexRAGService.ingest_evidence](../src/services/llamaindex_rag.py) start_line:290 end_line:290 - +```python +async def ingest_evidence(self, evidence: list[Evidence]) -> None +``` Ingests evidence into RAG service. **Parameters**: -- `evidence_list`: List of Evidence objects to ingest +- `evidence`: List of Evidence objects to ingest -**Note**: Supports multiple embedding providers (OpenAI, local sentence-transformers, Hugging Face). +**Note**: Requires OpenAI API key for embeddings. #### `retrieve` ```python -def retrieve( +async def retrieve( self, query: str, - top_k: int | None = None -) -> list[dict[str, Any]] + top_k: int = 5 +) -> list[Document] ``` Retrieves relevant documents for a query. 
**Parameters**: - `query`: Search query string -- `top_k`: Number of top results to return (defaults to `similarity_top_k` from constructor) +- `top_k`: Number of top results to return (default: 5) -**Returns**: List of dictionaries with `text`, `score`, and `metadata` keys. +**Returns**: List of Document objects with metadata. #### `query` ```python -def query( +async def query( self, - query_str: str, - top_k: int | None = None + query: str, + top_k: int = 5 ) -> str ``` -Queries RAG service and returns synthesized response. - -**Parameters**: -- `query_str`: Query string -- `top_k`: Number of results to use (defaults to `similarity_top_k` from constructor) - -**Returns**: Synthesized response string. - -**Raises**: -- `ConfigurationError`: If no LLM API key is available for query synthesis - -#### `ingest_documents` - -```python -def ingest_documents(self, documents: list[Any]) -> None -``` - -Ingests raw LlamaIndex Documents. +Queries RAG service and returns formatted results. **Parameters**: -- `documents`: List of LlamaIndex Document objects - -#### `clear_collection` - -```python -def clear_collection(self) -> None -``` +- `query`: Search query string +- `top_k`: Number of top results to return (default: 5) -Clears all documents from the collection. +**Returns**: Formatted query results as string. ### Factory Function #### `get_rag_service` ```python -def get_rag_service( - collection_name: str = "deepcritical_evidence", - oauth_token: str | None = None, - **kwargs: Any -) -> LlamaIndexRAGService +@lru_cache(maxsize=1) +def get_rag_service() -> LlamaIndexRAGService | None ``` -Get or create a RAG service instance. - -**Parameters**: -- `collection_name`: Name of the ChromaDB collection (default: "deepcritical_evidence") -- `oauth_token`: Optional OAuth token from HuggingFace login (takes priority over env vars) -- `**kwargs`: Additional arguments for LlamaIndexRAGService (e.g., `use_openai_embeddings=False`) - -**Returns**: Configured LlamaIndexRAGService instance. - -**Note**: By default, uses local embeddings (sentence-transformers) which require no API keys. +Returns singleton LlamaIndexRAGService instance, or None if OpenAI key not available. ## StatisticalAnalyzer @@ -248,27 +160,24 @@ Get or create a RAG service instance. ```python async def analyze( self, - query: str, + hypothesis: str, evidence: list[Evidence], - hypothesis: dict[str, Any] | None = None + data_description: str | None = None ) -> AnalysisResult ``` -Analyzes a research question using statistical methods. +Analyzes a hypothesis using statistical methods. **Parameters**: -- `query`: The research question -- `evidence`: List of Evidence objects to analyze -- `hypothesis`: Optional hypothesis dict with `drug`, `target`, `pathway`, `effect`, `confidence` keys +- `hypothesis`: Hypothesis to analyze +- `evidence`: List of Evidence objects +- `data_description`: Optional data description **Returns**: `AnalysisResult` with: - `verdict`: SUPPORTED, REFUTED, or INCONCLUSIVE -- `confidence`: Confidence in verdict (0.0-1.0) -- `statistical_evidence`: Summary of statistical findings -- `code_generated`: Python code that was executed -- `execution_output`: Output from code execution -- `key_takeaways`: Key takeaways from analysis -- `limitations`: List of limitations +- `code`: Generated analysis code +- `output`: Execution output +- `error`: Error message if execution failed **Note**: Requires Modal credentials for sandbox execution. @@ -277,3 +186,12 @@ Analyzes a research question using statistical methods. 
- [Architecture - Services](../architecture/services.md) - Architecture overview - [Configuration](../configuration/index.md) - Service configuration + + + + + + + + + diff --git a/docs/api/tools.md b/docs/api/tools.md index c3eb95bf88d0d70cf8e2265931a59c774283b021..b93cd31e37e7a31413fec0ec282424fe6ae0ca82 100644 --- a/docs/api/tools.md +++ b/docs/api/tools.md @@ -56,10 +56,8 @@ Searches PubMed for articles. **Returns**: List of `Evidence` objects with PubMed articles. **Raises**: -- `SearchError`: If search fails (timeout, HTTP error, XML parsing error) -- `RateLimitError`: If rate limit is exceeded (429 status code) - -**Note**: Uses NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Handles single vs. multiple articles. +- `SearchError`: If search fails +- `RateLimitError`: If rate limit is exceeded ## ClinicalTrialsTool @@ -98,10 +96,10 @@ Searches ClinicalTrials.gov for trials. **Returns**: List of `Evidence` objects with clinical trials. -**Note**: Only returns interventional studies with status: COMPLETED, ACTIVE_NOT_RECRUITING, RECRUITING, ENROLLING_BY_INVITATION. Uses `requests` library (NOT httpx - WAF blocks httpx). Runs in thread pool for async compatibility. +**Note**: Only returns interventional studies with status: COMPLETED, ACTIVE_NOT_RECRUITING, RECRUITING, ENROLLING_BY_INVITATION **Raises**: -- `SearchError`: If search fails (HTTP error, request exception) +- `SearchError`: If search fails ## EuropePMCTool @@ -140,10 +138,10 @@ Searches Europe PMC for articles and preprints. **Returns**: List of `Evidence` objects with articles/preprints. -**Note**: Includes both preprints (marked with `[PREPRINT - Not peer-reviewed]`) and peer-reviewed articles. Handles preprint markers. Builds URLs from DOI or PMID. +**Note**: Includes both preprints (marked with `[PREPRINT - Not peer-reviewed]`) and peer-reviewed articles. **Raises**: -- `SearchError`: If search fails (HTTP error, connection error) +- `SearchError`: If search fails ## RAGTool @@ -151,20 +149,6 @@ Searches Europe PMC for articles and preprints. **Purpose**: Semantic search within collected evidence. -### Initialization - -```python -def __init__( - self, - rag_service: LlamaIndexRAGService | None = None, - oauth_token: str | None = None -) -> None -``` - -**Parameters**: -- `rag_service`: Optional RAG service instance. If None, will be lazy-initialized. -- `oauth_token`: Optional OAuth token from HuggingFace login (for RAG LLM) - ### Properties #### `name` @@ -196,10 +180,7 @@ Searches collected evidence using semantic similarity. **Returns**: List of `Evidence` objects from collected evidence. -**Raises**: -- `ConfigurationError`: If RAG service is unavailable - -**Note**: Requires evidence to be ingested into RAG service first. Wraps `LlamaIndexRAGService`. Returns Evidence from RAG results. +**Note**: Requires evidence to be ingested into RAG service first. ## SearchHandler @@ -207,53 +188,44 @@ Searches collected evidence using semantic similarity. **Purpose**: Orchestrates parallel searches across multiple tools. 
-### Initialization +### Methods + +#### `search` ```python -def __init__( +async def search( self, - tools: list[SearchTool], - timeout: float = 30.0, - include_rag: bool = False, - auto_ingest_to_rag: bool = True, - oauth_token: str | None = None -) -> None + query: str, + tools: list[SearchTool] | None = None, + max_results_per_tool: int = 10 +) -> SearchResult ``` -**Parameters**: -- `tools`: List of search tools to use -- `timeout`: Timeout for each search in seconds (default: 30.0) -- `include_rag`: Whether to include RAG tool in searches (default: False) -- `auto_ingest_to_rag`: Whether to automatically ingest results into RAG (default: True) -- `oauth_token`: Optional OAuth token from HuggingFace login (for RAG LLM) - -### Methods - -#### `execute` - - -[SearchHandler.execute](../src/tools/search_handler.py) start_line:86 end_line:86 - - Searches multiple tools in parallel. **Parameters**: - `query`: Search query string +- `tools`: List of tools to use (default: all available tools) - `max_results_per_tool`: Maximum results per tool (default: 10) **Returns**: `SearchResult` with: -- `query`: The search query - `evidence`: Aggregated list of evidence -- `sources_searched`: List of source names searched -- `total_found`: Total number of results -- `errors`: List of error messages from failed tools +- `tool_results`: Results per tool +- `total_count`: Total number of results -**Raises**: -- `SearchError`: If search times out - -**Note**: Uses `asyncio.gather()` for parallel execution. Handles tool failures gracefully (returns errors in `SearchResult.errors`). Automatically ingests evidence into RAG if enabled. +**Note**: Uses `asyncio.gather()` for parallel execution. Handles tool failures gracefully. ## See Also - [Architecture - Tools](../architecture/tools.md) - Architecture overview - [Models API](models.md) - Data models used by tools + + + + + + + + + + diff --git a/docs/architecture/agents.md b/docs/architecture/agents.md index 3bf400ba7bf42cfb3cd836f8ec9af3949731c3a6..b65da9e379c329fc478bf7c9fe3ff4ca4c40745a 100644 --- a/docs/architecture/agents.md +++ b/docs/architecture/agents.md @@ -4,16 +4,12 @@ DeepCritical uses Pydantic AI agents for all AI-powered operations. All agents f ## Agent Pattern -### Pydantic AI Agents - -Pydantic AI agents use the `Agent` class with the following structure: +All agents use the Pydantic AI `Agent` class with the following structure: - **System Prompt**: Module-level constant with date injection - **Agent Class**: `__init__(model: Any | None = None)` - **Main Method**: Async method (e.g., `async def evaluate()`, `async def write_report()`) -- **Factory Function**: `def create_agent_name(model: Any | None = None, oauth_token: str | None = None) -> AgentName` - -**Note**: Factory functions accept an optional `oauth_token` parameter for HuggingFace authentication, which takes priority over environment variables. +- **Factory Function**: `def create_agent_name(model: Any | None = None) -> AgentName` ## Model Initialization @@ -159,130 +155,19 @@ For text output (writer agents), agents return `str` directly. - `key_entities`: List of key entities - `research_questions`: List of research questions -## Magentic Agents - -The following agents use the `BaseAgent` pattern from `agent-framework` and are used exclusively with `MagenticOrchestrator`: - -### Hypothesis Agent - -**File**: `src/agents/hypothesis_agent.py` - -**Purpose**: Generates mechanistic hypotheses based on evidence. 
- -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` - -**Features**: -- Uses internal Pydantic AI `Agent` with `HypothesisAssessment` output type -- Accesses shared `evidence_store` for evidence -- Uses embedding service for diverse evidence selection (MMR algorithm) -- Stores hypotheses in shared context - -### Search Agent - -**File**: `src/agents/search_agent.py` - -**Purpose**: Wraps `SearchHandler` as an agent for Magentic orchestrator. - -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` - -**Features**: -- Executes searches via `SearchHandlerProtocol` -- Deduplicates evidence using embedding service -- Searches for semantically related evidence -- Updates shared evidence store - -### Analysis Agent - -**File**: `src/agents/analysis_agent.py` - -**Purpose**: Performs statistical analysis using Modal sandbox. - -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` - -**Features**: -- Wraps `StatisticalAnalyzer` service -- Analyzes evidence and hypotheses -- Returns verdict (SUPPORTED/REFUTED/INCONCLUSIVE) -- Stores analysis results in shared context - -### Report Agent (Magentic) - -**File**: `src/agents/report_agent.py` - -**Purpose**: Generates structured scientific reports from evidence and hypotheses. - -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` - -**Features**: -- Uses internal Pydantic AI `Agent` with `ResearchReport` output type -- Accesses shared evidence store and hypotheses -- Validates citations before returning -- Formats report as markdown - -### Judge Agent - -**File**: `src/agents/judge_agent.py` - -**Purpose**: Evaluates evidence quality and determines if sufficient for synthesis. - -**Pattern**: `BaseAgent` from `agent-framework` - -**Methods**: -- `async def run(messages, thread, **kwargs) -> AgentRunResponse` -- `async def run_stream(messages, thread, **kwargs) -> AsyncIterable[AgentRunResponseUpdate]` - -**Features**: -- Wraps `JudgeHandlerProtocol` -- Accesses shared evidence store -- Returns `JudgeAssessment` with sufficient flag, confidence, and recommendation - -## Agent Patterns - -DeepCritical uses two distinct agent patterns: - -### 1. Pydantic AI Agents (Traditional Pattern) - -These agents use the Pydantic AI `Agent` class directly and are used in iterative and deep research flows: - -- **Pattern**: `Agent(model, output_type, system_prompt)` -- **Initialization**: `__init__(model: Any | None = None)` -- **Methods**: Agent-specific async methods (e.g., `async def evaluate()`, `async def write_report()`) -- **Examples**: `KnowledgeGapAgent`, `ToolSelectorAgent`, `WriterAgent`, `LongWriterAgent`, `ProofreaderAgent`, `ThinkingAgent`, `InputParserAgent` - -### 2. 
Magentic Agents (Agent-Framework Pattern) - -These agents use the `BaseAgent` class from `agent-framework` and are used in Magentic orchestrator: - -- **Pattern**: `BaseAgent` from `agent-framework` with `async def run()` method -- **Initialization**: `__init__(evidence_store, embedding_service, ...)` -- **Methods**: `async def run(messages, thread, **kwargs) -> AgentRunResponse` -- **Examples**: `HypothesisAgent`, `SearchAgent`, `AnalysisAgent`, `ReportAgent`, `JudgeAgent` - -**Note**: Magentic agents are used exclusively with the `MagenticOrchestrator` and follow the agent-framework protocol for multi-agent coordination. - ## Factory Functions All agents have factory functions in `src/agent_factory/agents.py`: - -[Factory Functions](../src/agent_factory/agents.py) start_line:79 end_line:100 - +```python +def create_knowledge_gap_agent(model: Any | None = None) -> KnowledgeGapAgent +def create_tool_selector_agent(model: Any | None = None) -> ToolSelectorAgent +def create_writer_agent(model: Any | None = None) -> WriterAgent +# ... etc +``` Factory functions: - Use `get_model()` if no model provided -- Accept `oauth_token` parameter for HuggingFace authentication - Raise `ConfigurationError` if creation fails - Log agent creation @@ -291,3 +176,13 @@ Factory functions: - [Orchestrators](orchestrators.md) - How agents are orchestrated - [API Reference - Agents](../api/agents.md) - API documentation - [Contributing - Code Style](../contributing/code-style.md) - Development guidelines + + + + + + + + + + diff --git a/docs/architecture/graph-orchestration.md b/docs/architecture/graph-orchestration.md new file mode 100644 index 0000000000000000000000000000000000000000..249351123cd97c4945634767eda230e551e26da4 --- /dev/null +++ b/docs/architecture/graph-orchestration.md @@ -0,0 +1,152 @@ +# Graph Orchestration Architecture + +## Overview + +Phase 4 implements a graph-based orchestration system for research workflows using Pydantic AI agents as nodes. This enables better parallel execution, conditional routing, and state management compared to simple agent chains. + +## Graph Structure + +### Nodes + +Graph nodes represent different stages in the research workflow: + +1. **Agent Nodes**: Execute Pydantic AI agents + - Input: Prompt/query + - Output: Structured or unstructured response + - Examples: `KnowledgeGapAgent`, `ToolSelectorAgent`, `ThinkingAgent` + +2. **State Nodes**: Update or read workflow state + - Input: Current state + - Output: Updated state + - Examples: Update evidence, update conversation history + +3. **Decision Nodes**: Make routing decisions based on conditions + - Input: Current state/results + - Output: Next node ID + - Examples: Continue research vs. complete research + +4. **Parallel Nodes**: Execute multiple nodes concurrently + - Input: List of node IDs + - Output: Aggregated results + - Examples: Parallel iterative research loops + +### Edges + +Edges define transitions between nodes: + +1. **Sequential Edges**: Always traversed (no condition) + - From: Source node + - To: Target node + - Condition: None (always True) + +2. **Conditional Edges**: Traversed based on condition + - From: Source node + - To: Target node + - Condition: Callable that returns bool + - Example: If research complete → go to writer, else → continue loop + +3. 
**Parallel Edges**: Used for parallel execution branches + - From: Parallel node + - To: Multiple target nodes + - Execution: All targets run concurrently + +## Graph Patterns + +### Iterative Research Graph + +``` +[Input] → [Thinking] → [Knowledge Gap] → [Decision: Complete?] + ↓ No ↓ Yes + [Tool Selector] [Writer] + ↓ + [Execute Tools] → [Loop Back] +``` + +### Deep Research Graph + +``` +[Input] → [Planner] → [Parallel Iterative Loops] → [Synthesizer] + ↓ ↓ ↓ + [Loop1] [Loop2] [Loop3] +``` + +## State Management + +State is managed via `WorkflowState` using `ContextVar` for thread-safe isolation: + +- **Evidence**: Collected evidence from searches +- **Conversation**: Iteration history (gaps, tool calls, findings, thoughts) +- **Embedding Service**: For semantic search + +State transitions occur at state nodes, which update the global workflow state. + +## Execution Flow + +1. **Graph Construction**: Build graph from nodes and edges +2. **Graph Validation**: Ensure graph is valid (no cycles, all nodes reachable) +3. **Graph Execution**: Traverse graph from entry node +4. **Node Execution**: Execute each node based on type +5. **Edge Evaluation**: Determine next node(s) based on edges +6. **Parallel Execution**: Use `asyncio.gather()` for parallel nodes +7. **State Updates**: Update state at state nodes +8. **Event Streaming**: Yield events during execution for UI + +## Conditional Routing + +Decision nodes evaluate conditions and return next node IDs: + +- **Knowledge Gap Decision**: If `research_complete` → writer, else → tool selector +- **Budget Decision**: If budget exceeded → exit, else → continue +- **Iteration Decision**: If max iterations → exit, else → continue + +## Parallel Execution + +Parallel nodes execute multiple nodes concurrently: + +- Each parallel branch runs independently +- Results are aggregated after all branches complete +- State is synchronized after parallel execution +- Errors in one branch don't stop other branches + +## Budget Enforcement + +Budget constraints are enforced at decision nodes: + +- **Token Budget**: Track LLM token usage +- **Time Budget**: Track elapsed time +- **Iteration Budget**: Track iteration count + +If any budget is exceeded, execution routes to exit node. + +## Error Handling + +Errors are handled at multiple levels: + +1. **Node Level**: Catch errors in individual node execution +2. **Graph Level**: Handle errors during graph traversal +3. **State Level**: Rollback state changes on error + +Errors are logged and yield error events for UI. + +## Backward Compatibility + +Graph execution is optional via feature flag: + +- `USE_GRAPH_EXECUTION=true`: Use graph-based execution +- `USE_GRAPH_EXECUTION=false`: Use agent chain execution (existing) + +This allows gradual migration and fallback if needed. + + + + + + + + + + + + + + diff --git a/docs/architecture/graph_orchestration.md b/docs/architecture/graph_orchestration.md index cdbec2d163918dceb7dc1bf0f9659402a4aa7377..ec5601fdc9ad0108706bb6a4c855fc2407cb6064 100644 --- a/docs/architecture/graph_orchestration.md +++ b/docs/architecture/graph_orchestration.md @@ -2,163 +2,7 @@ ## Overview -DeepCritical implements a graph-based orchestration system for research workflows using Pydantic AI agents as nodes. This enables better parallel execution, conditional routing, and state management compared to simple agent chains. - -## Conversation History - -DeepCritical supports multi-turn conversations through Pydantic AI's native message history format. 
The system maintains two types of history: - -1. **User Conversation History**: Multi-turn user interactions (from Gradio chat interface) stored as `list[ModelMessage]` -2. **Research Iteration History**: Internal research process state (existing `Conversation` model) - -### Message History Flow - -``` -Gradio Chat History → convert_gradio_to_message_history() → GraphOrchestrator.run(message_history) - ↓ -GraphExecutionContext (stores message_history) - ↓ -Agent Nodes (receive message_history via agent.run()) - ↓ -WorkflowState (persists user_message_history) -``` - -### Usage - -Message history is automatically converted from Gradio format and passed through the orchestrator: - -```python -# In app.py - automatic conversion -message_history = convert_gradio_to_message_history(history) if history else None -async for event in orchestrator.run(query, message_history=message_history): - yield event -``` - -Agents receive message history through their `run()` methods: - -```python -# In agent execution -if message_history: - result = await agent.run(input_data, message_history=message_history) -``` - -## Graph Patterns - -### Iterative Research Graph - -The iterative research graph follows this pattern: - -``` -[Input] → [Thinking] → [Knowledge Gap] → [Decision: Complete?] - ↓ No ↓ Yes - [Tool Selector] [Writer] - ↓ - [Execute Tools] → [Loop Back] -``` - -**Node IDs**: `thinking` → `knowledge_gap` → `continue_decision` → `tool_selector`/`writer` → `execute_tools` → (loop back to `thinking`) - -**Special Node Handling**: -- `execute_tools`: State node that uses `search_handler` to execute searches and add evidence to workflow state -- `continue_decision`: Decision node that routes based on `research_complete` flag from `KnowledgeGapOutput` - -### Deep Research Graph - -The deep research graph follows this pattern: - -``` -[Input] → [Planner] → [Store Plan] → [Parallel Loops] → [Collect Drafts] → [Synthesizer] - ↓ ↓ ↓ - [Loop1] [Loop2] [Loop3] -``` - -**Node IDs**: `planner` → `store_plan` → `parallel_loops` → `collect_drafts` → `synthesizer` - -**Special Node Handling**: -- `planner`: Agent node that creates `ReportPlan` with report outline -- `store_plan`: State node that stores `ReportPlan` in context for parallel loops -- `parallel_loops`: Parallel node that executes `IterativeResearchFlow` instances for each section -- `collect_drafts`: State node that collects section drafts from parallel loops -- `synthesizer`: Agent node that calls `LongWriterAgent.write_report()` directly with `ReportDraft` - -### Deep Research - -```mermaid - -sequenceDiagram - actor User - participant GraphOrchestrator - participant InputParser - participant GraphBuilder - participant GraphExecutor - participant Agent - participant BudgetTracker - participant WorkflowState - - User->>GraphOrchestrator: run(query) - GraphOrchestrator->>InputParser: detect_research_mode(query) - InputParser-->>GraphOrchestrator: mode (iterative/deep) - GraphOrchestrator->>GraphBuilder: build_graph(mode) - GraphBuilder-->>GraphOrchestrator: ResearchGraph - GraphOrchestrator->>WorkflowState: init_workflow_state() - GraphOrchestrator->>BudgetTracker: create_budget() - GraphOrchestrator->>GraphExecutor: _execute_graph(graph) - - loop For each node in graph - GraphExecutor->>Agent: execute_node(agent_node) - Agent->>Agent: process_input - Agent-->>GraphExecutor: result - GraphExecutor->>WorkflowState: update_state(result) - GraphExecutor->>BudgetTracker: add_tokens(used) - GraphExecutor->>BudgetTracker: check_budget() - alt Budget exceeded 
- GraphExecutor->>GraphOrchestrator: emit(error_event) - else Continue - GraphExecutor->>GraphOrchestrator: emit(progress_event) - end - end - - GraphOrchestrator->>User: AsyncGenerator[AgentEvent] - -``` - -### Iterative Research - -```mermaid -sequenceDiagram - participant IterativeFlow - participant ThinkingAgent - participant KnowledgeGapAgent - participant ToolSelector - participant ToolExecutor - participant JudgeHandler - participant WriterAgent - - IterativeFlow->>IterativeFlow: run(query) - - loop Until complete or max_iterations - IterativeFlow->>ThinkingAgent: generate_observations() - ThinkingAgent-->>IterativeFlow: observations - - IterativeFlow->>KnowledgeGapAgent: evaluate_gaps() - KnowledgeGapAgent-->>IterativeFlow: KnowledgeGapOutput - - alt Research complete - IterativeFlow->>WriterAgent: create_final_report() - WriterAgent-->>IterativeFlow: final_report - else Gaps remain - IterativeFlow->>ToolSelector: select_agents(gap) - ToolSelector-->>IterativeFlow: AgentSelectionPlan - - IterativeFlow->>ToolExecutor: execute_tool_tasks() - ToolExecutor-->>IterativeFlow: ToolAgentOutput[] - - IterativeFlow->>JudgeHandler: assess_evidence() - JudgeHandler-->>IterativeFlow: should_continue - end - end -``` - +Phase 4 implements a graph-based orchestration system for research workflows using Pydantic AI agents as nodes. This enables better parallel execution, conditional routing, and state management compared to simple agent chains. ## Graph Structure @@ -206,6 +50,25 @@ Edges define transitions between nodes: - To: Multiple target nodes - Execution: All targets run concurrently +## Graph Patterns + +### Iterative Research Graph + +``` +[Input] → [Thinking] → [Knowledge Gap] → [Decision: Complete?] + ↓ No ↓ Yes + [Tool Selector] [Writer] + ↓ + [Execute Tools] → [Loop Back] +``` + +### Deep Research Graph + +``` +[Input] → [Planner] → [Parallel Iterative Loops] → [Synthesizer] + ↓ ↓ ↓ + [Loop1] [Loop2] [Loop3] +``` ## State Management @@ -219,35 +82,14 @@ State transitions occur at state nodes, which update the global workflow state. ## Execution Flow -1. **Graph Construction**: Build graph from nodes and edges using `create_iterative_graph()` or `create_deep_graph()` -2. **Graph Validation**: Ensure graph is valid (no cycles, all nodes reachable) via `ResearchGraph.validate_structure()` -3. **Graph Execution**: Traverse graph from entry node using `GraphOrchestrator._execute_graph()` -4. **Node Execution**: Execute each node based on type: - - **Agent Nodes**: Call `agent.run()` with transformed input - - **State Nodes**: Update workflow state via `state_updater` function - - **Decision Nodes**: Evaluate `decision_function` to get next node ID - - **Parallel Nodes**: Execute all parallel nodes concurrently via `asyncio.gather()` -5. **Edge Evaluation**: Determine next node(s) based on edges and conditions +1. **Graph Construction**: Build graph from nodes and edges +2. **Graph Validation**: Ensure graph is valid (no cycles, all nodes reachable) +3. **Graph Execution**: Traverse graph from entry node +4. **Node Execution**: Execute each node based on type +5. **Edge Evaluation**: Determine next node(s) based on edges 6. **Parallel Execution**: Use `asyncio.gather()` for parallel nodes -7. **State Updates**: Update state at state nodes via `GraphExecutionContext.update_state()` -8. 
**Event Streaming**: Yield `AgentEvent` objects during execution for UI - -### GraphExecutionContext - -The `GraphExecutionContext` class manages execution state during graph traversal: - -- **State**: Current `WorkflowState` instance -- **Budget Tracker**: `BudgetTracker` instance for budget enforcement -- **Node Results**: Dictionary storing results from each node execution -- **Visited Nodes**: Set of node IDs that have been executed -- **Current Node**: ID of the node currently being executed - -Methods: -- `set_node_result(node_id, result)`: Store result from node execution -- `get_node_result(node_id)`: Retrieve stored result -- `has_visited(node_id)`: Check if node was visited -- `mark_visited(node_id)`: Mark node as visited -- `update_state(updater, data)`: Update workflow state +7. **State Updates**: Update state at state nodes +8. **Event Streaming**: Yield events during execution for UI ## Conditional Routing @@ -298,5 +140,20 @@ This allows gradual migration and fallback if needed. ## See Also - [Orchestrators](orchestrators.md) - Overview of all orchestrator patterns +- [Workflows](workflows.md) - Workflow diagrams and patterns - [Workflow Diagrams](workflow-diagrams.md) - Detailed workflow diagrams - [API Reference - Orchestrators](../api/orchestrators.md) - API documentation + + + + + + + + + + + + + + diff --git a/docs/architecture/middleware.md b/docs/architecture/middleware.md index 6a9c40edefb2c1bb6af2c3b58a3ad1344b75d7d9..82058ccf979591845b8c5ab87e42913ce8a62458 100644 --- a/docs/architecture/middleware.md +++ b/docs/architecture/middleware.md @@ -18,20 +18,22 @@ DeepCritical uses middleware for state management, budget tracking, and workflow - `embedding_service: Any`: Embedding service for semantic search **Methods**: -- `add_evidence(new_evidence: list[Evidence]) -> int`: Adds evidence with URL-based deduplication. Returns the number of new items added (excluding duplicates). -- `async search_related(query: str, n_results: int = 5) -> list[Evidence]`: Semantic search for related evidence using embedding service +- `add_evidence(evidence: Evidence)`: Adds evidence with URL-based deduplication +- `async search_related(query: str, top_k: int = 5) -> list[Evidence]`: Semantic search **Initialization**: +```python +from src.middleware.state_machine import init_workflow_state - -[Initialize Workflow State](../src/middleware/state_machine.py) start_line:98 end_line:110 - +init_workflow_state(embedding_service) +``` **Access**: +```python +from src.middleware.state_machine import get_workflow_state - -[Get Workflow State](../src/middleware/state_machine.py) start_line:115 end_line:129 - +state = get_workflow_state() # Auto-initializes if missing +``` ## Workflow Manager @@ -40,10 +42,10 @@ DeepCritical uses middleware for state management, budget tracking, and workflow **Purpose**: Coordinates parallel research loops **Methods**: -- `async add_loop(loop_id: str, query: str) -> ResearchLoop`: Add a new research loop to manage -- `async run_loops_parallel(loop_configs: list[dict], loop_func: Callable, judge_handler: Any | None = None, budget_tracker: Any | None = None) -> list[Any]`: Run multiple research loops in parallel. Takes configuration dicts and a loop function. 
-- `async update_loop_status(loop_id: str, status: LoopStatus, error: str | None = None)`: Update loop status -- `async sync_loop_evidence_to_state(loop_id: str)`: Synchronize evidence from a specific loop to global state +- `add_loop(loop: ResearchLoop)`: Add a research loop to manage +- `async run_loops_parallel() -> list[ResearchLoop]`: Run all loops in parallel +- `update_loop_status(loop_id: str, status: str)`: Update loop status +- `sync_loop_evidence_to_state()`: Synchronize evidence from loops to global state **Features**: - Uses `asyncio.gather()` for parallel execution @@ -56,22 +58,9 @@ DeepCritical uses middleware for state management, budget tracking, and workflow from src.middleware.workflow_manager import WorkflowManager manager = WorkflowManager() -await manager.add_loop("loop1", "Research query 1") -await manager.add_loop("loop2", "Research query 2") - -async def run_research(config: dict) -> str: - loop_id = config["loop_id"] - query = config["query"] - # ... research logic ... - return "report" - -results = await manager.run_loops_parallel( - loop_configs=[ - {"loop_id": "loop1", "query": "Research query 1"}, - {"loop_id": "loop2", "query": "Research query 2"}, - ], - loop_func=run_research, -) +manager.add_loop(loop1) +manager.add_loop(loop2) +completed_loops = await manager.run_loops_parallel() ``` ## Budget Tracker @@ -86,13 +75,13 @@ results = await manager.run_loops_parallel( - **Iterations**: Number of iterations **Methods**: -- `create_budget(loop_id: str, tokens_limit: int = 100000, time_limit_seconds: float = 600.0, iterations_limit: int = 10) -> BudgetStatus`: Create a budget for a specific loop -- `add_tokens(loop_id: str, tokens: int)`: Add token usage to a loop's budget -- `start_timer(loop_id: str)`: Start time tracking for a loop -- `update_timer(loop_id: str)`: Update elapsed time for a loop -- `increment_iteration(loop_id: str)`: Increment iteration count for a loop -- `check_budget(loop_id: str) -> tuple[bool, str]`: Check if a loop's budget has been exceeded. Returns (exceeded: bool, reason: str) -- `can_continue(loop_id: str) -> bool`: Check if a loop can continue based on budget +- `create_budget(token_limit, time_limit_seconds, iterations_limit) -> BudgetStatus` +- `add_tokens(tokens: int)`: Add token usage +- `start_timer()`: Start time tracking +- `update_timer()`: Update elapsed time +- `increment_iteration()`: Increment iteration count +- `check_budget() -> BudgetStatus`: Check current budget status +- `can_continue() -> bool`: Check if research can continue **Token Estimation**: - `estimate_tokens(text: str) -> int`: ~4 chars per token @@ -104,20 +93,13 @@ from src.middleware.budget_tracker import BudgetTracker tracker = BudgetTracker() budget = tracker.create_budget( - loop_id="research_loop", - tokens_limit=100000, + token_limit=100000, time_limit_seconds=600, iterations_limit=10 ) -tracker.start_timer("research_loop") +tracker.start_timer() # ... research operations ... 
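+# Illustrative additions, not taken from the source: the call names follow the
+# simplified method list above (add_tokens(tokens), update_timer()) and show
+# recording token usage and refreshing elapsed time before the budget check.
+tracker.add_tokens(5000)
+tracker.update_timer()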
-tracker.add_tokens("research_loop", 5000) -tracker.update_timer("research_loop") -exceeded, reason = tracker.check_budget("research_loop") -if exceeded: - # Budget exceeded, stop research - pass -if not tracker.can_continue("research_loop"): +if not tracker.can_continue(): # Budget exceeded, stop research pass ``` @@ -144,3 +126,13 @@ All middleware components use `ContextVar` for thread-safe isolation: - [Orchestrators](orchestrators.md) - How middleware is used in orchestration - [API Reference - Orchestrators](../api/orchestrators.md) - API documentation - [Contributing - Code Style](../contributing/code-style.md) - Development guidelines + + + + + + + + + + diff --git a/docs/architecture/orchestrators.md b/docs/architecture/orchestrators.md index 34ea966177da345a8e0c92254b2c50633cbf2f5e..cf227d0a46a483f5c9a972ca65c8d24753481adc 100644 --- a/docs/architecture/orchestrators.md +++ b/docs/architecture/orchestrators.md @@ -23,10 +23,19 @@ DeepCritical supports multiple orchestration patterns for research workflows. - Iterates until research complete or constraints met **Usage**: +```python +from src.orchestrator.research_flow import IterativeResearchFlow - -[IterativeResearchFlow Initialization](../src/orchestrator/research_flow.py) start_line:57 end_line:80 - +flow = IterativeResearchFlow( + search_handler=search_handler, + judge_handler=judge_handler, + use_graph=False +) + +async for event in flow.run(query): + # Handle events + pass +``` ### DeepResearchFlow @@ -46,10 +55,19 @@ DeepCritical supports multiple orchestration patterns for research workflows. - Supports graph execution and agent chains **Usage**: +```python +from src.orchestrator.research_flow import DeepResearchFlow + +flow = DeepResearchFlow( + search_handler=search_handler, + judge_handler=judge_handler, + use_graph=True +) - -[DeepResearchFlow Initialization](../src/orchestrator/research_flow.py) start_line:709 end_line:728 - +async for event in flow.run(query): + # Handle events + pass +``` ## Graph Orchestrator @@ -58,10 +76,9 @@ DeepCritical supports multiple orchestration patterns for research workflows. **Purpose**: Graph-based execution using Pydantic AI agents as nodes **Features**: -- Uses graph execution (`use_graph=True`) or agent chains (`use_graph=False`) as fallback +- Uses Pydantic AI Graphs (when available) or agent chains (fallback) - Routes based on research mode (iterative/deep/auto) - Streams `AgentEvent` objects for UI -- Uses `GraphExecutionContext` to manage execution state **Node Types**: - **Agent Nodes**: Execute Pydantic AI agents @@ -74,22 +91,6 @@ DeepCritical supports multiple orchestration patterns for research workflows. 
- **Conditional Edges**: Traversed based on condition - **Parallel Edges**: Used for parallel execution branches -**Special Node Handling**: - -The `GraphOrchestrator` has special handling for certain nodes: - -- **`execute_tools` node**: State node that uses `search_handler` to execute searches and add evidence to workflow state -- **`parallel_loops` node**: Parallel node that executes `IterativeResearchFlow` instances for each section in deep research mode -- **`synthesizer` node**: Agent node that calls `LongWriterAgent.write_report()` directly with `ReportDraft` instead of using `agent.run()` -- **`writer` node**: Agent node that calls `WriterAgent.write_report()` directly with findings instead of using `agent.run()` - -**GraphExecutionContext**: - -The orchestrator uses `GraphExecutionContext` to manage execution state: -- Tracks current node, visited nodes, and node results -- Manages workflow state and budget tracker -- Provides methods to store and retrieve node execution results - ## Orchestrator Factory **File**: `src/orchestrator_factory.py` @@ -102,10 +103,16 @@ The orchestrator uses `GraphExecutionContext` to manage execution state: - **Auto-detect**: Chooses based on API key availability **Usage**: +```python +from src.orchestrator_factory import create_orchestrator - -[Create Orchestrator](../src/orchestrator_factory.py) start_line:44 end_line:66 - +orchestrator = create_orchestrator( + search_handler=search_handler, + judge_handler=judge_handler, + config={}, + mode="advanced" # or "simple" or None for auto-detect +) +``` ## Magentic Orchestrator @@ -116,26 +123,14 @@ The orchestrator uses `GraphExecutionContext` to manage execution state: **Features**: - Uses `agent-framework-core` - ChatAgent pattern with internal LLMs per agent -- `MagenticBuilder` with participants: - - `searcher`: SearchAgent (wraps SearchHandler) - - `hypothesizer`: HypothesisAgent (generates hypotheses) - - `judge`: JudgeAgent (evaluates evidence) - - `reporter`: ReportAgent (generates final report) -- Manager orchestrates agents via chat client (OpenAI or HuggingFace) -- Event-driven: converts Magentic events to `AgentEvent` for UI streaming via `_process_event()` method -- Supports max rounds, stall detection, and reset handling - -**Event Processing**: - -The orchestrator processes Magentic events and converts them to `AgentEvent`: -- `MagenticOrchestratorMessageEvent` → `AgentEvent` with type based on message content -- `MagenticAgentMessageEvent` → `AgentEvent` with type based on agent name -- `MagenticAgentDeltaEvent` → `AgentEvent` for streaming updates -- `MagenticFinalResultEvent` → `AgentEvent` with type "complete" +- `MagenticBuilder` with participants: searcher, hypothesizer, judge, reporter +- Manager orchestrates agents via `OpenAIChatClient` +- Requires OpenAI API key (function calling support) +- Event-driven: converts Magentic events to `AgentEvent` for UI streaming **Requirements**: - `agent-framework-core` package -- OpenAI API key or HuggingFace authentication +- OpenAI API key ## Hierarchical Orchestrator @@ -164,9 +159,13 @@ The orchestrator processes Magentic events and converts them to `AgentEvent`: All orchestrators must initialize workflow state: - -[Initialize Workflow State](../src/middleware/state_machine.py) start_line:98 end_line:112 - +```python +from src.middleware.state_machine import init_workflow_state +from src.services.embeddings import get_embedding_service + +embedding_service = get_embedding_service() +init_workflow_state(embedding_service) +``` ## Event 
Streaming @@ -174,28 +173,26 @@ All orchestrators yield `AgentEvent` objects: **Event Types**: - `started`: Research started -- `searching`: Search in progress - `search_complete`: Search completed -- `judging`: Evidence evaluation in progress - `judge_complete`: Evidence evaluation completed -- `looping`: Iteration in progress - `hypothesizing`: Generating hypotheses -- `analyzing`: Statistical analysis in progress -- `analysis_complete`: Statistical analysis completed - `synthesizing`: Synthesizing results - `complete`: Research completed - `error`: Error occurred -- `streaming`: Streaming update (delta events) **Event Structure**: - - -[AgentEvent Model](../src/utils/models.py) start_line:104 end_line:126 - +```python +class AgentEvent: + type: str + iteration: int | None + data: dict[str, Any] +``` ## See Also -- [Graph Orchestration](graph_orchestration.md) - Graph-based execution details +- [Graph Orchestration](graph-orchestration.md) - Graph-based execution details +- [Graph Orchestration (Detailed)](graph_orchestration.md) - Detailed graph architecture +- [Workflows](workflows.md) - Workflow diagrams and patterns - [Workflow Diagrams](workflow-diagrams.md) - Detailed workflow diagrams - [API Reference - Orchestrators](../api/orchestrators.md) - API documentation diff --git a/docs/architecture/services.md b/docs/architecture/services.md index 102b95b2340b11ebad8c4a797410258b5d3202b4..fda7c8367aac5c7f2a907f2c45372a91d7a7fc64 100644 --- a/docs/architecture/services.md +++ b/docs/architecture/services.md @@ -10,18 +10,17 @@ DeepCritical provides several services for embeddings, RAG, and statistical anal **Features**: - **No API Key Required**: Uses local sentence-transformers models -- **Async-Safe**: All operations use `run_in_executor()` to avoid blocking the event loop -- **ChromaDB Storage**: In-memory vector storage for embeddings -- **Deduplication**: 0.9 similarity threshold by default (90% similarity = duplicate, configurable) +- **Async-Safe**: All operations use `run_in_executor()` to avoid blocking +- **ChromaDB Storage**: Vector storage for embeddings +- **Deduplication**: 0.85 similarity threshold (85% similarity = duplicate) **Model**: Configurable via `settings.local_embedding_model` (default: `all-MiniLM-L6-v2`) **Methods**: -- `async def embed(text: str) -> list[float]`: Generate embeddings (async-safe via `run_in_executor()`) -- `async def embed_batch(texts: list[str]) -> list[list[float]]`: Batch embedding (more efficient) -- `async def add_evidence(evidence_id: str, content: str, metadata: dict[str, Any]) -> None`: Add evidence to vector store -- `async def search_similar(query: str, n_results: int = 5) -> list[dict[str, Any]]`: Find semantically similar evidence -- `async def deduplicate(new_evidence: list[Evidence], threshold: float = 0.9) -> list[Evidence]`: Remove semantically duplicate evidence +- `async def embed(text: str) -> list[float]`: Generate embeddings +- `async def embed_batch(texts: list[str]) -> list[list[float]]`: Batch embedding +- `async def similarity(text1: str, text2: str) -> float`: Calculate similarity +- `async def find_duplicates(texts: list[str], threshold: float = 0.85) -> list[tuple[int, int]]`: Find duplicates **Usage**: ```python @@ -33,21 +32,15 @@ embedding = await service.embed("text to embed") ## LlamaIndex RAG Service -**File**: `src/services/llamaindex_rag.py` +**File**: `src/services/rag.py` **Purpose**: Retrieval-Augmented Generation using LlamaIndex **Features**: -- **Multiple Embedding Providers**: OpenAI embeddings 
(requires `OPENAI_API_KEY`) or local sentence-transformers (no API key) -- **Multiple LLM Providers**: HuggingFace LLM (preferred) or OpenAI LLM (fallback) for query synthesis -- **ChromaDB Storage**: Vector database for document storage (supports in-memory mode) +- **OpenAI Embeddings**: Requires `OPENAI_API_KEY` +- **ChromaDB Storage**: Vector database for document storage - **Metadata Preservation**: Preserves source, title, URL, date, authors -- **Lazy Initialization**: Graceful fallback if dependencies not available - -**Initialization Parameters**: -- `use_openai_embeddings: bool | None`: Force OpenAI embeddings (None = auto-detect) -- `use_in_memory: bool`: Use in-memory ChromaDB client (useful for tests) -- `oauth_token: str | None`: Optional OAuth token from HuggingFace login (takes priority over env vars) +- **Lazy Initialization**: Graceful fallback if OpenAI key not available **Methods**: - `async def ingest_evidence(evidence: list[Evidence]) -> None`: Ingest evidence into RAG @@ -56,13 +49,9 @@ embedding = await service.embed("text to embed") **Usage**: ```python -from src.services.llamaindex_rag import get_rag_service +from src.services.rag import get_rag_service -service = get_rag_service( - use_openai_embeddings=False, # Use local embeddings - use_in_memory=True, # Use in-memory ChromaDB - oauth_token=token # Optional HuggingFace token -) +service = get_rag_service() if service: documents = await service.retrieve("query", top_k=5) ``` @@ -103,19 +92,13 @@ result = await analyzer.analyze( ## Singleton Pattern -Services use singleton patterns for lazy initialization: - -**EmbeddingService**: Uses a global variable pattern: - - -[EmbeddingService Singleton](../src/services/embeddings.py) start_line:164 end_line:172 - +All services use the singleton pattern with `@lru_cache(maxsize=1)`: -**LlamaIndexRAGService**: Direct instantiation (no caching): - - -[LlamaIndexRAGService Factory](../src/services/llamaindex_rag.py) start_line:440 end_line:466 - +```python +@lru_cache(maxsize=1) +def get_embedding_service() -> EmbeddingService: + return EmbeddingService() +``` This ensures: - Single instance per process @@ -144,3 +127,12 @@ if settings.has_openai_key: - [API Reference - Services](../api/services.md) - API documentation - [Configuration](../configuration/index.md) - Service configuration + + + + + + + + + diff --git a/docs/architecture/tools.md b/docs/architecture/tools.md index 3585874442c672b724f5819deedf683697074bb7..7ddbe7eaaf0a579ddba89c63506ba37560d33405 100644 --- a/docs/architecture/tools.md +++ b/docs/architecture/tools.md @@ -6,17 +6,30 @@ DeepCritical implements a protocol-based search tool system for retrieving evide All tools implement the `SearchTool` protocol from `src/tools/base.py`: - -[SearchTool Protocol](../src/tools/base.py) start_line:8 end_line:31 - +```python +class SearchTool(Protocol): + @property + def name(self) -> str: ... + + async def search( + self, + query: str, + max_results: int = 10 + ) -> list[Evidence]: ... +``` ## Rate Limiting All tools use the `@retry` decorator from tenacity: - -[Retry Decorator Pattern](../src/tools/pubmed.py) start_line:46 end_line:50 - +```python +@retry( + stop=stop_after_attempt(3), + wait=wait_exponential(...) +) +async def search(self, query: str, max_results: int = 10) -> list[Evidence]: + # Implementation +``` Tools with API rate limits implement `_rate_limit()` method and use shared rate limiters from `src/tools/rate_limiter.py`. @@ -117,23 +130,11 @@ Missing fields are handled gracefully with defaults. 
**Purpose**: Orchestrates parallel searches across multiple tools -**Initialization Parameters**: -- `tools: list[SearchTool]`: List of search tools to use -- `timeout: float = 30.0`: Timeout for each search in seconds -- `include_rag: bool = False`: Whether to include RAG tool in searches -- `auto_ingest_to_rag: bool = True`: Whether to automatically ingest results into RAG -- `oauth_token: str | None = None`: Optional OAuth token from HuggingFace login (for RAG LLM) - -**Methods**: -- `async def execute(query: str, max_results_per_tool: int = 10) -> SearchResult`: Execute search across all tools in parallel - **Features**: -- Uses `asyncio.gather()` with `return_exceptions=True` for parallel execution -- Aggregates results into `SearchResult` with evidence and metadata -- Handles tool failures gracefully (continues with other tools) +- Uses `asyncio.gather()` with `return_exceptions=True` +- Aggregates results into `SearchResult` +- Handles tool failures gracefully - Deduplicates results by URL -- Automatically ingests results into RAG if `auto_ingest_to_rag=True` -- Can add RAG tool dynamically via `add_rag_tool()` method ## Tool Registration @@ -143,21 +144,14 @@ Tools are registered in the search handler: from src.tools.pubmed import PubMedTool from src.tools.clinicaltrials import ClinicalTrialsTool from src.tools.europepmc import EuropePMCTool -from src.tools.search_handler import SearchHandler search_handler = SearchHandler( tools=[ PubMedTool(), ClinicalTrialsTool(), EuropePMCTool(), - ], - include_rag=True, # Include RAG tool for semantic search - auto_ingest_to_rag=True, # Automatically ingest results into RAG - oauth_token=token # Optional HuggingFace token for RAG LLM + ] ) - -# Execute search -result = await search_handler.execute("query", max_results_per_tool=10) ``` ## See Also @@ -165,3 +159,13 @@ result = await search_handler.execute("query", max_results_per_tool=10) - [Services](services.md) - RAG and embedding services - [API Reference - Tools](../api/tools.md) - API documentation - [Contributing - Implementation Patterns](../contributing/implementation-patterns.md) - Development guidelines + + + + + + + + + + diff --git a/docs/architecture/workflow-diagrams.md b/docs/architecture/workflow-diagrams.md index 1d22a77898faa66673192f8d8196e7efa88385da..c0f86c232be13ea07438eabdca7cbad803a1c1ac 100644 --- a/docs/architecture/workflow-diagrams.md +++ b/docs/architecture/workflow-diagrams.md @@ -627,10 +627,23 @@ gantt ## Implementation Highlights **Simple 4-Agent Setup:** - - -[Magentic Workflow Builder](../src/orchestrator_magentic.py) start_line:72 end_line:99 - +```python +workflow = ( + MagenticBuilder() + .participants( + hypothesis=HypothesisAgent(tools=[background_tool]), + search=SearchAgent(tools=[web_search, rag_tool]), + analysis=AnalysisAgent(tools=[code_execution]), + report=ReportAgent(tools=[code_execution, visualization]) + ) + .with_standard_manager( + chat_client=AnthropicClient(model="claude-sonnet-4"), + max_round_count=15, # Prevent infinite loops + max_stall_count=3 # Detect stuck workflows + ) + .build() +) +``` **Manager handles quality assessment in its instructions:** - Checks hypothesis quality (testable, novel, clear) @@ -651,5 +664,7 @@ No separate Judge Agent needed - manager does it all! 
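+**Consuming the event stream (illustrative sketch):**
+
+The snippet below is a minimal, hypothetical sketch of how the built workflow's event
+stream could be forwarded to the UI as `AgentEvent` objects. The `run_stream()` entry
+point, the class-name-based dispatch, and the specific type mapping are assumptions for
+illustration, not the orchestrator's actual event-processing code; dispatching on class
+names keeps the sketch free of assumed library import paths.
+
+```python
+from typing import Any, AsyncIterator
+
+from src.utils.models import AgentEvent  # simplified event model documented in the orchestrators page
+
+# Illustrative mapping from Magentic event class names (listed above) to the
+# AgentEvent types streamed to the Gradio UI; the real mapping also inspects
+# message content and agent names.
+EVENT_TYPE_MAP = {
+    "MagenticOrchestratorMessageEvent": "started",
+    "MagenticAgentDeltaEvent": "streaming",
+    "MagenticAgentMessageEvent": "synthesizing",
+    "MagenticFinalResultEvent": "complete",
+}
+
+
+async def stream_ui_events(workflow: Any, query: str) -> AsyncIterator[AgentEvent]:
+    """Forward Magentic workflow events to the UI as AgentEvent objects (sketch)."""
+    async for event in workflow.run_stream(query):  # assumed streaming entry point
+        event_type = EVENT_TYPE_MAP.get(type(event).__name__, "streaming")
+        # Keyword construction assumes AgentEvent is a Pydantic model as elsewhere in the docs.
+        yield AgentEvent(type=event_type, iteration=None, data={"payload": str(event)})
+```
+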
## See Also - [Orchestrators](orchestrators.md) - Overview of all orchestrator patterns -- [Graph Orchestration](graph_orchestration.md) - Graph-based execution overview +- [Graph Orchestration](graph-orchestration.md) - Graph-based execution overview +- [Graph Orchestration (Detailed)](graph_orchestration.md) - Detailed graph architecture +- [Workflows](workflows.md) - Workflow patterns summary - [API Reference - Orchestrators](../api/orchestrators.md) - API documentation \ No newline at end of file diff --git a/docs/architecture/workflows.md b/docs/architecture/workflows.md new file mode 100644 index 0000000000000000000000000000000000000000..509a0985e70e07fc1829eecaa7fae34ff74462c2 --- /dev/null +++ b/docs/architecture/workflows.md @@ -0,0 +1,662 @@ +# DeepCritical Workflow - Simplified Magentic Architecture + +> **Architecture Pattern**: Microsoft Magentic Orchestration +> **Design Philosophy**: Simple, dynamic, manager-driven coordination +> **Key Innovation**: Intelligent manager replaces rigid sequential phases + +--- + +## 1. High-Level Magentic Workflow + +```mermaid +flowchart TD + Start([User Query]) --> Manager[Magentic Manager
Plan • Select • Assess • Adapt] + + Manager -->|Plans| Task1[Task Decomposition] + Task1 --> Manager + + Manager -->|Selects & Executes| HypAgent[Hypothesis Agent] + Manager -->|Selects & Executes| SearchAgent[Search Agent] + Manager -->|Selects & Executes| AnalysisAgent[Analysis Agent] + Manager -->|Selects & Executes| ReportAgent[Report Agent] + + HypAgent -->|Results| Manager + SearchAgent -->|Results| Manager + AnalysisAgent -->|Results| Manager + ReportAgent -->|Results| Manager + + Manager -->|Assesses Quality| Decision{Good Enough?} + Decision -->|No - Refine| Manager + Decision -->|No - Different Agent| Manager + Decision -->|No - Stalled| Replan[Reset Plan] + Replan --> Manager + + Decision -->|Yes| Synthesis[Synthesize Final Result] + Synthesis --> Output([Research Report]) + + style Start fill:#e1f5e1 + style Manager fill:#ffe6e6 + style HypAgent fill:#fff4e6 + style SearchAgent fill:#fff4e6 + style AnalysisAgent fill:#fff4e6 + style ReportAgent fill:#fff4e6 + style Decision fill:#ffd6d6 + style Synthesis fill:#d4edda + style Output fill:#e1f5e1 +``` + +## 2. Magentic Manager: The 6-Phase Cycle + +```mermaid +flowchart LR + P1[1. Planning
Analyze task
Create strategy] --> P2[2. Agent Selection
Pick best agent
for subtask] + P2 --> P3[3. Execution
Run selected
agent with tools] + P3 --> P4[4. Assessment
Evaluate quality
Check progress] + P4 --> Decision{Quality OK?
Progress made?} + Decision -->|Yes| P6[6. Synthesis
Combine results
Generate report] + Decision -->|No| P5[5. Iteration
Adjust plan
Try again] + P5 --> P2 + P6 --> Done([Complete]) + + style P1 fill:#fff4e6 + style P2 fill:#ffe6e6 + style P3 fill:#e6f3ff + style P4 fill:#ffd6d6 + style P5 fill:#fff3cd + style P6 fill:#d4edda + style Done fill:#e1f5e1 +``` + +## 3. Simplified Agent Architecture + +```mermaid +graph TB + subgraph "Orchestration Layer" + Manager[Magentic Manager
• Plans workflow
• Selects agents
• Assesses quality
• Adapts strategy] + SharedContext[(Shared Context
• Hypotheses
• Search Results
• Analysis
• Progress)] + Manager <--> SharedContext + end + + subgraph "Specialist Agents" + HypAgent[Hypothesis Agent
• Domain understanding
• Hypothesis generation
• Testability refinement] + SearchAgent[Search Agent
• Multi-source search
• RAG retrieval
• Result ranking] + AnalysisAgent[Analysis Agent
• Evidence extraction
• Statistical analysis
• Code execution] + ReportAgent[Report Agent
• Report assembly
• Visualization
• Citation formatting] + end + + subgraph "MCP Tools" + WebSearch[Web Search
PubMed • arXiv • bioRxiv] + CodeExec[Code Execution
Sandboxed Python] + RAG[RAG Retrieval
Vector DB • Embeddings] + Viz[Visualization
Charts • Graphs] + end + + Manager -->|Selects & Directs| HypAgent + Manager -->|Selects & Directs| SearchAgent + Manager -->|Selects & Directs| AnalysisAgent + Manager -->|Selects & Directs| ReportAgent + + HypAgent --> SharedContext + SearchAgent --> SharedContext + AnalysisAgent --> SharedContext + ReportAgent --> SharedContext + + SearchAgent --> WebSearch + SearchAgent --> RAG + AnalysisAgent --> CodeExec + ReportAgent --> CodeExec + ReportAgent --> Viz + + style Manager fill:#ffe6e6 + style SharedContext fill:#ffe6f0 + style HypAgent fill:#fff4e6 + style SearchAgent fill:#fff4e6 + style AnalysisAgent fill:#fff4e6 + style ReportAgent fill:#fff4e6 + style WebSearch fill:#e6f3ff + style CodeExec fill:#e6f3ff + style RAG fill:#e6f3ff + style Viz fill:#e6f3ff +``` + +## 4. Dynamic Workflow Example + +```mermaid +sequenceDiagram + participant User + participant Manager + participant HypAgent + participant SearchAgent + participant AnalysisAgent + participant ReportAgent + + User->>Manager: "Research protein folding in Alzheimer's" + + Note over Manager: PLAN: Generate hypotheses → Search → Analyze → Report + + Manager->>HypAgent: Generate 3 hypotheses + HypAgent-->>Manager: Returns 3 hypotheses + Note over Manager: ASSESS: Good quality, proceed + + Manager->>SearchAgent: Search literature for hypothesis 1 + SearchAgent-->>Manager: Returns 15 papers + Note over Manager: ASSESS: Good results, continue + + Manager->>SearchAgent: Search for hypothesis 2 + SearchAgent-->>Manager: Only 2 papers found + Note over Manager: ASSESS: Insufficient, refine search + + Manager->>SearchAgent: Refined query for hypothesis 2 + SearchAgent-->>Manager: Returns 12 papers + Note over Manager: ASSESS: Better, proceed + + Manager->>AnalysisAgent: Analyze evidence for all hypotheses + AnalysisAgent-->>Manager: Returns analysis with code + Note over Manager: ASSESS: Complete, generate report + + Manager->>ReportAgent: Create comprehensive report + ReportAgent-->>Manager: Returns formatted report + Note over Manager: SYNTHESIZE: Combine all results + + Manager->>User: Final Research Report +``` + +## 5. Manager Decision Logic + +```mermaid +flowchart TD + Start([Manager Receives Task]) --> Plan[Create Initial Plan] + + Plan --> Select[Select Agent for Next Subtask] + Select --> Execute[Execute Agent] + Execute --> Collect[Collect Results] + + Collect --> Assess[Assess Quality & Progress] + + Assess --> Q1{Quality Sufficient?} + Q1 -->|No| Q2{Same Agent Can Fix?} + Q2 -->|Yes| Feedback[Provide Specific Feedback] + Feedback --> Execute + Q2 -->|No| Different[Try Different Agent] + Different --> Select + + Q1 -->|Yes| Q3{Task Complete?} + Q3 -->|No| Q4{Making Progress?} + Q4 -->|Yes| Select + Q4 -->|No - Stalled| Replan[Reset Plan & Approach] + Replan --> Plan + + Q3 -->|Yes| Synth[Synthesize Final Result] + Synth --> Done([Return Report]) + + style Start fill:#e1f5e1 + style Plan fill:#fff4e6 + style Select fill:#ffe6e6 + style Execute fill:#e6f3ff + style Assess fill:#ffd6d6 + style Q1 fill:#ffe6e6 + style Q2 fill:#ffe6e6 + style Q3 fill:#ffe6e6 + style Q4 fill:#ffe6e6 + style Synth fill:#d4edda + style Done fill:#e1f5e1 +``` + +## 6. Hypothesis Agent Workflow + +```mermaid +flowchart LR + Input[Research Query] --> Domain[Identify Domain
& Key Concepts] + Domain --> Context[Retrieve Background
Knowledge] + Context --> Generate[Generate 3-5
Initial Hypotheses] + Generate --> Refine[Refine for
Testability] + Refine --> Rank[Rank by
Quality Score] + Rank --> Output[Return Top
Hypotheses] + + Output --> Struct[Hypothesis Structure:
• Statement
• Rationale
• Testability Score
• Data Requirements
• Expected Outcomes] + + style Input fill:#e1f5e1 + style Output fill:#fff4e6 + style Struct fill:#e6f3ff +``` + +## 7. Search Agent Workflow + +```mermaid +flowchart TD + Input[Hypotheses] --> Strategy[Formulate Search
Strategy per Hypothesis] + + Strategy --> Multi[Multi-Source Search] + + Multi --> PubMed[PubMed Search
via MCP] + Multi --> ArXiv[arXiv Search
via MCP] + Multi --> BioRxiv[bioRxiv Search
via MCP] + + PubMed --> Aggregate[Aggregate Results] + ArXiv --> Aggregate + BioRxiv --> Aggregate + + Aggregate --> Filter[Filter & Rank
by Relevance] + Filter --> Dedup[Deduplicate
Cross-Reference] + Dedup --> Embed[Embed Documents
via MCP] + Embed --> Vector[(Vector DB)] + Vector --> RAGRetrieval[RAG Retrieval
Top-K per Hypothesis] + RAGRetrieval --> Output[Return Contextualized
Search Results] + + style Input fill:#fff4e6 + style Multi fill:#ffe6e6 + style Vector fill:#ffe6f0 + style Output fill:#e6f3ff +``` + +## 8. Analysis Agent Workflow + +```mermaid +flowchart TD + Input1[Hypotheses] --> Extract + Input2[Search Results] --> Extract[Extract Evidence
per Hypothesis] + + Extract --> Methods[Determine Analysis
Methods Needed] + + Methods --> Branch{Requires
Computation?} + Branch -->|Yes| GenCode[Generate Python
Analysis Code] + Branch -->|No| Qual[Qualitative
Synthesis] + + GenCode --> Execute[Execute Code
via MCP Sandbox] + Execute --> Interpret1[Interpret
Results] + Qual --> Interpret2[Interpret
Findings] + + Interpret1 --> Synthesize[Synthesize Evidence
Across Sources] + Interpret2 --> Synthesize + + Synthesize --> Verdict[Determine Verdict
per Hypothesis] + Verdict --> Support[• Supported
• Refuted
• Inconclusive] + Support --> Gaps[Identify Knowledge
Gaps & Limitations] + Gaps --> Output[Return Analysis
Report] + + style Input1 fill:#fff4e6 + style Input2 fill:#e6f3ff + style Execute fill:#ffe6e6 + style Output fill:#e6ffe6 +``` + +## 9. Report Agent Workflow + +```mermaid +flowchart TD + Input1[Query] --> Assemble + Input2[Hypotheses] --> Assemble + Input3[Search Results] --> Assemble + Input4[Analysis] --> Assemble[Assemble Report
Sections] + + Assemble --> Exec[Executive Summary] + Assemble --> Intro[Introduction] + Assemble --> Methods[Methods] + Assemble --> Results[Results per
Hypothesis] + Assemble --> Discussion[Discussion] + Assemble --> Future[Future Directions] + Assemble --> Refs[References] + + Results --> VizCheck{Needs
Visualization?} + VizCheck -->|Yes| GenViz[Generate Viz Code] + GenViz --> ExecViz[Execute via MCP
Create Charts] + ExecViz --> Combine + VizCheck -->|No| Combine[Combine All
Sections] + + Exec --> Combine + Intro --> Combine + Methods --> Combine + Discussion --> Combine + Future --> Combine + Refs --> Combine + + Combine --> Format[Format Output] + Format --> MD[Markdown] + Format --> PDF[PDF] + Format --> JSON[JSON] + + MD --> Output[Return Final
Report] + PDF --> Output + JSON --> Output + + style Input1 fill:#e1f5e1 + style Input2 fill:#fff4e6 + style Input3 fill:#e6f3ff + style Input4 fill:#e6ffe6 + style Output fill:#d4edda +``` + +## 10. Data Flow & Event Streaming + +```mermaid +flowchart TD + User[👤 User] -->|Research Query| UI[Gradio UI] + UI -->|Submit| Manager[Magentic Manager] + + Manager -->|Event: Planning| UI + Manager -->|Select Agent| HypAgent[Hypothesis Agent] + HypAgent -->|Event: Delta/Message| UI + HypAgent -->|Hypotheses| Context[(Shared Context)] + + Context -->|Retrieved by| Manager + Manager -->|Select Agent| SearchAgent[Search Agent] + SearchAgent -->|MCP Request| WebSearch[Web Search Tool] + WebSearch -->|Results| SearchAgent + SearchAgent -->|Event: Delta/Message| UI + SearchAgent -->|Documents| Context + SearchAgent -->|Embeddings| VectorDB[(Vector DB)] + + Context -->|Retrieved by| Manager + Manager -->|Select Agent| AnalysisAgent[Analysis Agent] + AnalysisAgent -->|MCP Request| CodeExec[Code Execution Tool] + CodeExec -->|Results| AnalysisAgent + AnalysisAgent -->|Event: Delta/Message| UI + AnalysisAgent -->|Analysis| Context + + Context -->|Retrieved by| Manager + Manager -->|Select Agent| ReportAgent[Report Agent] + ReportAgent -->|MCP Request| CodeExec + ReportAgent -->|Event: Delta/Message| UI + ReportAgent -->|Report| Context + + Manager -->|Event: Final Result| UI + UI -->|Display| User + + style User fill:#e1f5e1 + style UI fill:#e6f3ff + style Manager fill:#ffe6e6 + style Context fill:#ffe6f0 + style VectorDB fill:#ffe6f0 + style WebSearch fill:#f0f0f0 + style CodeExec fill:#f0f0f0 +``` + +## 11. MCP Tool Architecture + +```mermaid +graph TB + subgraph "Agent Layer" + Manager[Magentic Manager] + HypAgent[Hypothesis Agent] + SearchAgent[Search Agent] + AnalysisAgent[Analysis Agent] + ReportAgent[Report Agent] + end + + subgraph "MCP Protocol Layer" + Registry[MCP Tool Registry
• Discovers tools
• Routes requests
• Manages connections] + end + + subgraph "MCP Servers" + Server1[Web Search Server
localhost:8001
• PubMed
• arXiv
• bioRxiv] + Server2[Code Execution Server
localhost:8002
• Sandboxed Python
• Package management] + Server3[RAG Server
localhost:8003
• Vector embeddings
• Similarity search] + Server4[Visualization Server
localhost:8004
• Chart generation
• Plot rendering] + end + + subgraph "External Services" + PubMed[PubMed API] + ArXiv[arXiv API] + BioRxiv[bioRxiv API] + Modal[Modal Sandbox] + ChromaDB[(ChromaDB)] + end + + SearchAgent -->|Request| Registry + AnalysisAgent -->|Request| Registry + ReportAgent -->|Request| Registry + + Registry --> Server1 + Registry --> Server2 + Registry --> Server3 + Registry --> Server4 + + Server1 --> PubMed + Server1 --> ArXiv + Server1 --> BioRxiv + Server2 --> Modal + Server3 --> ChromaDB + + style Manager fill:#ffe6e6 + style Registry fill:#fff4e6 + style Server1 fill:#e6f3ff + style Server2 fill:#e6f3ff + style Server3 fill:#e6f3ff + style Server4 fill:#e6f3ff +``` + +## 12. Progress Tracking & Stall Detection + +```mermaid +stateDiagram-v2 + [*] --> Initialization: User Query + + Initialization --> Planning: Manager starts + + Planning --> AgentExecution: Select agent + + AgentExecution --> Assessment: Collect results + + Assessment --> QualityCheck: Evaluate output + + QualityCheck --> AgentExecution: Poor quality
(retry < max_rounds) + QualityCheck --> Planning: Poor quality
(try different agent) + QualityCheck --> NextAgent: Good quality
(task incomplete) + QualityCheck --> Synthesis: Good quality
(task complete)
+
+    NextAgent --> AgentExecution: Select next agent
+
+    state StallDetection <<choice>>
+    Assessment --> StallDetection: Check progress
+    StallDetection --> Planning: No progress
(stall count < max) + StallDetection --> ErrorRecovery: No progress
(max stalls reached) + + ErrorRecovery --> PartialReport: Generate partial results + PartialReport --> [*] + + Synthesis --> FinalReport: Combine all outputs + FinalReport --> [*] + + note right of QualityCheck + Manager assesses: + • Output completeness + • Quality metrics + • Progress made + end note + + note right of StallDetection + Stall = no new progress + after agent execution + Triggers plan reset + end note +``` + +## 13. Gradio UI Integration + +```mermaid +graph TD + App[Gradio App
DeepCritical Research Agent] + + App --> Input[Input Section] + App --> Status[Status Section] + App --> Output[Output Section] + + Input --> Query[Research Question
Text Area] + Input --> Controls[Controls] + Controls --> MaxHyp[Max Hypotheses: 1-10] + Controls --> MaxRounds[Max Rounds: 5-20] + Controls --> Submit[Start Research Button] + + Status --> Log[Real-time Event Log
• Manager planning
• Agent selection
• Execution updates
• Quality assessment] + Status --> Progress[Progress Tracker
• Current agent
• Round count
• Stall count] + + Output --> Tabs[Tabbed Results] + Tabs --> Tab1[Hypotheses Tab
Generated hypotheses with scores] + Tabs --> Tab2[Search Results Tab
Papers & sources found] + Tabs --> Tab3[Analysis Tab
Evidence & verdicts] + Tabs --> Tab4[Report Tab
Final research report] + Tab4 --> Download[Download Report
MD / PDF / JSON] + + Submit -.->|Triggers| Workflow[Magentic Workflow] + Workflow -.->|MagenticOrchestratorMessageEvent| Log + Workflow -.->|MagenticAgentDeltaEvent| Log + Workflow -.->|MagenticAgentMessageEvent| Log + Workflow -.->|MagenticFinalResultEvent| Tab4 + + style App fill:#e1f5e1 + style Input fill:#fff4e6 + style Status fill:#e6f3ff + style Output fill:#e6ffe6 + style Workflow fill:#ffe6e6 +``` + +## 14. Complete System Context + +```mermaid +graph LR + User[👤 Researcher
Asks research questions] -->|Submits query| DC[DeepCritical
Magentic Workflow] + + DC -->|Literature search| PubMed[PubMed API
Medical papers] + DC -->|Preprint search| ArXiv[arXiv API
Scientific preprints] + DC -->|Biology search| BioRxiv[bioRxiv API
Biology preprints] + DC -->|Agent reasoning| Claude[Claude API
Sonnet 4 / Opus] + DC -->|Code execution| Modal[Modal Sandbox
Safe Python env] + DC -->|Vector storage| Chroma[ChromaDB
Embeddings & RAG] + + DC -->|Deployed on| HF[HuggingFace Spaces
Gradio 6.0] + + PubMed -->|Results| DC + ArXiv -->|Results| DC + BioRxiv -->|Results| DC + Claude -->|Responses| DC + Modal -->|Output| DC + Chroma -->|Context| DC + + DC -->|Research report| User + + style User fill:#e1f5e1 + style DC fill:#ffe6e6 + style PubMed fill:#e6f3ff + style ArXiv fill:#e6f3ff + style BioRxiv fill:#e6f3ff + style Claude fill:#ffd6d6 + style Modal fill:#f0f0f0 + style Chroma fill:#ffe6f0 + style HF fill:#d4edda +``` + +## 15. Workflow Timeline (Simplified) + +```mermaid +gantt + title DeepCritical Magentic Workflow - Typical Execution + dateFormat mm:ss + axisFormat %M:%S + + section Manager Planning + Initial planning :p1, 00:00, 10s + + section Hypothesis Agent + Generate hypotheses :h1, after p1, 30s + Manager assessment :h2, after h1, 5s + + section Search Agent + Search hypothesis 1 :s1, after h2, 20s + Search hypothesis 2 :s2, after s1, 20s + Search hypothesis 3 :s3, after s2, 20s + RAG processing :s4, after s3, 15s + Manager assessment :s5, after s4, 5s + + section Analysis Agent + Evidence extraction :a1, after s5, 15s + Code generation :a2, after a1, 20s + Code execution :a3, after a2, 25s + Synthesis :a4, after a3, 20s + Manager assessment :a5, after a4, 5s + + section Report Agent + Report assembly :r1, after a5, 30s + Visualization :r2, after r1, 15s + Formatting :r3, after r2, 10s + + section Manager Synthesis + Final synthesis :f1, after r3, 10s +``` + +--- + +## Key Differences from Original Design + +| Aspect | Original (Judge-in-Loop) | New (Magentic) | +|--------|-------------------------|----------------| +| **Control Flow** | Fixed sequential phases | Dynamic agent selection | +| **Quality Control** | Separate Judge Agent | Manager assessment built-in | +| **Retry Logic** | Phase-level with feedback | Agent-level with adaptation | +| **Flexibility** | Rigid 4-phase pipeline | Adaptive workflow | +| **Complexity** | 5 agents (including Judge) | 4 agents (no Judge) | +| **Progress Tracking** | Manual state management | Built-in round/stall detection | +| **Agent Coordination** | Sequential handoff | Manager-driven dynamic selection | +| **Error Recovery** | Retry same phase | Try different agent or replan | + +--- + +## Simplified Design Principles + +1. **Manager is Intelligent**: LLM-powered manager handles planning, selection, and quality assessment +2. **No Separate Judge**: Manager's assessment phase replaces dedicated Judge Agent +3. **Dynamic Workflow**: Agents can be called multiple times in any order based on need +4. **Built-in Safety**: max_round_count (15) and max_stall_count (3) prevent infinite loops +5. **Event-Driven UI**: Real-time streaming updates to Gradio interface +6. **MCP-Powered Tools**: All external capabilities via Model Context Protocol +7. **Shared Context**: Centralized state accessible to all agents +8. 
**Progress Awareness**: Manager tracks what's been done and what's needed + +--- + +## Legend + +- 🔴 **Red/Pink**: Manager, orchestration, decision-making +- 🟡 **Yellow/Orange**: Specialist agents, processing +- 🔵 **Blue**: Data, tools, MCP services +- 🟣 **Purple/Pink**: Storage, databases, state +- 🟢 **Green**: User interactions, final outputs +- ⚪ **Gray**: External services, APIs + +--- + +## Implementation Highlights + +**Simple 4-Agent Setup:** +```python +workflow = ( + MagenticBuilder() + .participants( + hypothesis=HypothesisAgent(tools=[background_tool]), + search=SearchAgent(tools=[web_search, rag_tool]), + analysis=AnalysisAgent(tools=[code_execution]), + report=ReportAgent(tools=[code_execution, visualization]) + ) + .with_standard_manager( + chat_client=AnthropicClient(model="claude-sonnet-4"), + max_round_count=15, # Prevent infinite loops + max_stall_count=3 # Detect stuck workflows + ) + .build() +) +``` + +**Manager handles quality assessment in its instructions:** +- Checks hypothesis quality (testable, novel, clear) +- Validates search results (relevant, authoritative, recent) +- Assesses analysis soundness (methodology, evidence, conclusions) +- Ensures report completeness (all sections, proper citations) + +No separate Judge Agent needed - manager does it all! + +--- + +**Document Version**: 2.0 (Magentic Simplified) +**Last Updated**: 2025-11-24 +**Architecture**: Microsoft Magentic Orchestration Pattern +**Agents**: 4 (Hypothesis, Search, Analysis, Report) + 1 Manager +**License**: MIT diff --git a/docs/configuration/CONFIGURATION.md b/docs/configuration/CONFIGURATION.md new file mode 100644 index 0000000000000000000000000000000000000000..b33bdcce1d71eb39664e84dc94f25eca451f70e5 --- /dev/null +++ b/docs/configuration/CONFIGURATION.md @@ -0,0 +1,743 @@ +# Configuration Guide + +## Overview + +DeepCritical uses **Pydantic Settings** for centralized configuration management. All settings are defined in the `Settings` class in `src/utils/config.py` and can be configured via environment variables or a `.env` file. + +The configuration system provides: + +- **Type Safety**: Strongly-typed fields with Pydantic validation +- **Environment File Support**: Automatically loads from `.env` file (if present) +- **Case-Insensitive**: Environment variables are case-insensitive +- **Singleton Pattern**: Global `settings` instance for easy access throughout the codebase +- **Validation**: Automatic validation on load with helpful error messages + +## Quick Start + +1. Create a `.env` file in the project root +2. Set at least one LLM API key (`OPENAI_API_KEY`, `ANTHROPIC_API_KEY`, or `HF_TOKEN`) +3. Optionally configure other services as needed +4. 
The application will automatically load and validate your configuration + +## Configuration System Architecture + +### Settings Class + +The `Settings` class extends `BaseSettings` from `pydantic_settings` and defines all application configuration: + +```13:21:src/utils/config.py +class Settings(BaseSettings): + """Strongly-typed application settings.""" + + model_config = SettingsConfigDict( + env_file=".env", + env_file_encoding="utf-8", + case_sensitive=False, + extra="ignore", + ) +``` + +### Singleton Instance + +A global `settings` instance is available for import: + +```234:235:src/utils/config.py +# Singleton for easy import +settings = get_settings() +``` + +### Usage Pattern + +Access configuration throughout the codebase: + +```python +from src.utils.config import settings + +# Check if API keys are available +if settings.has_openai_key: + # Use OpenAI + pass + +# Access configuration values +max_iterations = settings.max_iterations +web_search_provider = settings.web_search_provider +``` + +## Required Configuration + +### LLM Provider + +You must configure at least one LLM provider. The system supports: + +- **OpenAI**: Requires `OPENAI_API_KEY` +- **Anthropic**: Requires `ANTHROPIC_API_KEY` +- **HuggingFace**: Optional `HF_TOKEN` or `HUGGINGFACE_API_KEY` (can work without key for public models) + +#### OpenAI Configuration + +```bash +LLM_PROVIDER=openai +OPENAI_API_KEY=your_openai_api_key_here +OPENAI_MODEL=gpt-5.1 +``` + +The default model is defined in the `Settings` class: + +```29:29:src/utils/config.py + openai_model: str = Field(default="gpt-5.1", description="OpenAI model name") +``` + +#### Anthropic Configuration + +```bash +LLM_PROVIDER=anthropic +ANTHROPIC_API_KEY=your_anthropic_api_key_here +ANTHROPIC_MODEL=claude-sonnet-4-5-20250929 +``` + +The default model is defined in the `Settings` class: + +```30:32:src/utils/config.py + anthropic_model: str = Field( + default="claude-sonnet-4-5-20250929", description="Anthropic model" + ) +``` + +#### HuggingFace Configuration + +HuggingFace can work without an API key for public models, but an API key provides higher rate limits: + +```bash +# Option 1: Using HF_TOKEN (preferred) +HF_TOKEN=your_huggingface_token_here + +# Option 2: Using HUGGINGFACE_API_KEY (alternative) +HUGGINGFACE_API_KEY=your_huggingface_api_key_here + +# Default model +HUGGINGFACE_MODEL=meta-llama/Llama-3.1-8B-Instruct +``` + +The HuggingFace token can be set via either environment variable: + +```33:35:src/utils/config.py + hf_token: str | None = Field( + default=None, alias="HF_TOKEN", description="HuggingFace API token" + ) +``` + +```57:59:src/utils/config.py + huggingface_api_key: str | None = Field( + default=None, description="HuggingFace API token (HF_TOKEN or HUGGINGFACE_API_KEY)" + ) +``` + +## Optional Configuration + +### Embedding Configuration + +DeepCritical supports multiple embedding providers for semantic search and RAG: + +```bash +# Embedding Provider: "openai", "local", or "huggingface" +EMBEDDING_PROVIDER=local + +# OpenAI Embedding Model (used by LlamaIndex RAG) +OPENAI_EMBEDDING_MODEL=text-embedding-3-small + +# Local Embedding Model (sentence-transformers, used by EmbeddingService) +LOCAL_EMBEDDING_MODEL=all-MiniLM-L6-v2 + +# HuggingFace Embedding Model +HUGGINGFACE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 +``` + +The embedding provider configuration: + +```47:50:src/utils/config.py + embedding_provider: Literal["openai", "local", "huggingface"] = Field( + default="local", + description="Embedding provider to 
use", + ) +``` + +**Note**: OpenAI embeddings require `OPENAI_API_KEY`. The local provider (default) uses sentence-transformers and requires no API key. + +### Web Search Configuration + +DeepCritical supports multiple web search providers: + +```bash +# Web Search Provider: "serper", "searchxng", "brave", "tavily", or "duckduckgo" +# Default: "duckduckgo" (no API key required) +WEB_SEARCH_PROVIDER=duckduckgo + +# Serper API Key (for Google search via Serper) +SERPER_API_KEY=your_serper_api_key_here + +# SearchXNG Host URL (for self-hosted search) +SEARCHXNG_HOST=http://localhost:8080 + +# Brave Search API Key +BRAVE_API_KEY=your_brave_api_key_here + +# Tavily API Key +TAVILY_API_KEY=your_tavily_api_key_here +``` + +The web search provider configuration: + +```71:74:src/utils/config.py + web_search_provider: Literal["serper", "searchxng", "brave", "tavily", "duckduckgo"] = Field( + default="duckduckgo", + description="Web search provider to use", + ) +``` + +**Note**: DuckDuckGo is the default and requires no API key, making it ideal for development and testing. + +### PubMed Configuration + +PubMed search supports optional NCBI API key for higher rate limits: + +```bash +# NCBI API Key (optional, for higher rate limits: 10 req/sec vs 3 req/sec) +NCBI_API_KEY=your_ncbi_api_key_here +``` + +The PubMed tool uses this configuration: + +```22:29:src/tools/pubmed.py + def __init__(self, api_key: str | None = None) -> None: + self.api_key = api_key or settings.ncbi_api_key + # Ignore placeholder values from .env.example + if self.api_key == "your-ncbi-key-here": + self.api_key = None + + # Use shared rate limiter + self._limiter = get_pubmed_limiter(self.api_key) +``` + +### Agent Configuration + +Control agent behavior and research loop execution: + +```bash +# Maximum iterations per research loop (1-50, default: 10) +MAX_ITERATIONS=10 + +# Search timeout in seconds +SEARCH_TIMEOUT=30 + +# Use graph-based execution for research flows +USE_GRAPH_EXECUTION=false +``` + +The agent configuration fields: + +```80:85:src/utils/config.py + # Agent Configuration + max_iterations: int = Field(default=10, ge=1, le=50) + search_timeout: int = Field(default=30, description="Seconds to wait for search") + use_graph_execution: bool = Field( + default=False, description="Use graph-based execution for research flows" + ) +``` + +### Budget & Rate Limiting Configuration + +Control resource limits for research loops: + +```bash +# Default token budget per research loop (1000-1000000, default: 100000) +DEFAULT_TOKEN_LIMIT=100000 + +# Default time limit per research loop in minutes (1-120, default: 10) +DEFAULT_TIME_LIMIT_MINUTES=10 + +# Default iterations limit per research loop (1-50, default: 10) +DEFAULT_ITERATIONS_LIMIT=10 +``` + +The budget configuration with validation: + +```87:105:src/utils/config.py + # Budget & Rate Limiting Configuration + default_token_limit: int = Field( + default=100000, + ge=1000, + le=1000000, + description="Default token budget per research loop", + ) + default_time_limit_minutes: int = Field( + default=10, + ge=1, + le=120, + description="Default time limit per research loop (minutes)", + ) + default_iterations_limit: int = Field( + default=10, + ge=1, + le=50, + description="Default iterations limit per research loop", + ) +``` + +### RAG Service Configuration + +Configure the Retrieval-Augmented Generation service: + +```bash +# ChromaDB collection name for RAG +RAG_COLLECTION_NAME=deepcritical_evidence + +# Number of top results to retrieve from RAG (1-50, default: 5) 
+RAG_SIMILARITY_TOP_K=5 + +# Automatically ingest evidence into RAG +RAG_AUTO_INGEST=true +``` + +The RAG configuration: + +```127:141:src/utils/config.py + # RAG Service Configuration + rag_collection_name: str = Field( + default="deepcritical_evidence", + description="ChromaDB collection name for RAG", + ) + rag_similarity_top_k: int = Field( + default=5, + ge=1, + le=50, + description="Number of top results to retrieve from RAG", + ) + rag_auto_ingest: bool = Field( + default=True, + description="Automatically ingest evidence into RAG", + ) +``` + +### ChromaDB Configuration + +Configure the vector database for embeddings and RAG: + +```bash +# ChromaDB storage path +CHROMA_DB_PATH=./chroma_db + +# Whether to persist ChromaDB to disk +CHROMA_DB_PERSIST=true + +# ChromaDB server host (for remote ChromaDB, optional) +CHROMA_DB_HOST=localhost + +# ChromaDB server port (for remote ChromaDB, optional) +CHROMA_DB_PORT=8000 +``` + +The ChromaDB configuration: + +```113:125:src/utils/config.py + chroma_db_path: str = Field(default="./chroma_db", description="ChromaDB storage path") + chroma_db_persist: bool = Field( + default=True, + description="Whether to persist ChromaDB to disk", + ) + chroma_db_host: str | None = Field( + default=None, + description="ChromaDB server host (for remote ChromaDB)", + ) + chroma_db_port: int | None = Field( + default=None, + description="ChromaDB server port (for remote ChromaDB)", + ) +``` + +### External Services + +#### Modal Configuration + +Modal is used for secure sandbox execution of statistical analysis: + +```bash +# Modal Token ID (for Modal sandbox execution) +MODAL_TOKEN_ID=your_modal_token_id_here + +# Modal Token Secret +MODAL_TOKEN_SECRET=your_modal_token_secret_here +``` + +The Modal configuration: + +```110:112:src/utils/config.py + # External Services + modal_token_id: str | None = Field(default=None, description="Modal token ID") + modal_token_secret: str | None = Field(default=None, description="Modal token secret") +``` + +### Logging Configuration + +Configure structured logging: + +```bash +# Log Level: "DEBUG", "INFO", "WARNING", or "ERROR" +LOG_LEVEL=INFO +``` + +The logging configuration: + +```107:108:src/utils/config.py + # Logging + log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR"] = "INFO" +``` + +Logging is configured via the `configure_logging()` function: + +```212:231:src/utils/config.py +def configure_logging(settings: Settings) -> None: + """Configure structured logging with the configured log level.""" + # Set stdlib logging level from settings + logging.basicConfig( + level=getattr(logging, settings.log_level), + format="%(message)s", + ) + + structlog.configure( + processors=[ + structlog.stdlib.filter_by_level, + structlog.stdlib.add_logger_name, + structlog.stdlib.add_log_level, + structlog.processors.TimeStamper(fmt="iso"), + structlog.processors.JSONRenderer(), + ], + wrapper_class=structlog.stdlib.BoundLogger, + context_class=dict, + logger_factory=structlog.stdlib.LoggerFactory(), + ) +``` + +## Configuration Properties + +The `Settings` class provides helpful properties for checking configuration state: + +### API Key Availability + +Check which API keys are available: + +```171:189:src/utils/config.py + @property + def has_openai_key(self) -> bool: + """Check if OpenAI API key is available.""" + return bool(self.openai_api_key) + + @property + def has_anthropic_key(self) -> bool: + """Check if Anthropic API key is available.""" + return bool(self.anthropic_api_key) + + @property + def 
has_huggingface_key(self) -> bool: + """Check if HuggingFace API key is available.""" + return bool(self.huggingface_api_key or self.hf_token) + + @property + def has_any_llm_key(self) -> bool: + """Check if any LLM API key is available.""" + return self.has_openai_key or self.has_anthropic_key or self.has_huggingface_key +``` + +**Usage:** + +```python +from src.utils.config import settings + +# Check API key availability +if settings.has_openai_key: + # Use OpenAI + pass + +if settings.has_anthropic_key: + # Use Anthropic + pass + +if settings.has_huggingface_key: + # Use HuggingFace + pass + +if settings.has_any_llm_key: + # At least one LLM is available + pass +``` + +### Service Availability + +Check if external services are configured: + +```143:146:src/utils/config.py + @property + def modal_available(self) -> bool: + """Check if Modal credentials are configured.""" + return bool(self.modal_token_id and self.modal_token_secret) +``` + +```191:204:src/utils/config.py + @property + def web_search_available(self) -> bool: + """Check if web search is available (either no-key provider or API key present).""" + if self.web_search_provider == "duckduckgo": + return True # No API key required + if self.web_search_provider == "serper": + return bool(self.serper_api_key) + if self.web_search_provider == "searchxng": + return bool(self.searchxng_host) + if self.web_search_provider == "brave": + return bool(self.brave_api_key) + if self.web_search_provider == "tavily": + return bool(self.tavily_api_key) + return False +``` + +**Usage:** + +```python +from src.utils.config import settings + +# Check service availability +if settings.modal_available: + # Use Modal sandbox + pass + +if settings.web_search_available: + # Web search is configured + pass +``` + +### API Key Retrieval + +Get the API key for the configured provider: + +```148:160:src/utils/config.py + def get_api_key(self) -> str: + """Get the API key for the configured provider.""" + if self.llm_provider == "openai": + if not self.openai_api_key: + raise ConfigurationError("OPENAI_API_KEY not set") + return self.openai_api_key + + if self.llm_provider == "anthropic": + if not self.anthropic_api_key: + raise ConfigurationError("ANTHROPIC_API_KEY not set") + return self.anthropic_api_key + + raise ConfigurationError(f"Unknown LLM provider: {self.llm_provider}") +``` + +For OpenAI-specific operations (e.g., Magentic mode): + +```162:169:src/utils/config.py + def get_openai_api_key(self) -> str: + """Get OpenAI API key (required for Magentic function calling).""" + if not self.openai_api_key: + raise ConfigurationError( + "OPENAI_API_KEY not set. Magentic mode requires OpenAI for function calling. " + "Use mode='simple' for other providers." 
+ ) + return self.openai_api_key +``` + +## Configuration Usage in Codebase + +The configuration system is used throughout the codebase: + +### LLM Factory + +The LLM factory uses settings to create appropriate models: + +```129:144:src/utils/llm_factory.py + if settings.llm_provider == "huggingface": + model_name = settings.huggingface_model or "meta-llama/Llama-3.1-8B-Instruct" + hf_provider = HuggingFaceProvider(api_key=settings.hf_token) + return HuggingFaceModel(model_name, provider=hf_provider) + + if settings.llm_provider == "openai": + if not settings.openai_api_key: + raise ConfigurationError("OPENAI_API_KEY not set for pydantic-ai") + provider = OpenAIProvider(api_key=settings.openai_api_key) + return OpenAIModel(settings.openai_model, provider=provider) + + if settings.llm_provider == "anthropic": + if not settings.anthropic_api_key: + raise ConfigurationError("ANTHROPIC_API_KEY not set for pydantic-ai") + anthropic_provider = AnthropicProvider(api_key=settings.anthropic_api_key) + return AnthropicModel(settings.anthropic_model, provider=anthropic_provider) +``` + +### Embedding Service + +The embedding service uses local embedding model configuration: + +```29:31:src/services/embeddings.py + def __init__(self, model_name: str | None = None): + self._model_name = model_name or settings.local_embedding_model + self._model = SentenceTransformer(self._model_name) +``` + +### Orchestrator Factory + +The orchestrator factory uses settings to determine mode: + +```69:80:src/orchestrator_factory.py +def _determine_mode(explicit_mode: str | None) -> str: + """Determine which mode to use.""" + if explicit_mode: + if explicit_mode in ("magentic", "advanced"): + return "advanced" + return "simple" + + # Auto-detect: advanced if paid API key available + if settings.has_openai_key: + return "advanced" + + return "simple" +``` + +## Environment Variables Reference + +### Required (at least one LLM) + +- `OPENAI_API_KEY` - OpenAI API key (required for OpenAI provider) +- `ANTHROPIC_API_KEY` - Anthropic API key (required for Anthropic provider) +- `HF_TOKEN` or `HUGGINGFACE_API_KEY` - HuggingFace API token (optional, can work without for public models) + +#### LLM Configuration Variables + +- `LLM_PROVIDER` - Provider to use: `"openai"`, `"anthropic"`, or `"huggingface"` (default: `"huggingface"`) +- `OPENAI_MODEL` - OpenAI model name (default: `"gpt-5.1"`) +- `ANTHROPIC_MODEL` - Anthropic model name (default: `"claude-sonnet-4-5-20250929"`) +- `HUGGINGFACE_MODEL` - HuggingFace model ID (default: `"meta-llama/Llama-3.1-8B-Instruct"`) + +#### Embedding Configuration Variables + +- `EMBEDDING_PROVIDER` - Provider: `"openai"`, `"local"`, or `"huggingface"` (default: `"local"`) +- `OPENAI_EMBEDDING_MODEL` - OpenAI embedding model (default: `"text-embedding-3-small"`) +- `LOCAL_EMBEDDING_MODEL` - Local sentence-transformers model (default: `"all-MiniLM-L6-v2"`) +- `HUGGINGFACE_EMBEDDING_MODEL` - HuggingFace embedding model (default: `"sentence-transformers/all-MiniLM-L6-v2"`) + +#### Web Search Configuration Variables + +- `WEB_SEARCH_PROVIDER` - Provider: `"serper"`, `"searchxng"`, `"brave"`, `"tavily"`, or `"duckduckgo"` (default: `"duckduckgo"`) +- `SERPER_API_KEY` - Serper API key (required for Serper provider) +- `SEARCHXNG_HOST` - SearchXNG host URL (required for SearchXNG provider) +- `BRAVE_API_KEY` - Brave Search API key (required for Brave provider) +- `TAVILY_API_KEY` - Tavily API key (required for Tavily provider) + +#### PubMed Configuration Variables + +- `NCBI_API_KEY` - NCBI API key 
(optional, increases rate limit from 3 to 10 req/sec) + +#### Agent Configuration Variables + +- `MAX_ITERATIONS` - Maximum iterations per research loop (1-50, default: `10`) +- `SEARCH_TIMEOUT` - Search timeout in seconds (default: `30`) +- `USE_GRAPH_EXECUTION` - Use graph-based execution (default: `false`) + +#### Budget Configuration Variables + +- `DEFAULT_TOKEN_LIMIT` - Default token budget per research loop (1000-1000000, default: `100000`) +- `DEFAULT_TIME_LIMIT_MINUTES` - Default time limit in minutes (1-120, default: `10`) +- `DEFAULT_ITERATIONS_LIMIT` - Default iterations limit (1-50, default: `10`) + +#### RAG Configuration Variables + +- `RAG_COLLECTION_NAME` - ChromaDB collection name (default: `"deepcritical_evidence"`) +- `RAG_SIMILARITY_TOP_K` - Number of top results to retrieve (1-50, default: `5`) +- `RAG_AUTO_INGEST` - Automatically ingest evidence into RAG (default: `true`) + +#### ChromaDB Configuration Variables + +- `CHROMA_DB_PATH` - ChromaDB storage path (default: `"./chroma_db"`) +- `CHROMA_DB_PERSIST` - Whether to persist ChromaDB to disk (default: `true`) +- `CHROMA_DB_HOST` - ChromaDB server host (optional, for remote ChromaDB) +- `CHROMA_DB_PORT` - ChromaDB server port (optional, for remote ChromaDB) + +#### External Services Variables + +- `MODAL_TOKEN_ID` - Modal token ID (optional, for Modal sandbox execution) +- `MODAL_TOKEN_SECRET` - Modal token secret (optional, for Modal sandbox execution) + +#### Logging Configuration Variables + +- `LOG_LEVEL` - Log level: `"DEBUG"`, `"INFO"`, `"WARNING"`, or `"ERROR"` (default: `"INFO"`) + +## Validation + +Settings are validated on load using Pydantic validation: + +- **Type Checking**: All fields are strongly typed +- **Range Validation**: Numeric fields have min/max constraints (e.g., `ge=1, le=50` for `max_iterations`) +- **Literal Validation**: Enum fields only accept specific values (e.g., `Literal["openai", "anthropic", "huggingface"]`) +- **Required Fields**: API keys are checked when accessed via `get_api_key()` or `get_openai_api_key()` + +### Validation Examples + +The `max_iterations` field has range validation: + +```81:81:src/utils/config.py + max_iterations: int = Field(default=10, ge=1, le=50) +``` + +The `llm_provider` field has literal validation: + +```26:28:src/utils/config.py + llm_provider: Literal["openai", "anthropic", "huggingface"] = Field( + default="openai", description="Which LLM provider to use" + ) +``` + +## Error Handling + +Configuration errors raise `ConfigurationError` from `src/utils/exceptions.py`: + +```22:25:src/utils/exceptions.py +class ConfigurationError(DeepCriticalError): + """Raised when configuration is invalid.""" + + pass +``` + +### Error Handling Example + +```python +from src.utils.config import settings +from src.utils.exceptions import ConfigurationError + +try: + api_key = settings.get_api_key() +except ConfigurationError as e: + print(f"Configuration error: {e}") +``` + +### Common Configuration Errors + +1. **Missing API Key**: When `get_api_key()` is called but the required API key is not set +2. **Invalid Provider**: When `llm_provider` is set to an unsupported value +3. **Out of Range**: When numeric values exceed their min/max constraints +4. **Invalid Literal**: When enum fields receive unsupported values + +## Configuration Best Practices + +1. **Use `.env` File**: Store sensitive keys in `.env` file (add to `.gitignore`) +2. **Check Availability**: Use properties like `has_openai_key` before accessing API keys +3. 
**Handle Errors**: Always catch `ConfigurationError` when calling `get_api_key()` +4. **Validate Early**: Configuration is validated on import, so errors surface immediately +5. **Use Defaults**: Leverage sensible defaults for optional configuration + +## Future Enhancements + +The following configurations are planned for future phases: + +1. **Additional LLM Providers**: DeepSeek, OpenRouter, Gemini, Perplexity, Azure OpenAI, Local models +2. **Model Selection**: Reasoning/main/fast model configuration +3. **Service Integration**: Additional service integrations and configurations + diff --git a/docs/configuration/index.md b/docs/configuration/index.md index 729345de47dee08be61b75fcf74e8ac91438ce21..d7f10d2fff20e213c98f3871e71d9dd023a21a81 100644 --- a/docs/configuration/index.md +++ b/docs/configuration/index.md @@ -25,9 +25,17 @@ The configuration system provides: The [`Settings`][settings-class] class extends `BaseSettings` from `pydantic_settings` and defines all application configuration: - -[Settings Class Definition](../src/utils/config.py) start_line:13 end_line:21 - +```13:21:src/utils/config.py +class Settings(BaseSettings): + """Strongly-typed application settings.""" + + model_config = SettingsConfigDict( + env_file=".env", + env_file_encoding="utf-8", + case_sensitive=False, + extra="ignore", + ) +``` [View source](https://github.com/DeepCritical/GradioDemo/blob/main/src/utils/config.py#L13-L21) @@ -35,9 +43,10 @@ The [`Settings`][settings-class] class extends `BaseSettings` from `pydantic_set A global `settings` instance is available for import: - -[Singleton Instance](../src/utils/config.py) start_line:234 end_line:235 - +```234:235:src/utils/config.py +# Singleton for easy import +settings = get_settings() +``` [View source](https://github.com/DeepCritical/GradioDemo/blob/main/src/utils/config.py#L234-L235) @@ -78,9 +87,9 @@ OPENAI_MODEL=gpt-5.1 The default model is defined in the `Settings` class: - -[OpenAI Model Configuration](../src/utils/config.py) start_line:29 end_line:29 - +```29:29:src/utils/config.py + openai_model: str = Field(default="gpt-5.1", description="OpenAI model name") +``` #### Anthropic Configuration @@ -92,9 +101,11 @@ ANTHROPIC_MODEL=claude-sonnet-4-5-20250929 The default model is defined in the `Settings` class: - -[Anthropic Model Configuration](../src/utils/config.py) start_line:30 end_line:32 - +```30:32:src/utils/config.py + anthropic_model: str = Field( + default="claude-sonnet-4-5-20250929", description="Anthropic model" + ) +``` #### HuggingFace Configuration @@ -113,13 +124,17 @@ HUGGINGFACE_MODEL=meta-llama/Llama-3.1-8B-Instruct The HuggingFace token can be set via either environment variable: - -[HuggingFace Token Configuration](../src/utils/config.py) start_line:33 end_line:35 - +```33:35:src/utils/config.py + hf_token: str | None = Field( + default=None, alias="HF_TOKEN", description="HuggingFace API token" + ) +``` - -[HuggingFace API Key Configuration](../src/utils/config.py) start_line:57 end_line:59 - +```57:59:src/utils/config.py + huggingface_api_key: str | None = Field( + default=None, description="HuggingFace API token (HF_TOKEN or HUGGINGFACE_API_KEY)" + ) +``` ## Optional Configuration @@ -143,9 +158,12 @@ HUGGINGFACE_EMBEDDING_MODEL=sentence-transformers/all-MiniLM-L6-v2 The embedding provider configuration: - -[Embedding Provider Configuration](../src/utils/config.py) start_line:47 end_line:50 - +```47:50:src/utils/config.py + embedding_provider: Literal["openai", "local", "huggingface"] = Field( + default="local", + 
description="Embedding provider to use", + ) +``` **Note**: OpenAI embeddings require `OPENAI_API_KEY`. The local provider (default) uses sentence-transformers and requires no API key. @@ -173,9 +191,12 @@ TAVILY_API_KEY=your_tavily_api_key_here The web search provider configuration: - -[Web Search Provider Configuration](../src/utils/config.py) start_line:71 end_line:74 - +```71:74:src/utils/config.py + web_search_provider: Literal["serper", "searchxng", "brave", "tavily", "duckduckgo"] = Field( + default="duckduckgo", + description="Web search provider to use", + ) +``` **Note**: DuckDuckGo is the default and requires no API key, making it ideal for development and testing. @@ -190,9 +211,16 @@ NCBI_API_KEY=your_ncbi_api_key_here The PubMed tool uses this configuration: - -[PubMed Tool Configuration](../src/tools/pubmed.py) start_line:22 end_line:29 - +```22:29:src/tools/pubmed.py + def __init__(self, api_key: str | None = None) -> None: + self.api_key = api_key or settings.ncbi_api_key + # Ignore placeholder values from .env.example + if self.api_key == "your-ncbi-key-here": + self.api_key = None + + # Use shared rate limiter + self._limiter = get_pubmed_limiter(self.api_key) +``` ### Agent Configuration @@ -211,9 +239,14 @@ USE_GRAPH_EXECUTION=false The agent configuration fields: - -[Agent Configuration](../src/utils/config.py) start_line:80 end_line:85 - +```80:85:src/utils/config.py + # Agent Configuration + max_iterations: int = Field(default=10, ge=1, le=50) + search_timeout: int = Field(default=30, description="Seconds to wait for search") + use_graph_execution: bool = Field( + default=False, description="Use graph-based execution for research flows" + ) +``` ### Budget & Rate Limiting Configuration @@ -232,9 +265,27 @@ DEFAULT_ITERATIONS_LIMIT=10 The budget configuration with validation: - -[Budget Configuration](../src/utils/config.py) start_line:87 end_line:105 - +```87:105:src/utils/config.py + # Budget & Rate Limiting Configuration + default_token_limit: int = Field( + default=100000, + ge=1000, + le=1000000, + description="Default token budget per research loop", + ) + default_time_limit_minutes: int = Field( + default=10, + ge=1, + le=120, + description="Default time limit per research loop (minutes)", + ) + default_iterations_limit: int = Field( + default=10, + ge=1, + le=50, + description="Default iterations limit per research loop", + ) +``` ### RAG Service Configuration @@ -253,9 +304,23 @@ RAG_AUTO_INGEST=true The RAG configuration: - -[RAG Service Configuration](../src/utils/config.py) start_line:127 end_line:141 - +```127:141:src/utils/config.py + # RAG Service Configuration + rag_collection_name: str = Field( + default="deepcritical_evidence", + description="ChromaDB collection name for RAG", + ) + rag_similarity_top_k: int = Field( + default=5, + ge=1, + le=50, + description="Number of top results to retrieve from RAG", + ) + rag_auto_ingest: bool = Field( + default=True, + description="Automatically ingest evidence into RAG", + ) +``` ### ChromaDB Configuration @@ -277,9 +342,21 @@ CHROMA_DB_PORT=8000 The ChromaDB configuration: - -[ChromaDB Configuration](../src/utils/config.py) start_line:113 end_line:125 - +```113:125:src/utils/config.py + chroma_db_path: str = Field(default="./chroma_db", description="ChromaDB storage path") + chroma_db_persist: bool = Field( + default=True, + description="Whether to persist ChromaDB to disk", + ) + chroma_db_host: str | None = Field( + default=None, + description="ChromaDB server host (for remote ChromaDB)", + ) + 
chroma_db_port: int | None = Field( + default=None, + description="ChromaDB server port (for remote ChromaDB)", + ) +``` ### External Services @@ -297,9 +374,11 @@ MODAL_TOKEN_SECRET=your_modal_token_secret_here The Modal configuration: - -[Modal Configuration](../src/utils/config.py) start_line:110 end_line:112 - +```110:112:src/utils/config.py + # External Services + modal_token_id: str | None = Field(default=None, description="Modal token ID") + modal_token_secret: str | None = Field(default=None, description="Modal token secret") +``` ### Logging Configuration @@ -312,15 +391,35 @@ LOG_LEVEL=INFO The logging configuration: - -[Logging Configuration](../src/utils/config.py) start_line:107 end_line:108 - +```107:108:src/utils/config.py + # Logging + log_level: Literal["DEBUG", "INFO", "WARNING", "ERROR"] = "INFO" +``` Logging is configured via the `configure_logging()` function: - -[Configure Logging Function](../src/utils/config.py) start_line:212 end_line:231 - +```212:231:src/utils/config.py +def configure_logging(settings: Settings) -> None: + """Configure structured logging with the configured log level.""" + # Set stdlib logging level from settings + logging.basicConfig( + level=getattr(logging, settings.log_level), + format="%(message)s", + ) + + structlog.configure( + processors=[ + structlog.stdlib.filter_by_level, + structlog.stdlib.add_logger_name, + structlog.stdlib.add_log_level, + structlog.processors.TimeStamper(fmt="iso"), + structlog.processors.JSONRenderer(), + ], + wrapper_class=structlog.stdlib.BoundLogger, + context_class=dict, + logger_factory=structlog.stdlib.LoggerFactory(), + ) +``` ## Configuration Properties @@ -330,9 +429,27 @@ The `Settings` class provides helpful properties for checking configuration stat Check which API keys are available: - -[API Key Availability Properties](../src/utils/config.py) start_line:171 end_line:189 - +```171:189:src/utils/config.py + @property + def has_openai_key(self) -> bool: + """Check if OpenAI API key is available.""" + return bool(self.openai_api_key) + + @property + def has_anthropic_key(self) -> bool: + """Check if Anthropic API key is available.""" + return bool(self.anthropic_api_key) + + @property + def has_huggingface_key(self) -> bool: + """Check if HuggingFace API key is available.""" + return bool(self.huggingface_api_key or self.hf_token) + + @property + def has_any_llm_key(self) -> bool: + """Check if any LLM API key is available.""" + return self.has_openai_key or self.has_anthropic_key or self.has_huggingface_key +``` **Usage:** @@ -361,13 +478,29 @@ if settings.has_any_llm_key: Check if external services are configured: - -[Modal Availability Property](../src/utils/config.py) start_line:143 end_line:146 - +```143:146:src/utils/config.py + @property + def modal_available(self) -> bool: + """Check if Modal credentials are configured.""" + return bool(self.modal_token_id and self.modal_token_secret) +``` - -[Web Search Availability Property](../src/utils/config.py) start_line:191 end_line:204 - +```191:204:src/utils/config.py + @property + def web_search_available(self) -> bool: + """Check if web search is available (either no-key provider or API key present).""" + if self.web_search_provider == "duckduckgo": + return True # No API key required + if self.web_search_provider == "serper": + return bool(self.serper_api_key) + if self.web_search_provider == "searchxng": + return bool(self.searchxng_host) + if self.web_search_provider == "brave": + return bool(self.brave_api_key) + if self.web_search_provider == 
"tavily": + return bool(self.tavily_api_key) + return False +``` **Usage:** @@ -388,15 +521,34 @@ if settings.web_search_available: Get the API key for the configured provider: - -[Get API Key Method](../src/utils/config.py) start_line:148 end_line:160 - +```148:160:src/utils/config.py + def get_api_key(self) -> str: + """Get the API key for the configured provider.""" + if self.llm_provider == "openai": + if not self.openai_api_key: + raise ConfigurationError("OPENAI_API_KEY not set") + return self.openai_api_key + + if self.llm_provider == "anthropic": + if not self.anthropic_api_key: + raise ConfigurationError("ANTHROPIC_API_KEY not set") + return self.anthropic_api_key + + raise ConfigurationError(f"Unknown LLM provider: {self.llm_provider}") +``` For OpenAI-specific operations (e.g., Magentic mode): - -[Get OpenAI API Key Method](../src/utils/config.py) start_line:162 end_line:169 - +```162:169:src/utils/config.py + def get_openai_api_key(self) -> str: + """Get OpenAI API key (required for Magentic function calling).""" + if not self.openai_api_key: + raise ConfigurationError( + "OPENAI_API_KEY not set. Magentic mode requires OpenAI for function calling. " + "Use mode='simple' for other providers." + ) + return self.openai_api_key +``` ## Configuration Usage in Codebase @@ -406,25 +558,53 @@ The configuration system is used throughout the codebase: The LLM factory uses settings to create appropriate models: - -[LLM Factory Usage](../src/utils/llm_factory.py) start_line:129 end_line:144 - +```129:144:src/utils/llm_factory.py + if settings.llm_provider == "huggingface": + model_name = settings.huggingface_model or "meta-llama/Llama-3.1-8B-Instruct" + hf_provider = HuggingFaceProvider(api_key=settings.hf_token) + return HuggingFaceModel(model_name, provider=hf_provider) + + if settings.llm_provider == "openai": + if not settings.openai_api_key: + raise ConfigurationError("OPENAI_API_KEY not set for pydantic-ai") + provider = OpenAIProvider(api_key=settings.openai_api_key) + return OpenAIModel(settings.openai_model, provider=provider) + + if settings.llm_provider == "anthropic": + if not settings.anthropic_api_key: + raise ConfigurationError("ANTHROPIC_API_KEY not set for pydantic-ai") + anthropic_provider = AnthropicProvider(api_key=settings.anthropic_api_key) + return AnthropicModel(settings.anthropic_model, provider=anthropic_provider) +``` ### Embedding Service The embedding service uses local embedding model configuration: - -[Embedding Service Usage](../src/services/embeddings.py) start_line:29 end_line:31 - +```29:31:src/services/embeddings.py + def __init__(self, model_name: str | None = None): + self._model_name = model_name or settings.local_embedding_model + self._model = SentenceTransformer(self._model_name) +``` ### Orchestrator Factory The orchestrator factory uses settings to determine mode: - -[Orchestrator Factory Mode Detection](../src/orchestrator_factory.py) start_line:69 end_line:80 - +```69:80:src/orchestrator_factory.py +def _determine_mode(explicit_mode: str | None) -> str: + """Determine which mode to use.""" + if explicit_mode: + if explicit_mode in ("magentic", "advanced"): + return "advanced" + return "simple" + + # Auto-detect: advanced if paid API key available + if settings.has_openai_key: + return "advanced" + + return "simple" +``` ## Environment Variables Reference @@ -507,15 +687,17 @@ Settings are validated on load using Pydantic validation: The `max_iterations` field has range validation: - -[Max Iterations Validation](../src/utils/config.py) 
start_line:81 end_line:81 - +```81:81:src/utils/config.py + max_iterations: int = Field(default=10, ge=1, le=50) +``` The `llm_provider` field has literal validation: - -[LLM Provider Literal Validation](../src/utils/config.py) start_line:26 end_line:28 - +```26:28:src/utils/config.py + llm_provider: Literal["openai", "anthropic", "huggingface"] = Field( + default="openai", description="Which LLM provider to use" + ) +``` ## Error Handling diff --git a/CONTRIBUTING.md b/docs/contributing.md similarity index 61% rename from CONTRIBUTING.md rename to docs/contributing.md index 9a93fd1f812752141d49e4e27efee17405ed9563..ddfb1c06dbd53064b62040c4ded4fa9e4e942f72 100644 --- a/CONTRIBUTING.md +++ b/docs/contributing.md @@ -1,26 +1,24 @@ -# Contributing to The DETERMINATOR +# Contributing to DeepCritical -Thank you for your interest in contributing to The DETERMINATOR! This guide will help you get started. +Thank you for your interest in contributing to DeepCritical! This guide will help you get started. ## Table of Contents - [Git Workflow](#git-workflow) - [Getting Started](#getting-started) - [Development Commands](#development-commands) +- [Code Style & Conventions](#code-style--conventions) +- [Type Safety](#type-safety) +- [Error Handling & Logging](#error-handling--logging) +- [Testing Requirements](#testing-requirements) +- [Implementation Patterns](#implementation-patterns) +- [Code Quality & Documentation](#code-quality--documentation) +- [Prompt Engineering & Citation Validation](#prompt-engineering--citation-validation) - [MCP Integration](#mcp-integration) - [Common Pitfalls](#common-pitfalls) - [Key Principles](#key-principles) - [Pull Request Process](#pull-request-process) -> **Note**: Additional sections (Code Style, Error Handling, Testing, Implementation Patterns, Code Quality, and Prompt Engineering) are available as separate pages in the [documentation](https://deepcritical.github.io/GradioDemo/contributing/). -> **Note on Project Names**: "The DETERMINATOR" is the product name, "DeepCritical" is the organization/project name, and "determinator" is the Python package name. - -## Repository Information - -- **GitHub Repository**: [`DeepCritical/GradioDemo`](https://github.com/DeepCritical/GradioDemo) (source of truth, PRs, code review) -- **HuggingFace Space**: [`DataQuests/DeepCritical`](https://huggingface.co/spaces/DataQuests/DeepCritical) (deployment/demo) -- **Package Name**: `determinator` (Python package name in `pyproject.toml`) - ## Git Workflow - `main`: Production-ready (GitHub) @@ -29,31 +27,9 @@ Thank you for your interest in contributing to The DETERMINATOR! This guide will - **NEVER** push directly to `main` or `dev` on HuggingFace - GitHub is source of truth; HuggingFace is for deployment -### Dual Repository Setup - -This project uses a dual repository setup: - -- **GitHub (`DeepCritical/GradioDemo`)**: Source of truth for code, PRs, and code review -- **HuggingFace (`DataQuests/DeepCritical`)**: Deployment target for the Gradio demo - -#### Remote Configuration - -When cloning, set up remotes as follows: - -```bash -# Clone from GitHub -git clone https://github.com/DeepCritical/GradioDemo.git -cd GradioDemo - -# Add HuggingFace remote (optional, for deployment) -git remote add huggingface-upstream https://huggingface.co/spaces/DataQuests/DeepCritical -``` - -**Important**: Never push directly to `main` or `dev` on HuggingFace. Always work through GitHub PRs. GitHub is the source of truth; HuggingFace is for deployment/demo only. - ## Getting Started -1. 
**Fork the repository** on GitHub: [`DeepCritical/GradioDemo`](https://github.com/DeepCritical/GradioDemo) +1. **Fork the repository** on GitHub 2. **Clone your fork**: ```bash @@ -64,8 +40,7 @@ git remote add huggingface-upstream https://huggingface.co/spaces/DataQuests/Dee 3. **Install dependencies**: ```bash - uv sync --all-extras - uv run pre-commit install + make install ``` 4. **Create a feature branch**: @@ -78,9 +53,7 @@ git remote add huggingface-upstream https://huggingface.co/spaces/DataQuests/Dee 6. **Run checks**: ```bash - uv run ruff check src tests - uv run mypy src - uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire + make check ``` 7. **Commit and push**: @@ -89,72 +62,22 @@ git remote add huggingface-upstream https://huggingface.co/spaces/DataQuests/Dee git commit -m "Description of changes" git push origin yourname-feature-name ``` - 8. **Create a pull request** on GitHub -## Package Manager - -This project uses [`uv`](https://github.com/astral-sh/uv) as the package manager. All commands should be prefixed with `uv run` to ensure they run in the correct environment. - -### Installation - -```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv - -# Sync all dependencies including dev extras -uv sync --all-extras - -# Install pre-commit hooks -uv run pre-commit install -``` - ## Development Commands ```bash -# Installation -uv sync --all-extras # Install all dependencies including dev -uv run pre-commit install # Install pre-commit hooks - -# Code Quality Checks (run all before committing) -uv run ruff check src tests # Lint with ruff -uv run ruff format src tests # Format with ruff -uv run mypy src # Type checking -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire # Tests with coverage - -# Testing Commands -uv run pytest tests/unit/ -v -m "not openai" -p no:logfire # Run unit tests (excludes OpenAI tests) -uv run pytest tests/ -v -m "huggingface" -p no:logfire # Run HuggingFace tests -uv run pytest tests/ -v -p no:logfire # Run all tests -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire # Tests with terminal coverage -uv run pytest --cov=src --cov-report=html -p no:logfire # Generate HTML coverage report (opens htmlcov/index.html) - -# Documentation Commands -uv run mkdocs build # Build documentation -uv run mkdocs serve # Serve documentation locally (http://127.0.0.1:8000) +make install # Install dependencies + pre-commit +make check # Lint + typecheck + test (MUST PASS) +make test # Run unit tests +make lint # Run ruff +make format # Format with ruff +make typecheck # Run mypy +make test-cov # Test with coverage +make docs-build # Build documentation +make docs-serve # Serve documentation locally ``` -### Test Markers - -The project uses pytest markers to categorize tests. 
See [Testing Guidelines](docs/contributing/testing.md) for details: - -- `unit`: Unit tests (mocked, fast) -- `integration`: Integration tests (real APIs) -- `slow`: Slow tests -- `openai`: Tests requiring OpenAI API key -- `huggingface`: Tests requiring HuggingFace API key -- `embedding_provider`: Tests requiring API-based embedding providers -- `local_embeddings`: Tests using local embeddings - -**Note**: The `-p no:logfire` flag disables the logfire plugin to avoid conflicts during testing. - ## Code Style & Conventions ### Type Safety @@ -163,9 +86,11 @@ The project uses pytest markers to categorize tests. See [Testing Guidelines](do - Use `mypy --strict` compliance (no `Any` unless absolutely necessary) - Use `TYPE_CHECKING` imports for circular dependencies: - -[TYPE_CHECKING Import Pattern](../src/utils/citation_validator.py) start_line:8 end_line:11 - +```python +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from src.services.embeddings import EmbeddingService +``` ### Pydantic Models @@ -200,10 +125,10 @@ result = await loop.run_in_executor(None, cpu_bound_function, args) ### Pre-commit -- Pre-commit hooks run automatically on commit +- Run `make check` before committing - Must pass: lint + typecheck + test-cov -- Install hooks with: `uv run pre-commit install` -- Note: `uv sync --all-extras` installs the pre-commit package, but you must run `uv run pre-commit install` separately to set up the git hooks +- Pre-commit hooks installed via `make install` +- **CRITICAL**: Make sure you run the full pre-commit checks before opening a PR (not draft), otherwise Obstacle is the Way will lose his mind ## Error Handling & Logging @@ -211,9 +136,10 @@ result = await loop.run_in_executor(None, cpu_bound_function, args) Use custom exception hierarchy (`src/utils/exceptions.py`): - -[Exception Hierarchy](../src/utils/exceptions.py) start_line:4 end_line:31 - +- `DeepCriticalError` (base) +- `SearchError` → `RateLimitError` +- `JudgeError` +- `ConfigurationError` ### Error Handling Rules @@ -273,7 +199,7 @@ except httpx.HTTPError as e: 1. Write failing test in `tests/unit/` 2. Implement in `src/` 3. Ensure test passes -4. Run checks: `uv run ruff check src tests && uv run mypy src && uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` +4. 
Run `make check` (lint + typecheck + test) ### Test Examples @@ -294,8 +220,7 @@ async def test_real_pubmed_search(): ### Test Coverage -- Run `uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` for coverage report -- Run `uv run pytest --cov=src --cov-report=html -p no:logfire` for HTML coverage report (opens `htmlcov/index.html`) +- Run `make test-cov` for coverage report - Aim for >80% coverage on critical paths - Exclude: `__init__.py`, `TYPE_CHECKING` blocks @@ -339,9 +264,11 @@ class MySearchTool: - Lazy initialization for optional dependencies (e.g., embeddings, Modal) - Check requirements before initialization: - -[Check Magentic Requirements](../src/utils/llm_factory.py) start_line:152 end_line:170 - +```python +def check_magentic_requirements() -> None: + if not settings.has_openai_key: + raise ConfigurationError("Magentic requires OpenAI") +``` ### State Management @@ -353,9 +280,11 @@ class MySearchTool: Use `@lru_cache(maxsize=1)` for singletons: - -[Singleton Pattern Example](../src/services/statistical_analyzer.py) start_line:252 end_line:255 - +```python +@lru_cache(maxsize=1) +def get_embedding_service() -> EmbeddingService: + return EmbeddingService() +``` - Lazy initialization to avoid requiring dependencies at import time @@ -369,9 +298,22 @@ Use `@lru_cache(maxsize=1)` for singletons: Example: - -[Search Method Docstring Example](../src/tools/pubmed.py) start_line:51 end_line:58 - +```python +async def search(self, query: str, max_results: int = 10) -> list[Evidence]: + """Search PubMed and return evidence. + + Args: + query: The search query string + max_results: Maximum number of results to return + + Returns: + List of Evidence objects + + Raises: + SearchError: If the search fails + RateLimitError: If we hit rate limits + """ +``` ### Code Comments @@ -468,7 +410,7 @@ Example: ## Pull Request Process -1. Ensure all checks pass: `uv run ruff check src tests && uv run mypy src && uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` +1. Ensure all checks pass: `make check` 2. Update documentation if needed 3. Add tests for new features 4. Update CHANGELOG if applicable @@ -476,19 +418,11 @@ Example: 6. Address review feedback 7. Wait for approval before merging -## Project Structure - -- `src/`: Main source code -- `tests/`: Test files (`unit/` and `integration/`) -- `docs/`: Documentation source files (MkDocs) -- `examples/`: Example usage scripts -- `pyproject.toml`: Project configuration and dependencies -- `.pre-commit-config.yaml`: Pre-commit hook configuration - ## Questions? -- Open an issue on [GitHub](https://github.com/DeepCritical/GradioDemo) -- Check existing [documentation](https://deepcritical.github.io/GradioDemo/) +- Open an issue on GitHub +- Check existing documentation - Review code examples in the codebase -Thank you for contributing to The DETERMINATOR! +Thank you for contributing to DeepCritical! + diff --git a/docs/contributing/code-quality.md b/docs/contributing/code-quality.md index 8e8bcf54bf5a77296abcb3581730e07c3d489274..b15ec66c60f46d285179fd83f5abc14a695a2a20 100644 --- a/docs/contributing/code-quality.md +++ b/docs/contributing/code-quality.md @@ -1,6 +1,6 @@ # Code Quality & Documentation -This document outlines code quality standards and documentation requirements for The DETERMINATOR. +This document outlines code quality standards and documentation requirements. 
## Linting @@ -12,9 +12,6 @@ This document outlines code quality standards and documentation requirements for - `PLR2004`: Magic values (statistical constants) - `PLW0603`: Global statement (singleton pattern) - `PLC0415`: Lazy imports for optional dependencies - - `E402`: Module level import not at top (needed for pytest.importorskip) - - `E501`: Line too long (ignore line length violations) - - `RUF100`: Unused noqa (version differences between local/CI) ## Type Checking @@ -25,75 +22,12 @@ This document outlines code quality standards and documentation requirements for ## Pre-commit -Pre-commit hooks run automatically on commit to ensure code quality. Configuration is in `.pre-commit-config.yaml`. - -### Installation - -```bash -# Install dependencies (includes pre-commit package) -uv sync --all-extras - -# Set up git hooks (must be run separately) -uv run pre-commit install -``` - -**Note**: `uv sync --all-extras` installs the pre-commit package, but you must run `uv run pre-commit install` separately to set up the git hooks. - -### Pre-commit Hooks - -The following hooks run automatically on commit: - -1. **ruff**: Lints code and fixes issues automatically - - Runs on: `src/` (excludes `tests/`, `reference_repos/`) - - Auto-fixes: Yes - -2. **ruff-format**: Formats code with ruff - - Runs on: `src/` (excludes `tests/`, `reference_repos/`) - - Auto-fixes: Yes - -3. **mypy**: Type checking - - Runs on: `src/` (excludes `folder/`) - - Additional dependencies: pydantic, pydantic-settings, tenacity, pydantic-ai - -4. **pytest-unit**: Runs unit tests (excludes OpenAI and embedding_provider tests) - - Runs: `tests/unit/` with `-m "not openai and not embedding_provider"` - - Always runs: Yes (not just on changed files) - -5. **pytest-local-embeddings**: Runs local embedding tests - - Runs: `tests/` with `-m "local_embeddings"` - - Always runs: Yes - -### Manual Pre-commit Run - -To run pre-commit hooks manually (without committing): - -```bash -uv run pre-commit run --all-files -``` - -### Troubleshooting - -- **Hooks failing**: Fix the issues shown in the output, then commit again -- **Skipping hooks**: Use `git commit --no-verify` (not recommended) -- **Hook not running**: Ensure hooks are installed with `uv run pre-commit install` -- **Type errors**: Check that all dependencies are installed with `uv sync --all-extras` +- Run `make check` before committing +- Must pass: lint + typecheck + test-cov +- Pre-commit hooks installed via `make install` ## Documentation -### Building Documentation - -Documentation is built using MkDocs. Source files are in `docs/`, and the configuration is in `mkdocs.yml`. - -```bash -# Build documentation -uv run mkdocs build - -# Serve documentation locally (http://127.0.0.1:8000) -uv run mkdocs serve -``` - -The documentation site is published at: - ### Docstrings - Google-style docstrings for all public functions @@ -102,9 +36,22 @@ The documentation site is published at: -[Search Method Docstring Example](../src/tools/pubmed.py) start_line:51 end_line:70 - +```python +async def search(self, query: str, max_results: int = 10) -> list[Evidence]: + """Search PubMed and return evidence. 
+ + Args: + query: The search query string + max_results: Maximum number of results to return + + Returns: + List of Evidence objects + + Raises: + SearchError: If the search fails + RateLimitError: If we hit rate limits + """ +``` ### Code Comments @@ -118,3 +65,13 @@ Example: - [Code Style](code-style.md) - Code style guidelines - [Testing](testing.md) - Testing guidelines + + + + + + + + + + diff --git a/docs/contributing/code-style.md b/docs/contributing/code-style.md index e6f0b32ca5a90082e9c2789738f7ee2c44aab35f..6a0ca8c0d62f7cff541a2abef854ffe49fa89ef8 100644 --- a/docs/contributing/code-style.md +++ b/docs/contributing/code-style.md @@ -1,44 +1,6 @@ # Code Style & Conventions -This document outlines the code style and conventions for The DETERMINATOR. - -## Package Manager - -This project uses [`uv`](https://github.com/astral-sh/uv) as the package manager. All commands should be prefixed with `uv run` to ensure they run in the correct environment. - -### Installation - -```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv - -# Sync all dependencies including dev extras -uv sync --all-extras -``` - -### Running Commands - -All development commands should use `uv run` prefix: - -```bash -# Instead of: pytest tests/ -uv run pytest tests/ - -# Instead of: ruff check src -uv run ruff check src - -# Instead of: mypy src -uv run mypy src -``` - -This ensures commands run in the correct virtual environment managed by `uv`. +This document outlines the code style and conventions for DeepCritical. ## Type Safety @@ -46,9 +8,11 @@ This ensures commands run in the correct virtual environment managed by `uv`. - Use `mypy --strict` compliance (no `Any` unless absolutely necessary) - Use `TYPE_CHECKING` imports for circular dependencies: - -[TYPE_CHECKING Import Pattern](../src/utils/citation_validator.py) start_line:8 end_line:11 - +```python +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from src.services.embeddings import EmbeddingService +``` ## Pydantic Models @@ -81,3 +45,13 @@ result = await loop.run_in_executor(None, cpu_bound_function, args) - [Error Handling](error-handling.md) - Error handling guidelines - [Implementation Patterns](implementation-patterns.md) - Common patterns + + + + + + + + + + diff --git a/docs/contributing/error-handling.md b/docs/contributing/error-handling.md index 626fec9f5a5a7786813943d38c0673a3cb24d3c7..5d3ead5b23c77d8970f236b460b5668a40a1d566 100644 --- a/docs/contributing/error-handling.md +++ b/docs/contributing/error-handling.md @@ -1,14 +1,15 @@ # Error Handling & Logging -This document outlines error handling and logging conventions for The DETERMINATOR. +This document outlines error handling and logging conventions for DeepCritical. 
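+
+As a quick orientation, the sketch below shows the pattern this page describes: catch the low-level error, log it with structured context, then re-raise a custom exception with chaining. The helper name, URL, and logged fields are illustrative and not taken from the codebase:
+
+```python
+import httpx
+import structlog
+
+from src.utils.exceptions import SearchError
+
+logger = structlog.get_logger()
+
+
+async def fetch_records(query: str) -> list[dict]:
+    """Fetch records for a query (illustrative only).
+
+    Raises:
+        SearchError: If the HTTP request fails.
+    """
+    try:
+        async with httpx.AsyncClient() as client:
+            response = await client.get("https://example.org/api", params={"q": query})
+            response.raise_for_status()
+            data: list[dict] = response.json()
+            return data
+    except httpx.HTTPError as e:
+        # Log structured context first, then chain the original exception
+        logger.error("search_failed", query=query, error=str(e))
+        raise SearchError(f"Search failed for query: {query}") from e
+```
+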
## Exception Hierarchy Use custom exception hierarchy (`src/utils/exceptions.py`): - -[Exception Hierarchy](../src/utils/exceptions.py) start_line:4 end_line:31 - +- `DeepCriticalError` (base) +- `SearchError` → `RateLimitError` +- `JudgeError` +- `ConfigurationError` ## Error Handling Rules @@ -52,3 +53,13 @@ except httpx.HTTPError as e: - [Code Style](code-style.md) - Code style guidelines - [Testing](testing.md) - Testing guidelines + + + + + + + + + + diff --git a/docs/contributing/implementation-patterns.md b/docs/contributing/implementation-patterns.md index 8590ec3ea4b226b13a388d027a52e70984ea553c..d2cf076c39f24f6f42611c9bbd0bcff4ff05ee8a 100644 --- a/docs/contributing/implementation-patterns.md +++ b/docs/contributing/implementation-patterns.md @@ -1,6 +1,6 @@ # Implementation Patterns -This document outlines common implementation patterns used in The DETERMINATOR. +This document outlines common implementation patterns used in DeepCritical. ## Search Tools @@ -40,9 +40,11 @@ class MySearchTool: - Lazy initialization for optional dependencies (e.g., embeddings, Modal) - Check requirements before initialization: - -[Check Magentic Requirements](../src/utils/llm_factory.py) start_line:152 end_line:170 - +```python +def check_magentic_requirements() -> None: + if not settings.has_openai_key: + raise ConfigurationError("Magentic requires OpenAI") +``` ## State Management @@ -54,9 +56,11 @@ class MySearchTool: Use `@lru_cache(maxsize=1)` for singletons: - -[Singleton Pattern Example](../src/services/statistical_analyzer.py) start_line:252 end_line:255 - +```python +@lru_cache(maxsize=1) +def get_embedding_service() -> EmbeddingService: + return EmbeddingService() +``` - Lazy initialization to avoid requiring dependencies at import time @@ -65,3 +69,12 @@ Use `@lru_cache(maxsize=1)` for singletons: - [Code Style](code-style.md) - Code style guidelines - [Error Handling](error-handling.md) - Error handling guidelines + + + + + + + + + diff --git a/docs/contributing/index.md b/docs/contributing/index.md index 466fcaf16f41e49dc36201307f95204ee7b9ef6c..6fab401289f8a568b36096eb201bfe0453b3a6d3 100644 --- a/docs/contributing/index.md +++ b/docs/contributing/index.md @@ -1,8 +1,6 @@ -# Contributing to The DETERMINATOR +# Contributing to DeepCritical -Thank you for your interest in contributing to The DETERMINATOR! This guide will help you get started. - -> **Note on Project Names**: "The DETERMINATOR" is the product name, "DeepCritical" is the organization/project name, and "determinator" is the Python package name. +Thank you for your interest in contributing to DeepCritical! This guide will help you get started. ## Git Workflow @@ -12,138 +10,44 @@ Thank you for your interest in contributing to The DETERMINATOR! 
This guide will - **NEVER** push directly to `main` or `dev` on HuggingFace - GitHub is source of truth; HuggingFace is for deployment -## Repository Information - -- **GitHub Repository**: [`DeepCritical/GradioDemo`](https://github.com/DeepCritical/GradioDemo) (source of truth, PRs, code review) -- **HuggingFace Space**: [`DataQuests/DeepCritical`](https://huggingface.co/spaces/DataQuests/DeepCritical) (deployment/demo) -- **Package Name**: `determinator` (Python package name in `pyproject.toml`) - -### Dual Repository Setup - -This project uses a dual repository setup: - -- **GitHub (`DeepCritical/GradioDemo`)**: Source of truth for code, PRs, and code review -- **HuggingFace (`DataQuests/DeepCritical`)**: Deployment target for the Gradio demo - -#### Remote Configuration - -When cloning, set up remotes as follows: - -```bash -# Clone from GitHub -git clone https://github.com/DeepCritical/GradioDemo.git -cd GradioDemo - -# Add HuggingFace remote (optional, for deployment) -git remote add huggingface-upstream https://huggingface.co/spaces/DataQuests/DeepCritical -``` - -**Important**: Never push directly to `main` or `dev` on HuggingFace. Always work through GitHub PRs. GitHub is the source of truth; HuggingFace is for deployment/demo only. - -## Package Manager - -This project uses [`uv`](https://github.com/astral-sh/uv) as the package manager. All commands should be prefixed with `uv run` to ensure they run in the correct environment. - -### Installation - -```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv - -# Sync all dependencies including dev extras -uv sync --all-extras - -# Install pre-commit hooks -uv run pre-commit install -``` - ## Development Commands ```bash -# Installation -uv sync --all-extras # Install all dependencies including dev -uv run pre-commit install # Install pre-commit hooks - -# Code Quality Checks (run all before committing) -uv run ruff check src tests # Lint with ruff -uv run ruff format src tests # Format with ruff -uv run mypy src # Type checking -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire # Tests with coverage - -# Testing Commands -uv run pytest tests/unit/ -v -m "not openai" -p no:logfire # Run unit tests (excludes OpenAI tests) -uv run pytest tests/ -v -m "huggingface" -p no:logfire # Run HuggingFace tests -uv run pytest tests/ -v -p no:logfire # Run all tests -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire # Tests with terminal coverage -uv run pytest --cov=src --cov-report=html -p no:logfire # Generate HTML coverage report (opens htmlcov/index.html) - -# Documentation Commands -uv run mkdocs build # Build documentation -uv run mkdocs serve # Serve documentation locally (http://127.0.0.1:8000) +make install # Install dependencies + pre-commit +make check # Lint + typecheck + test (MUST PASS) +make test # Run unit tests +make lint # Run ruff +make format # Format with ruff +make typecheck # Run mypy +make test-cov # Test with coverage ``` -### Test Markers - -The project uses pytest markers to categorize tests. 
See [Testing Guidelines](testing.md) for details: - -- `unit`: Unit tests (mocked, fast) -- `integration`: Integration tests (real APIs) -- `slow`: Slow tests -- `openai`: Tests requiring OpenAI API key -- `huggingface`: Tests requiring HuggingFace API key -- `embedding_provider`: Tests requiring API-based embedding providers -- `local_embeddings`: Tests using local embeddings - -**Note**: The `-p no:logfire` flag disables the logfire plugin to avoid conflicts during testing. - ## Getting Started -1. **Fork the repository** on GitHub: [`DeepCritical/GradioDemo`](https://github.com/DeepCritical/GradioDemo) - +1. **Fork the repository** on GitHub 2. **Clone your fork**: - ```bash git clone https://github.com/yourusername/GradioDemo.git cd GradioDemo ``` - 3. **Install dependencies**: - ```bash - uv sync --all-extras - uv run pre-commit install + make install ``` - 4. **Create a feature branch**: - ```bash git checkout -b yourname-feature-name ``` - 5. **Make your changes** following the guidelines below - 6. **Run checks**: - ```bash - uv run ruff check src tests - uv run mypy src - uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire + make check ``` - 7. **Commit and push**: - ```bash git commit -m "Description of changes" git push origin yourname-feature-name ``` - 8. **Create a pull request** on GitHub ## Development Guidelines @@ -228,7 +132,7 @@ The project uses pytest markers to categorize tests. See [Testing Guidelines](te ## Pull Request Process -1. Ensure all checks pass: `uv run ruff check src tests && uv run mypy src && uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` +1. Ensure all checks pass: `make check` 2. Update documentation if needed 3. Add tests for new features 4. Update CHANGELOG if applicable @@ -236,19 +140,20 @@ The project uses pytest markers to categorize tests. See [Testing Guidelines](te 6. Address review feedback 7. Wait for approval before merging -## Project Structure - -- `src/`: Main source code -- `tests/`: Test files (`unit/` and `integration/`) -- `docs/`: Documentation source files (MkDocs) -- `examples/`: Example usage scripts -- `pyproject.toml`: Project configuration and dependencies -- `.pre-commit-config.yaml`: Pre-commit hook configuration - ## Questions? -- Open an issue on [GitHub](https://github.com/DeepCritical/GradioDemo) -- Check existing [documentation](https://deepcritical.github.io/GradioDemo/) +- Open an issue on GitHub +- Check existing documentation - Review code examples in the codebase -Thank you for contributing to The DETERMINATOR! +Thank you for contributing to DeepCritical! 
+ + + + + + + + + + diff --git a/docs/contributing/prompt-engineering.md b/docs/contributing/prompt-engineering.md index c90cd094112854a3eda7b05f705871ce77b6874f..a1bae2444bb669cddb7d1e3c81081422420ee820 100644 --- a/docs/contributing/prompt-engineering.md +++ b/docs/contributing/prompt-engineering.md @@ -53,3 +53,13 @@ This document outlines prompt engineering guidelines and citation validation rul - [Code Quality](code-quality.md) - Code quality guidelines - [Error Handling](error-handling.md) - Error handling guidelines + + + + + + + + + + diff --git a/docs/contributing/testing.md b/docs/contributing/testing.md index 38149535306ae90071238d61bdb67ed1e21e3718..ebb1b21477c34a34c39cd8d49e1d898b684527ab 100644 --- a/docs/contributing/testing.md +++ b/docs/contributing/testing.md @@ -1,45 +1,12 @@ # Testing Requirements -This document outlines testing requirements and guidelines for The DETERMINATOR. +This document outlines testing requirements and guidelines for DeepCritical. ## Test Structure - Unit tests in `tests/unit/` (mocked, fast) - Integration tests in `tests/integration/` (real APIs, marked `@pytest.mark.integration`) -- Use markers: `unit`, `integration`, `slow`, `openai`, `huggingface`, `embedding_provider`, `local_embeddings` - -## Test Markers - -The project uses pytest markers to categorize tests. These markers are defined in `pyproject.toml`: - -- `@pytest.mark.unit`: Unit tests (mocked, fast) - Run with `-m "unit"` -- `@pytest.mark.integration`: Integration tests (real APIs) - Run with `-m "integration"` -- `@pytest.mark.slow`: Slow tests - Run with `-m "slow"` -- `@pytest.mark.openai`: Tests requiring OpenAI API key - Run with `-m "openai"` or exclude with `-m "not openai"` -- `@pytest.mark.huggingface`: Tests requiring HuggingFace API key or using HuggingFace models - Run with `-m "huggingface"` -- `@pytest.mark.embedding_provider`: Tests requiring API-based embedding providers (OpenAI, etc.) - Run with `-m "embedding_provider"` -- `@pytest.mark.local_embeddings`: Tests using local embeddings (sentence-transformers, ChromaDB) - Run with `-m "local_embeddings"` - -### Running Tests by Marker - -```bash -# Run only unit tests (excludes OpenAI tests by default) -uv run pytest tests/unit/ -v -m "not openai" -p no:logfire - -# Run HuggingFace tests -uv run pytest tests/ -v -m "huggingface" -p no:logfire - -# Run all tests -uv run pytest tests/ -v -p no:logfire - -# Run only local embedding tests -uv run pytest tests/ -v -m "local_embeddings" -p no:logfire - -# Exclude slow tests -uv run pytest tests/ -v -m "not slow" -p no:logfire -``` - -**Note**: The `-p no:logfire` flag disables the logfire plugin to avoid conflicts during testing. +- Use markers: `unit`, `integration`, `slow` ## Mocking @@ -53,20 +20,7 @@ uv run pytest tests/ -v -m "not slow" -p no:logfire 1. Write failing test in `tests/unit/` 2. Implement in `src/` 3. Ensure test passes -4. Run checks: `uv run ruff check src tests && uv run mypy src && uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire` - -### Test Command Examples - -```bash -# Run unit tests (default, excludes OpenAI tests) -uv run pytest tests/unit/ -v -m "not openai" -p no:logfire - -# Run HuggingFace tests -uv run pytest tests/ -v -m "huggingface" -p no:logfire - -# Run all tests -uv run pytest tests/ -v -p no:logfire -``` +4. 
Run `make check` (lint + typecheck + test) ## Test Examples @@ -87,29 +41,21 @@ async def test_real_pubmed_search(): ## Test Coverage -### Terminal Coverage Report +- Run `make test-cov` for coverage report +- Aim for >80% coverage on critical paths +- Exclude: `__init__.py`, `TYPE_CHECKING` blocks + +## See Also + +- [Code Style](code-style.md) - Code style guidelines +- [Implementation Patterns](implementation-patterns.md) - Common patterns + -```bash -uv run pytest --cov=src --cov-report=term-missing tests/unit/ -v -m "not openai" -p no:logfire -``` -This shows coverage with missing lines highlighted in the terminal output. -### HTML Coverage Report -```bash -uv run pytest --cov=src --cov-report=html -p no:logfire -``` -This generates an HTML coverage report in `htmlcov/index.html`. Open this file in your browser to see detailed coverage information. -### Coverage Goals -- Aim for >80% coverage on critical paths -- Exclude: `__init__.py`, `TYPE_CHECKING` blocks -- Coverage configuration is in `pyproject.toml` under `[tool.coverage.*]` -## See Also -- [Code Style](code-style.md) - Code style guidelines -- [Implementation Patterns](implementation-patterns.md) - Common patterns diff --git a/docs/getting-started/examples.md b/docs/getting-started/examples.md index c5b7f4150787a8e66f8eafec399fe89c11b449e8..e71e7b8360070341f38f526d1e2df344980e246a 100644 --- a/docs/getting-started/examples.md +++ b/docs/getting-started/examples.md @@ -1,6 +1,6 @@ # Examples -This page provides examples of using The DETERMINATOR for various research tasks. +This page provides examples of using DeepCritical for various research tasks. ## Basic Research Query @@ -11,7 +11,7 @@ This page provides examples of using The DETERMINATOR for various research tasks What are the latest treatments for Alzheimer's disease? ``` -**What The DETERMINATOR Does**: +**What DeepCritical Does**: 1. Searches PubMed for recent papers 2. Searches ClinicalTrials.gov for active trials 3. Evaluates evidence quality @@ -24,8 +24,7 @@ What are the latest treatments for Alzheimer's disease? What clinical trials are investigating metformin for cancer prevention? ``` -**What The DETERMINATOR Does**: - +**What DeepCritical Does**: 1. Searches ClinicalTrials.gov for relevant trials 2. Searches PubMed for supporting literature 3. Provides trial details and status @@ -36,13 +35,12 @@ What clinical trials are investigating metformin for cancer prevention? ### Example 3: Comprehensive Review **Query**: - ``` Review the evidence for using metformin as an anti-aging intervention, including clinical trials, mechanisms of action, and safety profile. ``` -**What The DETERMINATOR Does**: +**What DeepCritical Does**: 1. Uses deep research mode (multi-section) 2. Searches multiple sources in parallel 3. Generates sections on: @@ -58,7 +56,7 @@ including clinical trials, mechanisms of action, and safety profile. Test the hypothesis that regular exercise reduces Alzheimer's disease risk. ``` -**What The DETERMINATOR Does**: +**What DeepCritical Does**: 1. Generates testable hypotheses 2. Searches for supporting/contradicting evidence 3. 
Performs statistical analysis (if Modal configured) @@ -102,13 +100,13 @@ from src.agent_factory.judges import create_judge_handler # Create orchestrator search_handler = SearchHandler() judge_handler = create_judge_handler() -``` +orchestrator = create_orchestrator( + search_handler=search_handler, + judge_handler=judge_handler, + config={}, + mode="advanced" +) - -[Create Orchestrator](../src/orchestrator_factory.py) start_line:44 end_line:66 - - -```python # Run research query query = "What are the latest treatments for Alzheimer's disease?" async for event in orchestrator.run(query): @@ -136,13 +134,13 @@ Single-loop research with search-judge-synthesize cycles: ```python from src.orchestrator.research_flow import IterativeResearchFlow -``` - -[IterativeResearchFlow Initialization](../src/orchestrator/research_flow.py) start_line:56 end_line:77 - +flow = IterativeResearchFlow( + search_handler=search_handler, + judge_handler=judge_handler, + use_graph=False +) -```python async for event in flow.run(query): # Handle events pass @@ -154,13 +152,13 @@ Multi-section parallel research: ```python from src.orchestrator.research_flow import DeepResearchFlow -``` - -[DeepResearchFlow Initialization](../src/orchestrator/research_flow.py) start_line:674 end_line:697 - +flow = DeepResearchFlow( + search_handler=search_handler, + judge_handler=judge_handler, + use_graph=True +) -```python async for event in flow.run(query): # Handle events pass @@ -193,6 +191,15 @@ USE_GRAPH_EXECUTION=true ## Next Steps - Read the [Configuration Guide](../configuration/index.md) for all options -- Explore the [Architecture Documentation](../architecture/graph_orchestration.md) +- Explore the [Architecture Documentation](../architecture/graph-orchestration.md) - Check out the [API Reference](../api/agents.md) for programmatic usage + + + + + + + + + diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md index 34c0b2848a1c86097f9a73e90df7ed18219658fc..861e1ef751221b4844daad8221430067a71699e1 100644 --- a/docs/getting-started/installation.md +++ b/docs/getting-started/installation.md @@ -12,29 +12,12 @@ This guide will help you install and set up DeepCritical on your system. ### 1. Install uv (Recommended) -`uv` is a fast Python package installer and resolver. Install it using the standalone installer (recommended): +`uv` is a fast Python package installer and resolver. Install it with: -**Unix/macOS/Linux:** ```bash -curl -LsSf https://astral.sh/uv/install.sh | sh -``` - -**Windows (PowerShell):** -```powershell -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" -``` - -**Alternative methods:** -```bash -# Using pipx (recommended if you have pipx installed) -pipx install uv - -# Or using pip pip install uv ``` -After installation, restart your terminal or add `~/.cargo/bin` to your PATH. - ### 2. Clone the Repository ```bash @@ -150,3 +133,12 @@ uv run pre-commit install - Learn about [MCP Integration](mcp-integration.md) - Explore [Examples](examples.md) + + + + + + + + + diff --git a/docs/getting-started/mcp-integration.md b/docs/getting-started/mcp-integration.md index a06201f3603d6ded79a4c1bf5cb1a91515de0e5e..28cb0806a9b669212221c13367a0326b7de0d14b 100644 --- a/docs/getting-started/mcp-integration.md +++ b/docs/getting-started/mcp-integration.md @@ -1,10 +1,10 @@ # MCP Integration -The DETERMINATOR exposes a Model Context Protocol (MCP) server, allowing you to use its search tools directly from Claude Desktop or other MCP clients. 
+DeepCritical exposes a Model Context Protocol (MCP) server, allowing you to use its search tools directly from Claude Desktop or other MCP clients. ## What is MCP? -The Model Context Protocol (MCP) is a standard for connecting AI assistants to external tools and data sources. The DETERMINATOR implements an MCP server that exposes its search capabilities as MCP tools. +The Model Context Protocol (MCP) is a standard for connecting AI assistants to external tools and data sources. DeepCritical implements an MCP server that exposes its search capabilities as MCP tools. ## MCP Server URL @@ -33,14 +33,14 @@ http://localhost:7860/gradio_api/mcp/ ~/.config/Claude/claude_desktop_config.json ``` -### 2. Add The DETERMINATOR Server +### 2. Add DeepCritical Server Edit `claude_desktop_config.json` and add: ```json { "mcpServers": { - "determinator": { + "deepcritical": { "url": "http://localhost:7860/gradio_api/mcp/" } } @@ -53,7 +53,7 @@ Close and restart Claude Desktop for changes to take effect. ### 4. Verify Connection -In Claude Desktop, you should see The DETERMINATOR tools available: +In Claude Desktop, you should see DeepCritical tools available: - `search_pubmed` - `search_clinical_trials` - `search_biorxiv` @@ -198,6 +198,14 @@ You can configure multiple DeepCritical instances: - Learn about [Configuration](../configuration/index.md) for advanced settings - Explore [Examples](examples.md) for use cases -- Read the [Architecture Documentation](../architecture/graph_orchestration.md) +- Read the [Architecture Documentation](../architecture/graph-orchestration.md) + + + + + + + + diff --git a/docs/getting-started/quick-start.md b/docs/getting-started/quick-start.md index 45dfe1cb18c5cf4d1d20b0b62b0f227661ac50b5..9c927dbe5cb373d4c4a289ca626d25c72d39610e 100644 --- a/docs/getting-started/quick-start.md +++ b/docs/getting-started/quick-start.md @@ -1,47 +1,11 @@ -# Single Command Deploy +# Quick Start Guide -Deploy with docker instandly with a single command : - -```bash -docker run -it -p 7860:7860 --platform=linux/amd64 \ - -e DB_KEY="YOUR_VALUE_HERE" \ - -e SERP_API="YOUR_VALUE_HERE" \ - -e INFERENCE_API="YOUR_VALUE_HERE" \ - -e MODAL_TOKEN_ID="YOUR_VALUE_HERE" \ - -e MODAL_TOKEN_SECRET="YOUR_VALUE_HERE" \ - -e NCBI_API_KEY="YOUR_VALUE_HERE" \ - -e SERPER_API_KEY="YOUR_VALUE_HERE" \ - -e CHROMA_DB_PATH="./chroma_db" \ - -e CHROMA_DB_HOST="localhost" \ - -e CHROMA_DB_PORT="8000" \ - -e RAG_COLLECTION_NAME="deepcritical_evidence" \ - -e RAG_SIMILARITY_TOP_K="5" \ - -e RAG_AUTO_INGEST="true" \ - -e USE_GRAPH_EXECUTION="false" \ - -e DEFAULT_TOKEN_LIMIT="100000" \ - -e DEFAULT_TIME_LIMIT_MINUTES="10" \ - -e DEFAULT_ITERATIONS_LIMIT="10" \ - -e WEB_SEARCH_PROVIDER="duckduckgo" \ - -e MAX_ITERATIONS="10" \ - -e SEARCH_TIMEOUT="30" \ - -e LOG_LEVEL="DEBUG" \ - -e EMBEDDING_PROVIDER="local" \ - -e OPENAI_EMBEDDING_MODEL="text-embedding-3-small" \ - -e LOCAL_EMBEDDING_MODEL="BAAI/bge-small-en-v1.5" \ - -e HUGGINGFACE_EMBEDDING_MODEL="sentence-transformers/all-MiniLM-L6-v2" \ - -e HF_FALLBACK_MODELS="Qwen/Qwen3-Next-80B-A3B-Thinking,Qwen/Qwen3-Next-80B-A3B-Instruct,meta-llama/Llama-3.3-70B-Instruct,meta-llama/Llama-3.1-8B-Instruct,HuggingFaceH4/zephyr-7b-beta,Qwen/Qwen2-7B-Instruct" \ - -e HUGGINGFACE_MODEL="Qwen/Qwen3-Next-80B-A3B-Thinking" \ - registry.hf.space/dataquests-deepcritical:latest python src/app.py - ``` - -## Quick start guide - -Get up and running with The DETERMINATOR in minutes. +Get up and running with DeepCritical in minutes. 
## Start the Application ```bash -gradio src/app.py +uv run gradio run src/app.py ``` Open your browser to `http://localhost:7860`. @@ -135,8 +99,17 @@ What are the active clinical trials investigating Alzheimer's disease treatments ## Next Steps -- Learn about [MCP Integration](mcp-integration.md) to use The DETERMINATOR from Claude Desktop +- Learn about [MCP Integration](mcp-integration.md) to use DeepCritical from Claude Desktop - Explore [Examples](examples.md) for more use cases - Read the [Configuration Guide](../configuration/index.md) for advanced settings -- Check out the [Architecture Documentation](../architecture/graph_orchestration.md) to understand how it works +- Check out the [Architecture Documentation](../architecture/graph-orchestration.md) to understand how it works + + + + + + + + + diff --git a/docs/index.md b/docs/index.md index 3ad907ad29c4ba891f32cc28d8ac471724a8469a..43c57235acdba5ff7dacf09ad33960b133bcbaa3 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,24 +1,12 @@ -# The DETERMINATOR +# DeepCritical -**Generalist Deep Research Agent - Stops at Nothing Until Finding Precise Answers** +**AI-Native Drug Repurposing Research Agent** -The DETERMINATOR is a powerful generalist deep research agent system that uses iterative search-and-judge loops to comprehensively investigate any research question. It stops at nothing until finding precise answers, only stopping at configured limits (budget, time, iterations). - -**Key Features**: -- **Generalist**: Handles queries from any domain (medical, technical, business, scientific, etc.) -- **Automatic Source Selection**: Automatically determines if medical knowledge sources (PubMed, ClinicalTrials.gov) are needed -- **Multi-Source Search**: Web search, PubMed, ClinicalTrials.gov, Europe PMC, RAG -- **Iterative Refinement**: Continues searching and refining until precise answers are found -- **Evidence Synthesis**: Comprehensive reports with proper citations - -**Important**: The DETERMINATOR is a research tool that synthesizes evidence. It cannot provide medical advice or answer medical questions directly. +DeepCritical is a deep research agent system that uses iterative search-and-judge loops to comprehensively answer research questions. The system supports multiple orchestration patterns, graph-based execution, parallel research workflows, and long-running task management with real-time streaming. 
## Features -- **Generalist Research**: Handles any research question from any domain -- **Automatic Medical Detection**: Automatically determines if medical knowledge sources are needed -- **Multi-Source Search**: Web search, PubMed, ClinicalTrials.gov, Europe PMC (includes bioRxiv/medRxiv), RAG -- **Iterative Until Precise**: Stops at nothing until finding precise answers (only stops at configured limits) +- **Multi-Source Search**: PubMed, ClinicalTrials.gov, Europe PMC (includes bioRxiv/medRxiv) - **MCP Integration**: Use our tools from Claude Desktop or any MCP client - **HuggingFace OAuth**: Sign in with your HuggingFace account to automatically use your API token - **Modal Sandbox**: Secure execution of AI-generated statistical code @@ -30,15 +18,8 @@ The DETERMINATOR is a powerful generalist deep research agent system that uses i ## Quick Start ```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv +# Install uv if you haven't already +pip install uv # Sync dependencies uv sync @@ -53,9 +34,9 @@ For detailed installation and setup instructions, see the [Getting Started Guide ## Architecture -The DETERMINATOR uses a Vertical Slice Architecture: +DeepCritical uses a Vertical Slice Architecture: -1. **Search Slice**: Retrieving evidence from multiple sources (web, PubMed, ClinicalTrials.gov, Europe PMC, RAG) based on query analysis +1. **Search Slice**: Retrieving evidence from PubMed, ClinicalTrials.gov, and Europe PMC 2. **Judge Slice**: Evaluating evidence quality using LLMs 3. **Orchestrator Slice**: Managing the research loop and UI @@ -73,7 +54,7 @@ Learn more about the [Architecture](overview/architecture.md). - [Getting Started](getting-started/installation.md) - Installation and setup - [Configuration](configuration/index.md) - Configuration guide - [API Reference](api/agents.md) - API documentation -- [Contributing](contributing/index.md) - Development guidelines +- [Contributing](contributing.md) - Development guidelines ## Links diff --git a/docs/LICENSE.md b/docs/license.md similarity index 100% rename from docs/LICENSE.md rename to docs/license.md diff --git a/docs/overview/architecture.md b/docs/overview/architecture.md index 3f9c071dda107f9722b24610aae83617ad130693..e3c55c3d7eda510f0aca206f9113a4fef2055c71 100644 --- a/docs/overview/architecture.md +++ b/docs/overview/architecture.md @@ -1,6 +1,6 @@ # Architecture Overview -The DETERMINATOR is a powerful generalist deep research agent system that uses iterative search-and-judge loops to comprehensively investigate any research question. It stops at nothing until finding precise answers, only stopping at configured limits (budget, time, iterations). The system automatically determines if medical knowledge sources are needed and adapts its search strategy accordingly. It supports multiple orchestration patterns, graph-based execution, parallel research workflows, and long-running task management with real-time streaming. +DeepCritical is a deep research agent system that uses iterative search-and-judge loops to comprehensively answer research questions. The system supports multiple orchestration patterns, graph-based execution, parallel research workflows, and long-running task management with real-time streaming. 
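The loop itself is small. The sketch below is a stripped-down illustration of one search-and-judge cycle, reusing the `SearchHandler` and `JudgeHandler` calls that appear in the example scripts added in this change; the real flows layer budget tracking, hypothesis generation, and report synthesis on top, so treat this as orientation rather than the orchestrator's actual implementation.

```python
# Minimal sketch of the search-and-judge cycle (constructor and method signatures
# taken from examples/full_stack_demo/run_full.py; not the production orchestrator).
import asyncio

from src.agent_factory.judges import JudgeHandler
from src.tools.clinicaltrials import ClinicalTrialsTool
from src.tools.europepmc import EuropePMCTool
from src.tools.pubmed import PubMedTool
from src.tools.search_handler import SearchHandler
from src.utils.models import Evidence


async def research(query: str, max_iterations: int = 3) -> None:
    search_handler = SearchHandler(
        tools=[PubMedTool(), ClinicalTrialsTool(), EuropePMCTool()], timeout=30.0
    )
    judge_handler = JudgeHandler()
    evidence: list[Evidence] = []

    for _ in range(max_iterations):
        # Search slice: query all sources in parallel and pool the evidence.
        result = await search_handler.execute(query, max_results_per_tool=5)
        evidence.extend(result.evidence)

        # Judge slice: an LLM decides whether the evidence is sufficient.
        assessment = await judge_handler.assess(query, evidence)
        if assessment.recommendation == "synthesize":
            break  # Hand off to report synthesis in the real flows.
        if assessment.next_search_queries:
            query = assessment.next_search_queries[0]  # Refine and loop again.


if __name__ == "__main__":
    asyncio.run(research("metformin Alzheimer's disease"))
```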
## Core Architecture @@ -134,11 +134,10 @@ The graph orchestrator (`src/orchestrator/graph_orchestrator.py`) implements a f - **Research Flows**: Iterative and deep research patterns (`src/orchestrator/research_flow.py`) - **Graph Builder**: Graph construction utilities (`src/agent_factory/graph_builder.py`) - **Agents**: Pydantic AI agents (`src/agents/`, `src/agent_factory/agents.py`) -- **Search Tools**: Neo4j knowledge graph, PubMed, ClinicalTrials.gov, Europe PMC, Web search, RAG (`src/tools/`) +- **Search Tools**: PubMed, ClinicalTrials.gov, Europe PMC, RAG (`src/tools/`) - **Judge Handler**: LLM-based evidence assessment (`src/agent_factory/judges.py`) - **Embeddings**: Semantic search & deduplication (`src/services/embeddings.py`) - **Statistical Analyzer**: Modal sandbox execution (`src/services/statistical_analyzer.py`) -- **Multimodal Processing**: Image OCR and audio STT/TTS services (`src/services/multimodal_processing.py`, `src/services/audio_processing.py`) - **Middleware**: State management, budget tracking, workflow coordination (`src/middleware/`) - **MCP Tools**: Claude Desktop integration (`src/mcp_tools.py`) - **Gradio UI**: Web interface with MCP server and streaming (`src/app.py`) @@ -170,25 +169,24 @@ The system supports complex research workflows through: - **Orchestrator Factory** (`src/orchestrator_factory.py`): - Auto-detects mode: "advanced" if OpenAI key available, else "simple" - - Supports explicit mode selection: "simple", "magentic" (alias for "advanced"), "advanced", "iterative", "deep", "auto" + - Supports explicit mode selection: "simple", "magentic", "advanced" - Lazy imports for optional dependencies -- **Orchestrator Modes** (selected in UI or via factory): - - `simple`: Legacy linear search-judge loop (Free Tier) - - `advanced` or `magentic`: Multi-agent coordination using Microsoft Agent Framework (requires OpenAI API key) - - `iterative`: Knowledge-gap-driven research with single loop (Free Tier) - - `deep`: Parallel section-based research with planning (Free Tier) - - `auto`: Intelligent mode detection based on query complexity (Free Tier) - -- **Graph Research Modes** (used within graph orchestrator, separate from orchestrator mode): - - `iterative`: Single research loop pattern - - `deep`: Multi-section parallel research pattern - - `auto`: Auto-detect pattern based on query complexity +- **Research Modes**: + - `iterative`: Single research loop + - `deep`: Multi-section parallel research + - `auto`: Auto-detect based on query complexity - **Execution Modes**: - `use_graph=True`: Graph-based execution (parallel, conditional routing) - `use_graph=False`: Agent chains (sequential, backward compatible) -**Note**: The UI provides separate controls for orchestrator mode and graph research mode. When using graph-based orchestrators (iterative/deep/auto), the graph research mode determines the specific pattern used within the graph execution. 
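For orientation, the sketch below shows how these mode and execution-mode knobs surface in code, reusing the constructor calls from the examples page. The `create_orchestrator` import path is assumed from `src/orchestrator_factory.py`, so verify it against the module before copying; this is a wiring sketch, not the canonical setup.

```python
# Illustrative wiring only: mode and execution-mode selection, using the calls
# shown in docs/getting-started/examples.md (create_orchestrator import assumed).
from src.agent_factory.judges import create_judge_handler
from src.orchestrator.research_flow import DeepResearchFlow, IterativeResearchFlow
from src.orchestrator_factory import create_orchestrator
from src.tools.search_handler import SearchHandler

search_handler = SearchHandler()
judge_handler = create_judge_handler()

# Orchestrator mode: "simple", "magentic", or "advanced" (auto-detected if not set).
orchestrator = create_orchestrator(
    search_handler=search_handler,
    judge_handler=judge_handler,
    config={},
    mode="advanced",
)

# Execution mode: agent chains (sequential) vs. graph-based (parallel routing).
iterative_flow = IterativeResearchFlow(
    search_handler=search_handler, judge_handler=judge_handler, use_graph=False
)
deep_flow = DeepResearchFlow(
    search_handler=search_handler, judge_handler=judge_handler, use_graph=True
)
```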
+ + + + + + + diff --git a/docs/overview/features.md b/docs/overview/features.md index 0afb21e6317f24d447fd3c6cf1103d5a9684104b..c5bbe713deee9b4c5e98aed945bd84cfe55da8e5 100644 --- a/docs/overview/features.md +++ b/docs/overview/features.md @@ -1,32 +1,27 @@ # Features -The DETERMINATOR provides a comprehensive set of features for AI-assisted research: +DeepCritical provides a comprehensive set of features for AI-assisted research: ## Core Features ### Multi-Source Search -- **General Web Search**: Search general knowledge sources for any domain -- **Neo4j Knowledge Graph**: Search structured knowledge graph for papers and disease relationships -- **PubMed**: Search peer-reviewed biomedical literature via NCBI E-utilities (automatically used when medical knowledge needed) -- **ClinicalTrials.gov**: Search interventional clinical trials (automatically used when medical knowledge needed) +- **PubMed**: Search peer-reviewed biomedical literature via NCBI E-utilities +- **ClinicalTrials.gov**: Search interventional clinical trials - **Europe PMC**: Search preprints and peer-reviewed articles (includes bioRxiv/medRxiv) - **RAG**: Semantic search within collected evidence using LlamaIndex -- **Automatic Source Selection**: Automatically determines which sources are needed based on query analysis ### MCP Integration - **Model Context Protocol**: Expose search tools via MCP server -- **Claude Desktop**: Use The DETERMINATOR tools directly from Claude Desktop +- **Claude Desktop**: Use DeepCritical tools directly from Claude Desktop - **MCP Clients**: Compatible with any MCP-compatible client ### Authentication -- **REQUIRED**: Authentication is mandatory before using the application -- **HuggingFace OAuth**: Sign in with HuggingFace account for automatic API token usage (recommended) -- **Manual API Keys**: Support for HuggingFace API keys via environment variables (`HF_TOKEN` or `HUGGINGFACE_API_KEY`) -- **Free Tier Support**: Automatic fallback to HuggingFace Inference API (public models) when no API key is available -- **Authentication Check**: The application will display an error message if authentication is not provided +- **HuggingFace OAuth**: Sign in with HuggingFace account for automatic API token usage +- **Manual API Keys**: Support for OpenAI, Anthropic, and HuggingFace API keys +- **Free Tier Support**: Automatic fallback to HuggingFace Inference API ### Secure Code Execution @@ -45,26 +40,9 @@ The DETERMINATOR provides a comprehensive set of features for AI-assisted resear - **Graph-Based Execution**: Flexible graph orchestration with conditional routing - **Parallel Research Loops**: Run multiple research tasks concurrently -- **Iterative Research**: Single-loop research with search-judge-synthesize cycles that continues until precise answers are found +- **Iterative Research**: Single-loop research with search-judge-synthesize cycles - **Deep Research**: Multi-section parallel research with planning and synthesis -- **Magentic Orchestration**: Multi-agent coordination using Microsoft Agent Framework (alias: "advanced" mode) -- **Stops at Nothing**: Only stops at configured limits (budget, time, iterations), otherwise continues until finding precise answers - -**Orchestrator Modes**: -- `simple`: Legacy linear search-judge loop -- `advanced` (or `magentic`): Multi-agent coordination (requires OpenAI API key) -- `iterative`: Knowledge-gap-driven research with single loop -- `deep`: Parallel section-based research with planning -- `auto`: Intelligent mode detection based on query 
complexity - -**Graph Research Modes** (used within graph orchestrator): -- `iterative`: Single research loop pattern -- `deep`: Multi-section parallel research pattern -- `auto`: Auto-detect pattern based on query complexity - -**Execution Modes**: -- `use_graph=True`: Graph-based execution with parallel and conditional routing -- `use_graph=False`: Agent chains with sequential execution (backward compatible) +- **Magentic Orchestration**: Multi-agent coordination using Microsoft Agent Framework ### Real-Time Streaming @@ -86,16 +64,6 @@ The DETERMINATOR provides a comprehensive set of features for AI-assisted resear - **Conversation History**: Track iteration history and agent interactions - **State Synchronization**: Share evidence across parallel loops -### Multimodal Input & Output - -- **Image Input (OCR)**: Upload images and extract text using optical character recognition -- **Audio Input (STT)**: Record or upload audio files and transcribe to text using speech-to-text -- **Audio Output (TTS)**: Generate audio responses with text-to-speech synthesis -- **Configurable Settings**: Enable/disable multimodal features via sidebar settings -- **Voice Selection**: Choose from multiple TTS voices (American English: af_*, am_*) -- **Speech Speed Control**: Adjust TTS speech speed (0.5x to 2.0x) -- **Multimodal Processing Service**: Integrated service for processing images and audio files - ## Advanced Features ### Agent System @@ -137,12 +105,10 @@ The DETERMINATOR provides a comprehensive set of features for AI-assisted resear ### Gradio Interface -- **Real-Time Chat**: Interactive chat interface with multimodal support +- **Real-Time Chat**: Interactive chat interface - **Streaming Updates**: Live progress updates - **Accordion UI**: Organized display of pending/done operations - **OAuth Integration**: Seamless HuggingFace authentication -- **Multimodal Input**: Support for text, images, and audio input in the same interface -- **Sidebar Settings**: Configuration accordions for research, multimodal, and audio settings ### MCP Server @@ -167,3 +133,12 @@ The DETERMINATOR provides a comprehensive set of features for AI-assisted resear - **Architecture Diagrams**: Visual architecture documentation - **API Reference**: Complete API documentation + + + + + + + + + diff --git a/docs/overview/quick-start.md b/docs/overview/quick-start.md index 8b36d4864c1284f5ad97e330093d6ba9760313ad..b9b45df09e850296c0659435d2916f36187cd614 100644 --- a/docs/overview/quick-start.md +++ b/docs/overview/quick-start.md @@ -5,15 +5,8 @@ Get started with DeepCritical in minutes. ## Installation ```bash -# Install uv if you haven't already (recommended: standalone installer) -# Unix/macOS/Linux: -curl -LsSf https://astral.sh/uv/install.sh | sh - -# Windows (PowerShell): -powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" - -# Alternative: pipx install uv -# Or: pip install uv +# Install uv if you haven't already +pip install uv # Sync dependencies uv sync @@ -30,26 +23,21 @@ Open your browser to `http://localhost:7860`. ## Basic Usage -### 1. Authentication (REQUIRED) - -**Authentication is mandatory** - you must authenticate before using the application. The app will display an error message if you try to use it without authentication. +### 1. 
Authentication (Optional) -**HuggingFace OAuth Login** (Recommended): +**HuggingFace OAuth Login**: - Click the "Sign in with HuggingFace" button at the top of the app - Your HuggingFace API token will be automatically used for AI inference - No need to manually enter API keys when logged in -**Manual API Key** (Alternative): -- Set environment variable `HF_TOKEN` or `HUGGINGFACE_API_KEY` before starting the app -- The app will automatically use these tokens if OAuth login is not available -- Supports HuggingFace API keys only (OpenAI/Anthropic keys are not used in the current implementation) +**Manual API Key (BYOK)**: +- Provide your own API key in the Settings accordion +- Supports HuggingFace, OpenAI, or Anthropic API keys +- Manual keys take priority over OAuth tokens ### 2. Start a Research Query 1. Enter your research question in the chat interface - - **Text Input**: Type your question directly - - **Image Input**: Click the 📷 icon to upload images (OCR will extract text) - - **Audio Input**: Click the 🎤 icon to record or upload audio (STT will transcribe to text) 2. Click "Submit" or press Enter 3. Watch the real-time progress as the system: - Generates observations @@ -58,12 +46,6 @@ Open your browser to `http://localhost:7860`. - Evaluates evidence - Synthesizes findings 4. Review the final research report - - **Audio Output**: If enabled, the final response will include audio synthesis (TTS) - -**Multimodal Features**: -- Configure image/audio input and output in the sidebar settings -- Image OCR and audio STT/TTS can be enabled/disabled independently -- TTS voice and speed can be customized in the Audio Output settings ### 3. MCP Integration (Optional) @@ -88,16 +70,13 @@ Connect DeepCritical to Claude Desktop: - `search_pubmed`: Search peer-reviewed biomedical literature - `search_clinical_trials`: Search ClinicalTrials.gov - `search_biorxiv`: Search bioRxiv/medRxiv preprints -- `search_neo4j`: Search Neo4j knowledge graph for papers and disease relationships - `search_all`: Search all sources simultaneously - `analyze_hypothesis`: Secure statistical analysis using Modal sandboxes -**Note**: The application automatically uses all available search tools (Neo4j, PubMed, ClinicalTrials.gov, Europe PMC, Web search, RAG) based on query analysis. Neo4j knowledge graph search is included by default for biomedical queries. 
- ## Next Steps - Read the [Installation Guide](../getting-started/installation.md) for detailed setup - Learn about [Configuration](../configuration/index.md) -- Explore the [Architecture](../architecture/graph_orchestration.md) +- Explore the [Architecture](../architecture/graph-orchestration.md) - Check out [Examples](../getting-started/examples.md) diff --git a/docs/team.md b/docs/team.md index 6e9be5c763245d2caec553acc314c1532b335950..e6901a846f7dafd627375238c5d4284ad05fe4c5 100644 --- a/docs/team.md +++ b/docs/team.md @@ -4,31 +4,17 @@ DeepCritical is developed by a team of researchers and developers working on AI- ## Team Members -### ZJ +### The-Obstacle-Is-The-Way -- 💼 [LinkedIn](https://www.linkedin.com/in//) +- GitHub: [The-Obstacle-Is-The-Way](https://github.com/The-Obstacle-Is-The-Way) -### Mario Aderman +### MarioAderman -- 🤗 [HuggingFace](https://huggingface.co/SeasonalFall84) -- 💼 [LinkedIn](https://www.linkedin.com/in/mario-aderman/) -- 𝕏 [X](https://x.com/marioaderman) +- GitHub: [MarioAderman](https://github.com/MarioAderman) -### Joseph Pollack +### Josephrp -- 🤗 [HuggingFace](https://huggingface.co/Tonic) -- 💼 [LinkedIn](https://www.linkedin.com/in/josephpollack/) -- 𝕏 [X](https://x.com/josephpollack) - -### Virat Chauran - -- 𝕏 [X](https://x.com/viratzzs/) -- 💼 [LinkedIn](https://www.linkedin.com/in/viratchauhan/) -- 🤗 [HuggingFace](https://huggingface.co/ViratChauhan) - -### Anna Bossler - -- 💼 [LinkedIn](https://www.linkedin.com/in/ana-bossler-07304717) +- GitHub: [Josephrp](https://github.com/Josephrp) ## About @@ -42,3 +28,13 @@ We welcome contributions! See the [Contributing Guide](contributing/index.md) fo - [GitHub Repository](https://github.com/DeepCritical/GradioDemo) - [HuggingFace Space](https://huggingface.co/spaces/DataQuests/DeepCritical) + + + + + + + + + + diff --git a/examples/README.md b/examples/README.md new file mode 100644 index 0000000000000000000000000000000000000000..856e74fbc3f15a6080bb12b0c6501309392c484b --- /dev/null +++ b/examples/README.md @@ -0,0 +1,184 @@ +# DeepCritical Examples + +**NO MOCKS. NO FAKE DATA. REAL SCIENCE.** + +These demos run the REAL drug repurposing research pipeline with actual API calls. + +--- + +## Prerequisites + +You MUST have API keys configured: + +```bash +# Copy the example and add your keys +cp .env.example .env + +# Required (pick one): +OPENAI_API_KEY=sk-... +ANTHROPIC_API_KEY=sk-ant-... + +# Optional (higher PubMed rate limits): +NCBI_API_KEY=your-key +``` + +--- + +## Examples + +### 1. Search Demo (No LLM Required) + +Demonstrates REAL parallel search across PubMed, ClinicalTrials.gov, and Europe PMC. + +```bash +uv run python examples/search_demo/run_search.py "metformin cancer" +``` + +**What's REAL:** +- Actual NCBI E-utilities API calls (PubMed) +- Actual ClinicalTrials.gov API calls +- Actual Europe PMC API calls (includes preprints) +- Real papers, real trials, real preprints + +--- + +### 2. Embeddings Demo (No LLM Required) + +Demonstrates REAL semantic search and deduplication. + +```bash +uv run python examples/embeddings_demo/run_embeddings.py +``` + +**What's REAL:** +- Actual sentence-transformers model (all-MiniLM-L6-v2) +- Actual ChromaDB vector storage +- Real cosine similarity computations +- Real semantic deduplication + +--- + +### 3. Orchestrator Demo (LLM Required) + +Demonstrates the REAL search-judge-synthesize loop. 
+ +```bash +uv run python examples/orchestrator_demo/run_agent.py "metformin cancer" +uv run python examples/orchestrator_demo/run_agent.py "aspirin alzheimer" --iterations 5 +``` + +**What's REAL:** +- Real PubMed + ClinicalTrials + Europe PMC searches +- Real LLM judge evaluating evidence quality +- Real iterative refinement based on LLM decisions +- Real research synthesis + +--- + +### 4. Magentic Demo (OpenAI Required) + +Demonstrates REAL multi-agent coordination using Microsoft Agent Framework. + +```bash +# Requires OPENAI_API_KEY specifically +uv run python examples/orchestrator_demo/run_magentic.py "metformin cancer" +``` + +**What's REAL:** +- Real MagenticBuilder orchestration +- Real SearchAgent, JudgeAgent, HypothesisAgent, ReportAgent +- Real manager-based coordination + +--- + +### 5. Hypothesis Demo (LLM Required) + +Demonstrates REAL mechanistic hypothesis generation. + +```bash +uv run python examples/hypothesis_demo/run_hypothesis.py "metformin Alzheimer's" +uv run python examples/hypothesis_demo/run_hypothesis.py "sildenafil heart failure" +``` + +**What's REAL:** +- Real PubMed + Web search first +- Real embedding-based deduplication +- Real LLM generating Drug -> Target -> Pathway -> Effect chains +- Real knowledge gap identification + +--- + +### 6. Full-Stack Demo (LLM Required) + +**THE COMPLETE PIPELINE** - All phases working together. + +```bash +uv run python examples/full_stack_demo/run_full.py "metformin Alzheimer's" +uv run python examples/full_stack_demo/run_full.py "sildenafil heart failure" -i 3 +``` + +**What's REAL:** +1. Real PubMed + ClinicalTrials + Europe PMC evidence collection +2. Real embedding-based semantic deduplication +3. Real LLM mechanistic hypothesis generation +4. Real LLM evidence quality assessment +5. Real LLM structured scientific report generation + +Output: Publication-quality research report with validated citations. + +--- + +## API Key Requirements + +| Example | LLM Required | Keys | +|---------|--------------|------| +| search_demo | No | Optional: `NCBI_API_KEY` | +| embeddings_demo | No | None | +| orchestrator_demo | Yes | `OPENAI_API_KEY` or `ANTHROPIC_API_KEY` | +| run_magentic | Yes | `OPENAI_API_KEY` (Magentic requires OpenAI) | +| hypothesis_demo | Yes | `OPENAI_API_KEY` or `ANTHROPIC_API_KEY` | +| full_stack_demo | Yes | `OPENAI_API_KEY` or `ANTHROPIC_API_KEY` | + +--- + +## Architecture + +```text +User Query + | + v +[REAL Search] --> PubMed + ClinicalTrials + Europe PMC APIs + | + v +[REAL Embeddings] --> Actual sentence-transformers + | + v +[REAL Hypothesis] --> Actual LLM reasoning + | + v +[REAL Judge] --> Actual LLM assessment + | + +---> Need more? --> Loop back to Search + | + +---> Sufficient --> Continue + | + v +[REAL Report] --> Actual LLM synthesis + | + v +Publication-Quality Research Report +``` + +--- + +## Why No Mocks? + +> "Authenticity is the feature." + +Mocks belong in `tests/unit/`, not in demos. When you run these examples, you see: +- Real papers from real databases +- Real AI reasoning about real evidence +- Real scientific hypotheses +- Real research reports + +This is what DeepCritical actually does. No fake data. No canned responses. 
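As a pointer before the individual scripts, the snippet below strings together the two no-LLM building blocks (parallel search plus embedding-based deduplication) using the same constructors and arguments as the demo scripts in this directory; it is a convenience sketch, not a separate demo.

```python
# Sketch combining the no-LLM pieces (search + dedup), mirroring the demo scripts.
import asyncio

from src.services.embeddings import EmbeddingService
from src.tools.clinicaltrials import ClinicalTrialsTool
from src.tools.europepmc import EuropePMCTool
from src.tools.pubmed import PubMedTool
from src.tools.search_handler import SearchHandler


async def main() -> None:
    handler = SearchHandler(
        tools=[PubMedTool(), ClinicalTrialsTool(), EuropePMCTool()], timeout=30.0
    )
    result = await handler.execute("metformin cancer", max_results_per_tool=5)

    embeddings = EmbeddingService()
    unique = await embeddings.deduplicate(result.evidence, threshold=0.85)
    print(f"{result.total_found} raw hits -> {len(unique)} unique after dedup")


if __name__ == "__main__":
    asyncio.run(main())
```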
diff --git a/examples/embeddings_demo/run_embeddings.py b/examples/embeddings_demo/run_embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..26ba4d374326a8dcdf272ac552527b5d77171529 --- /dev/null +++ b/examples/embeddings_demo/run_embeddings.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 +""" +Demo: Semantic Search & Deduplication (Phase 6). + +This script demonstrates embedding-based capabilities using REAL data: +- Fetches REAL abstracts from PubMed +- Embeds text with sentence-transformers +- Performs semantic deduplication on LIVE research data + +Usage: + uv run python examples/embeddings_demo/run_embeddings.py +""" + +import asyncio + +from src.services.embeddings import EmbeddingService +from src.tools.pubmed import PubMedTool + + +def create_fresh_service(name_suffix: str = "") -> EmbeddingService: + """Create a fresh embedding service with unique collection name.""" + import uuid + + # Create service with unique collection by modifying the internal collection + service = EmbeddingService.__new__(EmbeddingService) + service._model = __import__("sentence_transformers").SentenceTransformer("all-MiniLM-L6-v2") + service._client = __import__("chromadb").Client() + collection_name = f"demo_{name_suffix}_{uuid.uuid4().hex[:8]}" + service._collection = service._client.create_collection( + name=collection_name, metadata={"hnsw:space": "cosine"} + ) + return service + + +async def demo_real_pipeline() -> None: + """Run the demo using REAL PubMed data.""" + print("\n" + "=" * 60) + print("DeepCritical Embeddings Demo (REAL DATA)") + print("=" * 60) + + # 1. Fetch Real Data + query = "metformin mechanism of action" + print(f"\n[1] Fetching real papers for: '{query}'...") + pubmed = PubMedTool() + # Fetch enough results to likely get some overlap/redundancy + evidence = await pubmed.search(query, max_results=10) + + print(f" Found {len(evidence)} papers.") + print("\n Sample Titles:") + for i, e in enumerate(evidence[:3], 1): + print(f" {i}. {e.citation.title[:80]}...") + + # 2. Embed Data + print("\n[2] Embedding abstracts (sentence-transformers)...") + service = create_fresh_service("real_demo") + + # 3. Semantic Search + print("\n[3] Semantic Search Demo") + print(" Indexing evidence...") + for e in evidence: + # Use URL as ID for uniqueness + await service.add_evidence( + evidence_id=e.citation.url, + content=e.content, + metadata={ + "source": e.citation.source, + "title": e.citation.title, + "date": e.citation.date, + }, + ) + + semantic_query = "activation of AMPK pathway" + print(f" Searching for concept: '{semantic_query}'") + results = await service.search_similar(semantic_query, n_results=2) + + print(" Top matches:") + for i, r in enumerate(results, 1): + similarity = 1 - r["distance"] + print(f" {i}. [{similarity:.1%} match] {r['metadata']['title'][:70]}...") + + # 4. 
Semantic Deduplication + print("\n[4] Semantic Deduplication Demo") + # Create a FRESH service for deduplication so we don't clash with Step 3's index + dedup_service = create_fresh_service("dedup_demo") + + print(" Checking for redundant papers (threshold=0.85)...") + + # To force a duplicate for demo purposes, let's double the evidence list + # simulating finding the same papers again or very similar ones + duplicated_evidence = evidence + evidence[:2] + print(f" Input pool: {len(duplicated_evidence)} items (with artificial duplicates added)") + + unique = await dedup_service.deduplicate(duplicated_evidence, threshold=0.85) + + print(f" Output pool: {len(unique)} unique items") + print(f" Removed {len(duplicated_evidence) - len(unique)} duplicates.") + + print("\n" + "=" * 60) + print("Demo complete! Verified with REAL PubMed data.") + print("=" * 60 + "\n") + + +if __name__ == "__main__": + asyncio.run(demo_real_pipeline()) diff --git a/examples/full_stack_demo/run_full.py b/examples/full_stack_demo/run_full.py new file mode 100644 index 0000000000000000000000000000000000000000..2464084cd802c55285cebc4f54cf7c4832f5ba4e --- /dev/null +++ b/examples/full_stack_demo/run_full.py @@ -0,0 +1,236 @@ +#!/usr/bin/env python3 +""" +Demo: Full Stack DeepCritical Agent (Phases 1-8). + +This script demonstrates the COMPLETE REAL drug repurposing research pipeline: +- Phase 2: REAL Search (PubMed + ClinicalTrials + Europe PMC) +- Phase 6: REAL Embeddings (sentence-transformers + ChromaDB) +- Phase 7: REAL Hypothesis (LLM mechanistic reasoning) +- Phase 3: REAL Judge (LLM evidence assessment) +- Phase 8: REAL Report (LLM structured scientific report) + +NO MOCKS. NO FAKE DATA. REAL SCIENCE. + +Usage: + uv run python examples/full_stack_demo/run_full.py "metformin Alzheimer's" + uv run python examples/full_stack_demo/run_full.py "sildenafil heart failure" -i 3 + +Requires: OPENAI_API_KEY or ANTHROPIC_API_KEY +""" + +import argparse +import asyncio +import os +import sys +from typing import Any + +from src.utils.models import Evidence + + +def print_header(title: str) -> None: + """Print a formatted section header.""" + print(f"\n{'=' * 70}") + print(f" {title}") + print(f"{'=' * 70}\n") + + +def print_step(step: int, name: str) -> None: + """Print a step indicator.""" + print(f"\n[Step {step}] {name}") + print("-" * 50) + + +_MAX_DISPLAY_LEN = 600 + + +def _print_truncated(text: str) -> None: + """Print text, truncating if too long.""" + if len(text) > _MAX_DISPLAY_LEN: + print(text[:_MAX_DISPLAY_LEN] + "\n... 
[truncated for display]") + else: + print(text) + + +async def _run_search_iteration( + query: str, + iteration: int, + evidence_store: dict[str, Any], + all_evidence: list[Evidence], + search_handler: Any, + embedding_service: Any, +) -> list[Evidence]: + """Run a single search iteration with deduplication.""" + search_queries = [query] + if evidence_store.get("hypotheses"): + for h in evidence_store["hypotheses"][-2:]: + search_queries.extend(h.search_suggestions[:1]) + + for q in search_queries[:2]: + result = await search_handler.execute(q, max_results_per_tool=5) + print(f" '{q}' -> {result.total_found} results") + new_unique = await embedding_service.deduplicate(result.evidence) + print(f" After dedup: {len(new_unique)} unique") + all_evidence.extend(new_unique) + + evidence_store["current"] = all_evidence + evidence_store["iteration_count"] = iteration + return all_evidence + + +async def _handle_judge_step( + judge_handler: Any, query: str, all_evidence: list[Evidence], evidence_store: dict[str, Any] +) -> tuple[bool, str]: + """Handle the judge assessment step. Returns (should_stop, next_query).""" + print("\n[Judge] Assessing evidence quality (REAL LLM)...") + assessment = await judge_handler.assess(query, all_evidence) + print(f" Mechanism Score: {assessment.details.mechanism_score}/10") + print(f" Clinical Score: {assessment.details.clinical_evidence_score}/10") + print(f" Confidence: {assessment.confidence:.0%}") + print(f" Recommendation: {assessment.recommendation.upper()}") + + if assessment.recommendation == "synthesize": + print("\n[Judge] Evidence sufficient! Proceeding to report generation...") + evidence_store["last_assessment"] = assessment.details.model_dump() + return True, query + + next_queries = assessment.next_search_queries[:2] if assessment.next_search_queries else [] + if next_queries: + print(f"\n[Judge] Need more evidence. Next queries: {next_queries}") + return False, next_queries[0] + + print("\n[Judge] Need more evidence but no suggested queries. 
Continuing with original query.") + return False, query + + +async def run_full_demo(query: str, max_iterations: int) -> None: + """Run the REAL full stack pipeline.""" + print_header("DeepCritical Full Stack Demo (REAL)") + print(f"Query: {query}") + print(f"Max iterations: {max_iterations}") + print("Mode: REAL (All live API calls - no mocks)\n") + + # Import real components + from src.agent_factory.judges import JudgeHandler + from src.agents.hypothesis_agent import HypothesisAgent + from src.agents.report_agent import ReportAgent + from src.services.embeddings import EmbeddingService + from src.tools.clinicaltrials import ClinicalTrialsTool + from src.tools.europepmc import EuropePMCTool + from src.tools.pubmed import PubMedTool + from src.tools.search_handler import SearchHandler + + # Initialize REAL services + print("[Init] Loading embedding model...") + embedding_service = EmbeddingService() + search_handler = SearchHandler( + tools=[PubMedTool(), ClinicalTrialsTool(), EuropePMCTool()], timeout=30.0 + ) + judge_handler = JudgeHandler() + + # Shared evidence store + evidence_store: dict[str, Any] = {"current": [], "hypotheses": [], "iteration_count": 0} + all_evidence: list[Evidence] = [] + + for iteration in range(1, max_iterations + 1): + print_step(iteration, f"ITERATION {iteration}/{max_iterations}") + + # Step 1: REAL Search + print("\n[Search] Querying PubMed + ClinicalTrials + Europe PMC (REAL API calls)...") + all_evidence = await _run_search_iteration( + query, iteration, evidence_store, all_evidence, search_handler, embedding_service + ) + + if not all_evidence: + print("\nNo evidence found. Try a different query.") + return + + # Step 2: REAL Hypothesis generation (first iteration only) + if iteration == 1: + print("\n[Hypothesis] Generating mechanistic hypotheses (REAL LLM)...") + hypothesis_agent = HypothesisAgent(evidence_store, embedding_service) + hyp_response = await hypothesis_agent.run(query) + _print_truncated(hyp_response.messages[0].text) + + # Step 3: REAL Judge + should_stop, query = await _handle_judge_step( + judge_handler, query, all_evidence, evidence_store + ) + if should_stop: + break + + # Step 4: REAL Report generation + print_step(iteration + 1, "REPORT GENERATION (REAL LLM)") + report_agent = ReportAgent(evidence_store, embedding_service) + report_response = await report_agent.run(query) + + print("\n" + "=" * 70) + print(" FINAL RESEARCH REPORT") + print("=" * 70) + print(report_response.messages[0].text) + + +async def main() -> None: + """Entry point.""" + parser = argparse.ArgumentParser( + description="DeepCritical Full Stack Demo - REAL, No Mocks", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +This demo runs the COMPLETE pipeline with REAL API calls: + 1. REAL search: Actual PubMed queries + 2. REAL embeddings: Actual sentence-transformers model + 3. REAL hypothesis: Actual LLM generating mechanistic chains + 4. REAL judge: Actual LLM assessing evidence quality + 5. 
REAL report: Actual LLM generating structured report + +Examples: + uv run python examples/full_stack_demo/run_full.py "metformin Alzheimer's" + uv run python examples/full_stack_demo/run_full.py "sildenafil heart failure" -i 3 + uv run python examples/full_stack_demo/run_full.py "aspirin cancer prevention" + """, + ) + parser.add_argument( + "query", + help="Research query (e.g., 'metformin Alzheimer's disease')", + ) + parser.add_argument( + "-i", + "--iterations", + type=int, + default=2, + help="Max search iterations (default: 2)", + ) + + args = parser.parse_args() + + if args.iterations < 1: + print("Error: iterations must be at least 1") + sys.exit(1) + + # Fail fast: require API key + if not (os.getenv("OPENAI_API_KEY") or os.getenv("ANTHROPIC_API_KEY")): + print("=" * 70) + print("ERROR: This demo requires a real LLM.") + print() + print("Set one of the following in your .env file:") + print(" OPENAI_API_KEY=sk-...") + print(" ANTHROPIC_API_KEY=sk-ant-...") + print() + print("This is a REAL demo. No mocks. No fake data.") + print("=" * 70) + sys.exit(1) + + await run_full_demo(args.query, args.iterations) + + print("\n" + "=" * 70) + print(" DeepCritical Full Stack Demo Complete!") + print(" ") + print(" Everything you just saw was REAL:") + print(" - Real PubMed + ClinicalTrials + Europe PMC searches") + print(" - Real embedding computations") + print(" - Real LLM reasoning") + print(" - Real scientific report") + print("=" * 70 + "\n") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/hypothesis_demo/run_hypothesis.py b/examples/hypothesis_demo/run_hypothesis.py new file mode 100644 index 0000000000000000000000000000000000000000..65a24224a0d8a32ced8f25de702e152ecba13590 --- /dev/null +++ b/examples/hypothesis_demo/run_hypothesis.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python3 +""" +Demo: Hypothesis Generation (Phase 7). + +This script demonstrates the REAL hypothesis generation pipeline: +1. REAL search: PubMed + ClinicalTrials + Europe PMC (actual API calls) +2. REAL embeddings: Semantic deduplication +3. REAL LLM: Mechanistic hypothesis generation + +Usage: + # Requires OPENAI_API_KEY or ANTHROPIC_API_KEY + uv run python examples/hypothesis_demo/run_hypothesis.py "metformin Alzheimer's" + uv run python examples/hypothesis_demo/run_hypothesis.py "sildenafil heart failure" +""" + +import argparse +import asyncio +import os +import sys +from typing import Any + +from src.agents.hypothesis_agent import HypothesisAgent +from src.services.embeddings import EmbeddingService +from src.tools.clinicaltrials import ClinicalTrialsTool +from src.tools.europepmc import EuropePMCTool +from src.tools.pubmed import PubMedTool +from src.tools.search_handler import SearchHandler + + +async def run_hypothesis_demo(query: str) -> None: + """Run the REAL hypothesis generation pipeline.""" + try: + print(f"\n{'=' * 60}") + print("DeepCritical Hypothesis Agent Demo (Phase 7)") + print(f"Query: {query}") + print("Mode: REAL (Live API calls)") + print(f"{'=' * 60}\n") + + # Step 1: REAL Search + print("[Step 1] Searching PubMed + ClinicalTrials + Europe PMC...") + search_handler = SearchHandler( + tools=[PubMedTool(), ClinicalTrialsTool(), EuropePMCTool()], timeout=30.0 + ) + result = await search_handler.execute(query, max_results_per_tool=5) + + print(f" Found {result.total_found} results from {result.sources_searched}") + if result.errors: + print(f" Warnings: {result.errors}") + + if not result.evidence: + print("\nNo evidence found. 
Try a different query.") + return + + # Step 2: REAL Embeddings - Deduplicate + print("\n[Step 2] Semantic deduplication...") + embedding_service = EmbeddingService() + unique_evidence = await embedding_service.deduplicate(result.evidence, threshold=0.85) + print(f" {len(result.evidence)} -> {len(unique_evidence)} unique papers") + + # Show what we found + print("\n[Evidence collected]") + max_title_len = 50 + for i, e in enumerate(unique_evidence[:5], 1): + raw_title = e.citation.title + if len(raw_title) > max_title_len: + title = raw_title[:max_title_len] + "..." + else: + title = raw_title + print(f" {i}. [{e.citation.source.upper()}] {title}") + + # Step 3: REAL LLM - Generate hypotheses + print("\n[Step 3] Generating mechanistic hypotheses (LLM)...") + evidence_store: dict[str, Any] = {"current": unique_evidence, "hypotheses": []} + agent = HypothesisAgent(evidence_store, embedding_service) + + print("-" * 60) + response = await agent.run(query) + print(response.messages[0].text) + print("-" * 60) + + # Show stored hypotheses + hypotheses = evidence_store.get("hypotheses", []) + print(f"\n{len(hypotheses)} hypotheses stored") + + if hypotheses: + print("\nGenerated search queries for further investigation:") + for h in hypotheses: + queries = h.to_search_queries() + print(f" {h.drug} -> {h.target}:") + for q in queries[:3]: + print(f" - {q}") + + except Exception as e: + print(f"\n❌ Error during hypothesis generation: {e}") + raise + + +async def main() -> None: + """Entry point.""" + parser = argparse.ArgumentParser( + description="Hypothesis Generation Demo (REAL - No Mocks)", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + uv run python examples/hypothesis_demo/run_hypothesis.py "metformin Alzheimer's" + uv run python examples/hypothesis_demo/run_hypothesis.py "sildenafil heart failure" + uv run python examples/hypothesis_demo/run_hypothesis.py "aspirin cancer prevention" + """, + ) + parser.add_argument( + "query", + nargs="?", + default="metformin Alzheimer's disease", + help="Research query", + ) + args = parser.parse_args() + + # Fail fast: require API key + if not (os.getenv("OPENAI_API_KEY") or os.getenv("ANTHROPIC_API_KEY")): + print("=" * 60) + print("ERROR: This demo requires a real LLM.") + print() + print("Set one of the following in your .env file:") + print(" OPENAI_API_KEY=sk-...") + print(" ANTHROPIC_API_KEY=sk-ant-...") + print() + print("This is a REAL demo, not a mock. No fake data.") + print("=" * 60) + sys.exit(1) + + await run_hypothesis_demo(args.query) + + print("\n" + "=" * 60) + print("Demo complete! This was a REAL pipeline:") + print(" 1. REAL search: PubMed + ClinicalTrials + Europe PMC APIs") + print(" 2. REAL embeddings: Actual sentence-transformers") + print(" 3. REAL LLM: Actual hypothesis generation") + print("=" * 60 + "\n") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/modal_demo/run_analysis.py b/examples/modal_demo/run_analysis.py new file mode 100644 index 0000000000000000000000000000000000000000..82bbe7ff0fcdedb4b871d4479924db2108affcd8 --- /dev/null +++ b/examples/modal_demo/run_analysis.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +"""Demo: Modal-powered statistical analysis. + +This script uses StatisticalAnalyzer directly (NO agent_framework dependency). 
+ +Usage: + uv run python examples/modal_demo/run_analysis.py "metformin alzheimer" +""" + +import argparse +import asyncio +import os +import sys + +from src.services.statistical_analyzer import get_statistical_analyzer +from src.tools.pubmed import PubMedTool +from src.utils.config import settings + + +async def main() -> None: + """Run the Modal analysis demo.""" + parser = argparse.ArgumentParser(description="Modal Analysis Demo") + parser.add_argument("query", help="Research query") + args = parser.parse_args() + + if not settings.modal_available: + print("Error: Modal credentials not configured.") + sys.exit(1) + + if not (os.getenv("OPENAI_API_KEY") or os.getenv("ANTHROPIC_API_KEY")): + print("Error: No LLM API key found.") + sys.exit(1) + + print(f"\n{'=' * 60}") + print("DeepCritical Modal Analysis Demo") + print(f"Query: {args.query}") + print(f"{'=' * 60}\n") + + # Step 1: Gather Evidence + print("Step 1: Gathering evidence from PubMed...") + pubmed = PubMedTool() + evidence = await pubmed.search(args.query, max_results=5) + print(f" Found {len(evidence)} papers\n") + + # Step 2: Run Modal Analysis + print("Step 2: Running statistical analysis in Modal sandbox...") + analyzer = get_statistical_analyzer() + result = await analyzer.analyze(query=args.query, evidence=evidence) + + # Step 3: Display Results + print("\n" + "=" * 60) + print("ANALYSIS RESULTS") + print("=" * 60) + print(f"\nVerdict: {result.verdict}") + print(f"Confidence: {result.confidence:.0%}") + print("\nKey Findings:") + for finding in result.key_findings: + print(f" - {finding}") + + print("\n[Demo Complete - Code executed in Modal, not locally]") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/modal_demo/test_code_execution.py b/examples/modal_demo/test_code_execution.py new file mode 100644 index 0000000000000000000000000000000000000000..7addd107e03541fed68a85bf8ed5cb484cc44bc1 --- /dev/null +++ b/examples/modal_demo/test_code_execution.py @@ -0,0 +1,169 @@ +"""Demo script to test Modal code execution integration. 
+ +Run with: uv run python examples/modal_demo/test_code_execution.py +""" + +import sys +from pathlib import Path + +# Add src to path +sys.path.insert(0, str(Path(__file__).parent.parent.parent)) + +from src.tools.code_execution import CodeExecutionError, get_code_executor + + +def test_basic_execution(): + """Test basic code execution.""" + print("\n=== Test 1: Basic Execution ===") + executor = get_code_executor() + + code = """ +print("Hello from Modal sandbox!") +result = 2 + 2 +print(f"2 + 2 = {result}") +""" + + result = executor.execute(code) + print(f"Success: {result['success']}") + print(f"Stdout:\n{result['stdout']}") + if result["stderr"]: + print(f"Stderr:\n{result['stderr']}") + + +def test_scientific_computing(): + """Test scientific computing libraries.""" + print("\n=== Test 2: Scientific Computing ===") + executor = get_code_executor() + + code = """ +import pandas as pd +import numpy as np + +# Create sample data +data = { + 'drug': ['DrugA', 'DrugB', 'DrugC'], + 'efficacy': [0.75, 0.82, 0.68], + 'sample_size': [100, 150, 120] +} + +df = pd.DataFrame(data) + +# Calculate weighted average +weighted_avg = np.average(df['efficacy'], weights=df['sample_size']) + +print(f"Drugs tested: {len(df)}") +print(f"Weighted average efficacy: {weighted_avg:.3f}") +print("\\nDataFrame:") +print(df.to_string()) +""" + + result = executor.execute(code) + print(f"Success: {result['success']}") + print(f"Output:\n{result['stdout']}") + + +def test_statistical_analysis(): + """Test statistical analysis.""" + print("\n=== Test 3: Statistical Analysis ===") + executor = get_code_executor() + + code = """ +import numpy as np +from scipy import stats + +# Simulate two treatment groups +np.random.seed(42) +control_group = np.random.normal(100, 15, 50) +treatment_group = np.random.normal(110, 15, 50) + +# Perform t-test +t_stat, p_value = stats.ttest_ind(treatment_group, control_group) + +print(f"Control mean: {np.mean(control_group):.2f}") +print(f"Treatment mean: {np.mean(treatment_group):.2f}") +print(f"T-statistic: {t_stat:.3f}") +print(f"P-value: {p_value:.4f}") + +if p_value < 0.05: + print("Result: Statistically significant difference") +else: + print("Result: No significant difference") +""" + + result = executor.execute(code) + print(f"Success: {result['success']}") + print(f"Output:\n{result['stdout']}") + + +def test_with_return_value(): + """Test execute_with_return method.""" + print("\n=== Test 4: Return Value ===") + executor = get_code_executor() + + code = """ +import numpy as np + +# Calculate something +data = np.array([1, 2, 3, 4, 5]) +result = { + 'mean': float(np.mean(data)), + 'std': float(np.std(data)), + 'sum': int(np.sum(data)) +} +""" + + try: + result = executor.execute_with_return(code) + print(f"Returned result: {result}") + print(f"Mean: {result['mean']}") + print(f"Std: {result['std']}") + print(f"Sum: {result['sum']}") + except CodeExecutionError as e: + print(f"Error: {e}") + + +def test_error_handling(): + """Test error handling.""" + print("\n=== Test 5: Error Handling ===") + executor = get_code_executor() + + code = """ +# This will fail +x = 1 / 0 +""" + + result = executor.execute(code) + print(f"Success: {result['success']}") + print(f"Error: {result['error']}") + + +def main(): + """Run all tests.""" + print("=" * 60) + print("Modal Code Execution Demo") + print("=" * 60) + + tests = [ + test_basic_execution, + test_scientific_computing, + test_statistical_analysis, + test_with_return_value, + test_error_handling, + ] + + for test in tests: + try: + 
test() + except Exception as e: + print(f"\n❌ Test failed: {e}") + import traceback + + traceback.print_exc() + + print("\n" + "=" * 60) + print("Demo completed!") + print("=" * 60) + + +if __name__ == "__main__": + main() diff --git a/examples/modal_demo/verify_sandbox.py b/examples/modal_demo/verify_sandbox.py new file mode 100644 index 0000000000000000000000000000000000000000..8ac94503607aafa506056fdf4434c96469a91834 --- /dev/null +++ b/examples/modal_demo/verify_sandbox.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +"""Verify that Modal sandbox is properly isolated. + +This script proves to judges that code runs in Modal, not locally. +NO agent_framework dependency - uses only src.tools.code_execution. + +Usage: + uv run python examples/modal_demo/verify_sandbox.py +""" + +import asyncio +from functools import partial + +from src.tools.code_execution import CodeExecutionError, get_code_executor +from src.utils.config import settings + + +def print_result(result: dict) -> None: + """Print execution result, surfacing errors when they occur.""" + if result.get("success"): + print(f" {result['stdout'].strip()}\n") + else: + error = result.get("error") or result.get("stderr", "").strip() or "Unknown error" + print(f" ERROR: {error}\n") + + +async def main() -> None: + """Verify Modal sandbox isolation.""" + if not settings.modal_available: + print("Error: Modal credentials not configured.") + print("Set MODAL_TOKEN_ID and MODAL_TOKEN_SECRET in .env") + return + + try: + executor = get_code_executor() + loop = asyncio.get_running_loop() + + print("=" * 60) + print("Modal Sandbox Isolation Verification") + print("=" * 60 + "\n") + + # Test 1: Hostname + print("Test 1: Check hostname (should NOT be your machine)") + code1 = "import socket; print(f'Hostname: {socket.gethostname()}')" + result1 = await loop.run_in_executor(None, partial(executor.execute, code1)) + print_result(result1) + + # Test 2: Scientific libraries + print("Test 2: Verify scientific libraries") + code2 = """ +import pandas as pd +import numpy as np +import scipy +print(f"pandas: {pd.__version__}") +print(f"numpy: {np.__version__}") +print(f"scipy: {scipy.__version__}") +""" + result2 = await loop.run_in_executor(None, partial(executor.execute, code2)) + print_result(result2) + + # Test 3: Network blocked + print("Test 3: Verify network isolation") + code3 = """ +import urllib.request +try: + urllib.request.urlopen("https://google.com", timeout=2) + print("Network: ALLOWED (unexpected!)") +except Exception: + print("Network: BLOCKED (as expected)") +""" + result3 = await loop.run_in_executor(None, partial(executor.execute, code3)) + print_result(result3) + + # Test 4: Real statistics + print("Test 4: Execute statistical analysis") + code4 = """ +import pandas as pd +import scipy.stats as stats + +data = pd.DataFrame({'effect': [0.42, 0.38, 0.51]}) +mean = data['effect'].mean() +t_stat, p_val = stats.ttest_1samp(data['effect'], 0) + +print(f"Mean Effect: {mean:.3f}") +print(f"P-value: {p_val:.4f}") +print(f"Verdict: {'SUPPORTED' if p_val < 0.05 else 'INCONCLUSIVE'}") +""" + result4 = await loop.run_in_executor(None, partial(executor.execute, code4)) + print_result(result4) + + print("=" * 60) + print("All tests complete - Modal sandbox verified!") + print("=" * 60) + + except CodeExecutionError as e: + print(f"Error: Modal code execution failed: {e}") + print("Hint: Ensure Modal SDK is installed and credentials are valid.") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git 
a/examples/orchestrator_demo/run_agent.py b/examples/orchestrator_demo/run_agent.py new file mode 100644 index 0000000000000000000000000000000000000000..44e569b0bf66a882e51ddebfd46e462c7d575365 --- /dev/null +++ b/examples/orchestrator_demo/run_agent.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +""" +Demo: DeepCritical Agent Loop (Search + Judge + Orchestrator). + +This script demonstrates the REAL Phase 4 orchestration: +- REAL Iterative Search (PubMed + ClinicalTrials + Europe PMC) +- REAL Evidence Evaluation (LLM Judge) +- REAL Orchestration Loop +- REAL Final Synthesis + +NO MOCKS. REAL API CALLS. + +Usage: + uv run python examples/orchestrator_demo/run_agent.py "metformin cancer" + uv run python examples/orchestrator_demo/run_agent.py "sildenafil heart failure" --iterations 5 + +Requires: OPENAI_API_KEY or ANTHROPIC_API_KEY +""" + +import argparse +import asyncio +import os +import sys + +from src.agent_factory.judges import JudgeHandler +from src.orchestrator import Orchestrator +from src.tools.clinicaltrials import ClinicalTrialsTool +from src.tools.europepmc import EuropePMCTool +from src.tools.pubmed import PubMedTool +from src.tools.search_handler import SearchHandler +from src.utils.models import OrchestratorConfig + +MAX_ITERATIONS = 10 + + +async def main() -> None: + """Run the REAL agent demo.""" + parser = argparse.ArgumentParser( + description="DeepCritical Agent Demo - REAL, No Mocks", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +This demo runs the REAL search-judge-synthesize loop: + 1. REAL search: PubMed + ClinicalTrials + Europe PMC queries + 2. REAL judge: Actual LLM assessing evidence quality + 3. REAL loop: Actual iterative refinement based on LLM decisions + 4. REAL synthesis: Actual research summary generation + +Examples: + uv run python examples/orchestrator_demo/run_agent.py "metformin cancer" + uv run python examples/orchestrator_demo/run_agent.py "aspirin alzheimer" --iterations 5 + """, + ) + parser.add_argument("query", help="Research query (e.g., 'metformin cancer')") + parser.add_argument("--iterations", type=int, default=3, help="Max iterations (default: 3)") + args = parser.parse_args() + + if not 1 <= args.iterations <= MAX_ITERATIONS: + print(f"Error: iterations must be between 1 and {MAX_ITERATIONS}") + sys.exit(1) + + # Fail fast: require API key + if not (os.getenv("OPENAI_API_KEY") or os.getenv("ANTHROPIC_API_KEY")): + print("=" * 60) + print("ERROR: This demo requires a real LLM.") + print() + print("Set one of the following in your .env file:") + print(" OPENAI_API_KEY=sk-...") + print(" ANTHROPIC_API_KEY=sk-ant-...") + print() + print("This is a REAL demo. No mocks. 
No fake data.") + print("=" * 60) + sys.exit(1) + + print(f"\n{'=' * 60}") + print("DeepCritical Agent Demo (REAL)") + print(f"Query: {args.query}") + print(f"Max Iterations: {args.iterations}") + print("Mode: REAL (All live API calls)") + print(f"{'=' * 60}\n") + + # Setup REAL components + search_handler = SearchHandler( + tools=[PubMedTool(), ClinicalTrialsTool(), EuropePMCTool()], timeout=30.0 + ) + judge_handler = JudgeHandler() # REAL LLM judge + + config = OrchestratorConfig(max_iterations=args.iterations) + orchestrator = Orchestrator( + search_handler=search_handler, judge_handler=judge_handler, config=config + ) + + # Run the REAL loop + try: + async for event in orchestrator.run(args.query): + # Print event with icon (remove markdown bold for CLI) + print(event.to_markdown().replace("**", "")) + + # Show search results count + if event.type == "search_complete" and event.data: + print(f" -> Found {event.data.get('new_count', 0)} new items") + + except Exception as e: + print(f"\n❌ Error: {e}") + raise + + print("\n" + "=" * 60) + print("Demo complete! Everything was REAL:") + print(" - Real PubMed + ClinicalTrials + Europe PMC searches") + print(" - Real LLM judge decisions") + print(" - Real iterative refinement") + print("=" * 60 + "\n") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/orchestrator_demo/run_magentic.py b/examples/orchestrator_demo/run_magentic.py new file mode 100644 index 0000000000000000000000000000000000000000..fe74450d9a19d40c706d32ffe97d452a2aa4f36b --- /dev/null +++ b/examples/orchestrator_demo/run_magentic.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +""" +Demo: Magentic-One Orchestrator for DeepCritical. + +This script demonstrates Phase 5 functionality: +- Multi-Agent Coordination (Searcher + Judge + Manager) +- Magentic-One Workflow + +Usage: + export OPENAI_API_KEY=... + uv run python examples/orchestrator_demo/run_magentic.py "metformin cancer" +""" + +import argparse +import asyncio +import os +import sys + +from src.agent_factory.judges import JudgeHandler +from src.orchestrator_factory import create_orchestrator +from src.tools.clinicaltrials import ClinicalTrialsTool +from src.tools.europepmc import EuropePMCTool +from src.tools.pubmed import PubMedTool +from src.tools.search_handler import SearchHandler +from src.utils.models import OrchestratorConfig + + +async def main() -> None: + """Run the magentic agent demo.""" + parser = argparse.ArgumentParser(description="Run DeepCritical Magentic Agent") + parser.add_argument("query", help="Research query (e.g., 'metformin cancer')") + parser.add_argument("--iterations", type=int, default=10, help="Max rounds") + args = parser.parse_args() + + # Check for OpenAI key specifically - Magentic requires function calling + # which is only supported by OpenAI's API (not Anthropic or HF Inference) + if not os.getenv("OPENAI_API_KEY"): + print("Error: OPENAI_API_KEY required. Magentic uses function calling") + print(" which requires OpenAI's API. For other providers, use mode='simple'.") + sys.exit(1) + + print(f"\n{'=' * 60}") + print("DeepCritical Magentic Agent Demo") + print(f"Query: {args.query}") + print("Mode: MAGENTIC (Multi-Agent)") + print(f"{'=' * 60}\n") + + # 1. Setup Search Tools + search_handler = SearchHandler( + tools=[PubMedTool(), ClinicalTrialsTool(), EuropePMCTool()], timeout=30.0 + ) + + # 2. Setup Judge + judge_handler = JudgeHandler() + + # 3. 
Setup Orchestrator via Factory + config = OrchestratorConfig(max_iterations=args.iterations) + orchestrator = create_orchestrator( + search_handler=search_handler, + judge_handler=judge_handler, + config=config, + mode="magentic", + ) + + if not orchestrator: + print("Failed to create Magentic orchestrator. Is agent-framework installed?") + sys.exit(1) + + # 4. Run Loop + try: + async for event in orchestrator.run(args.query): + # Print event with icon + # Clean up markdown for CLI + msg_obj = event.message + msg_text = "" + if hasattr(msg_obj, "text"): + msg_text = msg_obj.text + else: + msg_text = str(msg_obj) + + msg = msg_text.replace("\n", " ").replace("**", "")[:150] + print(f"[{event.type.upper()}] {msg}...") + + if event.type == "complete": + print("\n--- FINAL OUTPUT ---\n") + print(msg_text) + + except Exception as e: + print(f"\n❌ Error: {e}") + import traceback + + traceback.print_exc() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/rate_limiting_demo.py b/examples/rate_limiting_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..90aab639ab00741300bd3e5c2f53f21e86789a1e --- /dev/null +++ b/examples/rate_limiting_demo.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python3 +"""Demo script to verify rate limiting works correctly.""" + +import asyncio +import time + +from src.tools.pubmed import PubMedTool +from src.tools.rate_limiter import RateLimiter, get_pubmed_limiter, reset_pubmed_limiter + + +async def test_basic_limiter(): + """Test basic rate limiter behavior.""" + print("=" * 60) + print("Rate Limiting Demo") + print("=" * 60) + + # Test 1: Basic limiter + print("\n[Test 1] Testing 3/second limiter...") + limiter = RateLimiter("3/second") + + start = time.monotonic() + for i in range(6): + await limiter.acquire() + elapsed = time.monotonic() - start + print(f" Request {i + 1} at {elapsed:.2f}s") + + total = time.monotonic() - start + print(f" Total time for 6 requests: {total:.2f}s (expected ~2s)") + + +async def test_pubmed_limiter(): + """Test PubMed-specific limiter.""" + print("\n[Test 2] Testing PubMed limiter (shared)...") + + reset_pubmed_limiter() # Clean state + + # Without API key: 3/sec + limiter = get_pubmed_limiter(api_key=None) + print(f" Rate without key: {limiter.rate}") + + # Multiple tools should share the same limiter + tool1 = PubMedTool() + tool2 = PubMedTool() + + # Verify they share the limiter + print(f" Tools share limiter: {tool1._limiter is tool2._limiter}") + + +async def test_concurrent_requests(): + """Test rate limiting under concurrent load.""" + print("\n[Test 3] Testing concurrent request limiting...") + + limiter = RateLimiter("5/second") + + async def make_request(i: int): + await limiter.acquire() + return time.monotonic() + + start = time.monotonic() + # Launch 10 concurrent requests + tasks = [make_request(i) for i in range(10)] + times = await asyncio.gather(*tasks) + + # Calculate distribution + relative_times = [t - start for t in times] + print(f" Request times: {[f'{t:.2f}s' for t in sorted(relative_times)]}") + + total = max(relative_times) + print(f" All 10 requests completed in {total:.2f}s (expected ~2s)") + + +async def main(): + await test_basic_limiter() + await test_pubmed_limiter() + await test_concurrent_requests() + + print("\n" + "=" * 60) + print("Demo complete!") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/examples/search_demo/run_search.py b/examples/search_demo/run_search.py new file mode 100644 index 
0000000000000000000000000000000000000000..05f46d37bbef26d5674f050488c0f8b16f822ef1 --- /dev/null +++ b/examples/search_demo/run_search.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +""" +Demo: Search for drug repurposing evidence. + +This script demonstrates multi-source search functionality: +- PubMed search (biomedical literature) +- ClinicalTrials.gov search (clinical trial evidence) +- SearchHandler (parallel scatter-gather orchestration) + +Usage: + # From project root: + uv run python examples/search_demo/run_search.py + + # With custom query: + uv run python examples/search_demo/run_search.py "metformin cancer" + +Requirements: + - Optional: NCBI_API_KEY in .env for higher PubMed rate limits +""" + +import asyncio +import sys + +from src.tools.clinicaltrials import ClinicalTrialsTool +from src.tools.europepmc import EuropePMCTool +from src.tools.pubmed import PubMedTool +from src.tools.search_handler import SearchHandler + + +async def main(query: str) -> None: + """Run search demo with the given query.""" + print(f"\n{'=' * 60}") + print("DeepCritical Search Demo") + print(f"Query: {query}") + print(f"{'=' * 60}\n") + + # Initialize tools + pubmed = PubMedTool() + trials = ClinicalTrialsTool() + preprints = EuropePMCTool() + handler = SearchHandler(tools=[pubmed, trials, preprints], timeout=30.0) + + # Execute search + print("Searching PubMed, ClinicalTrials.gov, and Europe PMC in parallel...") + result = await handler.execute(query, max_results_per_tool=5) + + # Display results + print(f"\n{'=' * 60}") + print(f"Results: {result.total_found} pieces of evidence") + print(f"Sources: {', '.join(result.sources_searched)}") + if result.errors: + print(f"Errors: {result.errors}") + print(f"{'=' * 60}\n") + + for i, evidence in enumerate(result.evidence, 1): + print(f"[{i}] {evidence.citation.source.upper()}: {evidence.citation.title[:80]}...") + print(f" URL: {evidence.citation.url}") + print(f" Content: {evidence.content[:150]}...") + print() + + +if __name__ == "__main__": + # Default query or use command line arg + default_query = "metformin Alzheimer's disease drug repurposing" + query = sys.argv[1] if len(sys.argv) > 1 else default_query + + asyncio.run(main(query)) diff --git a/mkdocs.yml b/mkdocs.yml index cd3cee71d85f1dc3660ccd047e1feddbbcceb6ee..8501ece60172cfd1672dae3a911e9e9e97a3879a 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -1,14 +1,11 @@ -site_name: The DETERMINATOR -site_description: Generalist Deep Research Agent that Stops at Nothing -site_author: The DeepCritical Team +site_name: DeepCritical +site_description: AI-Native Drug Repurposing Research Agent +site_author: DeepCritical Team site_url: https://deepcritical.github.io/GradioDemo/ repo_name: DeepCritical/GradioDemo repo_url: https://github.com/DeepCritical/GradioDemo -edit_uri: edit/dev/docs/ - -# Ensure all files are included even if not in nav -# strict: false +edit_uri: edit/main/docs/ theme: name: material @@ -28,55 +25,29 @@ theme: icon: material/brightness-4 name: Switch to light mode features: - # Navigation features - navigation.tabs - navigation.sections - navigation.expand - navigation.top - - navigation.indexes - - navigation.instant - - navigation.tracking - - navigation.smooth - # Search features - search.suggest - search.highlight - # Content features - content.code.annotate - content.code.copy - - content.tabs.link - - content.tooltips - - toc.integrate icon: repo: fontawesome/brands/github - language: en plugins: - - search: - lang: - - en - separator: '[\s\-,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|&' + - 
search - mermaid2 - codeinclude - - git-revision-date-localized: - enable_creation_date: true - enable_git_follow: false # Disable follow to avoid timestamp ordering issues - strict: false # Bypass warnings about timestamp ordering issues - type: timeago # Shows "2 days ago" format - fallback_to_build_date: true - minify: minify_html: true minify_js: true minify_css: true markdown_extensions: - - dev.docs_plugins: - base_path: "." - pymdownx.highlight: anchor_linenums: true - line_spans: __span # Allow line spans for highlighting - pygments_lang_class: true # Add language class to code blocks - use_pygments: true - noclasses: false # Use CSS classes for better theming - pymdownx.inlinehilite - pymdownx.superfences: custom_fences: @@ -84,35 +55,30 @@ markdown_extensions: class: mermaid format: !!python/name:pymdownx.superfences.fence_code_format preserve_tabs: true + - dev.docs_plugins: + base_path: . - pymdownx.tabbed: alternate_style: true - combine_header_slug: true # Better tab linking - pymdownx.tasklist: custom_checkbox: true - pymdownx.emoji: emoji_generator: !!python/name:pymdownx.emoji.to_svg emoji_index: !!python/name:pymdownx.emoji.twemoji - - pymdownx.snippets - admonition - pymdownx.details + - pymdownx.superfences - attr_list - md_in_html - tables - - meta # Frontmatter support for tags, categories, etc. - toc: permalink: true - permalink_title: "Anchor link to this section" - baselevel: 1 - toc_depth: 3 - slugify: !!python/object/apply:pymdownx.slugs.slugify - kwds: - case: lower nav: - Home: index.md - Overview: - overview/architecture.md - overview/features.md + - overview/quick-start.md - Getting Started: - getting-started/installation.md - getting-started/quick-start.md @@ -120,47 +86,33 @@ nav: - getting-started/examples.md - Configuration: - configuration/index.md + - configuration/CONFIGURATION.md - Architecture: - - "Graph Orchestration": architecture/graph_orchestration.md - - "Workflow Diagrams": architecture/workflow-diagrams.md - - "Agents": architecture/agents.md - - "Orchestrators": architecture/orchestrators.md - - "Tools": architecture/tools.md - - "Middleware": architecture/middleware.md - - "Services": architecture/services.md + - architecture/graph-orchestration.md + - architecture/graph_orchestration.md + - architecture/workflows.md + - architecture/workflow-diagrams.md + - architecture/agents.md + - architecture/orchestrators.md + - architecture/tools.md + - architecture/middleware.md + - architecture/services.md - API Reference: - api/agents.md - api/tools.md - api/orchestrators.md - api/services.md - api/models.md - - Contributing: - - contributing/index.md - - contributing/code-quality.md - - contributing/code-style.md - - contributing/error-handling.md - - contributing/implementation-patterns.md - - contributing/prompt-engineering.md - - contributing/testing.md - - License: LICENSE.md + - Contributing: contributing.md + - License: license.md - Team: team.md extra: social: - icon: fontawesome/brands/github link: https://github.com/DeepCritical/GradioDemo - name: GitHub - - icon: fontawesome/brands/twitter - link: https://twitter.com/josephpollack - name: Twitter - icon: material/web link: https://huggingface.co/spaces/DataQuests/DeepCritical - name: Live Demo - - icon: fontawesome/brands/discord - link: https://discord.gg/n8ytYeh25n - name: Discord - generator: - enabled: false # Hide generator meta tag copyright: Copyright © 2024 DeepCritical Team diff --git a/pyproject.toml b/pyproject.toml index 
6f59cac5edf5e957ee88a6fec36ad45db3040bd0..11a8381a7df4cf76a851c34d49436d0bbdf03240 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,97 +1,66 @@ [project] -name = "determinator" +name = "deepcritical" version = "0.1.0" -description = "The DETERMINATOR - the Deep Research Agent that Stops at Nothing" +description = "AI-Native Drug Repurposing Research Agent" readme = "README.md" requires-python = ">=3.11" dependencies = [ "pydantic>=2.7", - "pydantic-settings>=2.2", # For BaseSettings (config) - "pydantic-ai>=0.0.16", # Agent framework + "pydantic-settings>=2.2", + "pydantic-ai[huggingface]>=0.0.16", "openai>=1.0.0", "anthropic>=0.18.0", - "httpx>=0.27", # Async HTTP client (PubMed) - "beautifulsoup4>=4.12", # HTML parsing - "xmltodict>=0.13", # PubMed XML -> dict - "huggingface-hub>=0.20.0", # Hugging Face Inference API - "gradio[mcp,oauth]>=6.0.0", # Chat interface with MCP server support (6.0 required for css in launch()) + "httpx>=0.27", + "beautifulsoup4>=4.12", + "xmltodict>=0.13", + "huggingface-hub>=0.20.0", + "gradio[mcp,oauth]>=6.0.0", "python-dotenv>=1.0", # .env loading "tenacity>=8.2", # Retry logic "structlog>=24.1", # Structured logging "requests>=2.32.5", # ClinicalTrials.gov (httpx blocked by WAF) "pydantic-graph>=1.22.0", - "limits>=3.0", # Web search + "limits>=3.0", # Rate limiting + "duckduckgo-search>=5.0", # Web search "llama-index-llms-huggingface>=0.6.1", "llama-index-llms-huggingface-api>=0.6.1", "llama-index-vector-stores-chroma>=0.5.3", "llama-index>=0.14.8", - "gradio-client>=1.0.0", # For STT/OCR API calls - "soundfile>=0.12.0", # For audio file I/O - "pillow>=10.0.0", # For image processing - "torch>=2.0.0", # Required by Kokoro TTS - "transformers>=4.57.2", # Required by Kokoro TTS - "modal>=0.63.0", # Required for TTS GPU execution "tokenizers>=0.22.0,<=0.23.0", - "rpds-py>=0.29.0", - "pydantic-ai-slim[huggingface]>=0.0.18", - "agent-framework-core>=1.0.0b251120,<2.0.0", + "transformers>=4.57.2", "chromadb>=0.4.0", + "rpds-py>=0.29.0", # Python implementation of rpds (required by chromadb on Windows) "sentence-transformers>=2.2.0", "numpy<2.0", + "agent-framework-core>=1.0.0b251120,<2.0.0", + "modal>=0.63.0", "llama-index-llms-openai>=0.6.9", "llama-index-embeddings-openai>=0.5.1", - "ddgs>=9.9.2", - "aiohttp>=3.13.2", - "lxml>=6.0.2", - "fake-useragent==2.2.0", - "socksio==1.0.0", - "neo4j>=6.0.3", - "md2pdf>=1.0.1", + "pydantic-ai-slim[huggingface]>=0.0.18", + "pytest>=9.0.1", + "pytest-cov>=7.0.0", ] [project.optional-dependencies] dev = [ - # Testing - "pytest>=8.0", - "pytest-asyncio>=0.23", - "pytest-sugar>=1.0", - "pytest-cov>=5.0", - "pytest-mock>=3.12", - "respx>=0.21", # Mock httpx requests - "typer>=0.9.0", # Gradio CLI dependency for smoke tests - - # Quality - "ruff>=0.4.0", - "mypy>=1.10", + "pytest>=9.0.1", + "pytest-asyncio>=1.3.0", + "pytest-sugar>=1.1.1", + "pytest-cov>=7.0.0", + "pytest-mock>=3.15.1", + "respx>=0.22.0", + "typer>=0.9.0", + "ruff>=0.14.6", + "mypy>=1.18.2", "pre-commit>=3.7", - - # Documentation - "mkdocs>=1.6.0", - "mkdocs-material>=9.0.0", - "mkdocs-mermaid2-plugin>=1.1.0", - "mkdocs-codeinclude-plugin>=0.2.0", - "mkdocs-git-revision-date-localized-plugin>=1.2.0", + "mkdocs>=1.5.0", + "mkdocs-material>=9.7.0", + "mkdocs-mermaid2-plugin>=1.2.3", "mkdocs-minify-plugin>=0.8.0", + "mkdocs-codeinclude-plugin>=0.2.1", + "mkdocs-macros-plugin>=1.5.0", "pymdown-extensions>=10.17.2", ] -magentic = [ - "agent-framework-core>=1.0.0b251120,<2.0.0", # Microsoft Agent Framework (PyPI) -] -embeddings = [ - "chromadb>=0.4.0", - 
"sentence-transformers>=2.2.0", - "numpy<2.0", # chromadb compatibility: uses np.float_ removed in NumPy 2.0 -] -modal = [ - # Mario's Modal code execution + LlamaIndex RAG - # Note: modal>=0.63.0 is now in main dependencies for TTS support - "llama-index>=0.11.0", - "llama-index-llms-openai>=0.6.9", - "llama-index-embeddings-openai>=0.5.1", - "llama-index-vector-stores-chroma", - "chromadb>=0.4.0", - "numpy<2.0", # chromadb compatibility: uses np.float_ removed in NumPy 2.0 -] [build-system] requires = ["hatchling"] @@ -127,7 +96,6 @@ ignore = [ "PLR0913", # Too many arguments (agents need many params) "PLR0912", # Too many branches (complex orchestrator logic) "PLR0911", # Too many return statements (complex agent logic) - "PLR0915", # Too many statements (Gradio UI setup functions) "PLR2004", # Magic values (statistical constants like p-values) "PLW0603", # Global statement (singleton pattern for Modal) "PLC0415", # Lazy imports for optional dependencies @@ -136,6 +104,10 @@ ignore = [ "RUF100", # Unused noqa (version differences between local/CI) ] +[tool.ruff.lint.per-file-ignores] +"src/app.py" = ["PLR0915"] # Too many statements (Gradio UI setup is complex) +".pre-commit-hooks/run_pytest_with_sync.py" = ["PLR0915"] # Too many statements (pre-commit hook with comprehensive cache cleaning) + [tool.ruff.lint.isort] known-first-party = ["src"] @@ -153,7 +125,7 @@ exclude = [ "^reference_repos/", "^examples/", "^folder/", - "^src/app.py", + "^src/app\\.py$", # Gradio UI setup - ignore mypy checks ] # ============== PYTEST CONFIG ============== @@ -167,6 +139,25 @@ addopts = [ "-p", "no:logfire", ] +# Suppress known warnings that don't indicate test failures +# These are from third-party libraries and don't affect test correctness +filterwarnings = [ + # Pydantic deprecation warnings from unittest.mock introspection + # These occur when mock tries to introspect Pydantic models + "ignore::pydantic.warnings.PydanticDeprecatedSince20", + "ignore::pydantic.warnings.PydanticDeprecatedSince211", + # Gradio UI warnings (not relevant for unit tests) + "ignore::UserWarning:gradio.components.dropdown", + "ignore::UserWarning:gradio.oauth", + # Pattern-based filters for Pydantic deprecation messages (catch-all) + "ignore:The `__fields__` attribute is deprecated.*", + "ignore:The `__fields_set__` attribute is deprecated.*", + "ignore:Accessing the 'model_computed_fields' attribute.*", + "ignore:Accessing the 'model_fields' attribute.*", + # Also catch warnings from unittest.mock module + "ignore::DeprecationWarning:unittest.mock", +] +# Note: pytest only runs test files, so source files don't need exclusion markers = [ "unit: Unit tests (mocked)", "integration: Integration tests (real APIs)", @@ -180,7 +171,10 @@ markers = [ # ============== COVERAGE CONFIG ============== [tool.coverage.run] source = ["src"] -omit = ["*/__init__.py"] +omit = [ + "*/__init__.py", + "src/app.py", # Exclude Gradio UI from coverage +] [tool.coverage.report] exclude_lines = [ @@ -188,18 +182,3 @@ exclude_lines = [ "if TYPE_CHECKING:", "raise NotImplementedError", ] - -[dependency-groups] -dev = [ - "mkdocs>=1.6.1", - "mkdocs-codeinclude-plugin>=0.2.1", - "mkdocs-material>=9.7.0", - "mkdocs-mermaid2-plugin>=1.2.3", - "mkdocs-git-revision-date-localized-plugin>=1.2.0", - "mkdocs-minify-plugin>=0.8.0", - "structlog>=25.5.0", - "ty>=0.0.1a28", -] - -# Note: agent-framework-core is optional for magentic mode (multi-agent orchestration) -# Version pinned to 1.0.0b* to avoid breaking changes. 
CI skips tests via pytest.importorskip diff --git a/requirements.txt b/requirements.txt index cf1b66bb960423122615ba3340799ef6e9d92435..a50255a27c2a7e2568e6328e9f632f125eb609a8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,9 +15,7 @@ anthropic>=0.18.0 # HTTP & Parsing httpx>=0.27 -aiohttp>=3.13.2 # Required for website crawling beautifulsoup4>=4.12 -lxml>=6.0.2 # Required for BeautifulSoup lxml parser (faster than html.parser) xmltodict>=0.13 # HuggingFace Hub @@ -35,58 +33,29 @@ limits>=3.0 # Rate limiting pydantic-graph>=1.22.0 # Web search -ddgs>=9.9.2 # duckduckgo-search has been renamed to ddgs -fake-useragent==2.2.0 -socksio==1.0.0 +duckduckgo-search>=5.0 + +# Multi-agent orchestration (Advanced mode) +agent-framework-core>=1.0.0b251120,<2.0.0 + # LlamaIndex RAG llama-index-llms-huggingface>=0.6.1 llama-index-llms-huggingface-api>=0.6.1 llama-index-vector-stores-chroma>=0.5.3 llama-index>=0.14.8 - -# Audio/Image processing -gradio-client>=1.0.0 # For STT/OCR API calls -soundfile>=0.12.0 # For audio file I/O -pillow>=10.0.0 # For image processing - -# TTS dependencies (for Modal GPU TTS) -torch>=2.0.0 # Required by Kokoro TTS -transformers>=4.57.2 # Required by Kokoro TTS -modal>=0.63.0 # Required for TTS GPU execution -# Note: Kokoro is installed in Modal image from: git+https://github.com/hexgrad/kokoro.git +llama-index-llms-openai>=0.6.9 +llama-index-embeddings-openai>=0.5.1 # Embeddings & Vector Store tokenizers>=0.22.0,<=0.23.0 -rpds-py>=0.29.0 # Python implementation of rpds (required by chromadb on Windows) +transformers>=4.57.2 chromadb>=0.4.0 +rpds-py>=0.29.0 # Python implementation of rpds (required by chromadb on Windows) sentence-transformers>=2.2.0 -numpy<2.0 # chromadb compatibility: uses np.float_ removed in NumPy 2.0 -neo4j>=6.0.3 - -### DOCUMENT STUFF - -cssselect2==0.8.0 -docopt==0.6.2 -fonttools==4.61.0 -markdown2==2.5.4 -md2pdf==1.0.1 -pydyf==0.11.0 -pyphen==0.17.2 -tinycss2==1.5.1 -tinyhtml5==2.0.0 -weasyprint==66.0 -webencodings==0.5.1 -zopfli==0.4.0 +numpy<2.0 # Optional: Modal for code execution modal>=0.63.0 # Pydantic AI with HuggingFace support pydantic-ai-slim[huggingface]>=0.0.18 - -# Multi-agent orchestration (Advanced mode) -agent-framework-core>=1.0.0b251120,<2.0.0 - -# LlamaIndex RAG - OpenAI -llama-index-llms-openai>=0.6.9 -llama-index-embeddings-openai>=0.5.1 diff --git a/site/404.html b/site/404.html deleted file mode 100644 index e27584cf925588e1877fa35980693a314efa832c..0000000000000000000000000000000000000000 --- a/site/404.html +++ /dev/null @@ -1 +0,0 @@ - The DETERMINATOR

404 - Not found

\ No newline at end of file diff --git a/site/api/agents/index.html b/site/api/agents/index.html deleted file mode 100644 index 47398e7e76b2e53e625f3576fa4685056935bbe8..0000000000000000000000000000000000000000 --- a/site/api/agents/index.html +++ /dev/null @@ -1 +0,0 @@ - Agents API Reference - The DETERMINATOR

Agents API Reference

This page documents the API for DeepCritical agents.

KnowledgeGapAgent

Module: src.agents.knowledge_gap

Purpose: Evaluates research state and identifies knowledge gaps.

Methods

evaluate

Evaluates research completeness and identifies outstanding knowledge gaps.

Parameters:
- query: Research query string
- background_context: Background context for the query (default: "")
- conversation_history: History of actions, findings, and thoughts as string (default: "")
- iteration: Current iteration number (default: 0)
- time_elapsed_minutes: Elapsed time in minutes (default: 0.0)
- max_time_minutes: Maximum time limit in minutes (default: 10)

Returns: KnowledgeGapOutput with: - research_complete: Boolean indicating if research is complete - outstanding_gaps: List of remaining knowledge gaps
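
Example (illustrative sketch, assuming the KnowledgeGapAgent class is importable from src.agents.knowledge_gap with the evaluate() signature documented above; the query and conversation history are placeholders):

import asyncio

from src.agents.knowledge_gap import KnowledgeGapAgent


async def check_gaps() -> None:
    agent = KnowledgeGapAgent()  # model defaults to get_model() from settings
    result = await agent.evaluate(
        query="Can metformin be repurposed for Alzheimer's disease?",
        background_context="",
        conversation_history="Iteration 1: searched PubMed; found 5 papers on AMPK signalling.",
        iteration=2,
        time_elapsed_minutes=3.5,
        max_time_minutes=10,
    )
    if result.research_complete:
        print("Research complete")
    else:
        print("Outstanding gaps:", result.outstanding_gaps)


asyncio.run(check_gaps())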

ToolSelectorAgent

Module: src.agents.tool_selector

Purpose: Selects appropriate tools for addressing knowledge gaps.

Methods

select_tools

Selects tools for addressing a knowledge gap.

Parameters:
- gap: The knowledge gap to address
- query: Research query string
- background_context: Optional background context (default: "")
- conversation_history: History of actions, findings, and thoughts as string (default: "")

Returns: AgentSelectionPlan with list of AgentTask objects.

WriterAgent

Module: src.agents.writer

Purpose: Generates final reports from research findings.

Methods

write_report

Generates a markdown report from research findings.

Parameters:
- query: Research query string
- findings: Research findings to include in report
- output_length: Optional description of desired output length (default: "")
- output_instructions: Optional additional instructions for report generation (default: "")

Returns: Markdown string with numbered citations.
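
Example (minimal sketch; the WriterAgent constructor is assumed to take no required arguments, and the findings string is a placeholder):

import asyncio

from src.agents.writer import WriterAgent


async def write() -> None:
    writer = WriterAgent()  # model falls back to settings when omitted
    report = await writer.write_report(
        query="metformin Alzheimer's disease repurposing",
        findings="[1] Metformin activates AMPK (placeholder finding). [2] A phase II trial reported mixed cognitive outcomes (placeholder finding).",
        output_length="~500 words",
        output_instructions="Use numbered citations and include a limitations section.",
    )
    print(report)


asyncio.run(write())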

LongWriterAgent

Module: src.agents.long_writer

Purpose: Long-form report generation with section-by-section writing.

Methods

write_next_section

Writes the next section of a long-form report.

Parameters:
- original_query: The original research query
- report_draft: Current report draft as string (all sections written so far)
- next_section_title: Title of the section to write
- next_section_draft: Draft content for the next section

Returns: LongWriterOutput with formatted section and references.

write_report

Generates final report from draft.

Parameters: - query: Research query string - report_title: Title of the report - report_draft: Complete report draft

Returns: Final markdown report string.

ProofreaderAgent

Module: src.agents.proofreader

Purpose: Proofreads and polishes report drafts.

Methods

proofread

Proofreads and polishes a report draft.

Parameters: - query: Research query string - report_title: Title of the report - report_draft: Report draft to proofread

Returns: Polished markdown string.

ThinkingAgent

Module: src.agents.thinking

Purpose: Generates observations from conversation history.

Methods

generate_observations

Generates observations from conversation history.

Parameters:
- query: Research query string
- background_context: Optional background context (default: "")
- conversation_history: History of actions, findings, and thoughts as string (default: "")
- iteration: Current iteration number (default: 1)

Returns: Observation string.

InputParserAgent

Module: src.agents.input_parser

Purpose: Parses and improves user queries, detects research mode.

Methods

parse

Parses and improves a user query.

Parameters: - query: Original query string

Returns: ParsedQuery with:
- original_query: Original query string
- improved_query: Refined query string
- research_mode: "iterative" or "deep"
- key_entities: List of key entities
- research_questions: List of research questions

Factory Functions

All agents have factory functions in src.agent_factory.agents:

Parameters:
- model: Optional Pydantic AI model. If None, uses get_model() from settings.
- oauth_token: Optional OAuth token from HuggingFace login (takes priority over env vars)

Returns: Agent instance.
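
Example (sketch; create_writer_agent is a hypothetical name following the create_<agent_name> pattern described above):

from src.agent_factory.agents import create_writer_agent  # hypothetical name following create_<agent_name>

# With model=None the factory falls back to get_model() from settings;
# oauth_token, when provided, takes priority over environment variables.
writer = create_writer_agent(model=None, oauth_token=None)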

See Also

\ No newline at end of file diff --git a/site/api/models/index.html b/site/api/models/index.html deleted file mode 100644 index 06a0e6dbadc4385037c571d9624f3d5766b1709f..0000000000000000000000000000000000000000 --- a/site/api/models/index.html +++ /dev/null @@ -1 +0,0 @@ - Models API Reference - The DETERMINATOR

Models API Reference

This page documents the Pydantic models used throughout DeepCritical.

Evidence

Module: src.utils.models

Purpose: Represents evidence from search results.

Fields:
- citation: Citation information (title, URL, date, authors)
- content: Evidence text content
- relevance: Relevance score (0.0-1.0)
- metadata: Additional metadata dictionary

Citation

Module: src.utils.models

Purpose: Citation information for evidence.

Fields:
- source: Source name (e.g., "pubmed", "clinicaltrials", "europepmc", "web", "rag")
- title: Article/trial title
- url: Source URL
- date: Publication date (YYYY-MM-DD or "Unknown")
- authors: List of authors (optional)
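
Example (constructing both models with placeholder values):

from src.utils.models import Citation, Evidence

citation = Citation(
    source="pubmed",
    title="Placeholder article title",
    url="https://pubmed.ncbi.nlm.nih.gov/",
    date="2023-05-01",
    authors=["Author A", "Author B"],
)
evidence = Evidence(
    citation=citation,
    content="Placeholder abstract text describing the study findings.",
    relevance=0.85,
    metadata={"note": "illustrative values only"},
)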

KnowledgeGapOutput

Module: src.utils.models

Purpose: Output from knowledge gap evaluation.

Fields: - research_complete: Boolean indicating if research is complete - outstanding_gaps: List of remaining knowledge gaps

AgentSelectionPlan

Module: src.utils.models

Purpose: Plan for tool/agent selection.

Fields: - tasks: List of agent tasks to execute

AgentTask

Module: src.utils.models

Purpose: Individual agent task.

Fields:
- gap: The knowledge gap being addressed (optional)
- agent: Name of agent to use
- query: The specific query for the agent
- entity_website: The website of the entity being researched, if known (optional)

ReportDraft

Module: src.utils.models

Purpose: Draft structure for long-form reports.

Fields: - sections: List of report sections

ReportSection

Module: src.utils.models

Purpose: Individual section in a report draft.

Fields: - section_title: The title of the section - section_content: The content of the section

ParsedQuery

Module: src.utils.models

Purpose: Parsed and improved query.

Fields:
- original_query: Original query string
- improved_query: Refined query string
- research_mode: Research mode ("iterative" or "deep")
- key_entities: List of key entities
- research_questions: List of research questions

Conversation

Module: src.utils.models

Purpose: Conversation history with iterations.

Fields: - history: List of iteration data

IterationData

Module: src.utils.models

Purpose: Data for a single iteration.

Fields:
- gap: The gap addressed in the iteration
- tool_calls: The tool calls made
- findings: The findings collected from tool calls
- thought: Reflection on the iteration's success and the next steps

AgentEvent

Module: src.utils.models

Purpose: Event emitted during research execution.

Fields: - type: Event type (e.g., "started", "search_complete", "complete") - iteration: Iteration number (optional) - data: Event data dictionary

BudgetStatus

Module: src.utils.models

Purpose: Current budget status.

Fields:
- tokens_used: Total tokens used
- tokens_limit: Token budget limit
- time_elapsed_seconds: Time elapsed in seconds
- time_limit_seconds: Time budget limit (default: 600.0 seconds / 10 minutes)
- iterations: Number of iterations completed
- iterations_limit: Maximum iterations (default: 10)
- iteration_tokens: Tokens used per iteration (iteration number -> token count)

See Also

\ No newline at end of file diff --git a/site/api/orchestrators/index.html b/site/api/orchestrators/index.html deleted file mode 100644 index ac74f5543f764e9d4dba1b4b6c1cb6bfd7d013e5..0000000000000000000000000000000000000000 --- a/site/api/orchestrators/index.html +++ /dev/null @@ -1 +0,0 @@ - Orchestrators API Reference - The DETERMINATOR

Orchestrators API Reference

This page documents the API for DeepCritical orchestrators.

IterativeResearchFlow

Module: src.orchestrator.research_flow

Purpose: Single-loop research with search-judge-synthesize cycles.

Methods

run

Runs iterative research flow.

Parameters:
- query: Research query string
- background_context: Background context (default: "")
- output_length: Optional description of desired output length (default: "")
- output_instructions: Optional additional instructions for report generation (default: "")

Returns: Final report string.

Note: max_iterations, max_time_minutes, and token_budget are constructor parameters, not run() parameters.
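
Example (sketch; budget values are illustrative, and any additional constructor wiring is assumed to have defaults):

import asyncio

from src.orchestrator.research_flow import IterativeResearchFlow


async def research() -> None:
    # max_iterations, max_time_minutes, and token_budget are constructor parameters (values illustrative).
    flow = IterativeResearchFlow(max_iterations=3, max_time_minutes=10, token_budget=50_000)
    report = await flow.run(
        query="sildenafil repurposing for heart failure",
        background_context="",
        output_length="~800 words",
        output_instructions="",
    )
    print(report)


asyncio.run(research())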

DeepResearchFlow

Module: src.orchestrator.research_flow

Purpose: Multi-section parallel research with planning and synthesis.

Methods

run

Runs deep research flow.

Parameters: - query: Research query string

Returns: Final report string.

Note: max_iterations_per_section, max_time_minutes, and token_budget are constructor parameters, not run() parameters.

GraphOrchestrator

Module: src.orchestrator.graph_orchestrator

Purpose: Graph-based execution using Pydantic AI agents as nodes.

Methods

run

Runs graph-based research orchestration.

Parameters: - query: Research query string

Yields: AgentEvent objects during graph execution.

Note: research_mode and use_graph are constructor parameters, not run() parameters.

Orchestrator Factory

Module: src.orchestrator_factory

Purpose: Factory for creating orchestrators.

Functions

create_orchestrator

Creates an orchestrator instance.

Parameters:
- search_handler: Search handler protocol implementation (optional, required for simple mode)
- judge_handler: Judge handler protocol implementation (optional, required for simple mode)
- config: Configuration object (optional)
- mode: Orchestrator mode ("simple", "advanced", "magentic", "iterative", "deep", "auto", or None for auto-detect)
- oauth_token: Optional OAuth token from HuggingFace login (takes priority over env vars)

Returns: Orchestrator instance.

Raises: - ValueError: If requirements not met

Modes:
- "simple": Legacy orchestrator
- "advanced" or "magentic": Magentic orchestrator (requires OpenAI API key)
- None: Auto-detect based on API key availability
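
Example (handlers and config wired as in the demo scripts; mode=None triggers auto-detection):

from src.agent_factory.judges import JudgeHandler
from src.orchestrator_factory import create_orchestrator
from src.tools.pubmed import PubMedTool
from src.tools.search_handler import SearchHandler
from src.utils.models import OrchestratorConfig

orchestrator = create_orchestrator(
    search_handler=SearchHandler(tools=[PubMedTool()], timeout=30.0),
    judge_handler=JudgeHandler(),
    config=OrchestratorConfig(max_iterations=3),
    mode=None,  # auto-detect based on API key availability
)
# Events are then consumed with: async for event in orchestrator.run("metformin cancer"): ...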

MagenticOrchestrator

Module: src.orchestrator_magentic

Purpose: Multi-agent coordination using Microsoft Agent Framework.

Methods

run

Runs Magentic orchestration.

Parameters: - query: Research query string

Yields: AgentEvent objects converted from Magentic events.

Note: max_rounds and max_stalls are constructor parameters, not run() parameters.

Requirements: - agent-framework-core package - OpenAI API key

See Also

\ No newline at end of file diff --git a/site/api/services/index.html b/site/api/services/index.html deleted file mode 100644 index b9d99cd369ec61c46fd4414a7b88c680e0481759..0000000000000000000000000000000000000000 --- a/site/api/services/index.html +++ /dev/null @@ -1,49 +0,0 @@ - Services API Reference - The DETERMINATOR

Services API Reference

This page documents the API for DeepCritical services.

EmbeddingService

Module: src.services.embeddings

Purpose: Local sentence-transformers for semantic search and deduplication.

Methods

embed

Generates embedding for a text string.

Parameters: - text: Text to embed

Returns: Embedding vector as list of floats.

embed_batch

async def embed_batch(self, texts: list[str]) -> list[list[float]]
-

Generates embeddings for multiple texts.

Parameters: - texts: List of texts to embed

Returns: List of embedding vectors.

similarity

async def similarity(self, text1: str, text2: str) -> float
-

Calculates similarity between two texts.

Parameters: - text1: First text - text2: Second text

Returns: Similarity score (0.0-1.0).
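
Example (sketch of similarity and batch embedding; the texts are placeholders):

import asyncio

from src.services.embeddings import EmbeddingService


async def demo() -> None:
    service = EmbeddingService()
    score = await service.similarity(
        "Metformin activates AMPK signalling.",
        "AMPK is activated by metformin.",
    )
    print(f"Similarity: {score:.2f}")

    vectors = await service.embed_batch(["metformin", "sildenafil"])
    print(f"{len(vectors)} embeddings, dimension {len(vectors[0])}")


asyncio.run(demo())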

find_duplicates

async def find_duplicates(
-    self,
-    texts: list[str],
-    threshold: float = 0.85
-) -> list[tuple[int, int]]
-

Finds duplicate texts based on similarity threshold.

Parameters: - texts: List of texts to check - threshold: Similarity threshold (default: 0.85)

Returns: List of (index1, index2) tuples for duplicate pairs.

add_evidence

async def add_evidence(
-    self,
-    evidence_id: str,
-    content: str,
-    metadata: dict[str, Any]
-) -> None
-

Adds evidence to vector store for semantic search.

Parameters: - evidence_id: Unique identifier for the evidence - content: Evidence text content - metadata: Additional metadata dictionary

search_similar

async def search_similar(
-    self,
-    query: str,
-    n_results: int = 5
-) -> list[dict[str, Any]]
-

Finds semantically similar evidence.

Parameters: - query: Search query string - n_results: Number of results to return (default: 5)

Returns: List of dictionaries with id, content, metadata, and distance keys.

deduplicate

async def deduplicate(
-    self,
-    new_evidence: list[Evidence],
-    threshold: float = 0.9
-) -> list[Evidence]
-

Removes semantically duplicate evidence.

Parameters: - new_evidence: List of evidence items to deduplicate - threshold: Similarity threshold (default: 0.9; items that are at least 90% similar are treated as duplicates)

Returns: List of unique evidence items (not already in vector store).

Factory Function

get_embedding_service

@lru_cache(maxsize=1)
-def get_embedding_service() -> EmbeddingService
-

Returns singleton EmbeddingService instance.

LlamaIndexRAGService

Module: src.services.rag

Purpose: Retrieval-Augmented Generation using LlamaIndex.

Methods

ingest_evidence

Ingests evidence into RAG service.

Parameters: - evidence_list: List of Evidence objects to ingest

Note: Supports multiple embedding providers (OpenAI, local sentence-transformers, Hugging Face).

retrieve

def retrieve(
-    self,
-    query: str,
-    top_k: int | None = None
-) -> list[dict[str, Any]]
-

Retrieves relevant documents for a query.

Parameters: - query: Search query string - top_k: Number of top results to return (defaults to similarity_top_k from constructor)

Returns: List of dictionaries with text, score, and metadata keys.

query

def query(
-    self,
-    query_str: str,
-    top_k: int | None = None
-) -> str
-

Queries RAG service and returns synthesized response.

Parameters: - query_str: Query string - top_k: Number of results to use (defaults to similarity_top_k from constructor)

Returns: Synthesized response string.

Raises: - ConfigurationError: If no LLM API key is available for query synthesis

ingest_documents

def ingest_documents(self, documents: list[Any]) -> None
-

Ingests raw LlamaIndex Documents.

Parameters: - documents: List of LlamaIndex Document objects

clear_collection

def clear_collection(self) -> None
-

Clears all documents from the collection.

Factory Function

get_rag_service

def get_rag_service(
-    collection_name: str = "deepcritical_evidence",
-    oauth_token: str | None = None,
-    **kwargs: Any
-) -> LlamaIndexRAGService
-

Get or create a RAG service instance.

Parameters: - collection_name: Name of the ChromaDB collection (default: "deepcritical_evidence") - oauth_token: Optional OAuth token from HuggingFace login (takes priority over env vars) - **kwargs: Additional arguments for LlamaIndexRAGService (e.g., use_openai_embeddings=False)

Returns: Configured LlamaIndexRAGService instance.

Note: By default, uses local embeddings (sentence-transformers) which require no API keys.
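
Example (sketch, assuming the synchronous method signatures shown above; evidence values are placeholders):

from src.services.rag import get_rag_service
from src.utils.models import Citation, Evidence

rag = get_rag_service()  # local sentence-transformers embeddings by default (no API key needed)

rag.ingest_evidence([
    Evidence(
        citation=Citation(source="pubmed", title="Placeholder title", url="https://example.org/", date="Unknown"),
        content="Placeholder evidence text about metformin and AMPK.",
        relevance=0.8,
        metadata={},
    )
])

hits = rag.retrieve("metformin mechanism", top_k=3)  # dicts with text, score, and metadata keys
# rag.query("metformin mechanism") additionally synthesizes an answer but needs an LLM API key,
# otherwise it raises ConfigurationError.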

StatisticalAnalyzer

Module: src.services.statistical_analyzer

Purpose: Secure execution of AI-generated statistical code.

Methods

analyze

async def analyze(
-    self,
-    query: str,
-    evidence: list[Evidence],
-    hypothesis: dict[str, Any] | None = None
-) -> AnalysisResult
-

Analyzes a research question using statistical methods.

Parameters: - query: The research question - evidence: List of Evidence objects to analyze - hypothesis: Optional hypothesis dict with drug, target, pathway, effect, confidence keys

Returns: AnalysisResult with:
- verdict: SUPPORTED, REFUTED, or INCONCLUSIVE
- confidence: Confidence in verdict (0.0-1.0)
- statistical_evidence: Summary of statistical findings
- code_generated: Python code that was executed
- execution_output: Output from code execution
- key_takeaways: Key takeaways from analysis
- limitations: List of limitations

Note: Requires Modal credentials for sandbox execution.
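
Example (sketch; the hypothesis dict uses the documented keys with illustrative values, and Modal credentials plus an LLM API key are assumed to be configured):

import asyncio

from src.services.statistical_analyzer import get_statistical_analyzer
from src.tools.pubmed import PubMedTool


async def analyze() -> None:
    evidence = await PubMedTool().search("metformin Alzheimer's disease", max_results=5)

    analyzer = get_statistical_analyzer()
    result = await analyzer.analyze(
        query="Does metformin reduce Alzheimer's disease risk?",
        evidence=evidence,
        hypothesis={  # documented keys; values are illustrative
            "drug": "metformin",
            "target": "AMPK",
            "pathway": "insulin signalling",
            "effect": "reduced neurodegeneration",
            "confidence": 0.6,
        },
    )
    print(result.verdict, f"({result.confidence:.0%})")
    print(result.statistical_evidence)


asyncio.run(analyze())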

See Also

\ No newline at end of file diff --git a/site/api/tools/index.html b/site/api/tools/index.html deleted file mode 100644 index d5798e8b58ccf5ee1834bcb8cca0aa319e6a1f1a..0000000000000000000000000000000000000000 --- a/site/api/tools/index.html +++ /dev/null @@ -1,51 +0,0 @@ - Tools API Reference - The DETERMINATOR

Tools API Reference

This page documents the API for DeepCritical search tools.

SearchTool Protocol

All tools implement the SearchTool protocol:

class SearchTool(Protocol):
-    @property
-    def name(self) -> str: ...
-    
-    async def search(
-        self, 
-        query: str, 
-        max_results: int = 10
-    ) -> list[Evidence]: ...
-
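
Because SearchTool is a structural protocol, a custom tool only needs a name property and an async search() method. Sketch (the API endpoint and response shape are hypothetical):

import httpx

from src.utils.models import Citation, Evidence


class ExampleSourceTool:
    """Hypothetical tool hitting a placeholder API; satisfies SearchTool structurally."""

    @property
    def name(self) -> str:
        return "examplesource"

    async def search(self, query: str, max_results: int = 10) -> list[Evidence]:
        async with httpx.AsyncClient(timeout=30.0) as client:
            resp = await client.get(
                "https://api.example.org/search",  # placeholder endpoint
                params={"q": query, "limit": max_results},
            )
            resp.raise_for_status()
            items = resp.json().get("results", [])
        return [
            Evidence(
                citation=Citation(
                    source="examplesource",
                    title=item.get("title", "Untitled"),
                    url=item.get("url", ""),
                    date=item.get("date", "Unknown"),
                ),
                content=item.get("abstract", ""),
                relevance=0.5,
                metadata={},
            )
            for item in items[:max_results]
        ]

An instance can then be passed to SearchHandler(tools=[...]) alongside the built-in tools.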

PubMedTool

Module: src.tools.pubmed

Purpose: Search peer-reviewed biomedical literature from PubMed.

Properties

name

@property
-def name(self) -> str
-

Returns tool name: "pubmed"

Methods

async def search(
-    self,
-    query: str,
-    max_results: int = 10
-) -> list[Evidence]
-

Searches PubMed for articles.

Parameters: - query: Search query string - max_results: Maximum number of results to return (default: 10)

Returns: List of Evidence objects with PubMed articles.

Raises: - SearchError: If search fails (timeout, HTTP error, XML parsing error) - RateLimitError: If rate limit is exceeded (429 status code)

Note: Uses NCBI E-utilities (ESearch → EFetch). Rate limit: 0.34s between requests. Handles single vs. multiple articles.

ClinicalTrialsTool

Module: src.tools.clinicaltrials

Purpose: Search ClinicalTrials.gov for interventional studies.

Properties

name

@property
-def name(self) -> str
-

Returns tool name: "clinicaltrials"

Methods

search

async def search(
-    self,
-    query: str,
-    max_results: int = 10
-) -> list[Evidence]
-

Searches ClinicalTrials.gov for trials.

Parameters: - query: Search query string - max_results: Maximum number of results to return (default: 10)

Returns: List of Evidence objects with clinical trials.

Note: Only returns interventional studies with status: COMPLETED, ACTIVE_NOT_RECRUITING, RECRUITING, ENROLLING_BY_INVITATION. Uses requests library (NOT httpx - WAF blocks httpx). Runs in thread pool for async compatibility.

Raises: - SearchError: If search fails (HTTP error, request exception)

EuropePMCTool

Module: src.tools.europepmc

Purpose: Search Europe PMC for preprints and peer-reviewed articles.

Properties

name

@property
-def name(self) -> str
-

Returns tool name: "europepmc"

Methods

search

async def search(
-    self,
-    query: str,
-    max_results: int = 10
-) -> list[Evidence]
-

Searches Europe PMC for articles and preprints.

Parameters: - query: Search query string - max_results: Maximum number of results to return (default: 10)

Returns: List of Evidence objects with articles/preprints.

Note: Includes both preprints (marked with [PREPRINT - Not peer-reviewed]) and peer-reviewed articles. Handles preprint markers. Builds URLs from DOI or PMID.

Raises: - SearchError: If search fails (HTTP error, connection error)

RAGTool

Module: src.tools.rag_tool

Purpose: Semantic search within collected evidence.

Initialization

def __init__(
-    self,
-    rag_service: LlamaIndexRAGService | None = None,
-    oauth_token: str | None = None
-) -> None
-

Parameters: - rag_service: Optional RAG service instance. If None, will be lazy-initialized. - oauth_token: Optional OAuth token from HuggingFace login (for RAG LLM)

Properties

name

@property
-def name(self) -> str
-

Returns tool name: "rag"

Methods

search

async def search(
-    self,
-    query: str,
-    max_results: int = 10
-) -> list[Evidence]
-

Searches collected evidence using semantic similarity.

Parameters: - query: Search query string - max_results: Maximum number of results to return (default: 10)

Returns: List of Evidence objects from collected evidence.

Raises: - ConfigurationError: If RAG service is unavailable

Note: Requires evidence to be ingested into RAG service first. Wraps LlamaIndexRAGService. Returns Evidence from RAG results.

SearchHandler

Module: src.tools.search_handler

Purpose: Orchestrates parallel searches across multiple tools.

Initialization

def __init__(
-    self,
-    tools: list[SearchTool],
-    timeout: float = 30.0,
-    include_rag: bool = False,
-    auto_ingest_to_rag: bool = True,
-    oauth_token: str | None = None
-) -> None
-

Parameters:
- tools: List of search tools to use
- timeout: Timeout for each search in seconds (default: 30.0)
- include_rag: Whether to include RAG tool in searches (default: False)
- auto_ingest_to_rag: Whether to automatically ingest results into RAG (default: True)
- oauth_token: Optional OAuth token from HuggingFace login (for RAG LLM)

Methods

execute

Searches multiple tools in parallel.

Parameters: - query: Search query string - max_results_per_tool: Maximum results per tool (default: 10)

Returns: SearchResult with:
- query: The search query
- evidence: Aggregated list of evidence
- sources_searched: List of source names searched
- total_found: Total number of results
- errors: List of error messages from failed tools

Raises: - SearchError: If search times out

Note: Uses asyncio.gather() for parallel execution. Handles tool failures gracefully (returns errors in SearchResult.errors). Automatically ingests evidence into RAG if enabled.

See Also

\ No newline at end of file diff --git a/site/architecture/agents/index.html b/site/architecture/agents/index.html deleted file mode 100644 index 00b7543c6b33ccde185a2eb1af4da8d64d41f12d..0000000000000000000000000000000000000000 --- a/site/architecture/agents/index.html +++ /dev/null @@ -1 +0,0 @@ - Agents - The DETERMINATOR

Agents Architecture

DeepCritical uses Pydantic AI agents for all AI-powered operations. All agents follow a consistent pattern and use structured output types.

Agent Pattern

Pydantic AI Agents

Pydantic AI agents use the Agent class with the following structure:

  • System Prompt: Module-level constant with date injection
  • Agent Class: __init__(model: Any | None = None)
  • Main Method: Async method (e.g., async def evaluate(), async def write_report())
  • Factory Function: def create_agent_name(model: Any | None = None, oauth_token: str | None = None) -> AgentName

Note: Factory functions accept an optional oauth_token parameter for HuggingFace authentication, which takes priority over environment variables.
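
A minimal sketch of this structure (illustrative names and prompt text, not the actual KnowledgeGapAgent source; recent pydantic_ai releases use output_type/result.output, older ones use result_type/result.data):

from datetime import datetime
from typing import Any

from pydantic_ai import Agent

from src.agent_factory.judges import get_model
from src.utils.models import KnowledgeGapOutput

SYSTEM_PROMPT = (
    "You evaluate whether research is complete. "
    f"Today's date is {datetime.now().strftime('%Y-%m-%d')}."
)

class ExampleGapAgent:
    def __init__(self, model: Any | None = None) -> None:
        self.agent = Agent(
            model or get_model(),
            output_type=KnowledgeGapOutput,
            system_prompt=SYSTEM_PROMPT,
            retries=3,  # retry logic lives in the Pydantic AI Agent
        )

    async def evaluate(self, query: str) -> KnowledgeGapOutput:
        result = await self.agent.run(query)
        return result.output

def create_example_gap_agent(
    model: Any | None = None, oauth_token: str | None = None
) -> ExampleGapAgent:
    # In the real factories, oauth_token feeds into model selection
    return ExampleGapAgent(model=model)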

Model Initialization

Agents use get_model() from src/agent_factory/judges.py if no model is provided. This supports:

  • OpenAI models
  • Anthropic models
  • HuggingFace Inference API models

The model selection is based on the configured LLM_PROVIDER in settings.

Error Handling

Agents return fallback values on failure rather than raising exceptions:

  • KnowledgeGapOutput(research_complete=False, outstanding_gaps=[...])
  • Empty strings for text outputs
  • Default structured outputs

All errors are logged with context using structlog.

Input Validation

All agents validate inputs:

  • Check that queries/inputs are not empty
  • Truncate very long inputs with warnings
  • Handle None values gracefully

Output Types

Agents use structured output types from src/utils/models.py:

  • KnowledgeGapOutput: Research completeness evaluation
  • AgentSelectionPlan: Tool selection plan
  • ReportDraft: Long-form report structure
  • ParsedQuery: Query parsing and mode detection

For text output (writer agents), agents return str directly.

Agent Types

Knowledge Gap Agent

File: src/agents/knowledge_gap.py

Purpose: Evaluates research state and identifies knowledge gaps.

Output: KnowledgeGapOutput with: - research_complete: Boolean indicating if research is complete - outstanding_gaps: List of remaining knowledge gaps

Methods: - async def evaluate(query, background_context, conversation_history, iteration, time_elapsed_minutes, max_time_minutes) -> KnowledgeGapOutput

Tool Selector Agent

File: src/agents/tool_selector.py

Purpose: Selects appropriate tools for addressing knowledge gaps.

Output: AgentSelectionPlan with list of AgentTask objects.

Available Agents: - WebSearchAgent: General web search for fresh information - SiteCrawlerAgent: Research specific entities/companies - RAGAgent: Semantic search within collected evidence

Writer Agent

File: src/agents/writer.py

Purpose: Generates final reports from research findings.

Output: Markdown string with numbered citations.

Methods: - async def write_report(query, findings, output_length, output_instructions) -> str

Features: - Validates inputs - Truncates very long findings (max 50000 chars) with warning - Retry logic for transient failures (3 retries) - Citation validation before returning

Long Writer Agent

File: src/agents/long_writer.py

Purpose: Long-form report generation with section-by-section writing.

Input/Output: Uses ReportDraft models.

Methods: - async def write_next_section(query, draft, section_title, section_content) -> LongWriterOutput - async def write_report(query, report_title, report_draft) -> str

Features: - Writes sections iteratively - Aggregates references across sections - Reformats section headings and references - Deduplicates and renumbers references

Proofreader Agent

File: src/agents/proofreader.py

Purpose: Proofreads and polishes report drafts.

Input: ReportDraft. Output: Polished markdown string.

Methods: - async def proofread(query, report_title, report_draft) -> str

Features: - Removes duplicate content across sections - Adds executive summary if multiple sections - Preserves all references and citations - Improves flow and readability

Thinking Agent

File: src/agents/thinking.py

Purpose: Generates observations from conversation history.

Output: Observation string

Methods: - async def generate_observations(query, background_context, conversation_history) -> str

Input Parser Agent

File: src/agents/input_parser.py

Purpose: Parses and improves user queries, detects research mode.

Output: ParsedQuery with: - original_query: Original query string - improved_query: Refined query string - research_mode: "iterative" or "deep" - key_entities: List of key entities - research_questions: List of research questions

Magentic Agents

The following agents use the BaseAgent pattern from agent-framework and are used exclusively with MagenticOrchestrator:

Hypothesis Agent

File: src/agents/hypothesis_agent.py

Purpose: Generates mechanistic hypotheses based on evidence.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse

Features: - Uses internal Pydantic AI Agent with HypothesisAssessment output type - Accesses shared evidence_store for evidence - Uses embedding service for diverse evidence selection (MMR algorithm) - Stores hypotheses in shared context

Search Agent

File: src/agents/search_agent.py

Purpose: Wraps SearchHandler as an agent for Magentic orchestrator.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse

Features: - Executes searches via SearchHandlerProtocol - Deduplicates evidence using embedding service - Searches for semantically related evidence - Updates shared evidence store

Analysis Agent

File: src/agents/analysis_agent.py

Purpose: Performs statistical analysis using Modal sandbox.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse

Features: - Wraps StatisticalAnalyzer service - Analyzes evidence and hypotheses - Returns verdict (SUPPORTED/REFUTED/INCONCLUSIVE) - Stores analysis results in shared context

Report Agent (Magentic)

File: src/agents/report_agent.py

Purpose: Generates structured scientific reports from evidence and hypotheses.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse

Features: - Uses internal Pydantic AI Agent with ResearchReport output type - Accesses shared evidence store and hypotheses - Validates citations before returning - Formats report as markdown

Judge Agent

File: src/agents/judge_agent.py

Purpose: Evaluates evidence quality and determines if sufficient for synthesis.

Pattern: BaseAgent from agent-framework

Methods: - async def run(messages, thread, **kwargs) -> AgentRunResponse - async def run_stream(messages, thread, **kwargs) -> AsyncIterable[AgentRunResponseUpdate]

Features: - Wraps JudgeHandlerProtocol - Accesses shared evidence store - Returns JudgeAssessment with sufficient flag, confidence, and recommendation

Agent Patterns

DeepCritical uses two distinct agent patterns:

1. Pydantic AI Agents (Traditional Pattern)

These agents use the Pydantic AI Agent class directly and are used in iterative and deep research flows:

  • Pattern: Agent(model, output_type, system_prompt)
  • Initialization: __init__(model: Any | None = None)
  • Methods: Agent-specific async methods (e.g., async def evaluate(), async def write_report())
  • Examples: KnowledgeGapAgent, ToolSelectorAgent, WriterAgent, LongWriterAgent, ProofreaderAgent, ThinkingAgent, InputParserAgent

2. Magentic Agents (Agent-Framework Pattern)

These agents use the BaseAgent class from agent-framework and are used in Magentic orchestrator:

  • Pattern: BaseAgent from agent-framework with async def run() method
  • Initialization: __init__(evidence_store, embedding_service, ...)
  • Methods: async def run(messages, thread, **kwargs) -> AgentRunResponse
  • Examples: HypothesisAgent, SearchAgent, AnalysisAgent, ReportAgent, JudgeAgent

Note: Magentic agents are used exclusively with the MagenticOrchestrator and follow the agent-framework protocol for multi-agent coordination.

Factory Functions

All agents have factory functions in src/agent_factory/agents.py:

Factory functions: - Use get_model() if no model provided - Accept oauth_token parameter for HuggingFace authentication - Raise ConfigurationError if creation fails - Log agent creation

See Also

\ No newline at end of file diff --git a/site/architecture/graph_orchestration/index.html b/site/architecture/graph_orchestration/index.html deleted file mode 100644 index 6f35dca9170f12c7f670b5de3ef547337f1ead61..0000000000000000000000000000000000000000 --- a/site/architecture/graph_orchestration/index.html +++ /dev/null @@ -1,75 +0,0 @@ - Graph Orchestration - The DETERMINATOR

Graph Orchestration Architecture

Overview

DeepCritical implements a graph-based orchestration system for research workflows using Pydantic AI agents as nodes. This enables better parallel execution, conditional routing, and state management compared to simple agent chains.

Graph Patterns

Iterative Research Graph

The iterative research graph follows this pattern:

[Input] → [Thinking] → [Knowledge Gap] → [Decision: Complete?]
-                                              ↓ No          ↓ Yes
-                                    [Tool Selector]    [Writer]
-                                              ↓
-                                    [Execute Tools] → [Loop Back]
-

Node IDs: thinking → knowledge_gap → continue_decision → tool_selector/writer → execute_tools → (loop back to thinking)

Special Node Handling: - execute_tools: State node that uses search_handler to execute searches and add evidence to workflow state - continue_decision: Decision node that routes based on research_complete flag from KnowledgeGapOutput

Deep Research Graph

The deep research graph follows this pattern:

[Input] → [Planner] → [Store Plan] → [Parallel Loops] → [Collect Drafts] → [Synthesizer]
-                                        ↓         ↓         ↓
-                                     [Loop1]  [Loop2]  [Loop3]
-

Node IDs: planner → store_plan → parallel_loops → collect_drafts → synthesizer

Special Node Handling: - planner: Agent node that creates ReportPlan with report outline - store_plan: State node that stores ReportPlan in context for parallel loops - parallel_loops: Parallel node that executes IterativeResearchFlow instances for each section - collect_drafts: State node that collects section drafts from parallel loops - synthesizer: Agent node that calls LongWriterAgent.write_report() directly with ReportDraft

Deep Research


-sequenceDiagram
-    actor User
-    participant GraphOrchestrator
-    participant InputParser
-    participant GraphBuilder
-    participant GraphExecutor
-    participant Agent
-    participant BudgetTracker
-    participant WorkflowState
-
-    User->>GraphOrchestrator: run(query)
-    GraphOrchestrator->>InputParser: detect_research_mode(query)
-    InputParser-->>GraphOrchestrator: mode (iterative/deep)
-    GraphOrchestrator->>GraphBuilder: build_graph(mode)
-    GraphBuilder-->>GraphOrchestrator: ResearchGraph
-    GraphOrchestrator->>WorkflowState: init_workflow_state()
-    GraphOrchestrator->>BudgetTracker: create_budget()
-    GraphOrchestrator->>GraphExecutor: _execute_graph(graph)
-    
-    loop For each node in graph
-        GraphExecutor->>Agent: execute_node(agent_node)
-        Agent->>Agent: process_input
-        Agent-->>GraphExecutor: result
-        GraphExecutor->>WorkflowState: update_state(result)
-        GraphExecutor->>BudgetTracker: add_tokens(used)
-        GraphExecutor->>BudgetTracker: check_budget()
-        alt Budget exceeded
-            GraphExecutor->>GraphOrchestrator: emit(error_event)
-        else Continue
-            GraphExecutor->>GraphOrchestrator: emit(progress_event)
-        end
-    end
-    
-    GraphOrchestrator->>User: AsyncGenerator[AgentEvent]
-

Iterative Research

sequenceDiagram
-    participant IterativeFlow
-    participant ThinkingAgent
-    participant KnowledgeGapAgent
-    participant ToolSelector
-    participant ToolExecutor
-    participant JudgeHandler
-    participant WriterAgent
-
-    IterativeFlow->>IterativeFlow: run(query)
-    
-    loop Until complete or max_iterations
-        IterativeFlow->>ThinkingAgent: generate_observations()
-        ThinkingAgent-->>IterativeFlow: observations
-        
-        IterativeFlow->>KnowledgeGapAgent: evaluate_gaps()
-        KnowledgeGapAgent-->>IterativeFlow: KnowledgeGapOutput
-        
-        alt Research complete
-            IterativeFlow->>WriterAgent: create_final_report()
-            WriterAgent-->>IterativeFlow: final_report
-        else Gaps remain
-            IterativeFlow->>ToolSelector: select_agents(gap)
-            ToolSelector-->>IterativeFlow: AgentSelectionPlan
-            
-            IterativeFlow->>ToolExecutor: execute_tool_tasks()
-            ToolExecutor-->>IterativeFlow: ToolAgentOutput[]
-            
-            IterativeFlow->>JudgeHandler: assess_evidence()
-            JudgeHandler-->>IterativeFlow: should_continue
-        end
-    end

Graph Structure

Nodes

Graph nodes represent different stages in the research workflow:

  1. Agent Nodes: Execute Pydantic AI agents
     • Input: Prompt/query
     • Output: Structured or unstructured response
     • Examples: KnowledgeGapAgent, ToolSelectorAgent, ThinkingAgent

  2. State Nodes: Update or read workflow state
     • Input: Current state
     • Output: Updated state
     • Examples: Update evidence, update conversation history

  3. Decision Nodes: Make routing decisions based on conditions
     • Input: Current state/results
     • Output: Next node ID
     • Examples: Continue research vs. complete research

  4. Parallel Nodes: Execute multiple nodes concurrently
     • Input: List of node IDs
     • Output: Aggregated results
     • Examples: Parallel iterative research loops

Edges

Edges define transitions between nodes:

  1. Sequential Edges: Always traversed (no condition)
     • From: Source node
     • To: Target node
     • Condition: None (always True)

  2. Conditional Edges: Traversed based on condition
     • From: Source node
     • To: Target node
     • Condition: Callable that returns bool
     • Example: If research complete → go to writer, else → continue loop

  3. Parallel Edges: Used for parallel execution branches
     • From: Parallel node
     • To: Multiple target nodes
     • Execution: All targets run concurrently

State Management

State is managed via WorkflowState using ContextVar for thread-safe isolation:

  • Evidence: Collected evidence from searches
  • Conversation: Iteration history (gaps, tool calls, findings, thoughts)
  • Embedding Service: For semantic search

State transitions occur at state nodes, which update the global workflow state.

Execution Flow

  1. Graph Construction: Build graph from nodes and edges using create_iterative_graph() or create_deep_graph()
  2. Graph Validation: Ensure graph is valid (no cycles, all nodes reachable) via ResearchGraph.validate_structure()
  3. Graph Execution: Traverse graph from entry node using GraphOrchestrator._execute_graph()
  4. Node Execution: Execute each node based on type:
     • Agent Nodes: Call agent.run() with transformed input
     • State Nodes: Update workflow state via state_updater function
     • Decision Nodes: Evaluate decision_function to get next node ID
     • Parallel Nodes: Execute all parallel nodes concurrently via asyncio.gather()
  5. Edge Evaluation: Determine next node(s) based on edges and conditions
  6. Parallel Execution: Use asyncio.gather() for parallel nodes
  7. State Updates: Update state at state nodes via GraphExecutionContext.update_state()
  8. Event Streaming: Yield AgentEvent objects during execution for UI

GraphExecutionContext

The GraphExecutionContext class manages execution state during graph traversal:

  • State: Current WorkflowState instance
  • Budget Tracker: BudgetTracker instance for budget enforcement
  • Node Results: Dictionary storing results from each node execution
  • Visited Nodes: Set of node IDs that have been executed
  • Current Node: ID of the node currently being executed

Methods: - set_node_result(node_id, result): Store result from node execution - get_node_result(node_id): Retrieve stored result - has_visited(node_id): Check if node was visited - mark_visited(node_id): Mark node as visited - update_state(updater, data): Update workflow state

Conditional Routing

Decision nodes evaluate conditions and return the ID of the next node to execute (see the sketch after this list):

  • Knowledge Gap Decision: If research_complete → writer, else → tool selector
  • Budget Decision: If budget exceeded → exit, else → continue
  • Iteration Decision: If max iterations → exit, else → continue
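
A minimal sketch of the knowledge-gap decision (node IDs follow the iterative graph above; the exact signature a decision function receives is an assumption):

from src.utils.models import KnowledgeGapOutput

def continue_decision(context) -> str:
    """Route to the writer once research is complete, otherwise keep researching."""
    gap: KnowledgeGapOutput | None = context.get_node_result("knowledge_gap")
    if gap is not None and gap.research_complete:
        return "writer"
    return "tool_selector"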

Parallel Execution

Parallel nodes execute multiple nodes concurrently:

  • Each parallel branch runs independently
  • Results are aggregated after all branches complete
  • State is synchronized after parallel execution
  • Errors in one branch don't stop other branches

Budget Enforcement

Budget constraints are enforced at decision nodes:

  • Token Budget: Track LLM token usage
  • Time Budget: Track elapsed time
  • Iteration Budget: Track iteration count

If any budget is exceeded, execution routes to exit node.

Error Handling

Errors are handled at multiple levels:

  1. Node Level: Catch errors in individual node execution
  2. Graph Level: Handle errors during graph traversal
  3. State Level: Rollback state changes on error

Errors are logged and yield error events for UI.

Backward Compatibility

Graph execution is optional via feature flag:

  • USE_GRAPH_EXECUTION=true: Use graph-based execution
  • USE_GRAPH_EXECUTION=false: Use agent chain execution (existing)

This allows gradual migration and fallback if needed.

See Also

\ No newline at end of file diff --git a/site/architecture/middleware/index.html b/site/architecture/middleware/index.html deleted file mode 100644 index 54435ced90617eb287b0b1dd8ef87da616890ec8..0000000000000000000000000000000000000000 --- a/site/architecture/middleware/index.html +++ /dev/null @@ -1,40 +0,0 @@ - Middleware - The DETERMINATOR

Middleware Architecture

DeepCritical uses middleware for state management, budget tracking, and workflow coordination.

State Management

WorkflowState

File: src/middleware/state_machine.py

Purpose: Thread-safe state management for research workflows

Implementation: Uses ContextVar for thread-safe isolation

State Components: - evidence: list[Evidence]: Collected evidence from searches - conversation: Conversation: Iteration history (gaps, tool calls, findings, thoughts) - embedding_service: Any: Embedding service for semantic search

Methods: - add_evidence(new_evidence: list[Evidence]) -> int: Adds evidence with URL-based deduplication. Returns the number of new items added (excluding duplicates). - async search_related(query: str, n_results: int = 5) -> list[Evidence]: Semantic search for related evidence using embedding service

Initialization:

Access:
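
A minimal sketch of the ContextVar pattern, as it might appear inside src/middleware/state_machine.py (helper names init_workflow_state/get_workflow_state and the constructor arguments are assumptions based on the components listed above):

from contextvars import ContextVar

_workflow_state: ContextVar["WorkflowState | None"] = ContextVar("workflow_state", default=None)

def init_workflow_state() -> "WorkflowState":
    state = WorkflowState(evidence=[], conversation=Conversation())  # field names per the list above
    _workflow_state.set(state)
    return state

def get_workflow_state() -> "WorkflowState":
    state = _workflow_state.get()
    if state is None:
        raise RuntimeError("Workflow state has not been initialized")
    return state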

Workflow Manager

File: src/middleware/workflow_manager.py

Purpose: Coordinates parallel research loops

Methods: - async add_loop(loop_id: str, query: str) -> ResearchLoop: Add a new research loop to manage - async run_loops_parallel(loop_configs: list[dict], loop_func: Callable, judge_handler: Any | None = None, budget_tracker: Any | None = None) -> list[Any]: Run multiple research loops in parallel. Takes configuration dicts and a loop function. - async update_loop_status(loop_id: str, status: LoopStatus, error: str | None = None): Update loop status - async sync_loop_evidence_to_state(loop_id: str): Synchronize evidence from a specific loop to global state

Features: - Uses asyncio.gather() for parallel execution - Handles errors per loop (doesn't fail all if one fails) - Tracks loop status: pending, running, completed, failed, cancelled - Evidence deduplication across parallel loops

Usage:

from src.middleware.workflow_manager import WorkflowManager
-
-manager = WorkflowManager()
-await manager.add_loop("loop1", "Research query 1")
-await manager.add_loop("loop2", "Research query 2")
-
-async def run_research(config: dict) -> str:
-    loop_id = config["loop_id"]
-    query = config["query"]
-    # ... research logic ...
-    return "report"
-
-results = await manager.run_loops_parallel(
-    loop_configs=[
-        {"loop_id": "loop1", "query": "Research query 1"},
-        {"loop_id": "loop2", "query": "Research query 2"},
-    ],
-    loop_func=run_research,
-)
-

Budget Tracker

File: src/middleware/budget_tracker.py

Purpose: Tracks and enforces resource limits

Budget Components: - Tokens: LLM token usage - Time: Elapsed time in seconds - Iterations: Number of iterations

Methods: - create_budget(loop_id: str, tokens_limit: int = 100000, time_limit_seconds: float = 600.0, iterations_limit: int = 10) -> BudgetStatus: Create a budget for a specific loop - add_tokens(loop_id: str, tokens: int): Add token usage to a loop's budget - start_timer(loop_id: str): Start time tracking for a loop - update_timer(loop_id: str): Update elapsed time for a loop - increment_iteration(loop_id: str): Increment iteration count for a loop - check_budget(loop_id: str) -> tuple[bool, str]: Check if a loop's budget has been exceeded. Returns (exceeded: bool, reason: str) - can_continue(loop_id: str) -> bool: Check if a loop can continue based on budget

Token Estimation: - estimate_tokens(text: str) -> int: ~4 chars per token - estimate_llm_call_tokens(prompt: str, response: str) -> int: Estimate LLM call tokens

Usage:

from src.middleware.budget_tracker import BudgetTracker
-
-tracker = BudgetTracker()
-budget = tracker.create_budget(
-    loop_id="research_loop",
-    tokens_limit=100000,
-    time_limit_seconds=600,
-    iterations_limit=10
-)
-tracker.start_timer("research_loop")
-# ... research operations ...
-tracker.add_tokens("research_loop", 5000)
-tracker.update_timer("research_loop")
-exceeded, reason = tracker.check_budget("research_loop")
-if exceeded:
-    # Budget exceeded, stop research
-    pass
-if not tracker.can_continue("research_loop"):
-    # Budget exceeded, stop research
-    pass
-

Models

All middleware models are defined in src/utils/models.py:

  • IterationData: Data for a single iteration
  • Conversation: Conversation history with iterations
  • ResearchLoop: Research loop state and configuration
  • BudgetStatus: Current budget status

Thread Safety

All middleware components use ContextVar for thread-safe isolation:

  • Each request/thread has its own workflow state
  • No global mutable state
  • Safe for concurrent requests

See Also

\ No newline at end of file diff --git a/site/architecture/orchestrators/index.html b/site/architecture/orchestrators/index.html deleted file mode 100644 index fcd6738c81d1096bf2112ab7f4f30c80b18b00e4..0000000000000000000000000000000000000000 --- a/site/architecture/orchestrators/index.html +++ /dev/null @@ -1 +0,0 @@ - Orchestrators - The DETERMINATOR

Orchestrators Architecture

DeepCritical supports multiple orchestration patterns for research workflows.

Research Flows

IterativeResearchFlow

File: src/orchestrator/research_flow.py

Pattern: Generate observations → Evaluate gaps → Select tools → Execute → Judge → Continue/Complete

Agents Used: - KnowledgeGapAgent: Evaluates research completeness - ToolSelectorAgent: Selects tools for addressing gaps - ThinkingAgent: Generates observations - WriterAgent: Creates final report - JudgeHandler: Assesses evidence sufficiency

Features: - Tracks iterations, time, budget - Supports graph execution (use_graph=True) and agent chains (use_graph=False) - Iterates until research complete or constraints met

Usage:
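
Illustrative usage (the constructor arguments and run() signature are assumptions; use_graph is the documented flag):

from src.orchestrator.research_flow import IterativeResearchFlow

flow = IterativeResearchFlow(use_graph=False)  # agent-chain execution
report = await flow.run("What is the evidence that metformin prevents cancer?")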

DeepResearchFlow

File: src/orchestrator/research_flow.py

Pattern: Planner → Parallel iterative loops per section → Synthesizer

Agents Used: - PlannerAgent: Breaks query into report sections - IterativeResearchFlow: Per-section research (parallel) - LongWriterAgent or ProofreaderAgent: Final synthesis

Features: - Uses WorkflowManager for parallel execution - Budget tracking per section and globally - State synchronization across parallel loops - Supports graph execution and agent chains

Usage:
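
Illustrative usage (constructor arguments and run() signature are assumptions):

from src.orchestrator.research_flow import DeepResearchFlow

flow = DeepResearchFlow(use_graph=True)  # graph-based execution with parallel section loops
report = await flow.run("Comprehensive review of GLP-1 receptor agonists for weight loss")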

Graph Orchestrator

File: src/orchestrator/graph_orchestrator.py

Purpose: Graph-based execution using Pydantic AI agents as nodes

Features: - Uses graph execution (use_graph=True) or agent chains (use_graph=False) as fallback - Routes based on research mode (iterative/deep/auto) - Streams AgentEvent objects for UI - Uses GraphExecutionContext to manage execution state

Node Types: - Agent Nodes: Execute Pydantic AI agents - State Nodes: Update or read workflow state - Decision Nodes: Make routing decisions - Parallel Nodes: Execute multiple nodes concurrently

Edge Types: - Sequential Edges: Always traversed - Conditional Edges: Traversed based on condition - Parallel Edges: Used for parallel execution branches

Special Node Handling:

The GraphOrchestrator has special handling for certain nodes:

  • execute_tools node: State node that uses search_handler to execute searches and add evidence to workflow state
  • parallel_loops node: Parallel node that executes IterativeResearchFlow instances for each section in deep research mode
  • synthesizer node: Agent node that calls LongWriterAgent.write_report() directly with ReportDraft instead of using agent.run()
  • writer node: Agent node that calls WriterAgent.write_report() directly with findings instead of using agent.run()

GraphExecutionContext:

The orchestrator uses GraphExecutionContext to manage execution state: - Tracks current node, visited nodes, and node results - Manages workflow state and budget tracker - Provides methods to store and retrieve node execution results

Orchestrator Factory

File: src/orchestrator_factory.py

Purpose: Factory for creating orchestrators

Modes: - Simple: Legacy orchestrator (backward compatible) - Advanced: Magentic orchestrator (requires OpenAI API key) - Auto-detect: Chooses based on API key availability

Usage:
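
Illustrative usage (the factory function name create_orchestrator, its mode parameter, and the event attribute names are assumptions):

from src.orchestrator_factory import create_orchestrator

orchestrator = create_orchestrator(mode="auto")  # "simple", "advanced", or auto-detect
async for event in orchestrator.run("Does metformin reduce cancer risk?"):
    print(event.type)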

Magentic Orchestrator

File: src/orchestrator_magentic.py

Purpose: Multi-agent coordination using Microsoft Agent Framework

Features: - Uses agent-framework-core - ChatAgent pattern with internal LLMs per agent - MagenticBuilder with participants: - searcher: SearchAgent (wraps SearchHandler) - hypothesizer: HypothesisAgent (generates hypotheses) - judge: JudgeAgent (evaluates evidence) - reporter: ReportAgent (generates final report) - Manager orchestrates agents via chat client (OpenAI or HuggingFace) - Event-driven: converts Magentic events to AgentEvent for UI streaming via _process_event() method - Supports max rounds, stall detection, and reset handling

Event Processing:

The orchestrator processes Magentic events and converts them to AgentEvent: - MagenticOrchestratorMessageEvent → AgentEvent with type based on message content - MagenticAgentMessageEvent → AgentEvent with type based on agent name - MagenticAgentDeltaEvent → AgentEvent for streaming updates - MagenticFinalResultEvent → AgentEvent with type "complete"

Requirements: - agent-framework-core package - OpenAI API key or HuggingFace authentication

Hierarchical Orchestrator

File: src/orchestrator_hierarchical.py

Purpose: Hierarchical orchestrator using middleware and sub-teams

Features: - Uses SubIterationMiddleware with ResearchTeam and LLMSubIterationJudge - Adapts Magentic ChatAgent to SubIterationTeam protocol - Event-driven via asyncio.Queue for coordination - Supports sub-iteration patterns for complex research tasks

Legacy Simple Mode

File: src/legacy_orchestrator.py

Purpose: Linear search-judge-synthesize loop

Features: - Uses SearchHandlerProtocol and JudgeHandlerProtocol - Generator-based design yielding AgentEvent objects - Backward compatibility for simple use cases

State Initialization

All orchestrators must initialize workflow state:
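
A minimal sketch of the start of a run (init_workflow_state appears in the graph orchestrator sequence above; its module location and the budget values are assumptions):

from src.middleware.budget_tracker import BudgetTracker
from src.middleware.state_machine import init_workflow_state  # location assumed

async def run_research(query: str):
    state = init_workflow_state()  # fresh, request-scoped WorkflowState via ContextVar
    tracker = BudgetTracker()
    tracker.create_budget(loop_id="main", tokens_limit=100000, time_limit_seconds=600.0)
    tracker.start_timer("main")
    ...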

Event Streaming

All orchestrators yield AgentEvent objects:

Event Types: - started: Research started - searching: Search in progress - search_complete: Search completed - judging: Evidence evaluation in progress - judge_complete: Evidence evaluation completed - looping: Iteration in progress - hypothesizing: Generating hypotheses - analyzing: Statistical analysis in progress - analysis_complete: Statistical analysis completed - synthesizing: Synthesizing results - complete: Research completed - error: Error occurred - streaming: Streaming update (delta events)

Event Structure:
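
The event model itself is not reproduced on this page; a representative sketch (field names other than type are assumptions):

from datetime import datetime
from typing import Any

from pydantic import BaseModel, Field

class AgentEvent(BaseModel):
    model_config = {"frozen": True}

    type: str = Field(description="One of the event types listed above, e.g. 'searching'")
    message: str = Field(default="", description="Human-readable progress message")
    data: dict[str, Any] = Field(default_factory=dict, description="Optional structured payload")
    timestamp: datetime = Field(default_factory=datetime.now)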

See Also

\ No newline at end of file diff --git a/site/architecture/services/index.html b/site/architecture/services/index.html deleted file mode 100644 index 100a2966b8d7b3403b5cf31d9d1a7b1402f52cf1..0000000000000000000000000000000000000000 --- a/site/architecture/services/index.html +++ /dev/null @@ -1,30 +0,0 @@ - Services - The DETERMINATOR

Services Architecture

DeepCritical provides several services for embeddings, RAG, and statistical analysis.

Embedding Service

File: src/services/embeddings.py

Purpose: Local sentence-transformers for semantic search and deduplication

Features: - No API Key Required: Uses local sentence-transformers models - Async-Safe: All operations use run_in_executor() to avoid blocking the event loop - ChromaDB Storage: In-memory vector storage for embeddings - Deduplication: 0.9 similarity threshold by default (90% similarity = duplicate, configurable)

Model: Configurable via settings.local_embedding_model (default: all-MiniLM-L6-v2)

Methods: - async def embed(text: str) -> list[float]: Generate embeddings (async-safe via run_in_executor()) - async def embed_batch(texts: list[str]) -> list[list[float]]: Batch embedding (more efficient) - async def add_evidence(evidence_id: str, content: str, metadata: dict[str, Any]) -> None: Add evidence to vector store - async def search_similar(query: str, n_results: int = 5) -> list[dict[str, Any]]: Find semantically similar evidence - async def deduplicate(new_evidence: list[Evidence], threshold: float = 0.9) -> list[Evidence]: Remove semantically duplicate evidence

Usage:

from src.services.embeddings import get_embedding_service
-
-service = get_embedding_service()
-embedding = await service.embed("text to embed")
-

LlamaIndex RAG Service

File: src/services/llamaindex_rag.py

Purpose: Retrieval-Augmented Generation using LlamaIndex

Features: - Multiple Embedding Providers: OpenAI embeddings (requires OPENAI_API_KEY) or local sentence-transformers (no API key) - Multiple LLM Providers: HuggingFace LLM (preferred) or OpenAI LLM (fallback) for query synthesis - ChromaDB Storage: Vector database for document storage (supports in-memory mode) - Metadata Preservation: Preserves source, title, URL, date, authors - Lazy Initialization: Graceful fallback if dependencies not available

Initialization Parameters: - use_openai_embeddings: bool | None: Force OpenAI embeddings (None = auto-detect) - use_in_memory: bool: Use in-memory ChromaDB client (useful for tests) - oauth_token: str | None: Optional OAuth token from HuggingFace login (takes priority over env vars)

Methods: - async def ingest_evidence(evidence: list[Evidence]) -> None: Ingest evidence into RAG - async def retrieve(query: str, top_k: int = 5) -> list[Document]: Retrieve relevant documents - async def query(query: str, top_k: int = 5) -> str: Query with RAG

Usage:

from src.services.llamaindex_rag import get_rag_service
-
-service = get_rag_service(
-    use_openai_embeddings=False,  # Use local embeddings
-    use_in_memory=True,  # Use in-memory ChromaDB
-    oauth_token=token  # Optional HuggingFace token
-)
-if service:
-    documents = await service.retrieve("query", top_k=5)
-

Statistical Analyzer

File: src/services/statistical_analyzer.py

Purpose: Secure execution of AI-generated statistical code

Features: - Modal Sandbox: Secure, isolated execution environment - Code Generation: Generates Python code via LLM - Library Pinning: Version-pinned libraries in SANDBOX_LIBRARIES - Network Isolation: block_network=True by default

Libraries Available: - pandas, numpy, scipy - matplotlib, scikit-learn - statsmodels

Output: AnalysisResult with: - verdict: SUPPORTED, REFUTED, or INCONCLUSIVE - code: Generated analysis code - output: Execution output - error: Error message if execution failed

Usage:

from src.services.statistical_analyzer import StatisticalAnalyzer
-
-analyzer = StatisticalAnalyzer()
-result = await analyzer.analyze(
-    hypothesis="Metformin reduces cancer risk",
-    evidence=evidence_list
-)
-

Singleton Pattern

Services use singleton patterns for lazy initialization:

EmbeddingService: Uses a global variable pattern:

LlamaIndexRAGService: Direct instantiation (no caching):
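
A minimal sketch of the two patterns (the module-level variable name is an assumption; get_embedding_service and get_rag_service are the documented accessors, shown here out of the context of their modules):

_embedding_service: EmbeddingService | None = None

def get_embedding_service() -> EmbeddingService:
    global _embedding_service
    if _embedding_service is None:
        _embedding_service = EmbeddingService()  # lazy, one instance per process
    return _embedding_service

def get_rag_service(**kwargs) -> LlamaIndexRAGService | None:
    try:
        return LlamaIndexRAGService(**kwargs)  # fresh instance each call, no caching
    except ImportError:
        return None  # optional dependencies missing; callers must handle None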

This ensures: - Single instance per process - Lazy initialization - No dependencies required at import time

Service Availability

Services check availability before use:

from src.utils.config import settings
-
-if settings.modal_available:
-    # Use Modal sandbox
-    pass
-
-if settings.has_openai_key:
-    # Use OpenAI embeddings for RAG
-    pass
-

See Also

\ No newline at end of file diff --git a/site/architecture/tools/index.html b/site/architecture/tools/index.html deleted file mode 100644 index 0efb987e90752c5f16b1ebc32a23cee8cc3add39..0000000000000000000000000000000000000000 --- a/site/architecture/tools/index.html +++ /dev/null @@ -1,19 +0,0 @@ - Tools - The DETERMINATOR

Tools Architecture

DeepCritical implements a protocol-based search tool system for retrieving evidence from multiple sources.

SearchTool Protocol

All tools implement the SearchTool protocol from src/tools/base.py:
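
A sketch of the protocol, consistent with the per-tool reference above:

from typing import Protocol

from src.utils.models import Evidence

class SearchTool(Protocol):
    @property
    def name(self) -> str:
        """Short tool identifier, e.g. 'pubmed' or 'clinicaltrials'."""
        ...

    async def search(self, query: str, max_results: int = 10) -> list[Evidence]:
        """Return Evidence objects for the query."""
        ...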

Rate Limiting

All tools use the @retry decorator from tenacity:
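
For example (the wait parameters are representative values, not necessarily the exact ones used by each tool):

from tenacity import retry, stop_after_attempt, wait_exponential

from src.utils.models import Evidence

class ExampleTool:
    @retry(stop=stop_after_attempt(3), wait=wait_exponential(multiplier=1, min=1, max=10))
    async def search(self, query: str, max_results: int = 10) -> list[Evidence]:
        await self._rate_limit()  # tools with API limits throttle here before calling the API
        ...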

Tools with API rate limits implement _rate_limit() method and use shared rate limiters from src/tools/rate_limiter.py.

Error Handling

Tools raise custom exceptions:

  • SearchError: General search failures
  • RateLimitError: Rate limit exceeded

Tools handle HTTP errors (429, 500, timeout) and return empty lists on non-critical errors (with warning logs).

Query Preprocessing

Tools use preprocess_query() from src/tools/query_utils.py to:

  • Remove noise from queries
  • Expand synonyms
  • Normalize query format

Evidence Conversion

All tools convert API responses to Evidence objects with:

  • Citation: Title, URL, date, authors
  • content: Evidence text
  • relevance_score: 0.0-1.0 relevance score
  • metadata: Additional metadata

Missing fields are handled gracefully with defaults.

Tool Implementations

PubMed Tool

File: src/tools/pubmed.py

API: NCBI E-utilities (ESearch → EFetch)

Rate Limiting: - 0.34s between requests (3 req/sec without API key) - 0.1s between requests (10 req/sec with NCBI API key)

Features: - XML parsing with xmltodict - Handles single vs. multiple articles - Query preprocessing - Evidence conversion with metadata extraction

ClinicalTrials Tool

File: src/tools/clinicaltrials.py

API: ClinicalTrials.gov API v2

Important: Uses requests library (NOT httpx) because WAF blocks httpx TLS fingerprint.

Execution: Runs in thread pool: await asyncio.to_thread(requests.get, ...)

Filtering: - Only interventional studies - Status: COMPLETED, ACTIVE_NOT_RECRUITING, RECRUITING, ENROLLING_BY_INVITATION

Features: - Parses nested JSON structure - Extracts trial metadata - Evidence conversion

Europe PMC Tool

File: src/tools/europepmc.py

API: Europe PMC REST API

Features: - Handles preprint markers: [PREPRINT - Not peer-reviewed] - Builds URLs from DOI or PMID - Checks pubTypeList for preprint detection - Includes both preprints and peer-reviewed articles

RAG Tool

File: src/tools/rag_tool.py

Purpose: Semantic search within collected evidence

Implementation: Wraps LlamaIndexRAGService

Features: - Returns Evidence from RAG results - Handles evidence ingestion - Semantic similarity search - Metadata preservation

Search Handler

File: src/tools/search_handler.py

Purpose: Orchestrates parallel searches across multiple tools

Initialization Parameters: - tools: list[SearchTool]: List of search tools to use - timeout: float = 30.0: Timeout for each search in seconds - include_rag: bool = False: Whether to include RAG tool in searches - auto_ingest_to_rag: bool = True: Whether to automatically ingest results into RAG - oauth_token: str | None = None: Optional OAuth token from HuggingFace login (for RAG LLM)

Methods: - async def execute(query: str, max_results_per_tool: int = 10) -> SearchResult: Execute search across all tools in parallel

Features: - Uses asyncio.gather() with return_exceptions=True for parallel execution - Aggregates results into SearchResult with evidence and metadata - Handles tool failures gracefully (continues with other tools) - Deduplicates results by URL - Automatically ingests results into RAG if auto_ingest_to_rag=True - Can add RAG tool dynamically via add_rag_tool() method

Tool Registration

Tools are registered in the search handler:

from src.tools.pubmed import PubMedTool
-from src.tools.clinicaltrials import ClinicalTrialsTool
-from src.tools.europepmc import EuropePMCTool
-from src.tools.search_handler import SearchHandler
-
-search_handler = SearchHandler(
-    tools=[
-        PubMedTool(),
-        ClinicalTrialsTool(),
-        EuropePMCTool(),
-    ],
-    include_rag=True,  # Include RAG tool for semantic search
-    auto_ingest_to_rag=True,  # Automatically ingest results into RAG
-    oauth_token=token  # Optional HuggingFace token for RAG LLM
-)
-
-# Execute search
-result = await search_handler.execute("query", max_results_per_tool=10)
-

See Also

\ No newline at end of file diff --git a/site/architecture/workflow-diagrams/index.html b/site/architecture/workflow-diagrams/index.html deleted file mode 100644 index 418df6422b20c0fbf9fcfb6434093667a2793c4d..0000000000000000000000000000000000000000 --- a/site/architecture/workflow-diagrams/index.html +++ /dev/null @@ -1,488 +0,0 @@ - Workflow Diagrams - The DETERMINATOR

DeepCritical Workflow - Simplified Magentic Architecture

Architecture Pattern: Microsoft Magentic Orchestration
Design Philosophy: Simple, dynamic, manager-driven coordination
Key Innovation: Intelligent manager replaces rigid sequential phases


1. High-Level Magentic Workflow

flowchart TD
-    Start([User Query]) --> Manager[Magentic Manager<br/>Plan • Select • Assess • Adapt]
-
-    Manager -->|Plans| Task1[Task Decomposition]
-    Task1 --> Manager
-
-    Manager -->|Selects & Executes| HypAgent[Hypothesis Agent]
-    Manager -->|Selects & Executes| SearchAgent[Search Agent]
-    Manager -->|Selects & Executes| AnalysisAgent[Analysis Agent]
-    Manager -->|Selects & Executes| ReportAgent[Report Agent]
-
-    HypAgent -->|Results| Manager
-    SearchAgent -->|Results| Manager
-    AnalysisAgent -->|Results| Manager
-    ReportAgent -->|Results| Manager
-
-    Manager -->|Assesses Quality| Decision{Good Enough?}
-    Decision -->|No - Refine| Manager
-    Decision -->|No - Different Agent| Manager
-    Decision -->|No - Stalled| Replan[Reset Plan]
-    Replan --> Manager
-
-    Decision -->|Yes| Synthesis[Synthesize Final Result]
-    Synthesis --> Output([Research Report])
-
-    style Start fill:#e1f5e1
-    style Manager fill:#ffe6e6
-    style HypAgent fill:#fff4e6
-    style SearchAgent fill:#fff4e6
-    style AnalysisAgent fill:#fff4e6
-    style ReportAgent fill:#fff4e6
-    style Decision fill:#ffd6d6
-    style Synthesis fill:#d4edda
-    style Output fill:#e1f5e1

2. Magentic Manager: The 6-Phase Cycle

flowchart LR
-    P1[1. Planning<br/>Analyze task<br/>Create strategy] --> P2[2. Agent Selection<br/>Pick best agent<br/>for subtask]
-    P2 --> P3[3. Execution<br/>Run selected<br/>agent with tools]
-    P3 --> P4[4. Assessment<br/>Evaluate quality<br/>Check progress]
-    P4 --> Decision{Quality OK?<br/>Progress made?}
-    Decision -->|Yes| P6[6. Synthesis<br/>Combine results<br/>Generate report]
-    Decision -->|No| P5[5. Iteration<br/>Adjust plan<br/>Try again]
-    P5 --> P2
-    P6 --> Done([Complete])
-
-    style P1 fill:#fff4e6
-    style P2 fill:#ffe6e6
-    style P3 fill:#e6f3ff
-    style P4 fill:#ffd6d6
-    style P5 fill:#fff3cd
-    style P6 fill:#d4edda
-    style Done fill:#e1f5e1

3. Simplified Agent Architecture

graph TB
-    subgraph "Orchestration Layer"
-        Manager[Magentic Manager<br/>• Plans workflow<br/>• Selects agents<br/>• Assesses quality<br/>• Adapts strategy]
-        SharedContext[(Shared Context<br/>• Hypotheses<br/>• Search Results<br/>• Analysis<br/>• Progress)]
-        Manager <--> SharedContext
-    end
-
-    subgraph "Specialist Agents"
-        HypAgent[Hypothesis Agent<br/>• Domain understanding<br/>• Hypothesis generation<br/>• Testability refinement]
-        SearchAgent[Search Agent<br/>• Multi-source search<br/>• RAG retrieval<br/>• Result ranking]
-        AnalysisAgent[Analysis Agent<br/>• Evidence extraction<br/>• Statistical analysis<br/>• Code execution]
-        ReportAgent[Report Agent<br/>• Report assembly<br/>• Visualization<br/>• Citation formatting]
-    end
-
-    subgraph "MCP Tools"
-        WebSearch[Web Search<br/>PubMed • arXiv • bioRxiv]
-        CodeExec[Code Execution<br/>Sandboxed Python]
-        RAG[RAG Retrieval<br/>Vector DB • Embeddings]
-        Viz[Visualization<br/>Charts • Graphs]
-    end
-
-    Manager -->|Selects & Directs| HypAgent
-    Manager -->|Selects & Directs| SearchAgent
-    Manager -->|Selects & Directs| AnalysisAgent
-    Manager -->|Selects & Directs| ReportAgent
-
-    HypAgent --> SharedContext
-    SearchAgent --> SharedContext
-    AnalysisAgent --> SharedContext
-    ReportAgent --> SharedContext
-
-    SearchAgent --> WebSearch
-    SearchAgent --> RAG
-    AnalysisAgent --> CodeExec
-    ReportAgent --> CodeExec
-    ReportAgent --> Viz
-
-    style Manager fill:#ffe6e6
-    style SharedContext fill:#ffe6f0
-    style HypAgent fill:#fff4e6
-    style SearchAgent fill:#fff4e6
-    style AnalysisAgent fill:#fff4e6
-    style ReportAgent fill:#fff4e6
-    style WebSearch fill:#e6f3ff
-    style CodeExec fill:#e6f3ff
-    style RAG fill:#e6f3ff
-    style Viz fill:#e6f3ff

4. Dynamic Workflow Example

sequenceDiagram
-    participant User
-    participant Manager
-    participant HypAgent
-    participant SearchAgent
-    participant AnalysisAgent
-    participant ReportAgent
-
-    User->>Manager: "Research protein folding in Alzheimer's"
-
-    Note over Manager: PLAN: Generate hypotheses → Search → Analyze → Report
-
-    Manager->>HypAgent: Generate 3 hypotheses
-    HypAgent-->>Manager: Returns 3 hypotheses
-    Note over Manager: ASSESS: Good quality, proceed
-
-    Manager->>SearchAgent: Search literature for hypothesis 1
-    SearchAgent-->>Manager: Returns 15 papers
-    Note over Manager: ASSESS: Good results, continue
-
-    Manager->>SearchAgent: Search for hypothesis 2
-    SearchAgent-->>Manager: Only 2 papers found
-    Note over Manager: ASSESS: Insufficient, refine search
-
-    Manager->>SearchAgent: Refined query for hypothesis 2
-    SearchAgent-->>Manager: Returns 12 papers
-    Note over Manager: ASSESS: Better, proceed
-
-    Manager->>AnalysisAgent: Analyze evidence for all hypotheses
-    AnalysisAgent-->>Manager: Returns analysis with code
-    Note over Manager: ASSESS: Complete, generate report
-
-    Manager->>ReportAgent: Create comprehensive report
-    ReportAgent-->>Manager: Returns formatted report
-    Note over Manager: SYNTHESIZE: Combine all results
-
-    Manager->>User: Final Research Report

5. Manager Decision Logic

flowchart TD
-    Start([Manager Receives Task]) --> Plan[Create Initial Plan]
-
-    Plan --> Select[Select Agent for Next Subtask]
-    Select --> Execute[Execute Agent]
-    Execute --> Collect[Collect Results]
-
-    Collect --> Assess[Assess Quality & Progress]
-
-    Assess --> Q1{Quality Sufficient?}
-    Q1 -->|No| Q2{Same Agent Can Fix?}
-    Q2 -->|Yes| Feedback[Provide Specific Feedback]
-    Feedback --> Execute
-    Q2 -->|No| Different[Try Different Agent]
-    Different --> Select
-
-    Q1 -->|Yes| Q3{Task Complete?}
-    Q3 -->|No| Q4{Making Progress?}
-    Q4 -->|Yes| Select
-    Q4 -->|No - Stalled| Replan[Reset Plan & Approach]
-    Replan --> Plan
-
-    Q3 -->|Yes| Synth[Synthesize Final Result]
-    Synth --> Done([Return Report])
-
-    style Start fill:#e1f5e1
-    style Plan fill:#fff4e6
-    style Select fill:#ffe6e6
-    style Execute fill:#e6f3ff
-    style Assess fill:#ffd6d6
-    style Q1 fill:#ffe6e6
-    style Q2 fill:#ffe6e6
-    style Q3 fill:#ffe6e6
-    style Q4 fill:#ffe6e6
-    style Synth fill:#d4edda
-    style Done fill:#e1f5e1

6. Hypothesis Agent Workflow

flowchart LR
-    Input[Research Query] --> Domain[Identify Domain<br/>& Key Concepts]
-    Domain --> Context[Retrieve Background<br/>Knowledge]
-    Context --> Generate[Generate 3-5<br/>Initial Hypotheses]
-    Generate --> Refine[Refine for<br/>Testability]
-    Refine --> Rank[Rank by<br/>Quality Score]
-    Rank --> Output[Return Top<br/>Hypotheses]
-
-    Output --> Struct[Hypothesis Structure:<br/>• Statement<br/>• Rationale<br/>• Testability Score<br/>• Data Requirements<br/>• Expected Outcomes]
-
-    style Input fill:#e1f5e1
-    style Output fill:#fff4e6
-    style Struct fill:#e6f3ff

7. Search Agent Workflow

flowchart TD
-    Input[Hypotheses] --> Strategy[Formulate Search<br/>Strategy per Hypothesis]
-
-    Strategy --> Multi[Multi-Source Search]
-
-    Multi --> PubMed[PubMed Search<br/>via MCP]
-    Multi --> ArXiv[arXiv Search<br/>via MCP]
-    Multi --> BioRxiv[bioRxiv Search<br/>via MCP]
-
-    PubMed --> Aggregate[Aggregate Results]
-    ArXiv --> Aggregate
-    BioRxiv --> Aggregate
-
-    Aggregate --> Filter[Filter & Rank<br/>by Relevance]
-    Filter --> Dedup[Deduplicate<br/>Cross-Reference]
-    Dedup --> Embed[Embed Documents<br/>via MCP]
-    Embed --> Vector[(Vector DB)]
-    Vector --> RAGRetrieval[RAG Retrieval<br/>Top-K per Hypothesis]
-    RAGRetrieval --> Output[Return Contextualized<br/>Search Results]
-
-    style Input fill:#fff4e6
-    style Multi fill:#ffe6e6
-    style Vector fill:#ffe6f0
-    style Output fill:#e6f3ff

8. Analysis Agent Workflow

flowchart TD
-    Input1[Hypotheses] --> Extract
-    Input2[Search Results] --> Extract[Extract Evidence<br/>per Hypothesis]
-
-    Extract --> Methods[Determine Analysis<br/>Methods Needed]
-
-    Methods --> Branch{Requires<br/>Computation?}
-    Branch -->|Yes| GenCode[Generate Python<br/>Analysis Code]
-    Branch -->|No| Qual[Qualitative<br/>Synthesis]
-
-    GenCode --> Execute[Execute Code<br/>via MCP Sandbox]
-    Execute --> Interpret1[Interpret<br/>Results]
-    Qual --> Interpret2[Interpret<br/>Findings]
-
-    Interpret1 --> Synthesize[Synthesize Evidence<br/>Across Sources]
-    Interpret2 --> Synthesize
-
-    Synthesize --> Verdict[Determine Verdict<br/>per Hypothesis]
-    Verdict --> Support[• Supported<br/>• Refuted<br/>• Inconclusive]
-    Support --> Gaps[Identify Knowledge<br/>Gaps & Limitations]
-    Gaps --> Output[Return Analysis<br/>Report]
-
-    style Input1 fill:#fff4e6
-    style Input2 fill:#e6f3ff
-    style Execute fill:#ffe6e6
-    style Output fill:#e6ffe6

9. Report Agent Workflow

flowchart TD
-    Input1[Query] --> Assemble
-    Input2[Hypotheses] --> Assemble
-    Input3[Search Results] --> Assemble
-    Input4[Analysis] --> Assemble[Assemble Report<br/>Sections]
-
-    Assemble --> Exec[Executive Summary]
-    Assemble --> Intro[Introduction]
-    Assemble --> Methods[Methods]
-    Assemble --> Results[Results per<br/>Hypothesis]
-    Assemble --> Discussion[Discussion]
-    Assemble --> Future[Future Directions]
-    Assemble --> Refs[References]
-
-    Results --> VizCheck{Needs<br/>Visualization?}
-    VizCheck -->|Yes| GenViz[Generate Viz Code]
-    GenViz --> ExecViz[Execute via MCP<br/>Create Charts]
-    ExecViz --> Combine
-    VizCheck -->|No| Combine[Combine All<br/>Sections]
-
-    Exec --> Combine
-    Intro --> Combine
-    Methods --> Combine
-    Discussion --> Combine
-    Future --> Combine
-    Refs --> Combine
-
-    Combine --> Format[Format Output]
-    Format --> MD[Markdown]
-    Format --> PDF[PDF]
-    Format --> JSON[JSON]
-
-    MD --> Output[Return Final<br/>Report]
-    PDF --> Output
-    JSON --> Output
-
-    style Input1 fill:#e1f5e1
-    style Input2 fill:#fff4e6
-    style Input3 fill:#e6f3ff
-    style Input4 fill:#e6ffe6
-    style Output fill:#d4edda

10. Data Flow & Event Streaming

flowchart TD
-    User[👤 User] -->|Research Query| UI[Gradio UI]
-    UI -->|Submit| Manager[Magentic Manager]
-
-    Manager -->|Event: Planning| UI
-    Manager -->|Select Agent| HypAgent[Hypothesis Agent]
-    HypAgent -->|Event: Delta/Message| UI
-    HypAgent -->|Hypotheses| Context[(Shared Context)]
-
-    Context -->|Retrieved by| Manager
-    Manager -->|Select Agent| SearchAgent[Search Agent]
-    SearchAgent -->|MCP Request| WebSearch[Web Search Tool]
-    WebSearch -->|Results| SearchAgent
-    SearchAgent -->|Event: Delta/Message| UI
-    SearchAgent -->|Documents| Context
-    SearchAgent -->|Embeddings| VectorDB[(Vector DB)]
-
-    Context -->|Retrieved by| Manager
-    Manager -->|Select Agent| AnalysisAgent[Analysis Agent]
-    AnalysisAgent -->|MCP Request| CodeExec[Code Execution Tool]
-    CodeExec -->|Results| AnalysisAgent
-    AnalysisAgent -->|Event: Delta/Message| UI
-    AnalysisAgent -->|Analysis| Context
-
-    Context -->|Retrieved by| Manager
-    Manager -->|Select Agent| ReportAgent[Report Agent]
-    ReportAgent -->|MCP Request| CodeExec
-    ReportAgent -->|Event: Delta/Message| UI
-    ReportAgent -->|Report| Context
-
-    Manager -->|Event: Final Result| UI
-    UI -->|Display| User
-
-    style User fill:#e1f5e1
-    style UI fill:#e6f3ff
-    style Manager fill:#ffe6e6
-    style Context fill:#ffe6f0
-    style VectorDB fill:#ffe6f0
-    style WebSearch fill:#f0f0f0
-    style CodeExec fill:#f0f0f0

11. MCP Tool Architecture

graph TB
-    subgraph "Agent Layer"
-        Manager[Magentic Manager]
-        HypAgent[Hypothesis Agent]
-        SearchAgent[Search Agent]
-        AnalysisAgent[Analysis Agent]
-        ReportAgent[Report Agent]
-    end
-
-    subgraph "MCP Protocol Layer"
-        Registry[MCP Tool Registry<br/>• Discovers tools<br/>• Routes requests<br/>• Manages connections]
-    end
-
-    subgraph "MCP Servers"
-        Server1[Web Search Server<br/>localhost:8001<br/>• PubMed<br/>• arXiv<br/>• bioRxiv]
-        Server2[Code Execution Server<br/>localhost:8002<br/>• Sandboxed Python<br/>• Package management]
-        Server3[RAG Server<br/>localhost:8003<br/>• Vector embeddings<br/>• Similarity search]
-        Server4[Visualization Server<br/>localhost:8004<br/>• Chart generation<br/>• Plot rendering]
-    end
-
-    subgraph "External Services"
-        PubMed[PubMed API]
-        ArXiv[arXiv API]
-        BioRxiv[bioRxiv API]
-        Modal[Modal Sandbox]
-        ChromaDB[(ChromaDB)]
-    end
-
-    SearchAgent -->|Request| Registry
-    AnalysisAgent -->|Request| Registry
-    ReportAgent -->|Request| Registry
-
-    Registry --> Server1
-    Registry --> Server2
-    Registry --> Server3
-    Registry --> Server4
-
-    Server1 --> PubMed
-    Server1 --> ArXiv
-    Server1 --> BioRxiv
-    Server2 --> Modal
-    Server3 --> ChromaDB
-
-    style Manager fill:#ffe6e6
-    style Registry fill:#fff4e6
-    style Server1 fill:#e6f3ff
-    style Server2 fill:#e6f3ff
-    style Server3 fill:#e6f3ff
-    style Server4 fill:#e6f3ff

12. Progress Tracking & Stall Detection

stateDiagram-v2
-    [*] --> Initialization: User Query
-
-    Initialization --> Planning: Manager starts
-
-    Planning --> AgentExecution: Select agent
-
-    AgentExecution --> Assessment: Collect results
-
-    Assessment --> QualityCheck: Evaluate output
-
-    QualityCheck --> AgentExecution: Poor quality<br/>(retry < max_rounds)
-    QualityCheck --> Planning: Poor quality<br/>(try different agent)
-    QualityCheck --> NextAgent: Good quality<br/>(task incomplete)
-    QualityCheck --> Synthesis: Good quality<br/>(task complete)
-
-    NextAgent --> AgentExecution: Select next agent
-
-    state StallDetection <<choice>>
-    Assessment --> StallDetection: Check progress
-    StallDetection --> Planning: No progress<br/>(stall count < max)
-    StallDetection --> ErrorRecovery: No progress<br/>(max stalls reached)
-
-    ErrorRecovery --> PartialReport: Generate partial results
-    PartialReport --> [*]
-
-    Synthesis --> FinalReport: Combine all outputs
-    FinalReport --> [*]
-
-    note right of QualityCheck
-        Manager assesses:
-        • Output completeness
-        • Quality metrics
-        • Progress made
-    end note
-
-    note right of StallDetection
-        Stall = no new progress
-        after agent execution
-        Triggers plan reset
-    end note

13. Gradio UI Integration

graph TD
-    App[Gradio App<br/>DeepCritical Research Agent]
-
-    App --> Input[Input Section]
-    App --> Status[Status Section]
-    App --> Output[Output Section]
-
-    Input --> Query[Research Question<br/>Text Area]
-    Input --> Controls[Controls]
-    Controls --> MaxHyp[Max Hypotheses: 1-10]
-    Controls --> MaxRounds[Max Rounds: 5-20]
-    Controls --> Submit[Start Research Button]
-
-    Status --> Log[Real-time Event Log<br/>• Manager planning<br/>• Agent selection<br/>• Execution updates<br/>• Quality assessment]
-    Status --> Progress[Progress Tracker<br/>• Current agent<br/>• Round count<br/>• Stall count]
-
-    Output --> Tabs[Tabbed Results]
-    Tabs --> Tab1[Hypotheses Tab<br/>Generated hypotheses with scores]
-    Tabs --> Tab2[Search Results Tab<br/>Papers & sources found]
-    Tabs --> Tab3[Analysis Tab<br/>Evidence & verdicts]
-    Tabs --> Tab4[Report Tab<br/>Final research report]
-    Tab4 --> Download[Download Report<br/>MD / PDF / JSON]
-
-    Submit -.->|Triggers| Workflow[Magentic Workflow]
-    Workflow -.->|MagenticOrchestratorMessageEvent| Log
-    Workflow -.->|MagenticAgentDeltaEvent| Log
-    Workflow -.->|MagenticAgentMessageEvent| Log
-    Workflow -.->|MagenticFinalResultEvent| Tab4
-
-    style App fill:#e1f5e1
-    style Input fill:#fff4e6
-    style Status fill:#e6f3ff
-    style Output fill:#e6ffe6
-    style Workflow fill:#ffe6e6

14. Complete System Context

graph LR
-    User[👤 Researcher<br/>Asks research questions] -->|Submits query| DC[DeepCritical<br/>Magentic Workflow]
-
-    DC -->|Literature search| PubMed[PubMed API<br/>Medical papers]
-    DC -->|Preprint search| ArXiv[arXiv API<br/>Scientific preprints]
-    DC -->|Biology search| BioRxiv[bioRxiv API<br/>Biology preprints]
-    DC -->|Agent reasoning| Claude[Claude API<br/>Sonnet 4 / Opus]
-    DC -->|Code execution| Modal[Modal Sandbox<br/>Safe Python env]
-    DC -->|Vector storage| Chroma[ChromaDB<br/>Embeddings & RAG]
-
-    DC -->|Deployed on| HF[HuggingFace Spaces<br/>Gradio 6.0]
-
-    PubMed -->|Results| DC
-    ArXiv -->|Results| DC
-    BioRxiv -->|Results| DC
-    Claude -->|Responses| DC
-    Modal -->|Output| DC
-    Chroma -->|Context| DC
-
-    DC -->|Research report| User
-
-    style User fill:#e1f5e1
-    style DC fill:#ffe6e6
-    style PubMed fill:#e6f3ff
-    style ArXiv fill:#e6f3ff
-    style BioRxiv fill:#e6f3ff
-    style Claude fill:#ffd6d6
-    style Modal fill:#f0f0f0
-    style Chroma fill:#ffe6f0
-    style HF fill:#d4edda

15. Workflow Timeline (Simplified)

gantt
-    title DeepCritical Magentic Workflow - Typical Execution
-    dateFormat mm:ss
-    axisFormat %M:%S
-
-    section Manager Planning
-    Initial planning         :p1, 00:00, 10s
-
-    section Hypothesis Agent
-    Generate hypotheses      :h1, after p1, 30s
-    Manager assessment       :h2, after h1, 5s
-
-    section Search Agent
-    Search hypothesis 1      :s1, after h2, 20s
-    Search hypothesis 2      :s2, after s1, 20s
-    Search hypothesis 3      :s3, after s2, 20s
-    RAG processing          :s4, after s3, 15s
-    Manager assessment      :s5, after s4, 5s
-
-    section Analysis Agent
-    Evidence extraction     :a1, after s5, 15s
-    Code generation        :a2, after a1, 20s
-    Code execution         :a3, after a2, 25s
-    Synthesis              :a4, after a3, 20s
-    Manager assessment     :a5, after a4, 5s
-
-    section Report Agent
-    Report assembly        :r1, after a5, 30s
-    Visualization          :r2, after r1, 15s
-    Formatting             :r3, after r2, 10s
-
-    section Manager Synthesis
-    Final synthesis        :f1, after r3, 10s

Key Differences from Original Design

| Aspect | Original (Judge-in-Loop) | New (Magentic) |
| --- | --- | --- |
| Control Flow | Fixed sequential phases | Dynamic agent selection |
| Quality Control | Separate Judge Agent | Manager assessment built-in |
| Retry Logic | Phase-level with feedback | Agent-level with adaptation |
| Flexibility | Rigid 4-phase pipeline | Adaptive workflow |
| Complexity | 5 agents (including Judge) | 4 agents (no Judge) |
| Progress Tracking | Manual state management | Built-in round/stall detection |
| Agent Coordination | Sequential handoff | Manager-driven dynamic selection |
| Error Recovery | Retry same phase | Try different agent or replan |

Simplified Design Principles

  1. Manager is Intelligent: LLM-powered manager handles planning, selection, and quality assessment
  2. No Separate Judge: Manager's assessment phase replaces dedicated Judge Agent
  3. Dynamic Workflow: Agents can be called multiple times in any order based on need
  4. Built-in Safety: max_round_count (15) and max_stall_count (3) prevent infinite loops (see the sketch after this list)
  5. Event-Driven UI: Real-time streaming updates to Gradio interface
  6. MCP-Powered Tools: All external capabilities via Model Context Protocol
  7. Shared Context: Centralized state accessible to all agents
  8. Progress Awareness: Manager tracks what's been done and what's needed
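
Principle 4 is easiest to see as a loop guard. The sketch below is framework-agnostic and minimal: the selection, agent, and context types are toy stand-ins, not the project's API, but they show how max_round_count and max_stall_count bound the manager loop.

```python
# Hedged sketch: manager loop bounded by round and stall limits.
import asyncio
from dataclasses import dataclass, field

MAX_ROUND_COUNT = 15  # hard cap on manager rounds
MAX_STALL_COUNT = 3   # consecutive no-progress rounds before stopping or replanning


@dataclass
class SharedContext:
    task: str
    findings: list[str] = field(default_factory=list)


async def manager_select_agent(ctx: SharedContext) -> str:
    # Stand-in for LLM-driven selection: search until enough findings, then report.
    return "search" if len(ctx.findings) < 3 else "report"


async def run_agent(name: str, ctx: SharedContext) -> str | None:
    # Stand-in for a specialist agent; a real agent might return None on a stalled round.
    await asyncio.sleep(0)
    return f"{name} output #{len(ctx.findings) + 1}"


async def run_workflow(task: str) -> list[str]:
    ctx = SharedContext(task=task)
    stall_count = 0
    for _ in range(MAX_ROUND_COUNT):
        agent = await manager_select_agent(ctx)
        result = await run_agent(agent, ctx)
        if result:                       # manager judges the round made progress
            ctx.findings.append(result)
            stall_count = 0
        else:                            # no progress: count a stall
            stall_count += 1
            if stall_count >= MAX_STALL_COUNT:
                break                    # stop or replan instead of looping forever
        if agent == "report":
            break                        # report produced: workflow complete
    return ctx.findings


if __name__ == "__main__":
    print(asyncio.run(run_workflow("example research question")))
```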

Legend

  • 🔴 Red/Pink: Manager, orchestration, decision-making
  • 🟡 Yellow/Orange: Specialist agents, processing
  • 🔵 Blue: Data, tools, MCP services
  • 🟣 Purple/Pink: Storage, databases, state
  • 🟢 Green: User interactions, final outputs
  • Gray: External services, APIs

Implementation Highlights

Simple 4-Agent Setup:
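
As a rough illustration of the setup (the `AgentSpec` type, tool names, and instruction wording are placeholders, not the project's actual classes or prompts), the four specialists can be described as simple specs that the manager coordinates:

```python
# Hedged sketch: the four specialist agents as plain data.
from dataclasses import dataclass


@dataclass(frozen=True)
class AgentSpec:
    name: str
    instructions: str
    tools: tuple[str, ...] = ()


AGENTS = (
    AgentSpec("hypothesis", "Generate testable, novel hypotheses for the research question."),
    AgentSpec("search", "Find supporting and refuting literature for each hypothesis.",
              tools=("pubmed", "arxiv", "biorxiv", "rag")),
    AgentSpec("analysis", "Extract evidence, run analysis code, and draw conclusions.",
              tools=("code_execution",)),
    AgentSpec("report", "Assemble a cited research report from the shared context."),
)
```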

Manager handles quality assessment in its instructions:

  - Checks hypothesis quality (testable, novel, clear)
  - Validates search results (relevant, authoritative, recent)
  - Assesses analysis soundness (methodology, evidence, conclusions)
  - Ensures report completeness (all sections, proper citations)

No separate Judge Agent needed - manager does it all!
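
One way to realize this, sketched below with illustrative wording only (not the project's actual prompt), is to embed the checklist directly in the manager's instructions:

```python
# Hedged sketch: quality checklist embedded in the manager's instructions.
MANAGER_INSTRUCTIONS = """\
You coordinate the hypothesis, search, analysis, and report agents.
After every agent turn, assess the output before selecting the next agent:
- Hypotheses must be testable, novel, and clearly stated.
- Search results must be relevant, authoritative, and recent.
- Analysis must be methodologically sound and grounded in the evidence.
- The report must contain all sections and proper citations.
If an output fails these checks, re-run that agent with feedback or pick a
different agent; do not advance with low-quality results.
"""
```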


Document Version: 2.0 (Magentic Simplified)
Last Updated: 2025-11-24
Architecture: Microsoft Magentic Orchestration Pattern
Agents: 4 (Hypothesis, Search, Analysis, Report) + 1 Manager
License: MIT

See Also

\ No newline at end of file diff --git a/site/assets/images/favicon.png b/site/assets/images/favicon.png deleted file mode 100644 index 1cf13b9f9d978896599290a74f77d5dbe7d1655c..0000000000000000000000000000000000000000 Binary files a/site/assets/images/favicon.png and /dev/null differ diff --git a/site/assets/javascripts/bundle.e71a0d61.min.js b/site/assets/javascripts/bundle.e71a0d61.min.js deleted file mode 100644 index c76b3b2b18a0e8a097ad2690dd51fa8adc12d0be..0000000000000000000000000000000000000000 --- a/site/assets/javascripts/bundle.e71a0d61.min.js +++ /dev/null
n=ue(o),i=ue(o,e);typeof n!="undefined"&&typeof i!="undefined"&&n.replaceWith(i)}let t=bi(document);for(let[o,n]of bi(e))t.has(o)?t.delete(o):document.head.appendChild(n);for(let o of t.values()){let n=o.getAttribute("name");n!=="theme-color"&&n!=="color-scheme"&&o.remove()}let r=Ce("container");return Ke(M("script",r)).pipe(b(o=>{let n=e.createElement("script");if(o.src){for(let i of o.getAttributeNames())n.setAttribute(i,o.getAttribute(i));return o.replaceWith(n),new F(i=>{n.onload=()=>i.complete()})}else return n.textContent=o.textContent,o.replaceWith(n),y}),oe(),ae(document))}function gi({sitemap$:e,location$:t,viewport$:r,progress$:o}){if(location.protocol==="file:")return y;$(document).subscribe(vi);let n=h(document.body,"click").pipe(Pe(e),b(([a,c])=>hi(a,c)),m(({href:a})=>new URL(a)),le()),i=h(window,"popstate").pipe(m(we),le());n.pipe(te(r)).subscribe(([a,{offset:c}])=>{history.replaceState(c,""),history.pushState(null,"",a)}),L(n,i).subscribe(t);let s=t.pipe(ne("pathname"),b(a=>xr(a,{progress$:o}).pipe(ve(()=>(st(a,!0),y)))),b(vi),b(bs),le());return L(s.pipe(te(t,(a,c)=>c)),s.pipe(b(()=>t),ne("hash")),t.pipe(Y((a,c)=>a.pathname===c.pathname&&a.hash===c.hash),b(()=>n),O(()=>history.back()))).subscribe(a=>{var c,p;history.state!==null||!a.hash?window.scrollTo(0,(p=(c=history.state)==null?void 0:c.y)!=null?p:0):(history.scrollRestoration="auto",gn(a.hash),history.scrollRestoration="manual")}),t.subscribe(()=>{history.scrollRestoration="manual"}),h(window,"beforeunload").subscribe(()=>{history.scrollRestoration="auto"}),r.pipe(ne("offset"),Ae(100)).subscribe(({offset:a})=>{history.replaceState(a,"")}),V("navigation.instant.prefetch")&&L(h(document.body,"mousemove"),h(document.body,"focusin")).pipe(Pe(e),b(([a,c])=>hi(a,c)),Ae(25),Qr(({href:a})=>a),hr(a=>{let c=document.createElement("link");return c.rel="prefetch",c.href=a.toString(),document.head.appendChild(c),h(c,"load").pipe(m(()=>c),Ee(1))})).subscribe(a=>a.remove()),s}var yi=$t(ro());function xi(e){let t=e.separator.split("|").map(n=>n.replace(/(\(\?[!=<][^)]+\))/g,"").length===0?"\uFFFD":n).join("|"),r=new RegExp(t,"img"),o=(n,i,s)=>`${i}${s}`;return n=>{n=n.replace(/[\s*+\-:~^]+/g," ").replace(/&/g,"&").trim();let i=new RegExp(`(^|${e.separator}|)(${n.replace(/[|\\{}()[\]^$+*?.-]/g,"\\$&").replace(r,"|")})`,"img");return s=>(0,yi.default)(s).replace(i,o).replace(/<\/mark>(\s+)]*>/img,"$1")}}function zt(e){return e.type===1}function Sr(e){return e.type===3}function Ei(e,t){let r=Mn(e);return L($(location.protocol!=="file:"),Je("search")).pipe(Re(o=>o),b(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:V("search.suggest")}}})),r}function wi(e){var l;let{selectedVersionSitemap:t,selectedVersionBaseURL:r,currentLocation:o,currentBaseURL:n}=e,i=(l=po(n))==null?void 0:l.pathname;if(i===void 0)return;let s=ys(o.pathname,i);if(s===void 0)return;let a=Es(t.keys());if(!t.has(a))return;let c=po(s,a);if(!c||!t.has(c.href))return;let p=po(s,r);if(p)return p.hash=o.hash,p.search=o.search,p}function po(e,t){try{return new URL(e,t)}catch(r){return}}function ys(e,t){if(e.startsWith(t))return e.slice(t.length)}function xs(e,t){let r=Math.min(e.length,t.length),o;for(o=0;oy)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:s,aliases:a})=>s===i||a.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),b(n=>h(document.body,"click").pipe(g(i=>!i.metaKey&&!i.ctrlKey),te(o),b(([i,s])=>{if(i.target instanceof Element){let 
a=i.target.closest("a");if(a&&!a.target&&n.has(a.href)){let c=a.href;return!i.target.closest(".md-version")&&n.get(c)===s?y:(i.preventDefault(),$(new URL(c)))}}return y}),b(i=>kt(i).pipe(m(s=>{var a;return(a=wi({selectedVersionSitemap:s,selectedVersionBaseURL:i,currentLocation:we(),currentBaseURL:t.base}))!=null?a:i})))))).subscribe(n=>st(n,!0)),z([r,o]).subscribe(([n,i])=>{j(".md-header__topic").appendChild(Wn(n,i))}),e.pipe(b(()=>o)).subscribe(n=>{var a;let i=new URL(t.base),s=__md_get("__outdated",sessionStorage,i);if(s===null){s=!0;let c=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(c)||(c=[c]);e:for(let p of c)for(let l of n.aliases.concat(n.version))if(new RegExp(p,"i").test(l)){s=!1;break e}__md_set("__outdated",s,sessionStorage,i)}if(s)for(let c of me("outdated"))c.hidden=!1})}function ws(e,{worker$:t}){let{searchParams:r}=we();r.has("q")&&(at("search",!0),e.value=r.get("q"),e.focus(),Je("search").pipe(Re(i=>!i)).subscribe(()=>{let i=we();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=Ye(e),n=L(t.pipe(Re(zt)),h(e,"keyup"),o).pipe(m(()=>e.value),Y());return z([n,o]).pipe(m(([i,s])=>({value:i,focus:s})),Z(1))}function Si(e,{worker$:t}){let r=new T,o=r.pipe(oe(),ae(!0));z([t.pipe(Re(zt)),r],(i,s)=>s).pipe(ne("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(ne("focus")).subscribe(({focus:i})=>{i&&at("search",i)}),h(e.form,"reset").pipe(W(o)).subscribe(()=>e.focus());let n=j("header [for=__search]");return h(n,"click").subscribe(()=>e.focus()),ws(e,{worker$:t}).pipe(O(i=>r.next(i)),A(()=>r.complete()),m(i=>P({ref:e},i)),Z(1))}function Oi(e,{worker$:t,query$:r}){let o=new T,n=un(e.parentElement).pipe(g(Boolean)),i=e.parentElement,s=j(":scope > :first-child",e),a=j(":scope > :last-child",e);Je("search").subscribe(l=>{a.setAttribute("role",l?"list":"presentation"),a.hidden=!l}),o.pipe(te(r),Gr(t.pipe(Re(zt)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:s.textContent=f.length?Me("search.result.none"):Me("search.result.placeholder");break;case 1:s.textContent=Me("search.result.one");break;default:let u=br(l.length);s.textContent=Me("search.result.other",u)}});let c=o.pipe(O(()=>a.innerHTML=""),b(({items:l})=>L($(...l.slice(0,10)),$(...l.slice(10)).pipe(ot(4),Xr(n),b(([f])=>f)))),m(Fn),le());return c.subscribe(l=>a.appendChild(l)),c.pipe(J(l=>{let f=ue("details",l);return typeof f=="undefined"?y:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(g(Sr),m(({data:l})=>l)).pipe(O(l=>o.next(l)),A(()=>o.complete()),m(l=>P({ref:e},l)))}function Ts(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=we();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function Li(e,t){let r=new T,o=r.pipe(oe(),ae(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),Ts(e,t).pipe(O(n=>r.next(n)),A(()=>r.complete()),m(n=>P({ref:e},n)))}function Mi(e,{worker$:t,keyboard$:r}){let o=new T,n=Ce("search-query"),i=L(h(n,"keydown"),h(n,"focus")).pipe(xe(pe),m(()=>n.value),Y());return o.pipe(Pe(i),m(([{suggest:a},c])=>{let p=c.split(/([\s-]+)/);if(a!=null&&a.length&&p[p.length-1]){let l=a[a.length-1];l.startsWith(p[p.length-1])&&(p[p.length-1]=l)}else p.length=0;return p})).subscribe(a=>e.innerHTML=a.join("").replace(/\s/g," 
")),r.pipe(g(({mode:a})=>a==="search")).subscribe(a=>{switch(a.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(g(Sr),m(({data:a})=>a)).pipe(O(a=>o.next(a)),A(()=>o.complete()),m(()=>({ref:e})))}function _i(e,{index$:t,keyboard$:r}){let o=Te();try{let n=Ei(o.search,t),i=Ce("search-query",e),s=Ce("search-result",e);h(e,"click").pipe(g(({target:c})=>c instanceof Element&&!!c.closest("a"))).subscribe(()=>at("search",!1)),r.pipe(g(({mode:c})=>c==="search")).subscribe(c=>{let p=Ne();switch(c.type){case"Enter":if(p===i){let l=new Map;for(let f of M(":first-child [href]",s)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}c.claim()}break;case"Escape":case"Tab":at("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof p=="undefined")i.focus();else{let l=[i,...M(":not(details) > [href], summary, details[open] [href]",s)],f=Math.max(0,(Math.max(0,l.indexOf(p))+l.length+(c.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}c.claim();break;default:i!==Ne()&&i.focus()}}),r.pipe(g(({mode:c})=>c==="global")).subscribe(c=>{switch(c.type){case"f":case"s":case"/":i.focus(),i.select(),c.claim();break}});let a=Si(i,{worker$:n});return L(a,Oi(s,{worker$:n,query$:a})).pipe(Ve(...me("search-share",e).map(c=>Li(c,{query$:a})),...me("search-suggest",e).map(c=>Mi(c,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,tt}}function Ai(e,{index$:t,location$:r}){return z([t,r.pipe(Q(we()),g(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>xi(o.config)(n.searchParams.get("h"))),m(o=>{var s;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let a=i.nextNode();a;a=i.nextNode())if((s=a.parentElement)!=null&&s.offsetHeight){let c=a.textContent,p=o(c);p.length>c.length&&n.set(a,p)}for(let[a,c]of n){let{childNodes:p}=x("span",null,c);a.replaceWith(...Array.from(p))}return{ref:e,nodes:n}}))}function Ss(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:s},{offset:{y:a}}])=>(s=s+Math.min(n,Math.max(0,a-i))-n,{height:s,locked:a>=i+n})),Y((i,s)=>i.height===s.height&&i.locked===s.locked))}function lo(e,o){var n=o,{header$:t}=n,r=vo(n,["header$"]);let i=j(".md-sidebar__scrollwrap",e),{y:s}=Be(i);return H(()=>{let a=new T,c=a.pipe(oe(),ae(!0)),p=a.pipe($e(0,ye));return p.pipe(te(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*s}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),p.pipe(Re()).subscribe(()=>{for(let l of M(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=de(f);f.scrollTo({top:u-d/2})}}}),fe(M("label[tabindex]",e)).pipe(J(l=>h(l,"click").pipe(xe(pe),m(()=>l),W(c)))).subscribe(l=>{let f=j(`[id="${l.htmlFor}"]`);j(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),V("content.tooltips")&&fe(M("abbr[title]",e)).pipe(J(l=>Xe(l,{viewport$})),W(c)).subscribe(),Ss(e,r).pipe(O(l=>a.next(l)),A(()=>a.complete()),m(l=>P({ref:e},l)))})}function Ci(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return rt(ze(`${r}/releases/latest`).pipe(ve(()=>y),m(o=>({version:o.tag_name})),Qe({})),ze(r).pipe(ve(()=>y),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Qe({}))).pipe(m(([o,n])=>P(P({},o),n)))}else{let 
r=`https://api.github.com/users/${e}`;return ze(r).pipe(m(o=>({repositories:o.public_repos})),Qe({}))}}function ki(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return rt(ze(`${r}/releases/permalink/latest`).pipe(ve(()=>y),m(({tag_name:o})=>({version:o})),Qe({})),ze(r).pipe(ve(()=>y),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Qe({}))).pipe(m(([o,n])=>P(P({},o),n)))}function Hi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return Ci(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ki(r,o)}return y}var Os;function Ls(e){return Os||(Os=H(()=>{let t=__md_get("__source",sessionStorage);if(t)return $(t);if(me("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return y}return Hi(e.href).pipe(O(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>y),g(t=>Object.keys(t).length>0),m(t=>({facts:t})),Z(1)))}function $i(e){let t=j(":scope > :last-child",e);return H(()=>{let r=new T;return r.subscribe(({facts:o})=>{t.appendChild(jn(o)),t.classList.add("md-source__repository--active")}),Ls(e).pipe(O(o=>r.next(o)),A(()=>r.complete()),m(o=>P({ref:e},o)))})}function Ms(e,{viewport$:t,header$:r}){return Le(document.body).pipe(b(()=>Er(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ne("hidden"))}function Pi(e,t){return H(()=>{let r=new T;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(V("navigation.tabs.sticky")?$({hidden:!1}):Ms(e,t)).pipe(O(o=>r.next(o)),A(()=>r.complete()),m(o=>P({ref:e},o)))})}function _s(e,{viewport$:t,header$:r}){let o=new Map,n=M(".md-nav__link",e);for(let a of n){let c=decodeURIComponent(a.hash.substring(1)),p=ue(`[id="${c}"]`);typeof p!="undefined"&&o.set(a,p)}let i=r.pipe(ne("height"),m(({height:a})=>{let c=Ce("main"),p=j(":scope > :first-child",c);return a+.8*(p.offsetTop-c.offsetTop)}),le());return Le(document.body).pipe(ne("height"),b(a=>H(()=>{let c=[];return $([...o].reduce((p,[l,f])=>{for(;c.length&&o.get(c[c.length-1]).tagName>=f.tagName;)c.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return p.set([...c=[...c,l]].reverse(),u)},new Map))}).pipe(m(c=>new Map([...c].sort(([,p],[,l])=>p-l))),Pe(i),b(([c,p])=>t.pipe(Ut(([l,f],{offset:{y:u},size:d})=>{let v=u+d.height>=Math.floor(a.height);for(;f.length;){let[,S]=f[0];if(S-p=u&&!v)f=[l.pop(),...f];else break}return[l,f]},[[],[...c]]),Y((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([a,c])=>({prev:a.map(([p])=>p),next:c.map(([p])=>p)})),Q({prev:[],next:[]}),ot(2,1),m(([a,c])=>a.prev.length{let i=new T,s=i.pipe(oe(),ae(!0));if(i.subscribe(({prev:a,next:c})=>{for(let[p]of c)p.classList.remove("md-nav__link--passed"),p.classList.remove("md-nav__link--active");for(let[p,[l]]of a.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",p===a.length-1)}),V("toc.follow")){let a=L(t.pipe(Ae(1),m(()=>{})),t.pipe(Ae(250),m(()=>"smooth")));i.pipe(g(({prev:c})=>c.length>0),Pe(o.pipe(xe(pe))),te(a)).subscribe(([[{prev:c}],p])=>{let[l]=c[c.length-1];if(l.offsetHeight){let f=vr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=de(f);f.scrollTo({top:u-d/2,behavior:p})}}})}return V("navigation.tracking")&&t.pipe(W(s),ne("offset"),Ae(250),Ie(1),W(n.pipe(Ie(1))),vt({delay:250}),te(i)).subscribe(([,{prev:a}])=>{let c=we(),p=a[a.length-1];if(p&&p.length){let[l]=p,{hash:f}=new URL(l.href);c.hash!==f&&(c.hash=f,history.replaceState({},"",`${c}`))}else 
c.hash="",history.replaceState({},"",`${c}`)}),_s(e,{viewport$:t,header$:r}).pipe(O(a=>i.next(a)),A(()=>i.complete()),m(a=>P({ref:e},a)))})}function As(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:s}})=>s),ot(2,1),m(([s,a])=>s>a&&a>0),Y()),i=r.pipe(m(({active:s})=>s));return z([i,n]).pipe(m(([s,a])=>!(s&&a)),Y(),W(o.pipe(Ie(1))),ae(!0),vt({delay:250}),m(s=>({hidden:s})))}function Ii(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new T,s=i.pipe(oe(),ae(!0));return i.subscribe({next({hidden:a}){e.hidden=a,a?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(s),ne("height")).subscribe(({height:a})=>{e.style.top=`${a+16}px`}),h(e,"click").subscribe(a=>{a.preventDefault(),window.scrollTo({top:0})}),As(e,{viewport$:t,main$:o,target$:n}).pipe(O(a=>i.next(a)),A(()=>i.complete()),m(a=>P({ref:e},a)))}function Fi({document$:e,viewport$:t}){e.pipe(b(()=>M(".md-ellipsis")),J(r=>mt(r).pipe(W(e.pipe(Ie(1))),g(o=>o),m(()=>r),Ee(1))),g(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,V("content.tooltips")?Xe(n,{viewport$:t}).pipe(W(e.pipe(Ie(1))),A(()=>n.removeAttribute("title"))):y})).subscribe(),V("content.tooltips")&&e.pipe(b(()=>M(".md-status")),J(r=>Xe(r,{viewport$:t}))).subscribe()}function ji({document$:e,tablet$:t}){e.pipe(b(()=>M(".md-toggle--indeterminate")),O(r=>{r.indeterminate=!0,r.checked=!1}),J(r=>h(r,"change").pipe(Jr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),te(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function Cs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ui({document$:e}){e.pipe(b(()=>M("[data-md-scrollfix]")),O(t=>t.removeAttribute("data-md-scrollfix")),g(Cs),J(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function Wi({viewport$:e,tablet$:t}){z([Je("search"),t]).pipe(m(([r,o])=>r&&!o),b(r=>$(r).pipe(nt(r?400:100))),te(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ks(){return location.protocol==="file:"?_t(`${new URL("search/search_index.js",Or.base)}`).pipe(m(()=>__index),Z(1)):ze(new URL("search/search_index.json",Or.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ct=an(),Kt=bn(),Ht=yn(Kt),mo=hn(),ke=Ln(),Lr=Wt("(min-width: 60em)"),Vi=Wt("(min-width: 
76.25em)"),Ni=xn(),Or=Te(),zi=document.forms.namedItem("search")?ks():tt,fo=new T;di({alert$:fo});ui({document$:ct});var uo=new T,qi=kt(Or.base);V("navigation.instant")&&gi({sitemap$:qi,location$:Kt,viewport$:ke,progress$:uo}).subscribe(ct);var Di;((Di=Or.version)==null?void 0:Di.provider)==="mike"&&Ti({document$:ct});L(Kt,Ht).pipe(nt(125)).subscribe(()=>{at("drawer",!1),at("search",!1)});mo.pipe(g(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=ue("link[rel=prev]");typeof t!="undefined"&&st(t);break;case"n":case".":let r=ue("link[rel=next]");typeof r!="undefined"&&st(r);break;case"Enter":let o=Ne();o instanceof HTMLLabelElement&&o.click()}});Fi({viewport$:ke,document$:ct});ji({document$:ct,tablet$:Lr});Ui({document$:ct});Wi({viewport$:ke,tablet$:Lr});var ft=ai(Ce("header"),{viewport$:ke}),qt=ct.pipe(m(()=>Ce("main")),b(e=>pi(e,{viewport$:ke,header$:ft})),Z(1)),Hs=L(...me("consent").map(e=>An(e,{target$:Ht})),...me("dialog").map(e=>ni(e,{alert$:fo})),...me("palette").map(e=>li(e)),...me("progress").map(e=>mi(e,{progress$:uo})),...me("search").map(e=>_i(e,{index$:zi,keyboard$:mo})),...me("source").map(e=>$i(e))),$s=H(()=>L(...me("announce").map(e=>_n(e)),...me("content").map(e=>oi(e,{sitemap$:qi,viewport$:ke,target$:Ht,print$:Ni})),...me("content").map(e=>V("search.highlight")?Ai(e,{index$:zi,location$:Kt}):y),...me("header").map(e=>si(e,{viewport$:ke,header$:ft,main$:qt})),...me("header-title").map(e=>ci(e,{viewport$:ke,header$:ft})),...me("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?eo(Vi,()=>lo(e,{viewport$:ke,header$:ft,main$:qt})):eo(Lr,()=>lo(e,{viewport$:ke,header$:ft,main$:qt}))),...me("tabs").map(e=>Pi(e,{viewport$:ke,header$:ft})),...me("toc").map(e=>Ri(e,{viewport$:ke,header$:ft,main$:qt,target$:Ht})),...me("top").map(e=>Ii(e,{viewport$:ke,header$:ft,main$:qt,target$:Ht})))),Ki=ct.pipe(b(()=>$s),Ve(Hs),Z(1));Ki.subscribe();window.document$=ct;window.location$=Kt;window.target$=Ht;window.keyboard$=mo;window.viewport$=ke;window.tablet$=Lr;window.screen$=Vi;window.print$=Ni;window.alert$=fo;window.progress$=uo;window.component$=Ki;})(); -//# sourceMappingURL=bundle.e71a0d61.min.js.map - diff --git a/site/assets/javascripts/bundle.e71a0d61.min.js.map b/site/assets/javascripts/bundle.e71a0d61.min.js.map deleted file mode 100644 index 23451b54d11b39ef33a5f94d16d9e351dec9972c..0000000000000000000000000000000000000000 --- a/site/assets/javascripts/bundle.e71a0d61.min.js.map +++ /dev/null @@ -1,7 +0,0 @@ -{ - "version": 3, - "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", "node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", 
"node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", 
"node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinct.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/exhaustMap.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", 
"src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/link/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/alternate/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/findurl/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", 
"src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], - "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
[Generated JavaScript bundle source map (sourcesContent) of the built documentation site. The embedded sources are verbatim third-party code: clipboard.js v2.0.11 (MIT, Zeno Rocha) with its tiny-emitter, good-listener, delegate, and select modules; the Material for MkDocs application entrypoint (MIT, Martin Donath); Microsoft tslib helpers; and RxJS internals (Subscription, Subscriber, config, timeoutProvider, NotificationFactories, errorContext).]
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param value The `next` value.\n */\n next(value: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param err The `error` exception.\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. 
The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as ((value: T) => void) | undefined,\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * do the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent.\n * @param subscriber The stopped subscriber.\n */\nfunction handleStoppedNotification(notification: ObservableNotification, subscriber: Subscriber) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. 
Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. 
\n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @param subscribe The function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @param subscribe the subscriber function to be passed to the Observable constructor\n * @return A new observable.\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @param operator the operator defining the operation to take on the observable\n * @return A new observable with the Operator applied.\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. 
Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. 
Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param observerOrNext Either an {@link Observer} with some or all callback methods,\n * or the `next` handler that is called for each value emitted from the subscribed Observable.\n * @param error A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param complete A handler for a terminal event resulting from successful completion.\n * @return A subscription reference to the registered handlers.\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next A handler for each value emitted by the observable.\n * @return A promise that either resolves on observable completion or\n * rejects with the handled error.\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @return This instance of the observable.\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n *\n * @return The Observable result of all the operators having been called\n * in the order they were passed in.\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return Observable that this Subject casts to.\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param _bufferSize The size of the buffer to replay on subscription\n * @param _windowTime The amount of time the buffered items will stay buffered\n * @param _timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param state Some contextual data that the `work` function uses when called by the\n * Scheduler.\n * @param delay Time to wait before executing the work, where the time unit is implicit\n * and defined by the Scheduler.\n * @return A subscription in order to be able to unsubscribe the scheduled work.\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
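For illustration, a minimal\n * sketch (using the concrete {@link asyncScheduler}) that passes both a `delay` and a `state`:\n *\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * asyncScheduler.schedule(state => console.log(state), 500, 'tick');\n * // Logs "tick" after roughly 500ms\n * ```\n *\n * 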
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param work A function representing a task, or some unit of work to be\n * executed by the Scheduler.\n * @param delay Time to wait before executing the work, where the time unit is\n * implicit and defined by the Scheduler itself.\n * @param state Some contextual data that the `work` function uses when called\n * by the Scheduler.\n * @return A subscription in order to be able to unsubscribe the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. 
Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
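For illustration, a minimal sketch of that synchronous behaviour:\n *\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => console.log('task'));\n * console.log('after schedule');\n *\n * // Logs:\n * // "task"\n * // "after schedule"\n * ```\n *\n * 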
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && id === scheduler._scheduled && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n let flushId;\n if (action) {\n flushId = action.id;\n } else {\n flushId = this._scheduled;\n this._scheduled = undefined;\n }\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an