# (removed Hugging Face Spaces page header residue: "Spaces: / Running / Running")
"""
AURA Chat — Gradio Space

Single-file Gradio app that:
- Accepts newline-separated prompts (data queries) from the user.
- On "Analyze" scrapes those queries, sends the aggregated text to a locked LLM,
  and returns a polished analysis with a ranked list of best stocks and an
  "Investment Duration" (when to enter / when to exit) for each stock.
- Seeds a chat component with the generated analysis; user can then chat about it.

Notes:
- Model, max tokens, and delay between scrapes are fixed and cannot be changed via UI.
- Set OPENAI_API_KEY in environment (Space Secrets).
"""
import asyncio
import atexit
import os
import sys
import time
import traceback
from datetime import datetime
from typing import List

import requests

import gradio as gr
# Defensive: ensure a fresh event loop early to avoid fd race on shutdown.
# NOTE(review): this runs at import time and replaces any loop the host may
# have installed — presumably intentional for Spaces startup; confirm.
if sys.platform != "win32":
    try:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    except Exception:
        # Best-effort only: log the traceback but never block app startup.
        traceback.print_exc()
# =============================================================================
# CONFIGURATION (fixed)
# =============================================================================
# Scraper endpoint; overridable via environment for testing/staging.
SCRAPER_API_URL = os.getenv("SCRAPER_API_URL", "https://deep-scraper-96.created.app/api/deep-scrape")
# Headers sent with every scraper request (JSON body, generic UA).
SCRAPER_HEADERS = {
    "User-Agent": "Mozilla/5.0",
    "Content-Type": "application/json"
}
# FIXED model & tokens (cannot be changed from UI)
LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b:free")
MAX_TOKENS = int(os.getenv("LLM_MAX_TOKENS", "3000"))
# Seconds to pause between consecutive scrape requests.
SCRAPE_DELAY = float(os.getenv("SCRAPE_DELAY", "1.0"))
# API key is required at call time; missing key yields a readable error string.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
# Default base URL targets OpenRouter's OpenAI-compatible API.
OPENAI_BASE_URL = os.getenv("OPENAI_BASE_URL", "https://openrouter.ai/api/v1")
# =============================================================================
# PROMPT ENGINEERING (fixed)
# =============================================================================
# System prompt sent with every analysis request. It is an f-string evaluated
# once at import time, so {MAX_TOKENS} and {LLM_MODEL} are baked in here.
PROMPT_TEMPLATE = f"""You are AURA, a concise, professional hedge-fund research assistant.
Task:
- Given scraped data below, produce a clear, readable analysis that:
1) Lists the top 5 stock picks (or fewer if not enough data).
2) For each stock provide: Ticker / Company name, short rationale (2-3 bullets),
and an explicit **Investment Duration** entry: a one-line "When to Invest"
and a one-line "When to Sell" instruction (these two lines are mandatory
for each stock).
3) Keep each stock entry short and scannable. Use a bullet list or numbered list.
4) At the top, provide a 2-3 sentence summary conclusion (market context +
highest conviction pick).
5) Output in plain text, clean formatting, easy for humans to read. No JSON.
6) After the list, include a concise "Assumptions & Risks" section (2-3 bullet points).
Important: Be decisive. If data is insufficient, state that clearly and provide
the best-available picks with lower confidence.
Max tokens for the LLM response: {MAX_TOKENS}
Model: {LLM_MODEL}"""
# =============================================================================
# SCRAPING HELPERS
# =============================================================================
def deep_scrape(query: str, retries: int = 3, timeout: int = 40) -> str:
    """POST *query* to SCRAPER_API_URL and return a readable text aggregation.

    Retries up to *retries* times with a linear backoff between attempts.
    Returns a string starting with "ERROR" on total failure (callers check
    that prefix rather than catching exceptions).

    Fix vs. original: only network/HTTP/JSON failures are retried. The old
    bare `except Exception` also swallowed programming errors in the
    response-formatting code and mis-reported them as scraper failures.
    """
    payload = {"query": query}
    last_err = None
    for attempt in range(1, retries + 1):
        try:
            resp = requests.post(
                SCRAPER_API_URL,
                headers=SCRAPER_HEADERS,
                json=payload,
                timeout=timeout
            )
            resp.raise_for_status()
            data = resp.json()
        except (requests.RequestException, ValueError) as e:
            # ValueError covers requests' JSONDecodeError for non-JSON bodies.
            last_err = e
            if attempt < retries:
                time.sleep(1.0 * attempt)  # back off a little more each retry
            continue
        # Format into readable text (outside the try: bugs here should raise).
        if isinstance(data, dict):
            # Flatten {"section": "text", ...} into "SECTION:\ntext\n" blocks.
            return "\n".join(f"{k.upper()}:\n{v}\n" for k, v in data.items())
        return str(data)
    return f"ERROR: Scraper failed: {last_err}"
def multi_scrape(queries: List[str], delay: float = SCRAPE_DELAY) -> str:
    """Scrape every non-blank query and join the results into one report.

    Each query's output is preceded by a "=== QUERY: ... ===" header.

    Fixes vs. original:
    - When *every* query fails, the returned string now starts with "ERROR",
      so callers checking `result.startswith("ERROR")` detect total failure
      (previously the headers masked the prefix and that check was dead).
    - No pointless sleep after the final query.
    """
    cleaned = [q.strip() for q in queries if q.strip()]
    sections: List[str] = []
    failures = 0
    for i, q in enumerate(cleaned):
        result = deep_scrape(q)
        if result.startswith("ERROR"):
            failures += 1
        sections.append(f"\n=== QUERY: {q} ===\n")
        sections.append(result)
        if i < len(cleaned) - 1:
            time.sleep(delay)  # be polite between requests, not after the last
    report = "\n".join(sections)
    if cleaned and failures == len(cleaned):
        return "ERROR: all scrape queries failed.\n" + report
    return report
# =============================================================================
# LLM INTERACTION
# =============================================================================
# Guarded import: the app should still start (and surface a readable error
# from run_llm_system_and_user) when the openai package is missing.
try:
    from openai import OpenAI
except Exception:
    OpenAI = None  # sentinel checked before every LLM call
def run_llm_system_and_user(
    system_prompt: str,
    user_text: str,
    model: str = LLM_MODEL,
    max_tokens: int = MAX_TOKENS
) -> str:
    """Call the chat-completions endpoint with one system + user message pair.

    Creates the OpenAI-compatible client lazily, sends a single request, and
    always closes the client. Always returns a string: the assistant's text
    on success, or a string starting with "ERROR" (callers check that prefix).

    Fixes vs. original:
    - `choices[0].message.content` can legitimately be None; it used to be
      returned as-is, crashing callers that do `.startswith("ERROR")`. We now
      fall back to `str(choices[0])`.
    - Removed the dead cleanup path that called `client.aclose()` through
      `run_until_complete`: the synchronous client has no `aclose()`, so that
      branch could never succeed. `client.close()` is the supported call.
    """
    if OpenAI is None:
        return "ERROR: openai package not installed or available. See requirements."
    if not OPENAI_API_KEY:
        return "ERROR: OPENAI_API_KEY not set in environment. Please add it to Space Secrets."
    client = None
    try:
        client = OpenAI(base_url=OPENAI_BASE_URL, api_key=OPENAI_API_KEY)
        completion = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_text},
            ],
            max_tokens=max_tokens,
        )
        # Extract content robustly across SDK response shapes.
        choices = getattr(completion, "choices", None)
        if choices:
            try:
                content = choices[0].message.content
            except Exception:
                return str(choices[0])
            return content if content is not None else str(choices[0])
        return str(completion)
    except Exception as e:
        return f"ERROR: LLM call failed: {e}"
    finally:
        if client is not None:
            try:
                client.close()
            except Exception:
                pass  # closing is best-effort; never mask the real result
# =============================================================================
# MAIN PIPELINE
# =============================================================================
def analyze_and_seed_chat(prompts_text: str):
    """Run the full Analyze pipeline: scrape, summarize via LLM, seed the chat.

    Returns a (analysis_text, initial_chat_messages) pair. On any failure the
    first element carries the error/help message and the chat seed is empty.
    """
    if not prompts_text.strip():
        return "Please enter at least one prompt (query) describing what data to gather.", []

    # One query per non-blank line.
    queries = [q for q in (line.strip() for line in prompts_text.splitlines()) if q]

    scraped = multi_scrape(queries, delay=SCRAPE_DELAY)
    if scraped.startswith("ERROR"):
        return scraped, []

    # Hand the aggregated scrape to the locked system prompt.
    user_payload = (
        f"SCRAPED DATA:\n\n{scraped}\n\n"
        "Please follow the system instructions and output the analysis."
    )
    analysis = run_llm_system_and_user(PROMPT_TEMPLATE, user_payload)
    if analysis.startswith("ERROR"):
        return analysis, []

    # Seed the chat history with the request and the generated analysis.
    seed = [
        {"role": "user", "content": f"Analyze the data I provided (prompts: {', '.join(queries)})"},
        {"role": "assistant", "content": analysis},
    ]
    return analysis, seed
def continue_chat(chat_messages, user_message: str, analysis_text: str):
    """Answer a chat follow-up and return the updated message-dict list.

    Appends the user's message and the assistant's reply (which may itself be
    an "ERROR: ..." string from the LLM helper — surfaced to the user as-is).
    Note: mutates and returns the same list object Gradio keeps in state.

    Fix vs. original: removed the dead no-op
    `if assistant_reply.startswith("ERROR"): assistant_reply = assistant_reply`.
    """
    if chat_messages is None:
        chat_messages = []
    if not user_message or not user_message.strip():
        return chat_messages

    chat_messages.append({"role": "user", "content": user_message})

    # The prior analysis is passed back in as ground-truth context.
    followup_system = (
        "You are AURA, a helpful analyst. The conversation context includes a recently "
        "generated analysis from scraped data. Use that analysis as ground truth context; "
        "answer follow-up questions, explain rationale, and provide clarifications. "
        "Be concise and actionable."
    )
    user_payload = (
        f"REFERENCE ANALYSIS:\n\n{analysis_text}\n\n"
        f"USER QUESTION: {user_message}\n\n"
        "Respond concisely and reference lines from the analysis where appropriate."
    )
    assistant_reply = run_llm_system_and_user(followup_system, user_payload)
    chat_messages.append({"role": "assistant", "content": assistant_reply})
    return chat_messages
# =============================================================================
# GRADIO UI
# =============================================================================
def build_demo():
    """Build the Gradio Blocks UI: prompt input, analysis output, follow-up chat.

    Fixes vs. original wiring:
    - `error_box` was created with visible=False and never made visible, so
      errors were silently swallowed; `on_analyze` now toggles visibility via
      `gr.update`.
    - Chat messages are openai-style dicts, so the Chatbot is created with
      type="messages" (required by current Gradio for dict-shaped history).
    """
    with gr.Blocks(title="AURA Chat β Hedge Fund Picks") as demo:
        # Custom CSS
        gr.HTML("""
<style>
.gradio-container { max-width: 1100px; margin: 18px auto; }
.header { text-align: left; margin-bottom: 6px; }
.muted { color: #7d8590; font-size: 14px; }
.analysis-box { background: #ffffff; border-radius: 8px; padding: 12px; box-shadow: 0 4px 14px rgba(0,0,0,0.06); }
</style>
""")
        gr.Markdown("# AURA Chat β Hedge Fund Picks")
        gr.Markdown(
            "**Enter one or more data prompts (one per line)** β e.g. SEC insider transactions october 2025 company XYZ.\n\n"
            "Only input prompts; model, tokens and timing are fixed. Press **Analyze** to fetch & generate the picks. "
            "After analysis you can chat with the assistant about the results."
        )
        with gr.Row():
            with gr.Column(scale=1):
                prompts = gr.Textbox(
                    lines=6,
                    label="Data Prompts (one per line)",
                    placeholder="SEC insider transactions october 2025\n13F filings Q3 2025\ncompany: ACME corp insider buys"
                )
                analyze_btn = gr.Button("Analyze", variant="primary")
                # Hidden until an error occurs (toggled via gr.update below).
                error_box = gr.Markdown("", visible=False)
                gr.Markdown(f"**Fixed settings:** Model = {LLM_MODEL} β’ Max tokens = {MAX_TOKENS} β’ Scrape delay = {SCRAPE_DELAY}s")
                gr.Markdown("**Important:** Add your OPENAI_API_KEY to Space Secrets before running.")
            with gr.Column(scale=1):
                analysis_out = gr.Textbox(
                    label="Generated Analysis (Top picks with Investment Duration)",
                    lines=18,
                    interactive=False
                )
        gr.Markdown("**Chat with AURA about this analysis**")
        chatbot = gr.Chatbot(label="AURA Chat", height=420, type="messages")
        user_input = gr.Textbox(
            placeholder="Ask a follow-up question about the analysis...",
            label="Your question"
        )
        send_btn = gr.Button("Send")

        # States: last analysis text + full message history (list of dicts).
        analysis_state = gr.State("")
        chat_state = gr.State([])

        # Handler functions
        def on_analyze(prompts_text):
            # Run the scrape + LLM pipeline; route errors to the error box.
            analysis_text, initial_chat = analyze_and_seed_chat(prompts_text)
            if analysis_text.startswith("ERROR"):
                return "", gr.update(value=f"**Error:** {analysis_text}", visible=True), "", []
            return analysis_text, gr.update(value="", visible=False), analysis_text, initial_chat

        def on_send(chat_state_list, user_msg, analysis_text):
            # Append question + reply to history; clear the input box.
            if not user_msg or not user_msg.strip():
                return chat_state_list or [], ""
            updated_history = continue_chat(chat_state_list or [], user_msg, analysis_text)
            return updated_history, ""

        def render_chat(chat_messages):
            # Mirror the state list into the Chatbot component.
            return chat_messages or []

        # Wire handlers
        analyze_btn.click(
            fn=on_analyze,
            inputs=[prompts],
            outputs=[analysis_out, error_box, analysis_state, chat_state]
        )
        send_btn.click(
            fn=on_send,
            inputs=[chat_state, user_input, analysis_state],
            outputs=[chat_state, user_input]
        )
        user_input.submit(
            fn=on_send,
            inputs=[chat_state, user_input, analysis_state],
            outputs=[chat_state, user_input]
        )
        chat_state.change(
            fn=render_chat,
            inputs=[chat_state],
            outputs=[chatbot]
        )
    return demo
# =============================================================================
# CLEAN SHUTDOWN
# =============================================================================
| def _cleanup_on_exit(): | |
| try: | |
| loop = asyncio.get_event_loop() | |
| if loop and not loop.is_closed(): | |
| try: | |
| loop.stop() | |
| except Exception: | |
| pass | |
| try: | |
| loop.close() | |
| except Exception: | |
| pass | |
| except Exception: | |
| pass | |
| atexit.register(_cleanup_on_exit) | |
# =============================================================================
# RUN
# =============================================================================
if __name__ == "__main__":
    # Bind to all interfaces; Spaces injects PORT, otherwise Gradio's 7860.
    port = int(os.environ.get("PORT", 7860))
    demo = build_demo()
    demo.launch(server_name="0.0.0.0", server_port=port)