import os
import json
import time
import random
from collections import defaultdict
from datetime import date, datetime, timedelta

import gradio as gr
import pandas as pd
import finnhub
from io import StringIO
import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from huggingface_hub import InferenceClient, hf_hub_download

try:
    from llama_cpp import Llama
except Exception:
    Llama = None

try:
    import torch
except Exception:
    torch = None

import psutil

# Suppress Google Cloud warnings
os.environ['GRPC_VERBOSITY'] = 'ERROR'
os.environ['GRPC_TRACE'] = ''

# Suppress other warnings
import warnings
warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=FutureWarning)

# ---------- CONFIGURATION -----------------------------------------------------
# Model and inference configuration (Hugging Face Inference API)
FIN_MODEL_ID = "TheFinAI/Fin-o1-14B"

# RapidAPI configuration
RAPIDAPI_HOST = "alpha-vantage.p.rapidapi.com"

# Load Finnhub API keys from a single secret (multiple keys separated by newlines)
FINNHUB_KEYS_RAW = os.getenv("FINNHUB_KEYS", "")
if FINNHUB_KEYS_RAW:
    FINNHUB_KEYS = [key.strip() for key in FINNHUB_KEYS_RAW.split('\n') if key.strip()]
else:
    FINNHUB_KEYS = []

# Load RapidAPI keys from a single secret (multiple keys separated by newlines)
RAPIDAPI_KEYS_RAW = os.getenv("RAPIDAPI_KEYS", "")
if RAPIDAPI_KEYS_RAW:
    RAPIDAPI_KEYS = [key.strip() for key in RAPIDAPI_KEYS_RAW.split('\n') if key.strip()]
else:
    RAPIDAPI_KEYS = []
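
# Expected secret format for both FINNHUB_KEYS and RAPIDAPI_KEYS: one key per
# line inside a single Space secret, e.g. (placeholder values, not real keys):
#   abcdefgh12345678
#   ijklmnop87654321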
# Hugging Face token (support multiple common env var names)
HF_TOKEN = (
    os.getenv("HF_TOKEN")
    or os.getenv("HF_API_TOKEN")
    or os.getenv("HUGGINGFACEHUB_API_TOKEN")
    or os.getenv("HUGGINGFACE_TOKEN")
    or ""
)

# Optional local GGUF configuration for CPU inference via llama.cpp
FIN_GGUF_PATH = os.getenv("FIN_GGUF_PATH", "").strip()
FIN_GGUF_REPO = os.getenv("FIN_GGUF_REPO", "").strip()  # e.g., "TheFinAI/Fin-o1-14B-GGUF"
FIN_GGUF_FILE = os.getenv("FIN_GGUF_FILE", "").strip()  # e.g., "fino1-14b-q4_k_m.gguf"

# Defensive re-filter of Finnhub keys (already stripped while parsing above)
FINNHUB_KEYS = [key for key in FINNHUB_KEYS if key.strip()]

# Validate that we have at least one key for each service
if not FINNHUB_KEYS:
    print("⚠️ Warning: No Finnhub API keys found in secrets")
if not RAPIDAPI_KEYS:
    print("⚠️ Warning: No RapidAPI keys found in secrets")
if not HF_TOKEN:
    print("⚠️ Warning: No Hugging Face token (HF_TOKEN) found in secrets – Fin-o1-14B will use mock responses")
# Initialize inference backends (prefer local GGUF if provided)
hf_client = None
llama_local = None

# Try to resolve the GGUF path from a repo if it was not provided directly
if not FIN_GGUF_PATH and FIN_GGUF_REPO and FIN_GGUF_FILE:
    try:
        FIN_GGUF_PATH = hf_hub_download(repo_id=FIN_GGUF_REPO, filename=FIN_GGUF_FILE)
        print(f"✅ Downloaded GGUF from {FIN_GGUF_REPO}/{FIN_GGUF_FILE}")
    except Exception as e:
        print(f"⚠️ Failed to download GGUF: {e}")

if FIN_GGUF_PATH and Llama is not None:
    try:
        llama_local = Llama(
            model_path=FIN_GGUF_PATH,
            n_ctx=8192,
            logits_all=False,
            n_threads=max(1, os.cpu_count() or 2),
        )
        print(f"✅ Local llama.cpp initialized with GGUF at {FIN_GGUF_PATH}")
    except Exception as e:
        print(f"⚠️ Failed to initialize local llama.cpp: {e}")

if llama_local is None and HF_TOKEN:
    try:
        hf_client = InferenceClient(model=FIN_MODEL_ID, token=HF_TOKEN, timeout=60)
        print(f"✅ Hugging Face Inference Client initialized for {FIN_MODEL_ID}")
    except Exception as e:
        print(f"⚠️ Failed to initialize HF Inference Client: {e}")
| print("=" * 50) | |
| print("🚀 FinRobot Forecaster Starting Up...") | |
| print("=" * 50) | |
| if FINNHUB_KEYS: | |
| print(f"📊 Finnhub API: {len(FINNHUB_KEYS)} keys loaded") | |
| else: | |
| print("📊 Finnhub API: Not configured") | |
| if RAPIDAPI_KEYS: | |
| print(f"📈 RapidAPI Alpha Vantage: {RAPIDAPI_HOST} ({len(RAPIDAPI_KEYS)} keys loaded)") | |
| else: | |
| print("📈 RapidAPI Alpha Vantage: Not configured") | |
| if HF_TOKEN: | |
| print("🤖 HF Inference: Token detected for Fin-o1-14B") | |
| else: | |
| print("🤖 HF Inference: No token provided (mock responses will be used)") | |
| print("✅ Application started successfully!") | |
| print("=" * 50) | |
| # Cấu hình Google Generative AI (if keys available) | |
| # No Gemini setup needed; using HF Inference API instead | |
| if llama_local is not None: | |
| print("🤖 LLM: Fin-o1-14B via local GGUF (llama.cpp, CPU)") | |
| else: | |
| print("🤖 LLM: Fin-o1-14B via Hugging Face Inference API") | |
| # Cấu hình Finnhub client (if keys available) | |
| if FINNHUB_KEYS: | |
| # Configure with first key for initial setup | |
| finnhub_client = finnhub.Client(api_key=FINNHUB_KEYS[0]) | |
| print(f"✅ Finnhub configured with {len(FINNHUB_KEYS)} keys") | |
| else: | |
| finnhub_client = None | |
| print("⚠️ Finnhub not configured - will use mock news data") | |
# Create a requests session with a retry strategy
def create_session():
    session = requests.Session()
    retry_strategy = Retry(
        total=3,
        backoff_factor=1,
        status_forcelist=[429, 500, 502, 503, 504],
    )
    adapter = HTTPAdapter(max_retries=retry_strategy)
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    return session
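
# With backoff_factor=1, urllib3 sleeps roughly backoff_factor * 2**(retry - 1)
# seconds between retries (about 1s, 2s, 4s here) and only retries the HTTP
# status codes in status_forcelist. This describes urllib3's documented Retry
# behavior, not anything specific to this app.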
# Create a global session
requests_session = create_session()

SYSTEM_PROMPT = (
    "You are a seasoned stock-market analyst. "
    "Given recent company news and optional basic financials, "
    "return:\n"
    "[Positive Developments] – 2-4 bullets\n"
    "[Potential Concerns] – 2-4 bullets\n"
    "[Prediction & Analysis] – a one-week price outlook with rationale."
)
# ---------- UTILITY HELPERS ---------------------------------------------------
def today() -> str:
    return date.today().strftime("%Y-%m-%d")

def n_weeks_before(date_string: str, n: int) -> str:
    return (datetime.strptime(date_string, "%Y-%m-%d") -
            timedelta(days=7 * n)).strftime("%Y-%m-%d")
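
# A negative n walks forward instead of back, e.g. (dates worked by hand):
#   n_weeks_before("2024-06-14", 2)  -> "2024-05-31"
#   n_weeks_before("2024-06-14", -1) -> "2024-06-21"
# make_prompt() below relies on the negative case to phrase the one-week horizon.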
# ---------- DATA FETCHING -----------------------------------------------------
def get_stock_data(symbol: str, steps: list[str]) -> pd.DataFrame:
    # Try every RapidAPI Alpha Vantage key in turn
    for rapidapi_key in RAPIDAPI_KEYS:
        try:
            print(f"📈 Fetching stock data for {symbol} via RapidAPI (key: {rapidapi_key[:8]}...)")
            # RapidAPI Alpha Vantage endpoint
            url = f"https://{RAPIDAPI_HOST}/query"
            headers = {
                "X-RapidAPI-Host": RAPIDAPI_HOST,
                "X-RapidAPI-Key": rapidapi_key
            }
            params = {
                "function": "TIME_SERIES_DAILY",
                "symbol": symbol,
                "outputsize": "full",
                "datatype": "csv"
            }
            # Retry up to 3 times with the current RapidAPI key
            for attempt in range(3):
                try:
                    resp = requests_session.get(url, headers=headers, params=params, timeout=30)
                    if not resp.ok:
                        print(f"RapidAPI HTTP error {resp.status_code} with key {rapidapi_key[:8]}..., attempt {attempt + 1}")
                        time.sleep(2 ** attempt)
                        continue
                    text = resp.text.strip()
                    if text.startswith("{"):
                        info = resp.json()
                        msg = info.get("Note") or info.get("Error Message") or info.get("Information") or str(info)
                        if "rate limit" in msg.lower() or "quota" in msg.lower():
                            print(f"RapidAPI rate limit hit with key {rapidapi_key[:8]}..., trying next key")
                            break  # try the next key
                        raise RuntimeError(f"RapidAPI Alpha Vantage Error: {msg}")
                    # Parse CSV data
                    df = pd.read_csv(StringIO(text))
                    date_col = "timestamp" if "timestamp" in df.columns else df.columns[0]
                    df[date_col] = pd.to_datetime(df[date_col])
                    df = df.sort_values(date_col).set_index(date_col)
                    data = {"Start Date": [], "End Date": [], "Start Price": [], "End Price": []}
                    for i in range(len(steps) - 1):
                        s_date = pd.to_datetime(steps[i])
                        e_date = pd.to_datetime(steps[i+1])
                        seg = df.loc[s_date:e_date]
                        if seg.empty:
                            raise RuntimeError(
                                f"RapidAPI Alpha Vantage cannot get {symbol} data for {steps[i]} – {steps[i+1]}"
                            )
                        data["Start Date"].append(seg.index[0])
                        data["Start Price"].append(seg["close"].iloc[0])
                        data["End Date"].append(seg.index[-1])
                        data["End Price"].append(seg["close"].iloc[-1])
                        time.sleep(1)  # pacing only; no additional API call is made per segment
                    print(f"✅ Successfully retrieved {symbol} data via RapidAPI (key: {rapidapi_key[:8]}...)")
                    return pd.DataFrame(data)
                except requests.exceptions.Timeout:
                    print(f"RapidAPI timeout with key {rapidapi_key[:8]}..., attempt {attempt + 1}")
                    if attempt < 2:
                        time.sleep(5 * (attempt + 1))
                        continue
                    else:
                        break
                except requests.exceptions.RequestException as e:
                    print(f"RapidAPI request error with key {rapidapi_key[:8]}..., attempt {attempt + 1}: {e}")
                    if attempt < 2:
                        time.sleep(3)
                        continue
                    else:
                        break
        except Exception as e:
            print(f"RapidAPI Alpha Vantage failed with key {rapidapi_key[:8]}...: {e}")
            continue  # try the next key
    # Fallback: generate mock data if every RapidAPI key fails
    print("⚠️ All RapidAPI keys failed, using mock data for demonstration...")
    return create_mock_stock_data(symbol, steps)
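
# The frame returned above (and by the mock below) has one row per week:
#   Start Date | End Date | Start Price | End Price
# Downstream code (attach_news, make_prompt) relies on exactly these columns.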
def create_mock_stock_data(symbol: str, steps: list[str]) -> pd.DataFrame:
    """Generate mock data for demos when the API is unavailable."""
    import numpy as np
    data = {"Start Date": [], "End Date": [], "Start Price": [], "End Price": []}
    # Different base prices for different symbols
    base_prices = {
        "AAPL": 180.0, "MSFT": 350.0, "GOOGL": 140.0,
        "TSLA": 200.0, "NVDA": 450.0, "AMZN": 150.0
    }
    base_price = base_prices.get(symbol.upper(), 150.0)
    for i in range(len(steps) - 1):
        s_date = pd.to_datetime(steps[i])
        e_date = pd.to_datetime(steps[i+1])
        # Random prices with a slight upward drift
        start_price = base_price + np.random.normal(0, 5)
        end_price = start_price + np.random.normal(2, 8)  # slight upward drift
        data["Start Date"].append(s_date)
        data["Start Price"].append(round(start_price, 2))
        data["End Date"].append(e_date)
        data["End Price"].append(round(end_price, 2))
        base_price = end_price  # carry the base price into the next week
    return pd.DataFrame(data)
def current_basics(symbol: str, curday: str) -> dict:
    # Check if Finnhub is configured
    if not FINNHUB_KEYS:
        print(f"⚠️ Finnhub not configured, skipping financial basics for {symbol}")
        return {}
    # Try every Finnhub API key in turn
    for api_key in FINNHUB_KEYS:
        try:
            client = finnhub.Client(api_key=api_key)
            raw = client.company_basic_financials(symbol, "all")
            if not raw["series"]:
                continue
            merged = defaultdict(dict)
            for metric, vals in raw["series"]["quarterly"].items():
                for v in vals:
                    merged[v["period"]][metric] = v["v"]
            latest = max((p for p in merged if p <= curday), default=None)
            if latest is None:
                continue
            d = dict(merged[latest])
            d["period"] = latest
            return d
        except Exception as e:
            print(f"Error getting basics for {symbol} with key {api_key[:8]}...: {e}")
            time.sleep(2)  # pause before trying the next key
            continue
    return {}
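
# current_basics() returns the most recent quarterly metrics on or before
# curday, flattened into one dict plus a "period" key, roughly of the shape
# (metric names and values illustrative only):
#   {"peBasicExclExtraTTM": 27.4, ..., "period": "2024-03-31"}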
def attach_news(symbol: str, df: pd.DataFrame) -> pd.DataFrame:
    news_col = []
    for _, row in df.iterrows():
        start = row["Start Date"].strftime("%Y-%m-%d")
        end = row["End Date"].strftime("%Y-%m-%d")
        time.sleep(2)  # longer delay to avoid rate limits
        # Check if Finnhub is configured
        if not FINNHUB_KEYS:
            print(f"⚠️ Finnhub not configured, using mock news for {symbol}")
            news_data = create_mock_news(symbol, start, end)
            news_col.append(json.dumps(news_data))
            continue
        # Try every Finnhub API key in turn
        news_data = []
        for api_key in FINNHUB_KEYS:
            try:
                client = finnhub.Client(api_key=api_key)
                weekly = client.company_news(symbol, _from=start, to=end)
                weekly_fmt = [
                    {
                        "date": datetime.fromtimestamp(n["datetime"]).strftime("%Y%m%d%H%M%S"),
                        "headline": n["headline"],
                        "summary": n["summary"],
                    }
                    for n in weekly
                ]
                weekly_fmt.sort(key=lambda x: x["date"])
                news_data = weekly_fmt
                break  # success, stop trying keys
            except Exception as e:
                print(f"Error with Finnhub key {api_key[:8]}... for {symbol} from {start} to {end}: {e}")
                time.sleep(3)  # pause before trying the next key
                continue
        # Fall back to mock news if nothing was retrieved
        if not news_data:
            news_data = create_mock_news(symbol, start, end)
        news_col.append(json.dumps(news_data))
    df["News"] = news_col
    return df
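
# Each cell in the "News" column is a JSON-encoded list of
# {"date": "YYYYmmddHHMMSS", "headline": ..., "summary": ...} dicts,
# which make_prompt() decodes with json.loads().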
def create_mock_news(symbol: str, start: str, end: str) -> list:
    """Generate mock news data when the API is unavailable."""
    # Strip the dashes so the dates match the compact %Y%m%d%H%M%S format
    # used for real Finnhub news above
    mock_news = [
        {
            "date": f"{start.replace('-', '')}120000",
            "headline": f"{symbol} Shows Strong Performance in Recent Trading",
            "summary": f"Company {symbol} has demonstrated resilience in the current market conditions with positive investor sentiment."
        },
        {
            "date": f"{end.replace('-', '')}090000",
            "headline": f"Analysts Maintain Positive Outlook for {symbol}",
            "summary": f"Financial analysts continue to recommend {symbol} based on strong fundamentals and growth prospects."
        }
    ]
    return mock_news
# ---------- PROMPT CONSTRUCTION -------------------------------------------------
def sample_news(news: list[str], k: int = 5) -> list[str]:
    if len(news) <= k:
        return news
    return [news[i] for i in sorted(random.sample(range(len(news)), k))]
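
# Sampling indices and then sorting them keeps the chosen headlines in their
# original (chronological) order, e.g. picking 2 of 5 items can yield
# [news[1], news[4]] but never [news[4], news[1]].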
def make_prompt(symbol: str, df: pd.DataFrame, curday: str, use_basics=False) -> str:
    # Try every Finnhub API key in turn to fetch the company profile
    company_blurb = f"[Company Introduction]:\n{symbol} is a publicly traded company.\n"
    if FINNHUB_KEYS:
        for api_key in FINNHUB_KEYS:
            try:
                client = finnhub.Client(api_key=api_key)
                prof = client.company_profile2(symbol=symbol)
                company_blurb = (
                    f"[Company Introduction]:\n{prof['name']} operates in the "
                    f"{prof['finnhubIndustry']} sector ({prof['country']}). "
                    f"IPO {prof['ipo']}, market cap {prof['marketCapitalization']:.1f} "
                    f"{prof['currency']}; ticker {symbol} on {prof['exchange']}.\n"
                )
                break  # success, stop trying keys
            except Exception as e:
                print(f"Error getting company profile for {symbol} with key {api_key[:8]}...: {e}")
                time.sleep(2)  # pause before trying the next key
                continue
    else:
        print(f"⚠️ Finnhub not configured, using basic company info for {symbol}")

    # Past weeks block
    past_block = ""
    for _, row in df.iterrows():
        term = "increased" if row["End Price"] > row["Start Price"] else "decreased"
        head = (f"From {row['Start Date']:%Y-%m-%d} to {row['End Date']:%Y-%m-%d}, "
                f"{symbol}'s stock price {term} from "
                f"{row['Start Price']:.2f} to {row['End Price']:.2f}.")
        news_items = json.loads(row["News"])
        summaries = [
            f"[Headline] {n['headline']}\n[Summary] {n['summary']}\n"
            for n in news_items
            if not n["summary"].startswith("Looking for stock market analysis")
        ]
        past_block += "\n" + head + "\n" + "".join(sample_news(summaries, 5))

    # Optional basic financials
    if use_basics:
        basics = current_basics(symbol, curday)
        if basics:
            basics_txt = "\n".join(f"{k}: {v}" for k, v in basics.items() if k != "period")
            basics_block = f"\n[Basic Financials] (reported {basics['period']}):\n{basics_txt}\n"
        else:
            basics_block = "\n[Basic Financials]: not available\n"
    else:
        basics_block = "\n[Basic Financials]: not requested\n"

    horizon = f"{curday} to {n_weeks_before(curday, -1)}"
    final_user_msg = (
        company_blurb
        + past_block
        + basics_block
        # Note: the middle string below must also be an f-string, otherwise
        # {symbol} would be passed to the model literally
        + f"\nBased on all information before {curday}, analyse positive "
        f"developments and potential concerns for {symbol}, then predict its "
        f"price movement for next week ({horizon})."
    )
    return final_user_msg
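
# Sketch of the assembled user message (sections in order):
#   [Company Introduction]: ...
#   From YYYY-MM-DD to YYYY-MM-DD, <SYM>'s stock price increased from X to Y.
#   [Headline] ... / [Summary] ...      (up to 5 sampled items per week)
#   [Basic Financials] ...              (or "not available" / "not requested")
#   Based on all information before <curday>, analyse ... (<curday> to <curday+7d>)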
# ---------- LLM CALL ------------------------------------------------------------
def chat_completion(prompt: str,
                    model: str = FIN_MODEL_ID,
                    temperature: float = 0.2,
                    stream: bool = False,
                    symbol: str = "STOCK") -> str:
    full_prompt = f"{SYSTEM_PROMPT}\n\n{prompt}"
    # Prefer local llama.cpp if available
    if llama_local is not None:
        try:
            params = {
                "max_tokens": 800,
                "temperature": temperature,
                "top_p": 0.9,
                "repeat_penalty": 1.05,
                "stop": ["</s>", "\n\n\n"],
            }
            if stream:
                collected = []
                for token in llama_local(
                    full_prompt,
                    stream=True,
                    **params,
                ):
                    if token and "choices" in token and token["choices"]:
                        t = token["choices"][0].get("text", "")
                        print(t, end="", flush=True)
                        collected.append(t)
                print()
                return "".join(collected)
            else:
                out = llama_local(full_prompt, **params)
                return out["choices"][0]["text"]
        except Exception as e:
            print(f"⚠️ Local llama.cpp error: {e}")
    # Fall back to the HF Inference API
    if hf_client is not None:
        gen_kwargs = {
            "max_new_tokens": 800,
            "temperature": temperature,
            "top_p": 0.9,
            "do_sample": True,
            "repetition_penalty": 1.05,
            "return_full_text": False,
        }
        try:
            if stream:
                collected = []
                for event in hf_client.text_generation(full_prompt, stream=True, **gen_kwargs):
                    if isinstance(event, str):
                        print(event, end="", flush=True)
                        collected.append(event)
                print()
                return "".join(collected)
            else:
                output = hf_client.text_generation(full_prompt, **gen_kwargs)
                return output
        except Exception as e:
            print(f"⚠️ HF Inference error for {model}: {e}")
    # Last resort
    print(f"⚠️ No LLM backend available, using mock response for {symbol}")
    return create_mock_ai_response(symbol)
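
# Backend precedence, as wired above: local GGUF via llama.cpp, then the HF
# Inference API, then the canned mock response — so the app degrades gracefully
# instead of raising when no backend is configured.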
# ---------- DEBUG INFO ----------------------------------------------------------
def get_debug_info() -> str:
    lines = []
    # Backend/model
    backend = (
        "local-gguf-llama.cpp" if llama_local is not None else (
            "hf-inference" if hf_client is not None else "mock")
    )
    model_name = FIN_MODEL_ID if hf_client is not None else (os.path.basename(FIN_GGUF_PATH) if FIN_GGUF_PATH else "mock-model")
    lines.append(f"Backend: {backend}")
    lines.append(f"Model: {model_name}")
    # Libraries
    try:
        import gradio as _gr
        gradio_ver = getattr(_gr, "__version__", "unknown")
    except Exception:
        gradio_ver = "unavailable"
    try:
        import pandas as _pd
        pandas_ver = getattr(_pd, "__version__", "unknown")
    except Exception:
        pandas_ver = "unavailable"
    try:
        import requests as _req
        requests_ver = getattr(_req, "__version__", "unknown")
    except Exception:
        requests_ver = "unavailable"
    llama_cpp_ver = "available" if Llama is not None else "unavailable"
    # Report the actual huggingface_hub version (the previous code reported the
    # module path of InferenceClient, which is not a version)
    try:
        import huggingface_hub as _hfh
        hf_hub_ver = getattr(_hfh, "__version__", "unknown")
    except Exception:
        hf_hub_ver = "unavailable"
    lines.append(f"Libraries: gradio={gradio_ver}, pandas={pandas_ver}, requests={requests_ver}, llama_cpp={llama_cpp_ver}, hf_hub={hf_hub_ver}")
    # Torch
    if torch is not None:
        lines.append(f"torch: {torch.__version__}, cuda_available={torch.cuda.is_available() if hasattr(torch, 'cuda') else False}")
    else:
        lines.append("torch: unavailable")
    # System CPU/RAM
    try:
        cpu_percent = psutil.cpu_percent(interval=0.5)
        ram = psutil.virtual_memory()
        lines.append(f"CPU: {cpu_percent}%")
        lines.append(f"RAM: {ram.percent}% used ({round(ram.used/1e9,2)}GB/{round(ram.total/1e9,2)}GB)")
    except Exception as e:
        lines.append(f"System: psutil error: {e}")
    # Env flags
    lines.append(f"HF_TOKEN set: {'yes' if bool(HF_TOKEN) else 'no'}")
    lines.append(f"FIN_GGUF_PATH: {FIN_GGUF_PATH or '-'}")
    lines.append(f"FIN_GGUF_REPO/FILE: {FIN_GGUF_REPO or '-'} / {FIN_GGUF_FILE or '-'}")
    return "\n".join(lines)
def create_mock_ai_response(symbol: str) -> str:
    """Generate a mock AI response when no LLM backend is available."""
    return f"""
[Positive Developments]
• Strong market position and brand recognition for {symbol}
• Recent quarterly earnings showing growth potential
• Positive analyst sentiment and institutional investor interest
• Technological innovation and market expansion opportunities

[Potential Concerns]
• Market volatility and economic uncertainty
• Competitive pressures in the industry
• Regulatory changes that may impact operations
• Global economic factors affecting stock performance

[Prediction & Analysis]
Based on the current market conditions and company fundamentals, {symbol} is expected to show moderate growth over the next week. The stock may experience some volatility but should maintain an upward trend with a potential price increase of 2-5%. This prediction is based on current market sentiment and technical analysis patterns.

Note: This is a demonstration response using mock data. For real investment decisions, please consult with qualified financial professionals.
"""
# ---------- MAIN PREDICTION FUNCTION --------------------------------------------
def predict(symbol: str = "AAPL",
            curday: str | None = None,
            n_weeks: int = 3,
            use_basics: bool = False,
            stream: bool = False) -> tuple[str, str]:
    try:
        # A default of today() would be evaluated once at import time and go
        # stale in a long-running Space, so resolve it per call instead
        if curday is None:
            curday = today()
        steps = [n_weeks_before(curday, n) for n in range(n_weeks + 1)][::-1]
        df = get_stock_data(symbol, steps)
        df = attach_news(symbol, df)
        prompt_info = make_prompt(symbol, df, curday, use_basics)
        answer = chat_completion(prompt_info, stream=stream, symbol=symbol)
        return prompt_info, answer
    except Exception as e:
        error_msg = f"Error in prediction: {str(e)}"
        print(f"Prediction error: {e}")  # Log the error for debugging
        return error_msg, error_msg
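
# Example (uses mock data and a mock response when no keys are configured):
#   prompt, answer = predict("AAPL", n_weeks=3, use_basics=True)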
# ---------- HUGGINGFACE SPACES INTERFACE -----------------------------------------
def hf_predict(symbol, n_weeks, use_basics):
    # 1. Get the current day
    curday = date.today().strftime("%Y-%m-%d")
    # 2. Call predict
    prompt, answer = predict(
        symbol=symbol.upper(),
        curday=curday,
        n_weeks=int(n_weeks),
        use_basics=bool(use_basics),
        stream=False
    )
    return prompt, answer
# ---------- GRADIO INTERFACE ------------------------------------------------------
def create_interface():
    with gr.Blocks(
        title="FinRobot Forecaster (Fin-o1-14B)",
        theme=gr.themes.Soft(),
        css="""
        .gradio-container {
            max-width: 1200px !important;
            margin: auto !important;
        }
        /* Both output textboxes share the same sizing rules */
        #model_prompt_textbox textarea,
        #analysis_results_textbox textarea {
            overflow-y: auto !important;
            max-height: none !important;
            min-height: 400px !important;
            resize: vertical !important;
            white-space: pre-wrap !important;
            word-wrap: break-word !important;
            height: auto !important;
        }
        #model_prompt_textbox,
        #analysis_results_textbox {
            height: auto !important;
        }
        .textarea textarea {
            overflow-y: auto !important;
            max-height: 500px !important;
            resize: vertical !important;
        }
        .textarea {
            height: auto !important;
            min-height: 300px !important;
        }
        .gradio-textbox {
            height: auto !important;
            max-height: none !important;
        }
        .gradio-textbox textarea {
            height: auto !important;
            max-height: none !important;
            overflow-y: auto !important;
        }
        """
    ) as demo:
        gr.Markdown("""
        # 🤖 FinRobot Forecaster (Fin-o1-14B)

        **AI-powered stock market analysis and prediction using TheFinAI/Fin-o1-14B**

        This application analyzes stock market data, company news, and financial metrics to provide comprehensive market insights and predictions.

        Model: `TheFinAI/Fin-o1-14B` via Hugging Face Inference API (CPU-friendly inference).
        If no `HF_TOKEN` is set, mock responses will be used for demonstration.
        """)
        with gr.Row():
            with gr.Column(scale=1):
                symbol = gr.Textbox(
                    label="Stock Symbol",
                    value="AAPL",
                    placeholder="Enter stock symbol (e.g., AAPL, MSFT, GOOGL)",
                    info="Enter the ticker symbol of the stock you want to analyze"
                )
                n_weeks = gr.Slider(
                    1, 6,
                    value=3,
                    step=1,
                    label="Historical Weeks to Analyze",
                    info="Number of weeks of historical data to include in analysis"
                )
                use_basics = gr.Checkbox(
                    label="Include Basic Financials",
                    value=True,
                    info="Include basic financial metrics in the analysis"
                )
                btn = gr.Button(
                    "🚀 Run Analysis",
                    variant="primary"
                )
            with gr.Column(scale=2):
                with gr.Tabs():
                    with gr.Tab("📊 Analysis Results"):
                        gr.Markdown("**AI Analysis & Prediction**")
                        output_answer = gr.Textbox(
                            label="",
                            lines=40,
                            show_copy_button=True,
                            interactive=False,
                            placeholder="AI analysis and predictions will appear here...",
                            container=True,
                            scale=1,
                            elem_id="analysis_results_textbox"
                        )
                    with gr.Tab("🔍 Model Prompt"):
                        gr.Markdown("**Generated Prompt**")
                        output_prompt = gr.Textbox(
                            label="",
                            lines=40,
                            show_copy_button=True,
                            interactive=False,
                            placeholder="Generated prompt will appear here...",
                            container=True,
                            scale=1,
                            elem_id="model_prompt_textbox"
                        )
                    with gr.Tab("🧰 Debug"):
                        gr.Markdown("**Runtime Debug Information**")
                        debug_box = gr.Textbox(
                            label="",
                            lines=30,
                            show_copy_button=True,
                            interactive=False,
                            value=get_debug_info(),
                            container=True,
                            scale=1,
                        )
                        refresh_btn = gr.Button("🔄 Refresh Debug Info")

        # Examples
        gr.Examples(
            examples=[
                ["AAPL", 3, False],
                ["MSFT", 4, True],
                ["GOOGL", 2, False],
                ["TSLA", 5, True],
                ["NVDA", 3, True]
            ],
            inputs=[symbol, n_weeks, use_basics],
            label="💡 Try these examples"
        )
        # Event handlers: run the analysis, then refresh the debug info once it
        # finishes (two independent .click() handlers would run concurrently,
        # so the refresh would not actually wait for the run)
        btn.click(
            fn=hf_predict,
            inputs=[symbol, n_weeks, use_basics],
            outputs=[output_prompt, output_answer],
            show_progress=True
        ).then(
            fn=get_debug_info,
            inputs=[],
            outputs=[debug_box],
        )
        # Refresh debug info on demand
        refresh_btn.click(
            fn=get_debug_info,
            inputs=[],
            outputs=[debug_box],
        )

        # Footer
        gr.Markdown("""
        ---
        **Disclaimer**: This application is for educational and research purposes only.
        The predictions and analysis provided should not be considered as financial advice.
        Always consult with qualified financial professionals before making investment decisions.
        """)
    return demo
# ---------- MAIN EXECUTION --------------------------------------------------------
if __name__ == "__main__":
    demo = create_interface()
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
        show_error=True,
        debug=False,
        quiet=True
    )
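
# Hugging Face Spaces routes traffic to port 7860 by default, which is why the
# launch call binds 0.0.0.0:7860. To test outside Spaces, run this file with
# `python app.py` and open http://localhost:7860 — the app.py filename is the
# usual Spaces convention and an assumption here, since the source doesn't name
# the file.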