# BMS AI Assistant — FastAPI application entry point.
# NOTE(review): the original lines here ("Spaces: / Running / Running") were
# status residue from the hosting page the file was copied from, not code.
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import os
import logging

# Configure logging before importing the app.* modules so any module-level
# loggers they create pick up the root configuration.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

from app.intent_parser import parser
from app.forecasting import forecast_demand
from app.data_loader import loader
from app.config import DEFAULT_HORIZON
from app.llm_engine import llm
from app.pdf_generator import generate_forecast_pdf, generate_general_pdf

app = FastAPI(title="BMS AI Assistant")

# CORS: allow any origin — the browser frontend may be served from elsewhere.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Mount static files (CSS/JS assets referenced by the HTML UI).
# Prefer the Docker absolute path; fall back to a directory next to the
# package for local testing.
static_dir = "/app/static"
if not os.path.exists(static_dir):
    static_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), "static")

if os.path.exists(static_dir):
    app.mount("/static", StaticFiles(directory=static_dir), name="static")
    logger.info(f"Mounted static files from: {static_dir}")
else:
    logger.warning(f"Static directory not found at {static_dir}")
class ChatRequest(BaseModel):
    """Request body for the chat endpoint: one free-text user message."""

    message: str
# Root Endpoint
# NOTE(review): the route decorator appears to have been lost in a formatting
# pass — the "# Root Endpoint" comment implies this serves GET /. Confirm the
# intended path before merging.
@app.get("/", response_class=HTMLResponse)
async def read_root():
    """Serve the single-page frontend.

    Checks a list of known locations for index.html (Docker absolute path
    first, then paths relative to this file) and returns the first one that
    exists. Returns a 500 HTML error page when none is found.
    """
    possible_paths = [
        "/app/static/index.html",  # Docker absolute path
        os.path.join(os.path.dirname(os.path.dirname(__file__)), "static", "index.html"),  # Local relative
        "static/index.html",  # Simple relative (cwd-dependent)
    ]
    for path in possible_paths:
        if os.path.exists(path):
            logger.info(f"Serving HTML from: {path}")
            with open(path, "r", encoding="utf-8") as f:
                return HTMLResponse(content=f.read())
    logger.error("Could not find index.html in any known location.")
    return HTMLResponse(content="<h1>Error: index.html not found</h1>", status_code=500)
# NOTE(review): route decorator lost in a formatting pass; "/health" is the
# conventional liveness path — confirm against the deployment's probe config.
@app.get("/health")
async def health_check():
    """Liveness probe: report that the service is up."""
    return {"status": "ok", "message": "BMS AI Assistant is running"}
# Chat Endpoint
# Mutable module-level conversation state, shared across all requests.
# "last_forecast" caches the most recent forecast/inventory lookup so a
# follow-up "generate report" request has context to work from;
# "last_answer" keeps the text of the last reply.
chat_context = {"last_forecast": None, "last_answer": None}
# NOTE(review): the @app.post decorator appears to have been lost in a
# formatting pass; "/chat" is the conventional path — confirm against the
# frontend's fetch URL.
@app.post("/chat")
async def chat_endpoint(request: ChatRequest):
    """Handle one chat message: parse the intent, dispatch, and reply.

    Known intents: demand_forecast, list_items, check_inventory,
    generate_report; anything else falls through to the LLM. Returns a JSON
    payload with "intent", "answer" (markdown/HTML text), and "forecast"
    (list of forecast records, empty unless a forecast was produced).
    On any exception, returns a 500 with the error text in "answer".
    """
    user_text = request.message
    try:
        parsed = parser.parse(user_text)
        intent = parsed["intent"]
        item_code = parsed["item_code"]
        location = parsed["location"]
        horizon = parsed["horizon_days"]

        response_data = {"intent": intent, "answer": "", "forecast": []}

        if intent == "demand_forecast":
            if not item_code:
                response_data["answer"] = "I can help with forecasting, but I need to know which item you are interested in."
            else:
                if not horizon:
                    horizon = DEFAULT_HORIZON
                forecast = forecast_demand(item_code, horizon, location)
                response_data["forecast"] = forecast
                # Remember this result so a later "generate report" works.
                chat_context["last_forecast"] = {"item_code": item_code, "forecast": forecast, "location": location}
                total = sum(d['qty'] for d in forecast) if forecast else 0
                response_data["answer"] = f"Forecast for {item_code} over next {horizon} days is {total} units."

        elif intent == "list_items":
            items = loader.get_items()
            msg = "**Available Items:**\n"
            # Cap the listing at the first 10 items to keep the reply short.
            for item in items[:10]:
                msg += f"- {item['item_code']}: {item['description']}\n"
            response_data["answer"] = msg

        elif intent == "check_inventory":
            if not item_code:
                response_data["answer"] = "Please specify an item code to check inventory for."
            else:
                inv_data = loader.get_inventory(item_code, location)
                if not inv_data:
                    response_data["answer"] = f"No inventory data found for {item_code}."
                else:
                    msg = f"**Inventory for {item_code}:**\n"
                    for record in inv_data:
                        loc = record.get('region', 'Unknown')
                        qty = record.get('qty_on_hand', 0)
                        status = record.get('status', 'Unknown')
                        msg += f"- Location: {loc}, On Hand: {qty}, Status: {status}\n"
                    response_data["answer"] = msg
                # Update context for potential report generation
                chat_context["last_forecast"] = {"item_code": item_code, "forecast": [], "location": location}

        elif intent == "generate_report":
            # Check if we have a valid context
            if chat_context.get("last_forecast") and chat_context["last_forecast"].get("item_code"):
                lf = chat_context["last_forecast"]
                # Use the item code from the last interaction, unless the user
                # explicitly named a different item in this request (we may
                # not have forecast data for the new item — proceed anyway).
                target_item = lf["item_code"]
                if item_code and item_code != target_item:
                    target_item = item_code
                filename = generate_forecast_pdf(target_item, lf.get("forecast", []), lf.get("location"))
                # BUG FIX: the generated filename was never interpolated into
                # the download link — a literal placeholder was emitted and
                # the `filename` variable was unused.
                response_data["answer"] = f"Report generated for {target_item}: <a href='/static/reports/{filename}' target='_blank'>Download PDF</a>"
            else:
                response_data["answer"] = "I don't have enough context to generate a report. Please ask for a forecast or inventory check first."

        else:
            # Fallback to LLM for unrecognized intents only.
            # BUG FIX: this call previously ran unconditionally after the
            # intent branches, overwriting every answer they produced.
            response_data["answer"] = llm.generate_response(user_text)

        chat_context["last_answer"] = response_data["answer"]
        return JSONResponse(content=response_data)
    except Exception as e:
        logger.error(f"Error in chat endpoint: {e}")
        return JSONResponse(content={"answer": f"Error processing request: {str(e)}"}, status_code=500)