"""LLM engine for the BMS AI Assistant.

Wraps a local GGUF TinyLlama model (via ctransformers) behind a
content-moderation / intent-routing layer so that common queries
(greetings, capability questions, off-topic or unprofessional input)
are answered instantly without ever invoking the model.
"""

import os
import re

# Heavy third-party dependencies are imported lazily / guarded so this
# module can be imported (and the moderation helpers exercised) even when
# they are not installed; the model itself is only needed in load_model().
try:
    from huggingface_hub import hf_hub_download  # noqa: F401  kept: may be used later in this file
except ImportError:
    hf_hub_download = None

# Configuration
MODEL_REPO = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
MODEL_FILE = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
MODEL_TYPE = "llama"

# Content Moderation Lists
PROFANITY_LIST = [
    'damn', 'hell', 'crap', 'stupid', 'idiot', 'dumb', 'suck',
]

OFF_TOPIC_KEYWORDS = [
    'weather', 'sports', 'politics', 'religion', 'movie', 'game',
    'recipe', 'joke', 'story', 'music', 'celebrity'
]

# Compiled once at import time instead of per word per call; re.escape
# guards against regex metacharacters should the word list ever change.
_PROFANITY_RE = re.compile(
    r'\b(?:' + '|'.join(map(re.escape, PROFANITY_LIST)) + r')\b'
)


class LLMEngine:
    """Lazy-loading wrapper around the local chat model, with guardrails."""

    def __init__(self):
        self.model = None   # loaded on first use; see load_model()
        self.context = ""   # company knowledge injected into every prompt
        self.load_context()

    def load_context(self):
        """Read data/company_context.txt (one level above this file's
        directory) into self.context.

        Best-effort: a missing file or any read error leaves the context
        empty so the assistant still works without it.
        """
        try:
            context_path = os.path.join(
                os.path.dirname(os.path.dirname(__file__)),
                "data", "company_context.txt",
            )
            if os.path.exists(context_path):
                with open(context_path, "r", encoding="utf-8") as f:
                    self.context = f.read()
        except Exception as e:
            print(f"Error loading context: {e}")

    def load_model(self):
        """Load the GGUF model on first call; no-op if already loaded.

        Failures (missing dependency, download error) are reported and
        swallowed, leaving self.model as None so generate_response() can
        degrade gracefully.
        """
        if self.model is None:
            print("Loading LLM... this may take a moment.")
            try:
                # Imported here so the module does not require
                # ctransformers until the model is actually needed.
                from ctransformers import AutoModelForCausalLM
                self.model = AutoModelForCausalLM.from_pretrained(
                    MODEL_REPO,
                    model_file=MODEL_FILE,
                    model_type=MODEL_TYPE,
                    context_length=2048,
                    gpu_layers=0,
                )
                print("LLM Loaded successfully.")
            except Exception as e:
                print(f"Failed to load LLM: {e}")

    def check_profanity(self, text):
        """Return True if *text* contains a whole-word match from
        PROFANITY_LIST (case-insensitive)."""
        return _PROFANITY_RE.search(text.lower()) is not None

    def check_off_topic(self, text):
        """Return True if any OFF_TOPIC_KEYWORDS substring appears in
        *text* (case-insensitive)."""
        text_lower = text.lower()
        return any(keyword in text_lower for keyword in OFF_TOPIC_KEYWORDS)

    def generate_response(self, user_query):
        """Answer *user_query*, preferring canned responses over the LLM.

        Routing order: profanity -> off-topic -> greeting -> capabilities
        -> provenance -> small talk -> model-generated answer grounded in
        self.context. Returns a plain string in every branch.
        """
        query_lower = user_query.lower()

        # Content Moderation
        if self.check_profanity(user_query):
            return ("I'm here to assist with business operations. "
                    "Please keep our conversation professional and focused on "
                    "demand forecasting, inventory management, or order processing.")

        # Off-topic detection
        if self.check_off_topic(user_query):
            return ("I specialize in demand forecasting, inventory management, and order processing. "
                    "How can I help you with these business functions?")

        # Greetings - Enhanced (trailing punctuation tolerated)
        greetings = ["hi", "hello", "hey", "greetings", "good morning",
                     "good afternoon", "good evening"]
        query_clean = query_lower.strip().strip("!.,?")
        if query_clean in greetings:
            return ("Hello! I'm BMS AI Assistant. I can help you with:\n"
                    "• Demand Forecasting\n"
                    "• Inventory Checks\n"
                    "• Supplier Information\n"
                    "• Order Requisitions\n"
                    "• PDF Reports\n\n"
                    "What would you like to know?")

        # Capabilities query
        if any(word in query_lower for word in
               ['capabilities', 'what can you do', 'help me', 'functions']):
            return ("I can assist you with:\n\n"
                    "1. Demand Forecasting - Predict future demand for items\n"
                    "2. Inventory Management - Check stock levels across warehouses\n"
                    "3. Supplier Information - Get supplier details and lead times\n"
                    "4. Order Processing - Create purchase requisitions\n"
                    "5. PDF Reports - Download detailed reports\n\n"
                    "Try asking: 'Forecast for BMS0015' or 'Check inventory for BMS0042'")

        # Who developed you
        if any(word in query_lower for word in
               ['who made you', 'who developed', 'who created', 'who built']):
            return ("I'm BMS AI Assistant, developed to help you manage "
                    "inventory and forecast demand efficiently.")

        # How are you
        if any(phrase in query_lower for phrase in
               ['how are you', 'how do you do', 'how is it going']):
            return ("I'm functioning well and ready to assist you! How can I "
                    "help with your inventory or forecasting needs today?")

        # Load model if needed
        if self.model is None:
            self.load_model()
        if self.model is None:
            return ("I'm sorry, I couldn't load my language model. "
                    "Please try asking about specific items or inventory.")

        # Construct Prompt (TinyLlama chat template)
        system_prompt = (
            "You are BMS AI Assistant for business operations. "
            "Answer questions using ONLY the provided context. "
            "If the answer is not in the context, say you don't know. "
            "Do not make up facts. "
            "Keep answers concise (under 3 sentences). "
            "Be professional and helpful."
        )
        # NOTE(review): the source file was truncated mid-statement at this
        # point; the user/assistant turns and the generation call below were
        # reconstructed to match the visible <|system|> chat-template prefix
        # — confirm against the original file.
        full_prompt = (
            f"<|system|>\n{system_prompt}\n\nContext:\n{self.context}\n"
            f"<|user|>\n{user_query}\n<|assistant|>\n"
        )
        try:
            response = self.model(
                full_prompt,
                max_new_tokens=256,
                temperature=0.3,
                stop=["<|user|>", "<|system|>"],
            )
            return response.strip()
        except Exception as e:
            print(f"Error generating response: {e}")
            return "I'm sorry, something went wrong while generating a response."