# BMS-AI-BOT/app/llm_engine.py
import os
import re

from ctransformers import AutoModelForCausalLM

# Quantised TinyLlama chat model served locally through ctransformers (CPU only).
MODEL_REPO = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
MODEL_FILE = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
MODEL_TYPE = "llama"

# Keyword filters applied to every query before it reaches the model.
PROFANITY_LIST = ['damn', 'hell', 'crap', 'stupid', 'idiot', 'dumb', 'suck']
OFF_TOPIC_KEYWORDS = ['weather', 'sports', 'politics', 'religion', 'movie', 'game', 'recipe', 'joke', 'story', 'music', 'celebrity']

class LLMEngine:
    """Lazy-loading wrapper around a local GGUF chat model, with simple guard rails."""

    def __init__(self):
        self.model = None
        self.context = ""
        self.load_context()

    def load_context(self):
        """Read the company knowledge base used to ground the model's answers."""
        try:
            context_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data", "company_context.txt")
            if os.path.exists(context_path):
                with open(context_path, "r", encoding="utf-8") as f:
                    self.context = f.read()
        except Exception as e:
            print(f"Error loading context: {e}")

    def load_model(self):
        """Initialise the quantised model on first use and keep it cached afterwards."""
        if self.model is None:
            print("Loading LLM...")
            try:
                self.model = AutoModelForCausalLM.from_pretrained(
                    MODEL_REPO,
                    model_file=MODEL_FILE,
                    model_type=MODEL_TYPE,
                    context_length=2048,
                    gpu_layers=0,
                )
                print("LLM Loaded.")
            except Exception as e:
                print(f"Failed to load LLM: {e}")

    def check_profanity(self, text):
        """Return True if the query contains a blocked word (whole-word match)."""
        for word in PROFANITY_LIST:
            if re.search(r'\b' + re.escape(word) + r'\b', text.lower()):
                return True
        return False

    def check_off_topic(self, text):
        """Return True if the query mentions an off-topic keyword.

        Whole-word matching avoids false positives such as 'story' firing on 'history'.
        """
        for keyword in OFF_TOPIC_KEYWORDS:
            if re.search(r'\b' + re.escape(keyword) + r'\b', text.lower()):
                return True
        return False

    def generate_response(self, user_query):
        # Guard rails: refuse unprofessional or off-topic queries before touching the LLM.
        if self.check_profanity(user_query):
            return "I'm here to assist with business operations. Please keep our conversation professional."
        if self.check_off_topic(user_query):
            return "I specialize in demand forecasting, inventory management, and order processing. How can I help you with these?"

        # Canned replies for greetings and small talk keep common interactions instant and consistent.
        greetings = ["hi", "hello", "hey", "greetings", "good morning", "good afternoon", "good evening"]
        if user_query.lower().strip().strip("!.,?") in greetings:
            return "Hello! I'm BMS AI Assistant. I can help you with:\n• Demand Forecasting\n• Inventory Checks\n• Supplier Information\n• Order Requisitions\n• PDF Reports\n\nWhat would you like to know?"
        if any(w in user_query.lower() for w in ['capabilities', 'what can you do', 'help me', 'functions']):
            return "I can assist you with:\n1. Demand Forecasting\n2. Inventory Management\n3. Supplier Information\n4. Order Processing\n5. PDF Reports\n\nTry asking: 'Forecast for BMS0015'"
        if any(w in user_query.lower() for w in ['who made you', 'who developed', 'who created', 'who built']):
            return "I'm BMS AI Assistant, developed to help you manage inventory and forecast demand efficiently."
        if any(p in user_query.lower() for p in ['how are you', 'how do you do', 'how is it going']):
            return "I'm functioning well and ready to assist you! How can I help with your inventory or forecasting needs today?"

        # Everything else is answered by the LLM, loaded lazily on the first real query.
        if self.model is None:
            self.load_model()
        if self.model is None:
            return "I'm sorry, I couldn't load my language model. Please try asking about specific items."

        system_prompt = (
            "You are BMS AI Assistant, a helpful and professional AI for Business Management Systems. "
            "Your goal is to assist users with demand forecasting, inventory management, and supplier information. "
            "Answer questions based ONLY on the provided context. If the answer is not in the context, politely state that you don't have that information. "
            "Be concise but friendly."
        )
        # TinyLlama chat template: system prompt plus grounding context, then the user turn.
        full_prompt = f"<|system|>\n{system_prompt}\n\nContext:\n{self.context}</s>\n<|user|>\n{user_query}</s>\n<|assistant|>"

        try:
            response = self.model(
                full_prompt,
                max_new_tokens=150,
                temperature=0.1,
                repetition_penalty=1.1,
                top_k=40,
                top_p=0.9,
                stop=["</s>", "<|user|>", "<|system|>"]
            )
            # Trim anything generated after a stray system tag before returning.
            if "<|system|>" in response:
                response = response.split("<|system|>")[0]
            return response.strip()
        except Exception as e:
            return f"Error generating response: {e}"

# Module-level singleton shared by the rest of the application.
llm = LLMEngine()
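
# --- Usage sketch (an addition, not part of the original module) ---
# A minimal way to exercise the engine from the command line, assuming
# data/company_context.txt sits one directory above this file and the
# GGUF model can be fetched from the Hugging Face Hub. The first query
# that is not handled by a canned reply triggers the model load.
if __name__ == "__main__":
    print(llm.generate_response("hello"))                            # canned greeting, no model load
    print(llm.generate_response("What can you do?"))                 # canned capability list
    print(llm.generate_response("Which supplier stocks BMS0015?"))   # routed to the LLM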