"""Streamlit entry point for the LLM security labs (OWASP-inspired scenarios)."""

import os
import subprocess
import sys

import streamlit as st

# Ensure the app root (/app) is first on sys.path so utils.*, modules.*, and
# model.* are importable from anywhere.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

# st.set_page_config() must be the first Streamlit call in the script,
# so it runs before any st.warning()/st.error() emitted during model setup.
st.set_page_config(
    page_title="LLM Security Labs",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Environment setup. MODEL_PATH must match the file name that
# model/download_model.py writes.
MODEL_PATH = "/tmp/models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
os.environ["MODEL_PATH"] = MODEL_PATH
os.environ["STREAMLIT_HOME"] = "/tmp/.streamlit"
os.environ["XDG_CONFIG_HOME"] = "/tmp/.streamlit"
os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false"
os.environ["HF_HUB_CACHE"] = "/tmp/hf_cache"

# Create required directories
os.makedirs("/tmp/.streamlit", exist_ok=True)
os.makedirs("/tmp/hf_cache", exist_ok=True)
os.makedirs("/tmp/models", exist_ok=True)

# Download the model at runtime if it is not already present
if not os.path.exists(MODEL_PATH):
    st.warning("Model not found. Downloading...")
    try:
        subprocess.run(
            ["python3", os.path.join(os.path.dirname(__file__), "model", "download_model.py")],
            check=True,
            capture_output=True,
            text=True,
        )
        st.success("Model downloaded successfully.")
    except subprocess.CalledProcessError as e:
        st.error("Model download failed. Check HF_TOKEN or permissions.")
        st.text(f"Exit code: {e.returncode}")
        st.text(f"Command: {e.cmd}")
        st.text(f"Stdout: {e.output or 'N/A'}")
        st.text(f"Stderr: {e.stderr or 'N/A'}")
        st.stop()

# Add local subdirectories to the Python path
sys.path.append(os.path.join(os.path.dirname(__file__), "modules"))
sys.path.append(os.path.join(os.path.dirname(__file__), "model"))
sys.path.append(os.path.join(os.path.dirname(__file__), "utils"))

# Lab imports — each module must expose a run() entry point (see routing below)
from modules import (
    prompt_injection_2025v1,
    insecure_output_handling_2025v1,
    training_data_poisoning_2025v1,
    sensitive_information_disclosure_2025v1,
)

print("✅ prompt_injection_2025v1 loaded", file=sys.stderr)
print("✅ insecure_output_handling_2025v1 loaded", file=sys.stderr)
print("✅ training_data_poisoning_2025v1 loaded", file=sys.stderr)
print("✅ sensitive_information_disclosure_2025v1 loaded", file=sys.stderr)

# Map the ?lab= query parameter to lab modules.
# Note: st.experimental_get_query_params() is deprecated in newer Streamlit
# releases in favor of st.query_params.
query_params = st.experimental_get_query_params()
lab_key = query_params.get("lab", [None])[0]

lab_map = {
    "prompt-injection": prompt_injection_2025v1,
    "insecure-output-handling": insecure_output_handling_2025v1,
    "training-data-poisoning": training_data_poisoning_2025v1,
    "sensitive-information-disclosure": sensitive_information_disclosure_2025v1,
}

# Routing: dispatch to the requested lab, or render the landing page
if lab_key in lab_map:
    st.title(f"🧪 LLM Security Lab – {lab_key.replace('-', ' ').title()} (2025v1)")
    lab_map[lab_key].run()
else:
    st.title("🧪 LLM Security Labs – OWASP-Inspired Threat Scenarios")
    st.markdown("""
This is the landing page for the LLM security labs. Each lab demonstrates a known
class of risk aligned with the evolving OWASP LLM Top 10.

Access a lab directly via the `lab` query parameter (e.g. `/app?lab=prompt-injection`):

- [Prompt Injection](?lab=prompt-injection)
- [Insecure Output Handling (coming soon)](#)
- [Training Data Poisoning (coming soon)](#)
- [Sensitive Information Disclosure (coming soon)](#)

Each lab includes:

- **Realistic model interaction**
- **Risk scoring and feedback**
- **Detailed logging**
- **Optional RAG integration** where applicable
""")
    st.markdown("Built using TinyLlama-1.1B-Chat + llama.cpp")