import os
import subprocess
import sys
import streamlit as st
# Ensure /app (the repo root) is on sys.path so utils.* and other top-level
# packages are importable from anywhere
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
# Environment setup
os.environ["MODEL_PATH"] = "/tmp/models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
os.environ["STREAMLIT_HOME"] = "/tmp/.streamlit"
os.environ["XDG_CONFIG_HOME"] = "/tmp/.streamlit"
os.environ["STREAMLIT_BROWSER_GATHER_USAGE_STATS"] = "false"
os.environ["HF_HUB_CACHE"] = "/tmp/hf_cache"
# Create required directories
os.makedirs("/tmp/.streamlit", exist_ok=True)
os.makedirs("/tmp/hf_cache", exist_ok=True)
os.makedirs("/tmp/models", exist_ok=True)
# Runtime model download if needed (reuse the path set in MODEL_PATH above)
MODEL_PATH = os.environ["MODEL_PATH"]
# Temporary troubleshooting block (kept commented out): always run
# model/download_model.py and surface its stdout/stderr in the UI
#st.warning("Running model download step...")
#try:
#    result = subprocess.run(
#        ["python3", "model/download_model.py"],
#        check=True,
#        capture_output=True,
#        text=True  # ensures stdout/stderr are strings
#    )
#    st.success("Model download attempted.")
#    st.text("STDOUT:")
#    st.text(result.stdout)
#    st.text("STDERR:")
#    st.text(result.stderr)
#except subprocess.CalledProcessError as e:
#    st.error("Model download failed. Check HF_TOKEN or permissions.")
#    st.text(f"Exit code: {e.returncode}")
#    st.text(f"Command: {e.cmd}")
#    st.text("STDOUT:")
#    st.text(e.stdout or "No stdout")
#    st.text("STDERR:")
#    st.text(e.stderr or "No stderr")
#    st.stop()
# end of temp code
#if not os.path.exists(MODEL_PATH):
#    st.warning("Model not found. Downloading...")
#    try:
#        subprocess.run(["python3", "model/download_model.py"], check=True, capture_output=True)
#        st.success("Model downloaded successfully.")
#    except subprocess.CalledProcessError as e:
#        st.error("Model download failed. Check HF_TOKEN or permissions.")
#        st.text(f"Exit code: {e.returncode}")
#        st.text(f"Command: {e.cmd}")
#        st.text(f"Output: {e.output if hasattr(e, 'output') else 'N/A'}")
#        st.stop()
#st.markdown("## 📁 /tmp/models content:")
#st.text('\n'.join(os.listdir("/tmp/models")))
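# With both download paths above commented out, nothing fetches the GGUF file
# at runtime. The sketch below is one hedged way to close that gap using
# huggingface_hub; the repo_id is an assumption inferred from the model
# filename, so adjust it to wherever the quantized model is actually hosted.
if not os.path.exists(MODEL_PATH):
    try:
        from huggingface_hub import hf_hub_download

        with st.spinner("Model not found locally. Downloading..."):
            hf_hub_download(
                repo_id="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF",  # assumed host repo
                filename="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
                local_dir="/tmp/models",
            )
        st.success("Model downloaded.")
    except Exception as exc:
        st.error(f"Model download failed: {exc}. Check HF_TOKEN or network access.")
        st.stop()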
# Add local subdirectories to Python path
sys.path.append(os.path.join(os.path.dirname(__file__), "modules"))
sys.path.append(os.path.join(os.path.dirname(__file__), "model"))
sys.path.append(os.path.join(os.path.dirname(__file__), "utils"))
# Lab imports
from modules import (
    prompt_injection_2025v1,
    insecure_output_handling_2025v1,
    training_data_poisoning_2025v1,
    sensitive_information_disclosure_2025v1
)
print("✅ prompt_injection_2025v1 loaded", file=sys.stderr)
print("✅ insecure_output_handling_2025v1 loaded", file=sys.stderr)
print("✅ training_data_poisoning_2025v1 loaded", file=sys.stderr)
print("✅ sensitive_information_disclosure_2025v1 loaded", file=sys.stderr)
# Streamlit UI setup
st.set_page_config(
    page_title="LLM Security Labs",
    layout="wide",
    initial_sidebar_state="expanded"
)
# Map Streamlit URL paths to lab modules.
# Note: st.experimental_get_query_params() is deprecated in newer Streamlit
# releases in favor of st.query_params; the list-valued API is kept here as-is.
query_params = st.experimental_get_query_params()
lab_key = query_params.get("lab", [None])[0]
lab_map = {
    "prompt-injection": prompt_injection_2025v1,
    "insecure-output-handling": insecure_output_handling_2025v1,
    "training-data-poisoning": training_data_poisoning_2025v1,
    "sensitive-information-disclosure": sensitive_information_disclosure_2025v1
}
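# Each value in lab_map is assumed to expose a run() callable that renders the
# lab's UI. A hypothetical minimal module, for reference (illustrative names,
# not the actual lab code):
#
#   # modules/example_lab_2025v1.py
#   import streamlit as st
#
#   def run() -> None:
#       st.header("Example Lab")
#       prompt = st.text_area("Prompt")
#       if st.button("Submit") and prompt:
#           st.write("Model response would be rendered here")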
# Routing
if lab_key in lab_map:
    st.title(f"🧪 LLM Security Lab – {lab_key.replace('-', ' ').title()} (2025v1)")
    lab_map[lab_key].run()
else:
    st.title("🧪 LLM Security Labs – OWASP-Inspired Threat Scenarios")
    st.markdown("""
This is the landing page for the LLM security labs. Each lab demonstrates a known class of risk aligned with the evolving OWASP LLM Top 10.
Access a lab directly via one of the following URLs:
- [Prompt Injection](?lab=prompt-injection)
- [Insecure Output Handling (coming soon)](#)
- [Training Data Poisoning (coming soon)](#)
- [Sensitive Information Disclosure (coming soon)](#)
Each lab includes:
- **Realistic model interaction**
- **Risk scoring and feedback**
- **Detailed logging**
- **Optional RAG integration** where applicable
""")