Dan Flower committed on
Commit
550775a
·
1 Parent(s): d322cef

rename for debugging

Files changed (1)
  1. streamlit_app.py +102 -0
streamlit_app.py ADDED
@@ -0,0 +1,102 @@
+ import os
+ import subprocess
+ import sys
+ import streamlit as st
+
+ # Ensure /app (root) is in path so /app/utils/ is importable as utils.*
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
+ # Ensure /app is in sys.path so we can import utils.* from anywhere
+ sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
+ # Environment setup
+ os.environ["MODEL_PATH"] = "/tmp/models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
+ os.environ["STREAMLIT_HOME"] = "/tmp/.streamlit"
+ os.environ["XDG_CONFIG_HOME"] = "/tmp/.streamlit"
+ os.environ["BROWSER_GATHER_USAGE_STATS"] = "false"
+ os.environ["HF_HUB_CACHE"] = "/tmp/hf_cache"
+
+ # Create required directories
+ os.makedirs("/tmp/.streamlit", exist_ok=True)
+ os.makedirs("/tmp/hf_cache", exist_ok=True)
+ os.makedirs("/tmp/models", exist_ok=True)
+
+ # Runtime model download if needed
+
+ MODEL_PATH = "/tmp/models/TinyLlama-1.1B-Chat-v1.0.Q4_K_M.gguf"
+ if not os.path.exists(MODEL_PATH):
+     st.warning("Model not found. Downloading...")
+     try:
+         subprocess.run(["python3", "model/download_model.py"], check=True, capture_output=True)
+         st.success("Model downloaded successfully.")
+     except subprocess.CalledProcessError as e:
+         st.error("Model download failed. Check HF_TOKEN or permissions.")
+         st.text(f"Exit code: {e.returncode}")
+         st.text(f"Command: {e.cmd}")
+         st.text(f"Output: {e.output if hasattr(e, 'output') else 'N/A'}")
+         st.stop()
+
+
+ # Add local subdirectories to Python path
+ sys.path.append(os.path.join(os.path.dirname(__file__), "modules"))
+ sys.path.append(os.path.join(os.path.dirname(__file__), "model"))
+ sys.path.append(os.path.join(os.path.dirname(__file__), "utils"))
+
+ # Lab imports
+ from modules import (
+     prompt_injection_2025v1,
+     insecure_output_handling_2025v1,
+     training_data_poisoning_2025v1,
+     sensitive_information_disclosure_2025v1
+ )
+
+ import sys
+ print("✅ prompt_injection_2025v1 loaded", file=sys.stderr)
+ print("✅ insecure_output_handling_2025v1 loaded", file=sys.stderr)
+ print("✅ training_data_poisoning_2025v1 loaded", file=sys.stderr)
+ print("✅ sensitive_information_disclosure_2025v1 loaded", file=sys.stderr)
+
+ # Streamlit UI setup
+ st.set_page_config(
+     page_title="LLM Security Labs",
+     layout="wide",
+     initial_sidebar_state="expanded"
+ )
+
+ # Map Streamlit URL paths to lab modules
+ query_params = st.experimental_get_query_params()
+ lab_key = query_params.get("lab", [None])[0]
+
+ lab_map = {
+     "prompt-injection": prompt_injection_2025v1,
+     "insecure-output-handling": insecure_output_handling_2025v1,
+     "training-data-poisoning": training_data_poisoning_2025v1,
+     "sensitive-information-disclosure": sensitive_information_disclosure_2025v1
+ }
+
+ # Routing
+ if lab_key in lab_map:
+     st.title(f"🧪 LLM Security Lab – {lab_key.replace('-', ' ').title()} (2025v1)")
+     lab_map[lab_key].run()
+ else:
+     st.title("🧪 LLM Security Labs – OWASP-Inspired Threat Scenarios")
+     st.markdown("""
+ This is the landing page for the LLM security labs. Each lab demonstrates a known class of risk aligned with the evolving OWASP LLM Top 10.
+
+ Access a lab directly via one of the following URLs:
+
+ #- `/app?lab=prompt-injection`
+ #- `/app?lab=insecure-output-handling`
+ #- `/app?lab=training-data-poisoning`
+ #- `/app?lab=sensitive-information-disclosure`
+
+ - [Prompt Injection](?lab=prompt-injection)
+ - [Insecure Output Handling (coming soon)](#)
+ - [Training Data Poisoning (coming soon)](#)
+ - [Sensitive Information Disclosure (coming soon)](#)
+
+ Each lab includes:
+ - **Realistic model interaction**
+ - **Risk scoring and feedback**
+ - **Detailed logging**
+ - **Optional RAG integration** where applicable
+ """)
+     st.markdown("Built using Zephyr-7B + llama.cpp")
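
The routing block above calls `lab_map[lab_key].run()`, so each file under `modules/` is expected to expose a module-level `run()` that renders its lab page. A minimal sketch of that contract, with header text and widgets that are assumptions (none of them appear in this commit):

```python
# Hypothetical modules/prompt_injection_2025v1.py — illustrates the assumed run() contract only.
import streamlit as st


def run():
    """Render the prompt-injection lab page."""
    st.header("Prompt Injection Lab (2025v1)")
    st.markdown("Try to get the assistant to ignore its system prompt.")

    user_prompt = st.text_area("Your prompt")
    if st.button("Submit") and user_prompt:
        # The real lab would send this to the local GGUF model via llama.cpp;
        # the sketch only echoes the input so it stays self-contained.
        st.write(f"Model would receive: {user_prompt}")
```

Likewise, `model/download_model.py` is invoked via subprocess but is not part of this diff. A plausible sketch, assuming the GGUF file is fetched with `hf_hub_download` (the repo id and the optional `HF_TOKEN` use are assumptions):

```python
# Hypothetical model/download_model.py — the actual script is not shown in this commit.
import os

from huggingface_hub import hf_hub_download

REPO_ID = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"  # assumed source repo
FILENAME = "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"   # matches the MODEL_PATH env var above


def main():
    os.makedirs("/tmp/models", exist_ok=True)
    path = hf_hub_download(
        repo_id=REPO_ID,
        filename=FILENAME,
        local_dir="/tmp/models",
        token=os.environ.get("HF_TOKEN"),  # only needed for gated or private repos
    )
    print(f"Model downloaded to {path}")


if __name__ == "__main__":
    main()
```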