FROM python:3.9-slim

ARG HF_TOKEN
ENV HF_TOKEN=${HF_TOKEN}
ENV MODEL_REPO="TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
ENV MODEL_FILE="tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf"
ENV MODEL_DIR="/tmp/models"
ENV MODEL_PATH="/tmp/models/${MODEL_FILE}"

# cache-bust on demand
ARG BUILD_ID=deploy-001
ENV BUILD_ID=${BUILD_ID}
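# Example invocation (sketch; the image tag "templatea-app" is only a placeholder):
#   docker build --build-arg BUILD_ID=$(date +%s) --build-arg HF_TOKEN=$HF_TOKEN -t templatea-app .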

WORKDIR /app

# System deps
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential curl git git-lfs cmake python3-dev wget \
 && rm -rf /var/lib/apt/lists/*

# Python deps first (better cache)
COPY requirements.txt /app/requirements.txt
RUN pip3 install --no-cache-dir -r /app/requirements.txt huggingface_hub

# App code (TemplateA subtree root)
COPY streamlit_app.py /app/streamlit_app.py
COPY modules/ /app/modules/

# Canonical code (you rsync these into TemplateA before subtree push)
COPY model/ /app/model/
COPY utils/ /app/utils/

# (Optional) download model at build time; or leave for runtime
# RUN python3 /app/model/download_model.py
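# Alternative sketch, assuming the huggingface_hub CLI installed above is acceptable
# (it picks up HF_TOKEN from the environment):
#   RUN huggingface-cli download ${MODEL_REPO} ${MODEL_FILE} --local-dir ${MODEL_DIR}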

# Streamlit config
EXPOSE 8501
HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health || exit 1
RUN mkdir -p /tmp/.streamlit /.streamlit && chmod -R 777 /.streamlit
ENV STREAMLIT_HOME=/tmp/.streamlit
ENV XDG_CONFIG_HOME=/tmp/.streamlit
ENV STREAMLIT_BROWSER_GATHER_USAGE_STATS=false
RUN printf '[browser]\ngatherUsageStats = false\n' > /tmp/.streamlit/config.toml

# Path where your downloader stores the model
ENV MODEL_PATH=/tmp/models/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf

ENTRYPOINT ["streamlit", "run", "streamlit_app.py", "--server.port=8501", "--server.address=0.0.0.0"]
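
# Run locally (sketch; "templatea-app" is a placeholder tag matching the build example above):
#   docker run -p 8501:8501 -e HF_TOKEN=$HF_TOKEN templatea-app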