amitbhatt6075 committed on
Commit
28f6f67
·
1 Parent(s): a5c0333

fix: Implement Dockerfile for reliable deployment

Files changed (2)
  1. Dockerfile +11 -28
  2. README.md +3 -4
Dockerfile CHANGED
@@ -1,37 +1,20 @@
-# Start with a stable Python image
+# Start with the official Python 3.11 image
 FROM python:3.11-slim

-# Set environment variables for non-interactive installs and the app directory
-ENV PYTHONUNBUFFERED=1
-ENV APP_HOME=/app
-
-# Set the CMake arguments that llama-cpp-python now requires.
-# We set GGML_CUDA=OFF because the free CPU container doesn't have a GPU.
-ENV CMAKE_ARGS="-DGGML_CUDA=OFF"
-
-# Set the working directory
-WORKDIR $APP_HOME
+# Set the working directory inside the container
+WORKDIR /app

-# Install system dependencies needed for building packages
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    build-essential \
-    && apt-get clean \
-    && rm -rf /var/lib/apt/lists/*
-
-# Copy ONLY the requirements file to leverage Docker's cache
-COPY requirements.txt ./
-
-# Install Python dependencies
-# The CMAKE_ARGS environment variable will be used during the installation of llama-cpp-python
-RUN pip install --no-cache-dir --upgrade pip && \
-    pip install --no-cache-dir -r requirements.txt
+# Copy the requirements file first and install dependencies
+# This is a best practice for Docker caching
+COPY requirements.txt requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt

-# Copy the entire application code into the container
-# This comes AFTER installing dependencies, so changes to code don't break the cache.
+# Copy the rest of your application code
 COPY . .

-# Expose the port the app will run on
+# Expose the port the app will run on (Hugging Face uses 7860)
 EXPOSE 7860

-# The command to run your FastAPI application using uvicorn
+# Command to run the application using uvicorn
+# We need to specify the host and port for Hugging Face Spaces
 CMD ["uvicorn", "api.main:app", "--host", "0.0.0.0", "--port", "7860"]
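Ordering COPY requirements.txt before COPY . . lets Docker reuse the cached dependency layer whenever only application code changes, so rebuilds skip the pip install step. A quick local smoke test of the rebuilt image might look like the sketch below; the reachify-ai-service tag is a hypothetical name chosen only for illustration.

# Build from the repository root; the image tag is an illustrative placeholder
$ docker build -t reachify-ai-service .
# Run on the same port the Space expects
$ docker run --rm -p 7860:7860 reachify-ai-service
# In a second terminal, FastAPI's auto-generated docs page should confirm the app is serving
$ curl http://localhost:7860/docs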
README.md CHANGED
@@ -3,11 +3,10 @@ title: Reachify Ai Service
 emoji: 🚀
 colorFrom: indigo
 colorTo: purple
-sdk: python
-python_version: 3.11
-app_file: api/main.py
+sdk: docker
+app_port: 7860
 pinned: false
 license: mit
 ---

-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+This is a FastAPI-based AI service running inside a Docker container.
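With sdk: docker and app_port: 7860 in the front matter, Spaces builds the repository's Dockerfile and routes HTTP traffic to port 7860 inside the container. Once the Space is running, a request against its public URL should reach the service; the owner and Space names below are placeholders assumed from this repository, following the usual <owner>-<space>.hf.space pattern.

# Placeholder URL; substitute the actual owner and Space name
$ curl https://amitbhatt6075-reachify-ai-service.hf.space/docs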