amitbhatt6075 committed
Commit a5c0333 · 1 Parent(s): e00b7a2

fix: Correct Hugging Face config (sdk, python_version, app_file)

Files changed (2):
  1. README.md +6 -4
  2. api/main.py +79 -7
README.md CHANGED
@@ -1,11 +1,13 @@
  ---
  title: Reachify Ai Service
- emoji: 🌖
+ emoji: 🚀
  colorFrom: indigo
- colorTo: indigo
- sdk: docker
+ colorTo: purple
+ sdk: python
+ python_version: 3.11
+ app_file: api/main.py
  pinned: false
  license: mit
  ---
 
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
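With sdk: python, python_version: 3.11, and app_file: api/main.py, the Space is presumably meant to launch the Python entry point directly rather than building a Docker image. A minimal local smoke test of that entry point might look like the sketch below, assuming uvicorn is installed, the command runs from the repo root, and port 7860 (the port Spaces conventionally serves on) is used; the script itself is hypothetical and not part of this commit.

# smoke_test.py — hypothetical local runner, not part of this commit.
# Serves the FastAPI app from api/main.py the way the Space would,
# so http://localhost:7860/ exercises the health_check route.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("api.main:app", host="0.0.0.0", port=7860)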
api/main.py CHANGED
@@ -169,13 +169,85 @@ app = FastAPI(title="Reachify AI Service (Deploy-Ready)", version="11.0.0")
 
  @app.on_event("startup")
  def startup_event():
-     """
-     BARE MINIMUM STARTUP FOR DEBUGGING.
-     This function does nothing except print a message.
-     If the server starts with this, we know the problem is in the original startup logic.
-     """
-     print("--- ✅ FINAL TEST: Bare minimum startup. If this works, the server is alive. ---")
-     pass
+     # Make sure we can modify the global variables.
+     global _llm_instance, _creative_director, _support_agent, _ai_strategist, _vector_store, \
+         _budget_predictor, _influencer_matcher, _performance_predictor, _payout_forecaster, \
+         _earnings_optimizer, _earnings_encoder, _likes_predictor, _comments_predictor, \
+         _revenue_forecaster, _performance_scorer
+ 
+     # --- STEP 1: DOWNLOAD AND LOAD THE LLM MODEL ---
+     print("--- 🚀 AI Service Starting Up... ---")
+     try:
+         os.makedirs(MODEL_SAVE_DIRECTORY, exist_ok=True)
+         if not os.path.exists(LLAMA_MODEL_PATH):
+             print(f" - LLM model not found locally. Downloading '{MODEL_FILENAME}' from '{MODEL_REPO}'...")
+             hf_hub_download(
+                 repo_id=MODEL_REPO,
+                 filename=MODEL_FILENAME,
+                 local_dir=MODEL_SAVE_DIRECTORY,
+                 local_dir_use_symlinks=False,
+             )
+             print(" - ✅ Model downloaded successfully.")
+         else:
+             print(f" - LLM model found locally at {LLAMA_MODEL_PATH}. Skipping download.")
+ 
+         # === LLM LOADING IS NOW ENABLED ===
+         print(" - Loading Llama LLM into memory...")
+         _llm_instance = Llama(model_path=LLAMA_MODEL_PATH, n_gpu_layers=0, n_ctx=2048, verbose=False)
+         print(" - ✅ LLM loaded successfully.")
+ 
+     except Exception:
+         print(" - ❌ FATAL ERROR: Could not download or load the LLM model. This may be a memory limit; LLM features will be disabled.")
+         traceback.print_exc()
+         _llm_instance = None  # Ensure the global stays None on failure.
+ 
+     # --- STEP 2: INITIALIZE ALL AI COMPONENTS THAT NEED THE LLM ---
+     if _llm_instance:
+         try:
+             print(" - Initializing AI components that depend on the LLM...")
+ 
+             _creative_director = CreativeDirector(llm_instance=_llm_instance)
+             if VectorStore:
+                 _vector_store = VectorStore()
+                 print(" - RAG Engine Ready.")
+             _ai_strategist = AIStrategist(llm_instance=_llm_instance, store=_vector_store)
+             _support_agent = SupportAgent(llm_instance=_llm_instance, embedding_path=EMBEDDING_MODEL_PATH, db_path=DB_PATH)
+ 
+             print(" - ✅ Core AI components (Director, Strategist, Agent) are online.")
+ 
+         except Exception as e:
+             print(f" - ❌ FAILED to initialize core AI components: {e}")
+             traceback.print_exc()
+     else:
+         print(" - ⚠️ SKIPPING initialization of LLM-dependent components because the LLM failed to load.")
+ 
+     # --- STEP 3: LOAD ALL OTHER MODELS (these don't depend on the LLM) ---
+     print(" - Loading ML models from joblib files...")
+     model_paths = {
+         'budget': ('_budget_predictor', 'budget_predictor_v1.joblib'),
+         'matcher': ('_influencer_matcher', 'influencer_matcher_v1.joblib'),
+         'performance': ('_performance_predictor', 'performance_predictor_v1.joblib'),
+         'payout': ('_payout_forecaster', 'payout_forecaster_v1.joblib'),
+         'earnings': ('_earnings_optimizer', 'earnings_model.joblib'),
+         'earnings_encoder': ('_earnings_encoder', 'earnings_encoder.joblib'),
+         'likes_predictor': ('_likes_predictor', 'likes_predictor_v1.joblib'),
+         'comments_predictor': ('_comments_predictor', 'comments_predictor_v1.joblib'),
+         'revenue_forecaster': ('_revenue_forecaster', 'revenue_forecaster_v1.joblib'),
+         'performance_scorer': ('_performance_scorer', 'performance_scorer_v1.joblib'),
+     }
+     for name, (var, file) in model_paths.items():
+         path = os.path.join(MODELS_DIR, file)
+         try:
+             globals()[var] = joblib.load(path)
+             print(f" - Loaded {name} model.")
+         except FileNotFoundError:
+             globals()[var] = None
+             print(f" - ⚠️ WARNING: Model '{name}' not found at {path}. Endpoint will be disabled.")
+ 
+     print(" - Initializing Text Embedding Model...")
+     load_embedding_model(EMBEDDING_MODEL_PATH)
+ 
+     print("\n--- ✅ AI Service startup sequence finished! ---")
 
  @app.get("/")
  def health_check():
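startup_event leans on module-level names that sit outside this hunk: stdlib and third-party imports plus the model path constants. A minimal sketch of what that preamble presumably looks like follows; every concrete value below is an illustrative placeholder, not the actual configuration from api/main.py.

# Hypothetical preamble for api/main.py — all concrete values are placeholders.
import os
import traceback

import joblib
from fastapi import FastAPI
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
# ...plus project-local imports of CreativeDirector, AIStrategist,
# SupportAgent, VectorStore, and load_embedding_model (module paths unknown).

# Assumed example values; the real repo ID, filenames, and paths live in the repo.
MODEL_REPO = "TheBloke/Llama-2-7B-Chat-GGUF"
MODEL_FILENAME = "llama-2-7b-chat.Q4_K_M.gguf"
MODEL_SAVE_DIRECTORY = "models/llm"
LLAMA_MODEL_PATH = os.path.join(MODEL_SAVE_DIRECTORY, MODEL_FILENAME)
MODELS_DIR = "models"
EMBEDDING_MODEL_PATH = "models/embedding"
DB_PATH = "data/support.db"

app = FastAPI(title="Reachify AI Service (Deploy-Ready)", version="11.0.0")

# Globals the startup hook assigns into; starting as None means a failed
# load simply leaves the corresponding feature disabled.
_llm_instance = None
_creative_director = _support_agent = _ai_strategist = _vector_store = None
_budget_predictor = _influencer_matcher = _performance_predictor = None
_payout_forecaster = _earnings_optimizer = _earnings_encoder = None
_likes_predictor = _comments_predictor = _revenue_forecaster = None
_performance_scorer = None

The "if VectorStore:" guard in step 2 suggests VectorStore may itself be None when its import fails, so the real module would likely wrap that project-local import in a try/except.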