# FILE: ai-service/core/strategist.py
import traceback
from typing import Dict, Any, List
import json
import re

# NOTE(review): guarded in the same style as SafetyGuard below so the module can
# be imported in environments without llama-cpp (unit tests, the mock content-plan
# skill). Callers must still inject a call-compatible LLM instance.
try:
    from llama_cpp import Llama
except ImportError:
    Llama = None
    print("⚠️ llama_cpp not installed. AIStrategist requires an injected LLM-compatible instance.")

try:
    from core.guardrails.safety import SafetyGuard
except ImportError:
    SafetyGuard = None
    print("⚠️ Safety module not found. Skipping checks.")


class AIStrategist:
    """Collection of LLM-backed marketing "skills".

    Skills include: campaign briefs, weekly summaries, RAG-enabled chat,
    dashboard/analytics insights, caption assistance, influencer coaching and
    project blueprints. All skills share one injected llama_cpp.Llama instance
    (``self.llm``) and an optional vector store (``self.store``) used for RAG.
    """

    # Single, clean __init__ (earlier revisions accidentally defined it twice;
    # they have been merged into this one).
    def __init__(self, llm_instance: Llama, store=None):
        """
        Args:
            llm_instance: llama_cpp.Llama (or any call-compatible) model. Required.
            store: Optional vector DB exposing ``search(query, n_results)`` for RAG.

        Raises:
            ValueError: if llm_instance is None.
        """
        if llm_instance is None:
            raise ValueError("AIStrategist requires a valid Llama instance.")
        self.llm = llm_instance
        self.store = store  # Vector DB Store
        print("--- AIStrategist initialized successfully (RAG Ready). ---")

    def generate_campaign_brief(self, brand_name: str, campaign_goal: str, target_audience: str, budget_range: str) -> Dict[str, Any]:
        """
        Generates a structured, JSON-formatted campaign brief and cleans the output.

        Returns:
            Dict with keys "title", "description", "goal_kpi", "content_guidelines",
            or ``{"error": ...}`` on any model/parsing failure.
        """
        print(f"--- Strategist Skill: Generating campaign brief for brand '{brand_name}'.")
        prompt = f"""
[SYSTEM]
You are an expert campaign strategist. Your task is to generate a creative and actionable campaign brief in a valid JSON object format. Do not add any text before or after the JSON object.

[CLIENT INPUT]
- Brand Name: {brand_name}
- Primary Goal: {campaign_goal}
- Target Audience: {target_audience}
- Budget: {budget_range}

[YOUR TASK]
Generate a JSON object with keys: "title", "description", "goal_kpi", and "content_guidelines" (as a list of strings).
- "title": A short, catchy campaign title.
- "description": A one-paragraph summary of the campaign's core idea.
- "goal_kpi": The single most important Key Performance Indicator (KPI) for this goal.
- "content_guidelines": A list of 3 creative content ideas for influencers.

[JSON OUTPUT]
"""
        raw_text = 'No raw text available'  # initialized so the except handler can always log it
        try:
            response_dict = self.llm(
                prompt,
                max_tokens=700,
                temperature=0.8,
                # NOTE(review): "\n\n" as a stop token can truncate JSON that contains
                # blank lines; kept as-is to preserve tuned behavior — confirm via evals.
                stop=["[CLIENT INPUT]", "\n\n", "User:"],
                echo=False
            )
            raw_text = response_dict['choices'][0]['text'].strip()

            # Strip markdown fences / leading chatter so json.loads sees a bare object.
            if '```json' in raw_text:
                # FIX: splitting on '```json\n' raised IndexError when the fence had
                # no trailing newline; split on the fence itself and strip instead.
                raw_text = raw_text.split('```json', 1)[1].split('```')[0].strip()
            elif '{' in raw_text:
                raw_text = raw_text[raw_text.find('{'):raw_text.rfind('}') + 1]

            json_response = json.loads(raw_text)
            print("--- Strategist Skill: Successfully parsed brief from LLM.")

            # The model sometimes wraps scalar fields in one-element lists; unwrap them.
            if 'goal_kpi' in json_response and isinstance(json_response['goal_kpi'], list):
                print("--- Strategist Skill: Cleaning up 'goal_kpi' field (list -> string).")
                json_response['goal_kpi'] = json_response['goal_kpi'][0] if json_response['goal_kpi'] else 'N/A'
            if 'title' in json_response and isinstance(json_response['title'], list):
                json_response['title'] = json_response['title'][0] if json_response['title'] else 'AI Generated Title'
            if 'description' in json_response and isinstance(json_response['description'], list):
                json_response['description'] = json_response['description'][0] if json_response['description'] else 'AI Generated Description'

            return json_response
        except (json.JSONDecodeError, KeyError) as e:
            print(f"--- Strategist Skill FATAL ERROR: Failed to decode/parse JSON from model. Error: {e}. Raw output: '{raw_text}'")
            return {"error": "The AI model returned an invalid format. Please try again."}
        except Exception as e:
            print(f"--- Strategist Skill FATAL ERROR in generate_campaign_brief: {e}")
            traceback.print_exc()
            return {"error": "An internal error occurred in the AI model."}

    def generate_strategy_from_prompt(self, user_prompt: str) -> str:
        """
        Generates a general strategy from a raw prompt.

        Returns the model's text, or a fixed error sentence on failure.
        """
        print(f"--- Strategist Skill (General): Received prompt: '{user_prompt[:50]}...'")
        try:
            response = self.llm(
                user_prompt,
                max_tokens=750,
                temperature=0.75,
                stop=["User:", "Client:", "System:"],
            )
            generated_text = response['choices'][0]['text'].strip()
            print("--- Strategist Skill (General): Received response from LLM.")
            return generated_text
        except Exception as e:
            print(f"--- Strategist Skill (General) ERROR: {e}")
            traceback.print_exc()
            return "An error occurred in the AI model while generating the strategy."

    def generate_weekly_summary(self, metrics: Dict[str, Any]) -> str:
        """
        Generates a concise, human-readable weekly summary from structured metrics data.

        Args:
            metrics: Expected keys include 'brand_id', 'start_date', 'end_date',
                'total_ad_spend', 'total_clicks', 'new_followers',
                'top_performing_campaign' (all read via .get with defaults).
        """
        print(f"--- Strategist Skill (Summary): Received metrics for brand {metrics.get('brand_id')}")
        prompt_template = f"""
You are an expert digital marketing analyst writing a weekly summary for a client.
Your tone should be positive, encouraging, and easy to understand. Do not use jargon.
Focus on the key results and what they mean.

Client's Performance Data for the week of {metrics.get('start_date')} to {metrics.get('end_date')}:
- Total Ad Spend: ${metrics.get('total_ad_spend', 0):.2f}
- Clicks from Ads: {metrics.get('total_clicks', 0)}
- New Social Media Followers: {metrics.get('new_followers', 0)}
- Top Performing Campaign this week: "{metrics.get('top_performing_campaign', 'N/A')}"

Based on this data, write a short summary (about 3-4 sentences). Start with a positive opening and end with an encouraging closing statement.

Summary:
"""
        print("--- Strategist Skill (Summary): Sending composed prompt to LLM...")
        try:
            response = self.llm(
                prompt_template,
                max_tokens=250,
                temperature=0.6,
                stop=["Client:", "Data:"],
                echo=False
            )
            summary_text = response['choices'][0]['text'].strip()
            print("--- Strategist Skill (Summary): Received response from LLM.")
            if not summary_text:
                return "The AI model returned an empty summary."
            return summary_text
        except Exception as e:
            print(f"--- Strategist Skill (Summary) ERROR: {e}")
            traceback.print_exc()
            return "An error occurred in the AI model while generating the weekly summary."

    def generate_chat_response(self, prompt: str, context: str) -> str:
        """
        RAG-Enabled Chat Response with Safety Checks.

        Pipeline: (1) reject restricted input via SafetyGuard (if available),
        (2) retrieve one knowledge-base document from the vector store (best
        effort), (3) answer with dashboard context + retrieved knowledge.
        """
        print(f"--- Strategist Skill (Chat): Processing: '{prompt}'")

        # 1. SAFETY GUARDRAIL (fail-safe: skipped when the module is unavailable)
        if SafetyGuard and not SafetyGuard.validate_input(prompt):
            return "I cannot generate a response as the query contains restricted content."

        # 2. RAG RETRIEVAL (knowledge injection; failures only warn, never block the chat)
        retrieved_knowledge = ""
        if self.store:
            try:
                print(" - 🔍 Searching knowledge base...")
                # Search DB for relevant context
                kb_docs = self.store.search(prompt, n_results=1)
                if kb_docs:
                    retrieved_knowledge = f"\n[INTERNAL KNOWLEDGE]\n{kb_docs[0]}\n"
            except Exception as e:
                print(f" - ⚠️ RAG Search Warning: {e}")

        master_prompt = f"""
[SYSTEM]
You are a digital marketing strategist AI. Use the Context and Internal Knowledge below to answer the Client.

[CONTEXT FROM DASHBOARD]
{context}
{retrieved_knowledge}
[CLIENT'S QUESTION]
{prompt}

[YOUR RESPONSE]
"""
        try:
            response = self.llm(
                master_prompt,
                max_tokens=500,
                temperature=0.5,
                stop=["[CLIENT'S QUESTION]", "[SYSTEM]"],
                echo=False
            )
            return response['choices'][0]['text'].strip()
        except Exception as e:
            traceback.print_exc()
            return "Internal error in Chat Module."

    def generate_dashboard_insights(self, kpis: Dict[str, Any]) -> str:
        """Generates bullet-point insights for the dashboard from KPI data."""
        print(f"--- Strategist Skill (Insights): Received KPIs: {kpis}")
        # NOTE(review): this prompt appears truncated ("...") and does not
        # interpolate `kpis` — the model only sees the role line. Verify against
        # the original, un-mangled file; kept verbatim to preserve behavior.
        prompt = f"""
[SYSTEM]
You are a senior data analyst at Reachify...

[YOUR INSIGHTFUL BULLET POINTS]
-
"""
        try:
            response = self.llm(prompt, max_tokens=250, temperature=0.7, stop=["[SYSTEM]", "Human:", "\n\n"], echo=False)
            insight_text = response['choices'][0]['text'].strip()
            # Re-attach the bullet primer consumed by the prompt if the model omitted it.
            if not insight_text.startswith('-'):
                insight_text = '- ' + insight_text
            print("--- Strategist Skill (Insights): Successfully received response from LLM.")
            return insight_text
        except Exception as e:
            print(f"--- Strategist Skill (Insights) ERROR: {e}")
            traceback.print_exc()
            return "- Could not generate AI insights due to an internal model error."

    def generate_analytics_insights(self, analytics_data: dict) -> str:
        """
        Takes campaign analytics data and generates 3 actionable insights using the LLM.
        """
        print(f"--- Strategist Skill (Analytics Insights): Received analytics data.")
        # NOTE(review): prompt appears truncated ("...") and does not interpolate
        # `analytics_data`; kept verbatim — confirm against the original file.
        prompt = f"""
[SYSTEM]
You are an expert Campaign Analyst...

[YOUR ANALYSIS - 3 ACTIONABLE BULLET POINTS]
-
"""
        print("--- Strategist Skill (Analytics Insights): Sending composed prompt to LLM...")
        try:
            response = self.llm(prompt, max_tokens=200, temperature=0.6, stop=["[SYSTEM]", "\n\n-"], echo=False)
            insights_text = response['choices'][0]['text'].strip()
            if not insights_text.startswith('-'):
                insights_text = '- ' + insights_text
            print("--- Strategist Skill (Analytics Insights): Successfully received and processed response.")
            return insights_text
        except Exception as e:
            print(f"--- Strategist Skill (Analytics Insights) FATAL ERROR: {e}")
            traceback.print_exc()
            return "- AI insights could not be generated due to an internal model error."

    def get_caption_assistance(self, caption: str, action: str, guidelines: str = None) -> str:
        """
        Provides AI assistance for writing captions based on a specified action.

        Args:
            caption: The influencer's draft caption.
            action: One of 'improve', 'hashtags', 'check_guidelines'.
            guidelines: Required when action == 'check_guidelines'.
        """
        print(f"--- Strategist Skill (Caption Assist): Received action: '{action}'")
        system_prompt = "You are a helpful and creative social media marketing assistant for influencers. You are concise and direct."

        if action == 'improve':
            user_prompt = f"Make the following Instagram caption more engaging and impactful. Keep the core message but enhance the wording.\n\nOriginal:\n---\n{caption}\n\nImproved:"
        elif action == 'hashtags':
            user_prompt = f"Suggest a list of 7 relevant and trending hashtags for the following Instagram post. Provide ONLY the hashtags, starting with # and separated by spaces.\n\nPost Caption:\n---\n{caption}\n\nHashtags:"
        elif action == 'check_guidelines' and guidelines:
            user_prompt = f"Carefully check if the following caption meets ALL the rules in the provided guidelines. Be strict. First, respond with only 'YES' or 'NO'. Then, on a new line, explain which specific rules were broken, or confirm that all rules were followed.\n\nGuidelines:\n---\n{guidelines}\n\nCaption to Check:\n---\n{caption}\n\nAnalysis:"
        else:
            return "Invalid action or missing guidelines provided to the AI assistant."

        full_prompt = f"[SYSTEM]\n{system_prompt}\n\n[USER]\n{user_prompt}\n\n[ASSISTANT]\n"
        try:
            print(f" - Calling LLM for caption assistance (action: {action})...")
            response = self.llm(
                full_prompt,
                max_tokens=256,
                temperature=0.7,
                stop=["[USER]", "[SYSTEM]"],
                echo=False
            )
            generated_text = response['choices'][0]['text'].strip()
            print(f" - ✅ LLM generated response.")
            return generated_text
        except Exception as e:
            print(f"--- Strategist Skill (Caption Assist) ERROR: {e}")
            traceback.print_exc()
            return "An error occurred while getting assistance from the AI."

    def generate_influencer_analytics_summary(self, kpis: Dict[str, Any]) -> str:
        """
        Takes an influencer's KPIs and generates a short, encouraging, and actionable summary.
        """
        print(f"--- Strategist Skill (Influencer Analytics): Received KPIs for analysis.")
        # Strict, directive prompt: forces a 2-sentence, no-list response.
        prompt = f"""
[SYSTEM]
You are "Spark", a friendly AI Analyst for social media influencers. Your task is to write a 2-sentence summary of the user's performance.
- Sentence 1: Start with a positive highlight from the data.
- Sentence 2: Give ONE simple, actionable tip for what to do next.
- BE CONCISE and encouraging. DO NOT explain what KPIs are. DO NOT use lists.

[INFLUENCER'S DATA]
- Engagement Rate: {kpis.get('avgEngagementRate', 0.0):.2f}%
- Total Reach on approved posts: {kpis.get('totalReach', 0)}
- Approved Posts: {kpis.get('totalSubmissions', 0)}

[YOUR 2-SENTENCE SUMMARY]
"""
        try:
            print(" - Calling LLM for influencer analytics summary (v2 prompt)...")
            response = self.llm(
                prompt,
                max_tokens=100,  # We only need a short response
                temperature=0.7,
                stop=["[SYSTEM]", "[USER]", "User:", "System:"],
                echo=False
            )
            summary_text = response['choices'][0]['text'].strip()
            # Extra cleanup: keep only the first line to drop any trailing AI chit-chat.
            if "\n" in summary_text:
                summary_text = summary_text.split('\n')[0]
            print(" - ✅ LLM generated summary successfully.")
            return summary_text
        except Exception as e:
            print(f"--- Strategist Skill (Influencer Analytics) ERROR: {e}")
            return "AI summary could not be generated at this time."

    def generate_influencer_growth_plan(self, influencer_data: Dict[str, Any]) -> List[str]:
        """
        Analyzes an influencer's live data and returns up to 3 personalized
        growth tips (crash-proof: any model error yields a fallback message).
        """
        print(f"--- Strategist Skill (Growth Plan): Influencer {influencer_data.get('fullName')} ke liye plan banaya ja raha hai.")

        # Handle missing/None captions up front: .get() may return None, so use
        # `or 'N/A'` to guarantee a sliceable string for the prompt below.
        best_caption = influencer_data.get('bestPostCaption') or 'N/A'
        worst_caption = influencer_data.get('worstPostCaption') or 'N/A'

        prompt = f"""
[INST] You are an expert social media coach. Analyze the following data for an influencer named {influencer_data.get('fullName')} and provide ONLY 3 short, actionable tips based on it. Start each tip on a new line.

- Niche: {influencer_data.get('category', 'Not specified')}
- Avg Engagement: {influencer_data.get('avgEngagementRate', 0.0):.2f}%
- Best Post was about: '{best_caption[:50]}'
- Worst Post was about: '{worst_caption[:50]}'

Your 3 tips:
[/INST]
"""
        try:
            print("--- Strategist Skill (Growth Plan): Simplified LLM ko call kiya jaa raha hai...")
            response = self.llm(
                prompt,
                max_tokens=256,
                temperature=0.7,
                stop=["[INST]", "User:", "System:"],
                echo=False
            )
            raw_text = response['choices'][0]['text'].strip()
            # FIX: the old chained .lstrip('- ').lstrip('1. ')... stripped character
            # SETS, mangling tips that legitimately start with a digit (e.g.
            # "3 quick wins..."). Strip only true list markers ("-", "*", "1.", "2)").
            tips = [re.sub(r'^(?:\s*(?:[-*]|\d+[.)]))*\s*', '', tip.strip())
                    for tip in raw_text.split('\n') if tip.strip()]
            print(f"--- Strategist Skill (Growth Plan): LLM se tips successfully generate ho gaye: {tips}")
            return tips[:3]
        except Exception as e:
            print(f"--- Strategist Skill (Growth Plan) FATAL ERROR: {e}")
            traceback.print_exc()
            return ["AI Coach is currently unavailable due to a technical error."]

    # NOTE(review): an earlier revision defined generate_service_blueprint TWICE;
    # the first definition (with its own inline parser and a redundant local
    # `import re`) was shadowed at class-creation time and was dead code. Only the
    # effective version is kept; it shares _get_ai_response_and_parse with
    # generate_growth_plan below.
    def generate_service_blueprint(self, service_type: str, requirements: str) -> Dict[str, Any]:
        """
        Analyzes user requirements and generates a structured project blueprint
        using the LLM. (This version is for the 'web-dev' service type.)
        """
        print(f"--- Strategist Skill (WEBSITE): Generating plan for '{service_type}' request.")
        prompt = f"""
[SYSTEM]
You are an expert project planner for a top-tier digital agency. Analyze the client's request below and generate a concise project blueprint for a WEBSITE.
YOU MUST FOLLOW THE FORMAT OF THE EXAMPLE BELOW EXACTLY.
- For DELIVERABLES, provide a list of 4-5 specific website features separated by "|".
- For STACK, PRICE_EST, and TIMELINE, you MUST provide a single, summarized value.

[PERFECT EXAMPLE]
TITLE:: Modern E-Commerce Store for a Fashion Brand
DELIVERABLES:: Dynamic Product Catalog | Secure Shopping Cart & Checkout | User Account & Order History | Admin Dashboard
STACK:: Next.js & TailwindCSS (Frontend), Supabase (Backend)
PRICE_EST:: $8,000 - $12,000
TIMELINE:: 8-10 Weeks
[/PERFECT EXAMPLE]

[CLIENT REQUEST]
- Service Type: {service_type}
- Description: {requirements}

[YOUR BLUEPRINT]
TITLE::"""
        # Delegate LLM call + key::value parsing to the shared helper.
        return self._get_ai_response_and_parse(prompt)

    def generate_growth_plan(self, platform_handle: str, goals: str, challenges: str) -> Dict[str, Any]:
        """
        Creates a 3-month management plan based on an influencer's input.
        Separate from the website blueprint but shares the same parser.
        """
        print(f"--- Strategist Skill (GROWTH): Generating plan for '{platform_handle}'.")
        prompt = f"""
[SYSTEM]
You are a Talent Manager for a top influencer agency. Create a 3-month management plan. Follow the example format exactly.
Use "|" to separate monthly services. Use "Monthly Retainer" for Price Estimate and "Initial Contract Term" for Timeline.

[PERFECT EXAMPLE]
TITLE:: 3-Month YouTube Growth & Monetization Strategy
DELIVERABLES:: Weekly Content Calendar (2 Videos, 5 Shorts) | SEO Title & Description Writing | Proactive Brand Outreach (5 brands/month) | Monthly Performance Analytics Report
STACK:: YouTube Studio, TubeBuddy, Notion
PRICE_EST:: $1,500 / month
TIMELINE:: 3-Month Initial Contract
[/PERFECT EXAMPLE]

[CLIENT REQUEST]
- Platform Handle: {platform_handle}
- Goals: {goals}
- Challenges: {challenges}

[YOUR BLUEPRINT]
TITLE::"""
        # This skill also uses the same reliable helper.
        return self._get_ai_response_and_parse(prompt)

    def _get_ai_response_and_parse(self, prompt: str) -> Dict[str, Any]:
        """
        Internal helper: calls the LLM and robustly parses the KEY::value format
        used by the blueprint/growth-plan prompts.

        The prompt ends with a "TITLE::" primer, so the model's completion is the
        title onward; we re-prepend "TITLE:: " before parsing. Fields default to
        placeholder values so callers always get a fully-populated dict.
        """
        try:
            response_dict = self.llm(
                prompt,
                max_tokens=400,
                temperature=0.5,
                stop=["[CLIENT REQUEST]", "[SYSTEM]", "[/PERFECT EXAMPLE]"],
                echo=False
            )
            raw_text = "TITLE:: " + response_dict['choices'][0]['text'].strip()
            print(f"--- AI Raw Response ---\n{raw_text}\n---")

            blueprint = {
                'title': 'AI Generated Plan',
                'deliverables': ['Analysis in progress...'],
                'stack': 'To be determined',
                'price_est': 'Pending',
                'timeline': 'Pending'
            }

            # Find all KEY::value pairs; each value runs until the next KEY:: or EOF.
            pairs = re.findall(r'(\b[A-Z_]+\b)::(.*?)(?=\n\b[A-Z_]+\b::|$)', raw_text, re.DOTALL)
            for key, value in pairs:
                key, value = key.strip().upper(), value.strip()
                if key == 'TITLE':
                    # Summary fields keep only their first line.
                    blueprint['title'] = value.split('\n')[0].strip()
                elif key == 'STACK':
                    blueprint['stack'] = value.split('\n')[0].strip()
                elif key == 'PRICE_EST':
                    blueprint['price_est'] = value.split('\n')[0].strip()
                elif key == 'TIMELINE':
                    blueprint['timeline'] = value.split('\n')[0].strip()
                elif key == 'DELIVERABLES':
                    # Deliverables are a "|"-separated list.
                    deliverables_list = [d.strip() for d in value.split('|') if d.strip()]
                    if deliverables_list:
                        blueprint['deliverables'] = deliverables_list

            print(f"--- Parser Result ---: {blueprint}")
            return blueprint
        except Exception as e:
            error_msg = f"A critical error occurred in the AI model or parser. Error: {e}"
            print(f"--- AI FATAL ERROR: {error_msg}")
            return {
                'title': 'Error Generating Plan',
                'deliverables': ['AI model failed to respond or there was a system error.'],
                'stack': 'N/A',
                'price_est': 'N/A',
                'timeline': 'N/A'
            }

    def generate_weekly_content_plan(self, context: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generates 3 content options (MOCK MODE for immediate response — no LLM call).
        Use this until server capacity is upgraded.
        """
        print(f"--- Strategist Skill (Plan): Generating for '{context.get('niche')}'.")

        niche = context.get("niche", "General")
        trends = [t['name'] for t in context.get("active_trends", [])]
        trend = trends[0] if trends else "Trending Audio"

        # Simulate a dynamic response based on inputs.
        return {
            "options": [
                {
                    "type": "Viral Bet",
                    "title": f"Reel: {trend} Challenge",
                    "platform": "Instagram",
                    "contentType": "Reel",
                    "instructions": f"Use the '{trend}' audio. Show a quick transition related to {niche}. Keep it under 15s.",
                    "reasoning": "High viral potential due to current trend momentum."
                },
                {
                    "type": "Community",
                    "title": "Story: Poll of the Day",
                    "platform": "Instagram",
                    "contentType": "Story",
                    "instructions": "Post a 'This or That' poll related to your niche. Engage with replies.",
                    "reasoning": "Boosts engagement rate by encouraging direct interaction."
                },
                {
                    "type": "Niche Authority",
                    "title": "Carousel: Top 3 Tips",
                    "platform": "Instagram",
                    "contentType": "Carousel",
                    "instructions": f"Share 3 lesser-known tips about {niche}. Use high-quality photos.",
                    "reasoning": "Establishes authority and saves value for followers."
                }
            ]
        }