import os
import re
import sys

from llama_cpp import Llama

# Make the project root importable so `core.rag.store` resolves when this
# module is run directly.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(current_dir)
sys.path.append(parent_dir)

from core.rag.store import VectorStore
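# Assumed interface: VectorStore.search(query, n_results) returns a list of
# document strings ranked by similarity; `chat` below truncates the top hit.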

class CreativeDirector:
    def __init__(self, llm_instance: Llama):
        if not llm_instance:
            raise ValueError("CreativeDirector received an invalid LLM instance.")
        print("🧠 Initializing AI Director with pre-loaded LLM...")
        self.llm = llm_instance
        # Persistent vector store used as long-term memory for RAG lookups.
        self.memory = VectorStore(collection_name="creative_mind")
        print("✅ AI Director is Online.")
    def chat(self, user_message: str, history: list, task_context: str):
        """Main chat logic with RAG, optimized for speed."""
        print(" - 🧠 Thinking...")
        # Retrieve the single most relevant memory and truncate it to keep the
        # prompt short; fall back to a generic instruction if nothing is found.
        # Note: `history` is currently unused here; context comes from the store.
        retrieved_docs = self.memory.search(user_message, n_results=1)
        expert_knowledge = retrieved_docs[0][:150] if retrieved_docs else "Be creative and direct."
        prompt = f"""Instruction: Act as a Viral Content Expert. Give 1 short tip for "{task_context}".
Context: {expert_knowledge}
User: {user_message}
Response:"""
        try:
            response = self.llm(
                prompt,
                max_tokens=50,
                stop=["Instruction:", "User:", "\n\n"],
                temperature=0.7,
                echo=False,
            )
            reply = response['choices'][0]['text'].strip()
            if not reply:
                return "Try showing a 'before vs after' comparison. It always works!"
            print(f" - 🗣️ Reply: {reply}")
            return reply
        except Exception as e:
            print(f" - ❌ AI Chat Error: {e}")
            return "My AI brain is a bit slow today. Please ask again!"
    def generate_final_plan(self, task_context: str, history: list):
        """Generates the final script, parsing labeled sections with plain-text fallbacks."""
        print(f" - 🎬 Generating final plan for: {task_context}")
        # Summarize only the last three messages to keep the prompt compact.
        conversation_summary = "\n".join([f"- {msg['content']}" for msg in history[-3:]])
        prompt = f"""Instruction: Create a video script for "{task_context}".
Chat Summary: {conversation_summary}
Format your answer exactly like this:
HOOK: (Write hook here)
SCRIPT: (Write script here)
VISUALS: (Write visuals here)
TOOLS: (Write tools here)
Response:"""
        try:
            response = self.llm(
                prompt,
                max_tokens=300,
                stop=["Instruction:", "Response:"],
                temperature=0.6,
                echo=False,
            )
            raw_text = response['choices'][0]['text'].strip()
            print(f" - 🤖 Raw Text: {raw_text}")
            # Each section runs until the next label; TOOLS runs to the end.
            hook_match = re.search(r'HOOK:\s*(.*?)(?=\nSCRIPT:)', raw_text, re.DOTALL | re.IGNORECASE)
            script_match = re.search(r'SCRIPT:\s*(.*?)(?=\nVISUALS:)', raw_text, re.DOTALL | re.IGNORECASE)
            visuals_match = re.search(r'VISUALS:\s*(.*?)(?=\nTOOLS:)', raw_text, re.DOTALL | re.IGNORECASE)
            tools_match = re.search(r'TOOLS:\s*(.*)', raw_text, re.DOTALL | re.IGNORECASE)
            return {
                "hook": hook_match.group(1).strip() if hook_match else "Start with a bang!",
                "script": script_match.group(1).strip() if script_match else raw_text,
                "visuals": [visuals_match.group(1).strip()] if visuals_match else ["Talking Head"],
                "tools": [tools_match.group(1).strip()] if tools_match else ["CapCut"],
            }
        except Exception as e:
            print(f" - ❌ Final Plan Generation Error: {e}")
            return {
                "hook": "Error generating plan.",
                "script": "Please try again later.",
                "visuals": [],
                "tools": [],
            }
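

# --- Usage sketch (assumptions labeled) ---
# A minimal example of driving the director end to end. The GGUF model path is
# hypothetical, and this assumes VectorStore can be constructed exactly as in
# __init__ above; adjust both to your setup before running.
if __name__ == "__main__":
    llm = Llama(model_path="models/your-model.gguf", n_ctx=2048, verbose=False)  # hypothetical path
    director = CreativeDirector(llm)
    history = [{"content": "I want a 30-second video about meal prep."}]
    tip = director.chat("How do I hook viewers fast?", history, task_context="meal prep reel")
    plan = director.generate_final_plan("meal prep reel", history)
    print(plan["hook"], plan["script"], sep="\n")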