Create app.py
app.py ADDED
@@ -0,0 +1,94 @@
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import firebase_admin
from firebase_admin import credentials, db
import os
import json

# Load Firebase credentials from firebase-key.json
firebase_key_path = os.environ.get("FIREBASE_KEY_PATH", "firebase-key.json")
with open(firebase_key_path, "r") as f:
    firebase_config = json.load(f)
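# Note (assumption, not in the original commit): credentials.Certificate() also accepts a file
# path directly, so
#   cred = credentials.Certificate(firebase_key_path)
# would make the explicit json.load() above unnecessary.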

# Initialize Firebase
cred = credentials.Certificate(firebase_config)
firebase_admin.initialize_app(cred, {
    "databaseURL": "https://taskmate-d6e71-default-rtdb.firebaseio.com/"  # Confirm this URL!
})
ref = db.reference("tasks")

# Load IBM Granite model from Hugging Face
model_name = "ibm-granite/granite-7b-base"  # Switch to a smaller Granite checkpoint (e.g. a 3B variant) if 7B is too heavy
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
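# Optional sketch (assumption, not in the original commit): the 7B checkpoint is heavy, so on a
# GPU with limited memory the model could be loaded in half precision instead, e.g.
#   model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16).to("cuda")
# generate_response() below would then also need its tokenized inputs moved to the same device.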

# Function to generate text with Granite
def generate_response(prompt, max_length=100):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=max_length, num_return_sequences=1)
    return tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
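# Note (assumption about intent): generate()'s max_length counts the prompt tokens as well; if the
# goal is to cap only the newly generated text, max_new_tokens is the closer fit, e.g.
#   model.generate(**inputs, max_new_tokens=max_length)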

# Parse user input into structured task
def parse_task(input_text, persona="default"):
    prompt = f"For a {persona} employee, extract task, time, priority from: '{input_text}'"
    response = generate_response(prompt)
    return response  # e.g., "Task: Email boss, Time: Today, Priority: High"

# Generate persona-specific subtasks
def generate_subtasks(task, persona="default"):
    prompt = f"List 3 subtasks for '{task}' suited for a {persona} employee."
    response = generate_response(prompt, max_length=150)
    return response  # e.g., "1. Draft email\n2. Send it\n3. Chill"

# Main chat function
def task_mate_chat(user_input, persona, chat_history):
    # Parse the input
    parsed = parse_task(user_input, persona)
    task_name = parsed.split(",")[0].replace("Task: ", "").strip()
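    # Note: the model reply is free-form, so the split above is best-effort; if the expected
    # "Task: ..., Time: ..., Priority: ..." shape is missing, task_name simply falls back to
    # the first comma-separated chunk of the raw response.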

    # Generate subtasks
    subtasks = generate_subtasks(task_name, persona)

    # Store in Firebase
    task_data = {
        "input": user_input,
        "parsed": parsed,
        "subtasks": subtasks,
        "persona": persona,
        "timestamp": {".sv": "timestamp"}  # RTDB server-timestamp sentinel; the Python admin SDK has no db.ServerValue
    }
    ref.push().set(task_data)
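    # Equivalent shorthand (assumption): firebase_admin's push() also accepts a value, so
    #   ref.push(task_data)
    # would create the child node and write the record in one call.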

    # Format response
    response = f"Parsed: {parsed}\nSubtasks:\n{subtasks}"
    chat_history.append((user_input, response))
    return "", chat_history

# Gradio Interface
with gr.Blocks(title="Task_Mate") as interface:
    gr.Markdown("# Task_Mate: Your AI Task Buddy")
    persona = gr.Dropdown(["lazy", "multitasker", "perfect"], label="Who are you?", value="lazy")
    chatbot = gr.Chatbot(label="Chat with Task_Mate")
    msg = gr.Textbox(label="Talk to me", placeholder="e.g., 'What’s today?' or 'Meeting at 2 PM'")
    submit = gr.Button("Submit")

    # Handle chat submission
    submit.click(
        fn=task_mate_chat,
        inputs=[msg, persona, chatbot],
        outputs=[msg, chatbot]
    )
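    # Note (assumption): Gradio textboxes also fire on Enter, so wiring
    #   msg.submit(fn=task_mate_chat, inputs=[msg, persona, chatbot], outputs=[msg, chatbot])
    # alongside the button would let users submit without clicking.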

    # Examples for each persona
    gr.Examples(
        examples=[
            ["What’s today?", "lazy"],
            ["Meeting Sarah, slides, IT call", "multitasker"],
            ["Email boss by 3 PM", "perfect"]
        ],
        inputs=[msg, persona],
        outputs=chatbot
    )

interface.launch()
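# Note (assumption about deployment): on Hugging Face Spaces the plain launch() above is enough;
# for a purely local run, interface.launch(share=True) would expose a temporary public link.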