umerforsure committed on
Commit
0c32660
·
1 Parent(s): 4395930

Final: Updated app.py and requirements for production

Browse files
Files changed (1) hide show
  1. app.py +18 -27
app.py CHANGED
@@ -94,23 +94,17 @@ def process_file(file):
94
  return f"❌ Error: {str(e)}"
95
 
96
  def generate_prompt(context, question):
97
- return f"""
98
- You are a helpful academic tutor assisting a student strictly based on course slides or textbook material.
99
-
100
- Context:
101
  {context}
102
 
103
  Question:
104
  {question}
105
 
106
  Instructions:
107
- - Answer ONLY using the above context. Do NOT add outside knowledge.
108
- - Think clearly and deeply before answering.
109
- - Use structured academic language based strictly on the context.
110
- - Use clean formatting with helpful headings and minimal bullet points.
111
- - Do NOT repeat the question or include prompt labels.
112
- - If the context lacks an answer, say: "The provided material does not contain sufficient information to answer this question accurately."
113
- - Output must be academically concise, well-organized, and visually clear.
114
  """.strip()
115
 
116
  def detect_question_type(q):
@@ -134,13 +128,17 @@ def post_process_output(answer_text, question):
134
  "list": "πŸ“˜ **Key Points**",
135
  "general": "πŸ“˜ **Insight**",
136
  }
137
- answer_text = f"{label_map.get(qtype)}\n\n{answer_text}"
 
 
 
 
138
 
139
- if len(answer_text.split()) > 80:
140
- summary = summary_pipeline(answer_text, max_length=60, min_length=25, do_sample=False)[0]['summary_text']
141
- answer_text += f"\n\nπŸ“ **Summary:** {summary.strip()}"
142
 
143
- return answer_text
144
 
145
  def ask_question(question):
146
  global vectorstore
@@ -155,14 +153,7 @@ def ask_question(question):
155
  prompt = generate_prompt(context, question)
156
  result = reasoning_pipeline(prompt)[0]['generated_text']
157
 
158
- if result.startswith("You are a helpful academic tutor"):
159
- result = result.split("Instructions:")[-1].strip()
160
- for marker in ["Context:", "Question:", "Instructions:"]:
161
- result = result.replace(marker, "").strip()
162
- if "." in result:
163
- result = result.rsplit(".", 1)[0] + "."
164
-
165
- return post_process_output(result.strip(), question)
166
 
167
  # Gradio UI
168
  with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
@@ -176,7 +167,7 @@ with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
176
  file_input = gr.File(label="πŸ“„ Upload File", file_types=[".pdf", ".docx", ".pptx", ".txt"])
177
  upload_btn = gr.Button("Upload")
178
  upload_output = gr.Textbox(label="Upload Status", interactive=False)
179
- upload_btn.click(fn=process_file, inputs=file_input, outputs=upload_output)
180
 
181
  gr.Markdown("---")
182
 
@@ -184,11 +175,11 @@ with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
184
  question = gr.Textbox(label="❓ Ask a question")
185
  ask_btn = gr.Button("Ask")
186
  answer = gr.Textbox(label="πŸ’‘ Answer", interactive=False)
187
- ask_btn.click(fn=ask_question, inputs=question, outputs=answer)
188
 
189
  with gr.Tab("History"):
190
  gr.Markdown("""
191
- **⏳ Coming Soon**: Question-answer history, summarization view, more!
192
  """)
193
 
194
  if __name__ == "__main__":
 
94
  return f"❌ Error: {str(e)}"
95
 
96
  def generate_prompt(context, question):
97
+ return f"""Context:
 
 
 
98
  {context}
99
 
100
  Question:
101
  {question}
102
 
103
  Instructions:
104
+ - Answer ONLY using the above context.
105
+ - Use structured academic language.
106
+ - Think carefully and answer concisely.
107
+ - If context lacks information, say: "The material does not contain enough information to answer this accurately."
 
 
 
108
  """.strip()
109
 
110
  def detect_question_type(q):
 
128
  "list": "πŸ“˜ **Key Points**",
129
  "general": "πŸ“˜ **Insight**",
130
  }
131
+ clean_answer = answer_text.strip()
132
+
133
+ if clean_answer.lower().startswith("context:") or "instructions:" in clean_answer:
134
+ for marker in ["Context:", "Question:", "Instructions:"]:
135
+ clean_answer = clean_answer.replace(marker, "").strip()
136
 
137
+ if len(clean_answer.split()) > 80:
138
+ summary = summary_pipeline(clean_answer, max_length=60, min_length=25, do_sample=False)[0]['summary_text']
139
+ clean_answer += f"\n\nπŸ“ **Summary:** {summary.strip()}"
140
 
141
+ return f"{label_map.get(qtype)}\n\n{clean_answer}"
142
 
143
  def ask_question(question):
144
  global vectorstore
 
153
  prompt = generate_prompt(context, question)
154
  result = reasoning_pipeline(prompt)[0]['generated_text']
155
 
156
+ return post_process_output(result, question)
 
 
 
 
 
 
 
157
 
158
  # Gradio UI
159
  with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
 
167
  file_input = gr.File(label="πŸ“„ Upload File", file_types=[".pdf", ".docx", ".pptx", ".txt"])
168
  upload_btn = gr.Button("Upload")
169
  upload_output = gr.Textbox(label="Upload Status", interactive=False)
170
+ upload_btn.click(fn=process_file, inputs=[file_input], outputs=[upload_output])
171
 
172
  gr.Markdown("---")
173
 
 
175
  question = gr.Textbox(label="❓ Ask a question")
176
  ask_btn = gr.Button("Ask")
177
  answer = gr.Textbox(label="πŸ’‘ Answer", interactive=False)
178
+ ask_btn.click(fn=ask_question, inputs=[question], outputs=[answer])
179
 
180
  with gr.Tab("History"):
181
  gr.Markdown("""
182
+ **⏳ Coming Soon**: Question-answer history, summarization view, and more!
183
  """)
184
 
185
  if __name__ == "__main__":