umerforsure committed on
Commit
f2b8ccd
·
1 Parent(s): f582c24

Final: Updated app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -11
app.py CHANGED
@@ -151,21 +151,23 @@ def ask_question(question):
151
 
152
  context = "\n".join([doc.page_content for doc in docs])
153
  prompt = generate_prompt(context, question)
154
- raw_output = reasoning_pipeline(prompt)[0]['generated_text']
155
 
156
- # 🧠 Smart cleanup: remove prompt leakage or echoed instructions
157
- for section in ["Context:", "Question:", "Instructions:", "Use structured academic language"]:
158
- raw_output = raw_output.replace(section, "").strip()
159
 
160
- # Remove anything before answer starts if needed
161
- if "Answer:" in raw_output:
162
- raw_output = raw_output.split("Answer:")[-1].strip()
 
 
163
 
164
- # Trim trailing junk
165
- if "." in raw_output:
166
- raw_output = raw_output.rsplit(".", 1)[0] + "."
167
 
168
- return post_process_output(raw_output.strip(), question)
169
 
170
 
171
  # Gradio UI
 
151
 
152
  context = "\n".join([doc.page_content for doc in docs])
153
  prompt = generate_prompt(context, question)
154
+ response = reasoning_pipeline(prompt)[0]['generated_text']
155
 
156
+ # Clean out prompt leakage
157
+ for token in ["Context:", "Question:", "Instructions:", "Use structured academic language"]:
158
+ response = response.replace(token, "").strip()
159
 
160
+ # Remove leading/trailing junk
161
+ if "Answer:" in response:
162
+ response = response.split("Answer:")[-1].strip()
163
+ if "." in response:
164
+ response = response.rsplit(".", 1)[0] + "."
165
 
166
+ # Fallback if answer is empty or nonsense
167
+ if len(response.strip()) < 10:
168
+ return " The model could not generate a meaningful answer based on the provided context."
169
 
170
+ return post_process_output(response.strip(), question)
171
 
172
 
173
  # Gradio UI