Update eval_final_results.py
eval_final_results.py  CHANGED  +7 -1
@@ -3,7 +3,13 @@ from compute_accuracy import compute_accuracy
 def eval_final(test_metafile,dev_metafile,to_eval):
     print("Computing accuracy...")
     result_test = compute_accuracy(to_eval, test_metafile)
-    result_dev = compute_accuracy(to_eval, dev_metafile)
+
+    # permit not submitting dev results
+    try:
+        result_dev = compute_accuracy(to_eval, dev_metafile)
+    except:
+        print("Parsing dev answer error, return 0 as results")
+        result_dev = {'answered_acc': 0}
 
     output = {"dev avg": result_dev['answered_acc'],
               "test avg": result_test['answered_acc'],
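
For context, a minimal runnable sketch of eval_final as it stands after this commit. The compute_accuracy stub, the except Exception: narrowing, the closed output dict, the return statement, and the demo filenames are all assumptions for illustration only; the real helper is imported from compute_accuracy.py, and the hunk above is truncated before the output dict is closed.

    # Sketch of eval_final after this commit (not the canonical file).
    # compute_accuracy is a stand-in stub here; the real one is imported
    # from compute_accuracy.py and returns at least {'answered_acc': float}.

    def compute_accuracy(to_eval, metafile):
        # Stub: the real function scores the submission against the
        # metafile and may raise if the dev answers are missing/malformed.
        return {'answered_acc': 0.0}

    def eval_final(test_metafile, dev_metafile, to_eval):
        print("Computing accuracy...")
        result_test = compute_accuracy(to_eval, test_metafile)

        # permit not submitting dev results
        try:
            result_dev = compute_accuracy(to_eval, dev_metafile)
        except Exception:  # the commit itself uses a bare `except:`
            print("Parsing dev answer error, return 0 as results")
            result_dev = {'answered_acc': 0}

        output = {"dev avg": result_dev['answered_acc'],
                  "test avg": result_test['answered_acc']}
        # The real dict may carry more keys; the hunk is cut off here.
        return output  # assumed; not visible in the hunk

    if __name__ == "__main__":
        # Hypothetical filenames, for demonstration only.
        print(eval_final("test_meta.json", "dev_meta.json", "answers.json"))

One note on the design: the bare except: in the commit also swallows KeyboardInterrupt and SystemExit; except Exception: (used in the sketch) is the narrower idiomatic form when the intent is only to tolerate a missing or unparseable dev submission.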