import os

import streamlit as st

from utils import get_configs, get_display_names, get_path_for_viz
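# Assumed contract of the helpers imported from utils (the module is not shown in this file):
#   get_configs(cfg_dir)     - load every config file in cfg_dir and return a list of dicts
#   get_display_names(cfgs)  - return the 'display_name' entry of each config dict
#   get_path_for_viz(...)    - build the path to a precomputed visualization video for a
#                              (dataset, sequence, model, visualization) combination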
| # st.header("EVREAL - Event-based Video Reconstruction Evaluation and Analysis Library") | |
| # | |
| # paper_link = "https://arxiv.org/abs/2305.00434" | |
| # code_link = "https://github.com/ercanburak/EVREAL" | |
| # page_link = "https://ercanburak.github.io/evreal.html" | |
| # instructions_video = "https://www.youtube.com/watch?v=" | |
| # | |
| # st.markdown("Paper: " + paper_link, unsafe_allow_html=True) | |
| # st.markdown("Code: " + paper_link, unsafe_allow_html=True) | |
| # st.markdown("Page: " + paper_link, unsafe_allow_html=True) | |
| # st.markdown("Please see this video for instructions on how to use this tool: " + instructions_video, unsafe_allow_html=True) | |
| st.title("Result Analysis Tool") | |
# Root directory containing the precomputed EVREAL result videos (local path).
data_base_path = "/home/bercan/ebv/evreal_data"

dataset_cfg_path = os.path.join("cfg", "dataset")
model_cfg_path = os.path.join("cfg", "model")
metric_cfg_path = os.path.join("cfg", "metric")
viz_cfg_path = os.path.join("cfg", "viz")
datasets = get_configs(dataset_cfg_path)
models = get_configs(model_cfg_path)
metrics = get_configs(metric_cfg_path)
visualizations = get_configs(viz_cfg_path)

dataset_display_names = get_display_names(datasets)
model_display_names = get_display_names(models)
metric_display_names = get_display_names(metrics)
viz_display_names = get_display_names(visualizations)

assert len(set(dataset_display_names)) == len(dataset_display_names), "Dataset display names are not unique"
assert len(set(model_display_names)) == len(model_display_names), "Model display names are not unique"
assert len(set(metric_display_names)) == len(metric_display_names), "Metric display names are not unique"
assert len(set(viz_display_names)) == len(viz_display_names), "Viz display names are not unique"
selected_model_names = st.multiselect('Select multiple methods to compare', model_display_names)
selected_models = [model for model in models if model['display_name'] in selected_model_names]

col1, col2 = st.columns(2)
with col1:
    selected_dataset_name = st.selectbox('Select dataset', options=dataset_display_names)
    selected_dataset = [dataset for dataset in datasets if dataset['display_name'] == selected_dataset_name][0]
with col2:
    selected_sequence = st.selectbox('Select sequence', options=selected_dataset["sequences"].keys())
usable_metrics = [metric for metric in metrics if metric['no_ref'] == selected_dataset['no_ref']]
usable_metric_display_names = get_display_names(usable_metrics)
selected_metric_names = st.multiselect('Select metrics to display', usable_metric_display_names)
selected_metrics = [metric for metric in usable_metrics if metric['display_name'] in selected_metric_names]

if not selected_dataset['has_frames']:
    usable_viz = [viz for viz in visualizations if viz['gt_type'] != 'frame']
else:
    usable_viz = visualizations
usable_viz_display_names = get_display_names(usable_viz)
selected_viz = st.multiselect('Select other visualizations to display', usable_viz_display_names)
selected_visualizations = [viz for viz in visualizations if viz['display_name'] in selected_viz]
# Halt the script here until the user presses the button; Streamlit reruns it on each interaction.
if not st.button('Get Results'):
    st.stop()
gt_only_viz = [viz for viz in selected_visualizations if viz['viz_type'] == 'gt_only']
model_only_viz = [viz for viz in selected_visualizations if viz['viz_type'] == 'model_only']
both_viz = [viz for viz in selected_visualizations if viz['viz_type'] == 'both']

recon_viz = {"name": "recon", "display_name": "Reconstruction", "viz_type": "both", "gt_type": "frame"}
ground_truth = {"name": "gt", "display_name": "Ground Truth", "model_id": "groundtruth"}

model_viz = [recon_viz] + both_viz + selected_metrics + model_only_viz
num_model_rows = len(model_viz) + 1

gt_viz = []
if selected_dataset['has_frames']:
    gt_viz.append(recon_viz)
gt_viz.extend([viz for viz in both_viz if viz['gt_type'] == 'frame'])
gt_viz.extend([viz for viz in gt_only_viz if viz['gt_type'] == 'frame'])
gt_viz.extend([viz for viz in both_viz if viz['gt_type'] == 'event'])
gt_viz.extend([viz for viz in gt_only_viz if viz['gt_type'] == 'event'])
num_gt_rows = len(gt_viz) + 1

num_rows = max(num_model_rows, num_gt_rows)
num_model_columns = len(selected_models) + 1
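# Grid layout: row 0 holds the selected method names, column 0 holds the row labels,
# and every other cell is one visualization for that (method, visualization) pair.
# The ground-truth column prepared above (ground_truth / gt_viz) is not drawn by this loop yet.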
for row_idx in range(num_rows):
    row_visualizations = []
    for col_idx in range(num_model_columns):
        if row_idx == 0 and col_idx == 0:
            # top-left corner cell
            print("meta")
        elif row_idx == 0:
            # model names
            print(selected_models[col_idx - 1]['display_name'])
        elif col_idx == 0:
            # metric names
            print(model_viz[row_idx - 1]['display_name'])
        else:
            video_path = get_path_for_viz(data_base_path, selected_dataset, selected_sequence,
                                          selected_models[col_idx - 1], model_viz[row_idx - 1])
            print(video_path)
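
# A minimal rendering sketch (an assumption, not part of the original script): instead of the
# print() placeholders above, each grid row could be drawn with st.columns, showing a video per
# cell when the file returned by get_path_for_viz exists on disk. The bounds checks guard the
# case where the ground-truth rows outnumber the entries in model_viz.
for row_idx in range(num_rows):
    cells = st.columns(num_model_columns)
    for col_idx, cell in enumerate(cells):
        with cell:
            if row_idx == 0 and col_idx == 0:
                st.write("")  # empty top-left corner
            elif row_idx == 0:
                st.write(selected_models[col_idx - 1]['display_name'])
            elif col_idx == 0 and row_idx - 1 < len(model_viz):
                st.write(model_viz[row_idx - 1]['display_name'])
            elif col_idx > 0 and row_idx - 1 < len(model_viz):
                video_path = get_path_for_viz(data_base_path, selected_dataset, selected_sequence,
                                              selected_models[col_idx - 1], model_viz[row_idx - 1])
                if os.path.isfile(video_path):
                    st.video(video_path)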