"""
Master script to generate all 15 figures for MCM 2026 Problem A.

O-Prize grade quality with full validation.
"""

import importlib
import json
import os
import sys

import numpy as np
import yaml

# Add this script's directory to the import path so the figure modules
# and validation.py can be imported by module name.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from validation import validate_figure_output
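
# Beyond the standard library, this script needs NumPy and PyYAML (the
# distribution behind `import yaml`). The fig01..fig15 modules and
# validation.py are assumed to sit in the same directory as this script.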


def load_config(config_path='config.yaml'):
    """Load configuration from a YAML file."""
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)
    return config
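
# Only two keys are read from config.yaml by this script itself; both have
# defaults (seed 42, figure_dir 'figures'), and individual figure modules
# may read further sections. A minimal file would look like:
#
#   global:
#     seed: 42
#     figure_dir: figures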


def run_all_figures():
    """Execute all figure generation scripts"""

    print("=" * 70)
    print(" MCM 2026 Problem A - Figure Generation System (O-Prize Grade)")
    print("=" * 70)
    print()

    # Load configuration
    print("Loading configuration...")
    config = load_config()

    # Set global seed for reproducibility
    seed = config.get('global', {}).get('seed', 42)
    np.random.seed(seed)
    print(f"Random seed set to: {seed}")
    print()

    # Ensure output directory exists
    figure_dir = config.get('global', {}).get('figure_dir', 'figures')
    os.makedirs(figure_dir, exist_ok=True)
    print(f"Output directory: {figure_dir}")
    print()

    # Figure modules to execute
    figure_modules = [
        ('fig01_macro_logic', 1),
        ('fig02_system_interaction', 2),
        ('fig03_ocv_fitting', 3),
        ('fig04_internal_resistance', 4),
        ('fig05_radio_tail', 5),
        ('fig06_cpl_avalanche', 6),
        ('fig07_baseline_validation', 7),
        ('fig08_power_breakdown', 8),
        ('fig09_scenario_comparison', 9),
        ('fig10_tornado_sensitivity', 10),
        ('fig11_heatmap_temp_signal', 11),
        ('fig12_monte_carlo', 12),
        ('fig13_survival_curve', 13),
        ('fig14_lifecycle_degradation', 14),
        ('fig15_radar_user_guide', 15),
    ]
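
    # Each module above must expose make_figure(config) returning a dict
    # with at least 'output_files', 'computed_metrics', and
    # 'validation_flags'; those three are forwarded to
    # validate_figure_output below.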

    # Track results
    results = {}
    failed_figures = []

    # Execute each figure
    for module_name, fig_num in figure_modules:
        print(f"[{fig_num:02d}/{len(figure_modules)}] Generating Fig{fig_num:02d}...")

        try:
            # Import module
            module = importlib.import_module(module_name)

            # Execute make_figure
            result = module.make_figure(config)

            # Validate
            validation_result = validate_figure_output(
                fig_num,
                result['output_files'],
                result['computed_metrics'],
                result['validation_flags'],
                config,
            )
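
            # validate_figure_output is expected to return a dict with a
            # boolean 'pass' plus optional 'metrics', 'errors', and
            # 'output_files' entries; nothing else is relied on below.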

            results[f"Fig{fig_num:02d}"] = validation_result

            # Print status
            status = "PASS" if validation_result['pass'] else "FAIL"
            print(f"  Status: {status}")

            if validation_result.get('metrics'):
                print(f"  Metrics: {validation_result['metrics']}")

            if not validation_result['pass']:
                failed_figures.append(f"Fig{fig_num:02d}")
                print(f"  Errors: {validation_result.get('errors', [])}")

            print()

        except Exception as e:
            # A crashing figure is recorded as failed; the loop continues
            # so the remaining figures are still generated.
            print("  Status: ERROR")
            print(f"  Exception: {e}")
            print()

            results[f"Fig{fig_num:02d}"] = {
                "pass": False,
                "errors": [str(e)],
            }
            failed_figures.append(f"Fig{fig_num:02d}")

    # Summary
    print("=" * 70)
    print(" Generation Complete")
    print("=" * 70)
    print()

    n_passed = sum(1 for r in results.values() if r['pass'])
    n_total = len(results)

    print(f"Figures generated: {n_total}")
    print(f"Passed validation: {n_passed}")
    print(f"Failed validation: {len(failed_figures)}")

    if failed_figures:
        print(f"Failed figures: {', '.join(failed_figures)}")
    else:
        print("All figures passed validation!")

    print()

    # Write report
    report_dir = 'artifacts'
    os.makedirs(report_dir, exist_ok=True)
    report_path = os.path.join(report_dir, 'figure_build_report.json')

    report = {
        "status": "PASS" if not failed_figures else "FAIL",
        "failed_figures": failed_figures,
        "total_figures": n_total,
        "passed_figures": n_passed,
        "details": {},
    }
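
    # For reference, the finished report has this shape (values illustrative):
    #
    #   {
    #     "status": "PASS",
    #     "failed_figures": [],
    #     "total_figures": 15,
    #     "passed_figures": 15,
    #     "details": {
    #       "Fig01": {"pass": true, "output_files": [...], "errors": [],
    #                 "metrics": {...}}
    #     }
    #   }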

    # Convert numpy types to native Python types: json.dump cannot serialize
    # numpy scalars such as np.float64, so metric values are coerced to
    # float (or str) first.
    for fig_key, fig_result in results.items():
        report["details"][fig_key] = {
            "pass": bool(fig_result.get('pass', False)),
            "output_files": fig_result.get('output_files', []),
            "errors": fig_result.get('errors', []),
        }
        if fig_result.get('metrics'):
            report["details"][fig_key]["metrics"] = {
                k: float(v) if isinstance(v, (int, float, np.number)) else str(v)
                for k, v in fig_result['metrics'].items()
            }

    with open(report_path, 'w', encoding='utf-8') as f:
        json.dump(report, f, indent=2, ensure_ascii=False)

    print(f"Validation report saved to: {report_path}")
    print()

    # Exit code is 0 only if every figure passed, so an automated caller
    # (e.g. a Makefile or CI job) can detect a failed build.
    return 0 if not failed_figures else 1


if __name__ == '__main__':
    exit_code = run_all_figures()
    sys.exit(exit_code)
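
# Typical invocation, assuming this file is saved as run_figures.py (the
# actual filename is not fixed by the script) and run from the directory
# containing config.yaml:
#
#   python run_figures.py
#
# Figures land in the configured figure_dir and the validation report in
# artifacts/figure_build_report.json.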