//! Test harness for the `dada test` subcommand: discovers `.dada` test
//! files, runs them against the compiler, and reports results either for
//! humans (progress bar) or machines (`--porcelain` JSON).
1use std::{
2    panic::AssertUnwindSafe,
3    path::{Path, PathBuf},
4    time::Instant,
5};
6
7use dada_compiler::{Compiler, RealFs};
8use dada_ir_ast::diagnostic::{Diagnostic, Level};
9use dada_util::{Fallible, bail};
10use expected::{ExpectedDiagnostic, Probe, ProbeKind};
11use indicatif::ProgressBar;
12use panic_hook::CapturedPanic;
13use rayon::prelude::*;
14use serde::Serialize;
15use walkdir::WalkDir;
16
17use crate::{GlobalOptions, TestOptions};
18
19use super::Main;
20
21mod expected;
22mod spec_validation;
23
/// Aggregate error returned when one or more tests fail; the `Display`
/// impl generated by `thiserror` reports the failure count.
#[derive(thiserror::Error, Debug)]
#[error("{} test failure(s)", failed_tests.len())]
struct FailedTests {
    failed_tests: Vec<FailedTest>,
}
29
/// A single failing test: where it lives, what the compiler printed,
/// and the individual [`Failure`]s detected.
#[derive(Debug)]
struct FailedTest {
    /// Path to the `.dada` test file.
    path: PathBuf,
    /// Complete compiler output captured for the test report.
    full_compiler_output: String,
    /// Every individual expectation that was not met.
    failures: Vec<Failure>,
}
36
/// Outcome of running one test file.
#[derive(Debug)]
enum TestResult {
    /// Test passed.
    Passed,
    /// Test failed and counts against the run. This also covers FIXME
    /// tests that unexpectedly passed (see `FailedTest::fixme_passed`).
    Failed(FailedTest),
    /// Test failed but was marked FIXME, so the failure is ignored.
    FixmeFailed(FailedTest),
}
46
/// Enhanced test result with timing and metadata for porcelain output
#[derive(Debug)]
struct DetailedTestResult {
    /// Path to the `.dada` test file.
    path: PathBuf,
    /// Pass/fail outcome.
    result: TestResult,
    /// Wall-clock time taken by this test, in milliseconds.
    duration_ms: u64,
    /// `#:` annotations found in the test file (see `extract_annotations`).
    annotations: Vec<String>,
}
55
/// One specific way a test can fail; a [`FailedTest`] collects all of
/// these for a single test file.
#[derive(Debug)]
enum Failure {
    /// The compiler emitted a diagnostic that no annotation expected.
    UnexpectedDiagnostic(Diagnostic),
    /// A single expected-diagnostic annotation matched more than one
    /// actual diagnostic.
    MultipleMatches(ExpectedDiagnostic, Diagnostic),
    /// An annotated diagnostic was never reported by the compiler.
    MissingDiagnostic(ExpectedDiagnostic),
    /// The compiler panicked; the payload is the panic captured by the
    /// panic hook, if it recorded one.
    InternalCompilerError(Option<CapturedPanic>),

    /// A test marked as FIXME did not fail
    FixmePassed,

    /// The probe at the given location did not yield the expected result.
    Probe {
        /// Probe performed
        probe: Probe,

        /// Actual result returned
        actual: String,
    },

    /// Invalid spec reference in #:spec comment
    InvalidSpecReference(String),

    /// Auxiliary file at `path` did not have expected contents.
    ///
    /// See `diff`.
    ///
    /// You can auto-update these files by setting `UPDATE_EXPECT=1`.
    Auxiliary {
        kind: String,
        ref_path: PathBuf,
        txt_path: PathBuf,
        diff: String,
    },
}
90
91impl Failure {}
92
/// JSON output structures for --porcelain mode
#[derive(Serialize)]
struct PorcelainOutput {
    /// Aggregate counts and total wall-clock time.
    summary: PorcelainSummary,
    /// One entry per test.
    tests: Vec<PorcelainTest>,
}
99
/// Aggregate counts for a porcelain test run.
#[derive(Serialize)]
struct PorcelainSummary {
    /// Total number of tests run.
    total: usize,
    /// Tests reported with status "pass" (this includes ignored FIXME
    /// failures — see `convert_to_porcelain_test`).
    passed: usize,
    /// Tests reported with status "fail".
    failed: usize,
    /// Wall-clock duration of the whole run, in milliseconds.
    duration_ms: u64,
}
107
/// Per-test entry in the porcelain JSON output.
#[derive(Serialize)]
struct PorcelainTest {
    /// Test file path (lossy UTF-8 conversion).
    path: String,
    /// "pass" or "fail"; FIXME failures are reported as "pass".
    status: String,
    /// Failure reason; omitted from the JSON for passing tests.
    #[serde(skip_serializing_if = "Option::is_none")]
    reason: Option<String>,
    /// Annotations found in the test file (e.g. `#:FIXME`, `#:spec …`).
    annotations: Vec<String>,
    /// Suggested next step; omitted from the JSON for passing tests.
    #[serde(skip_serializing_if = "Option::is_none")]
    suggestion: Option<String>,
    /// Extra failure details; omitted from the JSON for passing tests.
    #[serde(skip_serializing_if = "Option::is_none")]
    details: Option<String>,
    /// Time taken by this test, in milliseconds.
    duration_ms: u64,
}
121
122mod panic_hook;
123
/// Trait for formatting test output in different modes
///
/// Implementations must be `Sync + Send` because `show_progress` may be
/// called from rayon worker threads during a parallel run.
trait TestOutputFormatter: Sync + Send {
    /// Emit the final report for the whole run; returns `Err` when the
    /// run should be treated as failed.
    fn format_results(
        &self,
        results: Vec<DetailedTestResult>,
        total_duration_ms: u64,
    ) -> Fallible<()>;
    /// Called once per test, after it has run, with its result.
    fn show_progress(&self, path: &Path, result: &DetailedTestResult, verbose: bool);
}
133
/// Formats output for human consumption with progress bars
struct RegularFormatter {
    /// Progress bar advanced once per completed test.
    progress_bar: ProgressBar,
}
138
impl TestOutputFormatter for RegularFormatter {
    fn show_progress(&self, path: &Path, result: &DetailedTestResult, verbose: bool) {
        if verbose {
            // NOTE(review): this method runs *after* the test has finished
            // (see the runner in `Main::test`), so "beginning test" is
            // printed alongside the result — confirm the wording is still
            // intended.
            self.progress_bar
                .println(format!("{}: beginning test", path.display()));
            match &result.result {
                TestResult::Passed => {}
                TestResult::Failed(error) => {
                    self.progress_bar
                        .println(format!("{}: {}", path.display(), error.summarize()));
                }
                TestResult::FixmeFailed(error) => {
                    self.progress_bar.println(format!(
                        "{}: {} (FIXME)",
                        path.display(),
                        error.summarize()
                    ));
                }
            }
        }

        // Increment progress bar after each test
        self.progress_bar.inc(1);
    }

    /// Partition the results, print a human-readable summary, and return
    /// `Err(FailedTests)` if any non-FIXME test failed.
    fn format_results(
        &self,
        results: Vec<DetailedTestResult>,
        _total_duration_ms: u64,
    ) -> Fallible<()> {
        let mut failed_tests = vec![];
        let mut fixme_failed_tests = vec![];

        let total_tests = results.len();
        for detailed_result in results {
            match detailed_result.result {
                TestResult::Passed => {}
                TestResult::Failed(failed_test) => failed_tests.push(failed_test),
                TestResult::FixmeFailed(failed_test) => fixme_failed_tests.push(failed_test),
            }
        }

        // When exactly one test failed, inline its full report; with more
        // failures only the summary line is printed and the reports stay
        // on disk.
        if failed_tests.len() == 1 {
            for failed_test in &failed_tests {
                let test_report = std::fs::read_to_string(failed_test.test_report_path())?;
                self.progress_bar.println(test_report);
            }
        }

        let total_passed = total_tests - failed_tests.len() - fixme_failed_tests.len();

        if failed_tests.is_empty() {
            let message = if fixme_failed_tests.is_empty() {
                format!("All {total_tests} tests passed")
            } else {
                format!(
                    "{} tests passed, {} FIXME tests failed (ignored)",
                    total_passed,
                    fixme_failed_tests.len()
                )
            };
            self.progress_bar.println(message);
            self.progress_bar.finish();
            Ok(())
        } else {
            let message = format!("{} test failure(s)", failed_tests.len());
            self.progress_bar.println(message);
            self.progress_bar.finish();
            Err(FailedTests { failed_tests }.into())
        }
    }
}
211
/// Formats output as machine-readable JSON (`--porcelain` mode); stateless.
struct PorcelainFormatter;
214
215impl TestOutputFormatter for PorcelainFormatter {
216    fn show_progress(&self, _path: &Path, _result: &DetailedTestResult, _verbose: bool) {
217        // No progress output for porcelain mode
218    }
219
220    fn format_results(
221        &self,
222        results: Vec<DetailedTestResult>,
223        total_duration_ms: u64,
224    ) -> Fallible<()> {
225        let mut porcelain_tests = Vec::new();
226        let mut failed_count = 0;
227
228        for detailed_result in &results {
229            let porcelain_test = convert_to_porcelain_test(detailed_result);
230            if porcelain_test.status == "fail" {
231                failed_count += 1;
232            }
233            porcelain_tests.push(porcelain_test);
234        }
235
236        let passed_count = results.len() - failed_count;
237
238        let output = PorcelainOutput {
239            summary: PorcelainSummary {
240                total: results.len(),
241                passed: passed_count,
242                failed: failed_count,
243                duration_ms: total_duration_ms,
244            },
245            tests: porcelain_tests,
246        };
247
248        println!("{}", serde_json::to_string_pretty(&output)?);
249
250        if failed_count > 0 {
251            // Create dummy failed tests for error handling
252            let failed_tests: Vec<FailedTest> = output
253                .tests
254                .iter()
255                .filter(|t| t.status == "fail")
256                .map(|t| FailedTest {
257                    path: PathBuf::from(&t.path),
258                    full_compiler_output: t.details.clone().unwrap_or_default(),
259                    failures: vec![], // We don't need detailed failures for porcelain mode
260                })
261                .collect();
262            Err(FailedTests { failed_tests }.into())
263        } else {
264            Ok(())
265        }
266    }
267}
268
impl Main {
    /// Entry point for the `test` subcommand: discover the test files,
    /// run each one, and report the results through the formatter
    /// selected by `--porcelain`.
    pub(super) fn test(&mut self, mut options: TestOptions) -> Fallible<()> {
        // With no explicit inputs, run everything under `tests/`; the
        // throwaway `&mut false` means verbosity is never auto-enabled
        // in that case.
        let tests = if options.inputs.is_empty() {
            self.assemble_tests(&["tests"], &mut false)?
        } else {
            self.assemble_tests(&options.inputs, &mut options.verbose)?
        };

        let start_time = Instant::now();

        // Create appropriate formatter
        let formatter: Box<dyn TestOutputFormatter> = if options.porcelain {
            Box::new(PorcelainFormatter)
        } else {
            Box::new(RegularFormatter {
                progress_bar: ProgressBar::new(tests.len() as u64),
            })
        };

        // Run tests. Panics raised while a test runs are recorded by the
        // panic hook so they can be attributed to the test that caused them.
        let test_results: Vec<Fallible<DetailedTestResult>> = panic_hook::recording_panics(|| {
            let runner = |input: &Path| -> Fallible<DetailedTestResult> {
                let result = self.run_test(input)?;
                formatter.show_progress(input, &result, options.verbose);
                Ok(result)
            };

            // Verbose mode runs sequentially so the extra output is not
            // interleaved; otherwise tests run in parallel via rayon.
            if options.verbose {
                tests.iter().map(|input| runner(input)).collect()
            } else {
                tests.par_iter().map(|input| runner(input)).collect()
            }
        });

        // Collect results, propagating the first harness-level error.
        let results: Result<Vec<DetailedTestResult>, _> = test_results.into_iter().collect();
        let results = results?;

        let total_duration = start_time.elapsed().as_millis() as u64;
        formatter.format_results(results, total_duration)
    }

    /// Expand `inputs` into the list of `.dada` files to test, walking
    /// directories recursively. As a side effect, forces `*verbose` on
    /// when exactly one input is given and it is a single file.
    fn assemble_tests(
        &self,
        inputs: &[impl AsRef<Path>],
        verbose: &mut bool,
    ) -> Fallible<Vec<PathBuf>> {
        let mut result = vec![];

        // If there is exactly one input specified and it is a file (not a directory),
        // set verbose to true.
        if inputs.len() == 1 && inputs[0].as_ref().is_file() {
            *verbose = true;
        }

        for input in inputs {
            let input: &Path = input.as_ref();

            if !input.exists() {
                bail!("test path '{}' does not exist", input.display());
            } else if is_dada_file(input) {
                result.push(input.to_path_buf());
            } else if input.is_dir() {
                for e in WalkDir::new(input) {
                    let e = e?;
                    if is_dada_file(e.path()) {
                        result.push(e.into_path());
                    }
                }
            } else {
                bail!(
                    "input path '{}' is neither a .dada file nor a directory",
                    input.display()
                );
            }
        }

        Ok(result)
    }

    /// Run a single test found at the given path.
    ///
    /// # Returns
    ///
    /// * `Err(e)` for some failure in the test harness itself.
    /// * `Ok(result)` with the detailed test result including timing and annotations.
    fn run_test(&self, input: &Path) -> Fallible<DetailedTestResult> {
        let start_time = Instant::now();

        assert!(is_dada_file(input));
        let mut compiler = Compiler::new(RealFs::default(), None);

        // Get test annotations and run the test
        let source_file = compiler.load_source_file(input)?;
        let expectations = expected::TestExpectations::new(&compiler, source_file)?;
        let annotations = extract_annotations(&expectations);
        let is_fixme_ice = expectations.fixme_ice();

        // Run the test and capture panics, so that an ICE fails this one
        // test instead of aborting the whole run.
        let result =
            std::panic::catch_unwind(AssertUnwindSafe(|| expectations.compare(&mut compiler)));

        let duration_ms = start_time.elapsed().as_millis() as u64;

        let test_result = match result {
            Ok(r) if is_fixme_ice => {
                // FIXME_ICE test didn't panic — the ICE is fixed, remove the annotation
                let _ = r; // discard normal test result
                let failed_test = FailedTest::fixme_passed(input);
                failed_test.generate_test_report(&compiler)?;
                TestResult::Failed(failed_test)
            }
            Ok(r) => {
                let (failed_test, is_fixme) = r?;
                match (failed_test, is_fixme) {
                    // Clean pass: remove any stale report from a previous run.
                    (None, false) => {
                        delete_test_report(input)?;
                        TestResult::Passed
                    }
                    // A FIXME test that passed counts as a failure, so the
                    // stale FIXME annotation gets cleaned up.
                    (None, true) => {
                        let failed_test = FailedTest::fixme_passed(input);
                        failed_test.generate_test_report(&compiler)?;
                        TestResult::Failed(failed_test)
                    }
                    (Some(failed_test), is_fixme) => {
                        failed_test.generate_test_report(&compiler)?;
                        if is_fixme {
                            TestResult::FixmeFailed(failed_test)
                        } else {
                            TestResult::Failed(failed_test)
                        }
                    }
                }
            }
            // The compiler panicked: record an internal compiler error.
            Err(_unwound) => {
                let captured_panic = panic_hook::captured_panic();
                let failed_test = FailedTest::ice(input, captured_panic);
                failed_test.generate_test_report(&compiler)?;
                if is_fixme_ice {
                    TestResult::FixmeFailed(failed_test)
                } else {
                    TestResult::Failed(failed_test)
                }
            }
        };

        Ok(DetailedTestResult {
            path: input.to_path_buf(),
            result: test_result,
            duration_ms,
            annotations,
        })
    }
}
423
424fn extract_annotations(expectations: &expected::TestExpectations) -> Vec<String> {
425    let mut annotations = Vec::new();
426
427    // Check the TestExpectations struct fields and convert to string annotations
428    if expectations.fn_asts() {
429        annotations.push("#:fn_asts".to_string());
430    }
431    if !expectations.codegen() {
432        annotations.push("#:skip_codegen".to_string());
433    }
434    if expectations.fixme() {
435        annotations.push("#:FIXME".to_string());
436    }
437    if expectations.fixme_ice() {
438        annotations.push("#:FIXME_ICE".to_string());
439    }
440
441    // Add spec references
442    for spec_ref in expectations.spec_refs() {
443        annotations.push(format!("#:spec {spec_ref}"));
444    }
445
446    annotations
447}
448
449fn convert_to_porcelain_test(detailed_result: &DetailedTestResult) -> PorcelainTest {
450    let path = detailed_result.path.to_string_lossy().to_string();
451
452    match &detailed_result.result {
453        TestResult::Passed => PorcelainTest {
454            path,
455            status: "pass".to_string(),
456            reason: None,
457            annotations: detailed_result.annotations.clone(),
458            suggestion: None,
459            details: None,
460            duration_ms: detailed_result.duration_ms,
461        },
462        TestResult::Failed(failed_test) => {
463            let (reason, suggestion, details) = analyze_failure(failed_test);
464            PorcelainTest {
465                path,
466                status: "fail".to_string(),
467                reason: Some(reason),
468                annotations: detailed_result.annotations.clone(),
469                suggestion: Some(suggestion),
470                details: Some(details),
471                duration_ms: detailed_result.duration_ms,
472            }
473        }
474        TestResult::FixmeFailed(_failed_test) => PorcelainTest {
475            path,
476            status: "pass".to_string(), // FIXME failures are treated as expected (passed)
477            reason: None,
478            annotations: detailed_result.annotations.clone(),
479            suggestion: None,
480            details: None,
481            duration_ms: detailed_result.duration_ms,
482        },
483    }
484}
485
486fn analyze_failure(failed_test: &FailedTest) -> (String, String, String) {
487    // Simplified approach: always point to test report for detailed guidance
488    let test_report_path = failed_test.test_report_path();
489    let suggestion = format!(
490        "Consult {} for details and guidance",
491        test_report_path.display()
492    );
493
494    (
495        "test_failure".to_string(),
496        suggestion,
497        "See test report for detailed analysis and next steps".to_string(),
498    )
499}
500
/// True if `input` is an existing file whose extension is `.dada`.
fn is_dada_file(input: &Path) -> bool {
    // `is_some_and` is the idiomatic form of `map(..).unwrap_or(false)`
    // (clippy::map_unwrap_or).
    input.is_file() && input.extension().is_some_and(|e| e == "dada")
}
504
505impl FailedTest {
506    fn ice(path: &Path, captured_panic: Option<CapturedPanic>) -> Self {
507        FailedTest {
508            path: path.to_path_buf(),
509            full_compiler_output: "(Internal Compiler Error)\n".to_string(),
510            failures: vec![Failure::InternalCompilerError(captured_panic)],
511        }
512    }
513
514    fn fixme_passed(path: &Path) -> Self {
515        FailedTest {
516            path: path.to_path_buf(),
517            full_compiler_output: "FIXME test passed!\n".to_string(),
518            failures: vec![Failure::FixmePassed],
519        }
520    }
521
522    fn test_report_path(&self) -> PathBuf {
523        test_report_path(&self.path)
524    }
525
526    fn summarize(&self) -> String {
527        format!(
528            "{} failures, see {}",
529            self.failures.len(),
530            self.test_report_path().display()
531        )
532    }
533
534    fn report(&self, db: &dyn crate::Db) -> Fallible<String> {
535        use std::fmt::Write;
536        let opts = GlobalOptions::test_options();
537
538        let mut result = String::new();
539
540        writeln!(result, "Test failed: {}", self.path.display())?;
541
542        writeln!(result)?;
543        writeln!(
544            result,
545            "[Test file](./{})",
546            self.path.file_name().unwrap().to_string_lossy()
547        )?;
548        writeln!(result)?;
549
550        writeln!(result)?;
551        writeln!(result, "# Compiler output")?;
552        writeln!(result)?;
553        writeln!(result, "```\n{}```", self.full_compiler_output)?;
554
555        for failure in &self.failures {
556            match failure {
557                Failure::UnexpectedDiagnostic(diagnostic) => {
558                    writeln!(result)?;
559                    writeln!(result, "# Unexpected diagnostic")?;
560                    writeln!(result)?;
561
562                    let render = diagnostic.render(db, &opts.render_opts());
563                    writeln!(result, "```\n{render}\n```")?;
564                    writeln!(result)?;
565                    writeln!(result, "```\n{diagnostic:#?}\n```\n")?;
566                }
567                Failure::MultipleMatches(expected, actual) => {
568                    writeln!(result)?;
569                    writeln!(result, "# Multiple matches for expected diagnostic")?;
570                    writeln!(result)?;
571
572                    writeln!(result, "Diagnostic:")?;
573                    let render = actual.render(db, &opts.render_opts());
574                    writeln!(result, "```\n{render}\n```")?;
575                    writeln!(result)?;
576                    writeln!(result, "```\n{actual:#?}\n```\n")?;
577                    writeln!(result)?;
578                    writeln!(result, "Expected diagnostic that matched multiple times:")?;
579                    writeln!(result, "```\n{expected:#?}\n```")?;
580                }
581                Failure::MissingDiagnostic(expected) => {
582                    writeln!(result)?;
583                    writeln!(result, "# Missing expected diagnostic")?;
584                    writeln!(result)?;
585
586                    // Format this nicely
587                    let annotation_span = expected.annotation_span.into_span(db);
588                    let diagnostic = Diagnostic::new(
589                        db,
590                        Level::Error,
591                        annotation_span,
592                        "missing expected diagnostic",
593                    )
594                    .label(
595                        db,
596                        Level::Error,
597                        annotation_span,
598                        "this diagnostic was never reported",
599                    );
600                    let render = diagnostic.render(db, &opts.render_opts());
601                    writeln!(result, "```\n{render}\n```")?;
602                }
603                Failure::Auxiliary {
604                    kind,
605                    ref_path,
606                    txt_path,
607                    diff,
608                } => {
609                    writeln!(result)?;
610                    writeln!(result, "# {kind} did not match")?;
611                    writeln!(result)?;
612                    writeln!(
613                        result,
614                        "[Reference]({})",
615                        self.relativize(&self.path, ref_path).display()
616                    )?;
617                    writeln!(
618                        result,
619                        "[Actual]({})",
620                        self.relativize(&self.path, txt_path).display()
621                    )?;
622                    writeln!(result)?;
623
624                    writeln!(result, "Diff:")?;
625                    writeln!(result, "```diff\n{diff}\n```")?;
626                }
627                Failure::InternalCompilerError(captured_panic) => {
628                    writeln!(result)?;
629                    writeln!(result, "# Internal compiler error")?;
630                    writeln!(result)?;
631                    if let Some(captured_panic) = captured_panic {
632                        writeln!(result, "{}", captured_panic.render())?;
633                    } else {
634                        writeln!(result, "No details available. :(")?;
635                    }
636                }
637                Failure::Probe { probe, actual } => {
638                    writeln!(result)?;
639                    writeln!(result, "# Probe return unexpected result")?;
640                    writeln!(result)?;
641
642                    let (probe_line, probe_start_col) =
643                        probe.span.source_file.line_col(db, probe.span.start);
644                    let (probe_end_line, probe_end_col) =
645                        probe.span.source_file.line_col(db, probe.span.end);
646                    assert_eq!(
647                        probe_line, probe_end_line,
648                        "multiline probe not currently possible"
649                    );
650
651                    writeln!(
652                        result,
653                        "Probe location: {u}:{l}:{c}:{l}:{e}",
654                        u = probe.span.source_file.url_display(db),
655                        l = probe_line.as_u32() + 1,
656                        c = probe_start_col.as_u32() + 1,
657                        e = probe_end_col.as_u32() + 1,
658                    )?;
659                    writeln!(result, "Probe expected: {e}", e = probe.message)?;
660                    writeln!(result, "Probe got: {actual}")?;
661
662                    let file_text = probe.span.source_file.contents_if_ok(db);
663                    let line_range = probe.span.source_file.line_range(db, probe_line);
664                    if let Some(line_text) =
665                        file_text.get(line_range.start.as_usize()..line_range.end.as_usize())
666                    {
667                        writeln!(result)?;
668                        writeln!(result, "```")?;
669                        write!(result, "{line_text}")?;
670                        writeln!(
671                            result,
672                            "{s}{c} probe `{k:?}` expected `{e}`, got `{a}`",
673                            s = std::iter::repeat_n(' ', probe_start_col.as_usize())
674                                .collect::<String>(),
675                            c = std::iter::repeat_n(
676                                '^',
677                                (probe_end_col - probe_start_col).as_usize()
678                            )
679                            .collect::<String>(),
680                            k = probe.kind,
681                            e = probe.message,
682                            a = actual,
683                        )?;
684                        writeln!(result, "```")?;
685                        writeln!(result)?;
686                    }
687
688                    if matches!(probe.kind, ProbeKind::Ast) {
689                        let escaped_actual = actual
690                            .replace('\\', "\\\\")
691                            .replace('/', "\\/")
692                            .replace('&', "\\&");
693                        writeln!(result, "**Fix command** (if the new AST is correct):")?;
694                        writeln!(result, "```bash")?;
695                        writeln!(
696                            result,
697                            "sed -i '' '{line}s/Ast: .*/Ast: {escaped_actual}/' {path}",
698                            line = probe.annotation_line,
699                            path = self.path.display(),
700                        )?;
701                        writeln!(result, "```")?;
702                        writeln!(result)?;
703                    }
704                }
705                Failure::FixmePassed => {
706                    writeln!(result)?;
707                    writeln!(result, "# Test marked as FIXME and yet it passed")?;
708                    writeln!(result)?;
709                    writeln!(result, "Perhaps the bug was fixed?")?;
710                }
711                Failure::InvalidSpecReference(spec_ref) => {
712                    writeln!(result)?;
713                    writeln!(result, "# Invalid spec reference")?;
714                    writeln!(result)?;
715                    writeln!(
716                        result,
717                        "The spec reference `{spec_ref}` does not exist in the spec mdbook."
718                    )?;
719                    writeln!(
720                        result,
721                        "Check the spec files in `spec/src/` for valid `:::\\{{spec}}` directives."
722                    )?;
723                }
724            }
725        }
726
727        // Add intelligent guidance section
728        self.add_guidance_section(&mut result)?;
729
730        Ok(result)
731    }
732
733    fn add_guidance_section(&self, result: &mut String) -> Fallible<()> {
734        use std::fmt::Write;
735
736        // Count different types of failures to provide targeted guidance
737        let mut unexpected_diagnostics = 0;
738        let mut missing_diagnostics = 0;
739        let mut multiple_matches = 0;
740        let mut auxiliary_failures = 0;
741        let mut ice_failures = 0;
742        let mut spec_failures = 0;
743        let mut fixme_passed = 0;
744
745        for failure in &self.failures {
746            match failure {
747                Failure::UnexpectedDiagnostic(_) => unexpected_diagnostics += 1,
748                Failure::MissingDiagnostic(_) => missing_diagnostics += 1,
749                Failure::MultipleMatches(_, _) => multiple_matches += 1,
750                Failure::Auxiliary { .. } => auxiliary_failures += 1,
751                Failure::InternalCompilerError(_) => ice_failures += 1,
752                Failure::InvalidSpecReference(_) => spec_failures += 1,
753                Failure::FixmePassed => fixme_passed += 1,
754                _ => {}
755            }
756        }
757
758        writeln!(result)?;
759        writeln!(result, "# 🎯 Next Steps")?;
760        writeln!(result)?;
761
762        // Provide specific guidance based on failure types
763        if unexpected_diagnostics > 0 || missing_diagnostics > 0 || multiple_matches > 0 {
764            writeln!(result, "## Diagnostic Expectation Issues")?;
765            writeln!(result)?;
766            writeln!(
767                result,
768                "This test has diagnostic-related failures. Choose one approach:"
769            )?;
770            writeln!(result)?;
771            writeln!(
772                result,
773                "**Option 1: Add diagnostic annotations** (if these errors are expected)"
774            )?;
775            writeln!(
776                result,
777                "- Add `#! error message` or `#! ^^^ error message` annotations"
778            )?;
779            writeln!(
780                result,
781                "- Use `#! /regex/` or `#! ^^^ /regex/` for regex matching (e.g., `#! /could not find.*Baz/`)"
782            )?;
783            writeln!(
784                result,
785                "- Annotation can be on the same line as the error OR on any following line"
786            )?;
787            writeln!(
788                result,
789                "- The `^^^` markers indicate exact column positioning (optional)"
790            )?;
791            writeln!(
792                result,
793                "- Without `^^^`, the diagnostic just needs to start somewhere on the most recent non-empty, non-comment line"
794            )?;
795            writeln!(result, "- Look at other test files for annotation examples")?;
796            writeln!(result)?;
797            writeln!(
798                result,
799                "**Option 2: Fix the compiler/code** (if these errors are bugs)"
800            )?;
801            writeln!(
802                result,
803                "- If diagnostics are incorrect, investigate the compiler logic"
804            )?;
805            writeln!(result, "- If test code is wrong, fix the test source")?;
806            writeln!(result)?;
807            writeln!(
808                result,
809                "💡 **When in doubt**: Consult the user to clarify the test's intent"
810            )?;
811            writeln!(result)?;
812        }
813
814        if auxiliary_failures > 0 {
815            writeln!(result, "## Reference File Mismatch")?;
816            writeln!(result)?;
817            writeln!(
818                result,
819                "Output differs from reference files. If the new output is correct:"
820            )?;
821            writeln!(result, "```bash")?;
822            writeln!(
823                result,
824                "UPDATE_EXPECT=1 cargo dada test {}",
825                self.path.to_string_lossy()
826            )?;
827            writeln!(result, "```")?;
828            writeln!(result)?;
829        }
830
831        if ice_failures > 0 {
832            writeln!(result, "## Internal Compiler Error")?;
833            writeln!(result)?;
834            writeln!(
835                result,
836                "The compiler crashed - this indicates a compiler bug that needs investigation."
837            )?;
838            writeln!(result)?;
839        }
840
841        if spec_failures > 0 {
842            writeln!(result, "## Invalid Spec Reference")?;
843            writeln!(result)?;
844            writeln!(
845                result,
846                "Fix the `#:spec` annotation to reference a valid spec paragraph."
847            )?;
848            writeln!(result)?;
849        }
850
851        if fixme_passed > 0 {
852            writeln!(result, "## FIXME Test Passed")?;
853            writeln!(result)?;
854            writeln!(
855                result,
856                "This test was marked as FIXME but now passes - the bug may be fixed!"
857            )?;
858            writeln!(result, "Consider removing the FIXME annotation.")?;
859            writeln!(result)?;
860        }
861
862        Ok(())
863    }
864
865    fn relativize<'aux>(&self, test_path: &Path, aux_path: &'aux Path) -> &'aux Path {
866        if let Some(dir) = test_path.parent() {
867            aux_path.strip_prefix(dir).unwrap_or(aux_path)
868        } else {
869            aux_path
870        }
871    }
872
873    fn generate_test_report(&self, db: &dyn crate::Db) -> Fallible<()> {
874        std::fs::write(test_report_path(&self.path), self.report(db)?)?;
875        Ok(())
876    }
877}
878
879fn delete_test_report(path: &Path) -> Fallible<()> {
880    let path = test_report_path(path);
881    if path.exists() {
882        std::fs::remove_file(path)?;
883    }
884    Ok(())
885}
886
/// Path of the markdown report for the test at `path`: the test file's
/// extension is replaced with `test-report.md` (e.g. `foo.dada` becomes
/// `foo.test-report.md`).
fn test_report_path(path: &Path) -> PathBuf {
    let mut report = path.to_path_buf();
    report.set_extension("test-report.md");
    report
}