[ { "paperid": "2409.12320v1", "paper_path": "./SciVer/papers/2409.12320v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "5.2.1", "5.2.2" ], "image_path": "./SciVer/images/2409.12320v1_figure_4.png", "request_id": 4, "origin_statement": "Despite stable medians at 5, the 3.5% decline in mean helpfulness from 4.93 to 4.76 implies that participants’ enhanced prompt engineering skills increased their critical evaluation standards, causing attribution bias where improved methodology heightens scrutiny of LLM outputs beyond intrinsic performance.", "perturbed_statement": "Despite stable medians at 4, the 15% decline in mean helpfulness from 4.93 to 4.76 implies that participants’ enhanced prompt engineering skills increased their critical evaluation standards, causing attribution bias where improved methodology heightens scrutiny of LLM outputs beyond intrinsic performance.", "perturbed_explanation": "This statement is incorrect because Figure 4 and the text report the median perceived helpfulness remained at 5 (not 4) and the mean decrease was 3.5% (from 4.93 to 4.76), not a 15% decline.", "claim": "Despite stable medians at 5, the 3.5% decline in mean helpfulness from 4.93 to 4.76 implies that participants’ enhanced prompt engineering skills increased their critical evaluation standards, causing attribution bias where improved methodology heightens scrutiny of LLM outputs beyond intrinsic performance.", "label": true }, { "paperid": "2410.08207v1", "paper_path": "./SciVer/papers/2410.08207v1.json", "claim_type": "analytical", "type": "chart", "item": "7(b)", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.08207v1_figure_7(b).png", "request_id": 6, "origin_statement": "The exponential λ schedule consistently yields the lowest structure distance at moderate CLIP similarity (∼20), suggesting that allocating more edit weight early preserves structural fidelity, balancing semantic alignment and reconstruction quality better than linear or uniform schedules.", "perturbed_statement": "The linear λ schedule consistently yields the lowest structure distance at moderate CLIP similarity (∼21), indicating that evenly distributed injection of original signal across steps preserves structural fidelity better than exponential or uniform schedules.", "perturbed_explanation": "At CLIP similarity around 21, Figure 7 shows the structure distance for the linear schedule (~0.019–0.02) is higher than for both exponential (~0.018) and uniform (~0.018) schedules, so linear does not yield the lowest structure distance as claimed.", "claim": "The exponential λ schedule consistently yields the lowest structure distance at moderate CLIP similarity (∼20), suggesting that allocating more edit weight early preserves structural fidelity, balancing semantic alignment and reconstruction quality better than linear or uniform schedules.", "label": true }, { "paperid": "2410.07073v2", "paper_path": "./SciVer/papers/2410.07073v2.json", "claim_type": "analytical", "type": "chart", "item": "11", "section": [ "3", "5" ], "image_path": "./SciVer/images/2410.07073v2_figure_11.png", "request_id": 8, "origin_statement": "The share of respondents 'not worried at all' increases steadily from Gen Z (25%) through Gen X (48%) and then surges to 82% among Baby Boomers, reflecting how tenure and proximity to retirement often buffer older workers against market volatility and strengthen their perceived job security.", "perturbed_statement": "The share of respondents 'not worried at all' increases from 
Gen Z (30%) through Gen X (55%) before spiking to 75% among Baby Boomers, indicating older workers rely on tenure-based protections against market shocks to feel secure.", "perturbed_explanation": "The perturbed statement misstates the percentages shown in the chart: Gen Z’s 'not worried at all' share is actually 25% (not 30%), Gen X’s is 48% (not 55%), and Baby Boomers’ is 82% (not 75%), contradicting the data.", "claim": "The share of respondents 'not worried at all' increases steadily from Gen Z (25%) through Gen X (48%) and then surges to 82% among Baby Boomers, reflecting how tenure and proximity to retirement often buffer older workers against market volatility and strengthen their perceived job security.", "label": true }, { "paperid": "2410.16617v1", "paper_path": "./SciVer/papers/2410.16617v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1", "4.2" ], "image_path": "./SciVer/images/2410.16617v1-Table1-1.png", "request_id": 9, "origin_statement": "Laboratory studies demonstrate that mosquito extrinsic incubation accelerates with temperature. The model estimates each 1°C increase in average maximum temperature raises the odds of Zika versus dengue transmission by 10% (OR 1.10), highlighting temperature‐driven facilitation of Zika spread.", "perturbed_statement": "Model results indicate a 1°C rise in average maximum temperature increases the odds of chikungunya versus dengue transmission by 19% (OR 1.19), implying warmer temperatures disproportionately accelerate chikungunya spread in Rio.", "perturbed_explanation": "This statement is incorrect because Table 2 reports an OR of 0.81 for the chikungunya–dengue temperature effect (95% CI 0.76–0.86), meaning a 1°C increase actually decreases chikungunya odds by 19%, not increases them.", "claim": "Laboratory studies demonstrate that mosquito extrinsic incubation accelerates with temperature. The model estimates each 1°C increase in average maximum temperature raises the odds of Zika versus dengue transmission by 10% (OR 1.10), highlighting temperature‐driven facilitation of Zika spread.", "label": true }, { "paperid": "2409.03911v1", "paper_path": "./SciVer/papers/2409.03911v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.03911v1-Table4-1.png", "request_id": 11, "origin_statement": "Spanish pre-training yields a 0.016 BLEU improvement for Catalan, underscoring linguistic proximity's role in transfer learning; yet German alone achieves 0.283 BLEU—outperforming the Spanish-Italian cluster—indicating data volume can sometimes outweigh proximity. Mixing English and German during pre-training introduces conflicting gradients that halve Catalan performance.", "perturbed_statement": "Spanish pre-training yields a 0.026 BLEU improvement for Catalan, highlighting linguistic proximity in transfer; German alone still achieves 0.283 BLEU—outperforming the Spanish-Italian cluster—suggesting data volume can sometimes outweigh proximity. Mixing English and German during pre-training doubles Catalan performance by mitigating gradient conflicts.", "perturbed_explanation": "In Table 4, Spanish pre-training actually raises BLEU from 0.276 to 0.292 (a 0.016 increase), not 0.026. 
Moreover, English-German pre-training reduces Catalan BLEU from 0.292 to 0.282, rather than doubling it, so the claim about doubling performance contradicts the reported 0.282 BLEU.", "claim": "Spanish pre-training yields a 0.016 BLEU improvement for Catalan, underscoring linguistic proximity's role in transfer learning; yet German alone achieves 0.283 BLEU—outperforming the Spanish-Italian cluster—indicating data volume can sometimes outweigh proximity. Mixing English and German during pre-training introduces conflicting gradients that halve Catalan performance.", "label": true }, { "paperid": "2410.15939v1", "paper_path": "./SciVer/papers/2410.15939v1.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.15939v1_figure_3(a).png", "request_id": 12, "origin_statement": "Mixtral’s Sink task accuracy on the Insurance graph nearly doubles—from ~30% without context to ~60% with semantic labels—indicating that meaningful variable names allow LLMs to leverage pretraining priors and activate parametric memory to infer challenging causal relationships.", "perturbed_statement": "Mixtral’s Sink task accuracy nearly quadruples—from ~30% without context to ~75% with semantic labels—indicating that meaningful variable names allow LLMs to leverage pretraining priors and activate parametric memory to infer challenging causal relationships.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 3 shows Mixtral’s Sink performance rising only from about 30% to roughly 60%, not to 75%, and thus the improvement is closer to a doubling, not a quadrupling.", "claim": "Mixtral’s Sink task accuracy on the Insurance graph nearly doubles—from ~30% without context to ~60% with semantic labels—indicating that meaningful variable names allow LLMs to leverage pretraining priors and activate parametric memory to infer challenging causal relationships.", "label": true }, { "paperid": "2410.09982v3", "paper_path": "./SciVer/papers/2410.09982v3.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "3.1", "3.3" ], "image_path": "./SciVer/images/2410.09982v3_figure_4(a).png", "request_id": 14, "origin_statement": "For prune block size 4, Self-Data FT yields a narrower, right-shifted embedding similarity distribution with mean 0.89 versus SFT’s flatter distribution and mean 0.85, indicating it mitigates distribution shift more effectively by preserving the pretrained model’s representational geometry and reducing catastrophic forgetting risk.", "perturbed_statement": "For prune block size 4, Self-Data FT yields a wider, left-shifted embedding similarity distribution with mean 0.87 versus SFT’s narrower distribution and mean 0.89, indicating it increases distribution shift and exacerbates catastrophic forgetting by distorting representational geometry.", "perturbed_explanation": "The perturbation is incorrect because on the actual figure Self-Data FT has a narrower (not wider), right-shifted distribution and a higher mean similarity (≈0.89) compared to SFT’s mean (≈0.85). 
Thus the described means and distribution shapes are reversed.", "claim": "For prune block size 4, Self-Data FT yields a narrower, right-shifted embedding similarity distribution with mean 0.89 versus SFT’s flatter distribution and mean 0.85, indicating it mitigates distribution shift more effectively by preserving the pretrained model’s representational geometry and reducing catastrophic forgetting risk.", "label": true }, { "paperid": "2411.00119v2", "paper_path": "./SciVer/papers/2411.00119v2.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.00119v2_figure_2(a).png", "request_id": 18, "origin_statement": "When over 59% of pairwise match-ups are missing, SCO using sigmoid or Fenchel-Young loss achieves the lowest Kendall-tau distance and mean true rating distance among all methods, demonstrating its robustness to high sparsity via loss-based inference.", "perturbed_statement": "When over 30% of pairwise match-ups are missing, SCO using sigmoid or Fenchel-Young loss achieves the lowest Kendall-tau distance and mean true rating distance among all methods, demonstrating its robustness to high sparsity via loss-based inference.", "perturbed_explanation": "The threshold is incorrect: the context specifies that SCO outperforms all other methods only when 59% or more match-ups are missing (and requires 38% missing in the uniform case), not as low as 30%.", "claim": "When over 59% of pairwise match-ups are missing, SCO using sigmoid or Fenchel-Young loss achieves the lowest Kendall-tau distance and mean true rating distance among all methods, demonstrating its robustness to high sparsity via loss-based inference.", "label": true }, { "paperid": "2409.01901v1", "paper_path": "./SciVer/papers/2409.01901v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.01901v1_figure_4.png", "request_id": 20, "origin_statement": "Expert annotations show the 'Unlabeled' handshape accounts for approximately 240 frames—primarily neutral ‘5’ transitions—while automatic clustering’s largest group (cluster 11) has about 140 frames. This parallel prominence suggests recurring neutral posture signals heavily influence both manual and automated handshape distributions.", "perturbed_statement": "Expert annotations show the '5' handshape accounts for approximately 240 frames—primarily neutral ‘5’ transitions—while automatic clustering’s largest group (cluster 14) has about 140 frames. This parallel prominence suggests recurring neutral posture signals heavily influence both manual and automated handshape distributions.", "perturbed_explanation": "The perturbed statement is incorrect because the most frequent expert-annotated category is 'Unlabeled', not the '5' handshape, and the largest automatic cluster is ID 11 with about 140 frames, not cluster 14.", "claim": "Expert annotations show the 'Unlabeled' handshape accounts for approximately 240 frames—primarily neutral ‘5’ transitions—while automatic clustering’s largest group (cluster 11) has about 140 frames. 
This parallel prominence suggests recurring neutral posture signals heavily influence both manual and automated handshape distributions.", "label": true }, { "paperid": "2411.00312v1", "paper_path": "./SciVer/papers/2411.00312v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "4" ], "image_path": "./SciVer/images/2411.00312v1_figure_2.png", "request_id": 34, "origin_statement": "Figure 2 demonstrates that optimal quarantine controls for groups 1 and 2 exhibit a bang–bang structure, staying at the maximum level throughout most of the 60–120 day horizon, whereas group 3’s control linearly decays, reflecting its lower cost weight and the diminishing marginal benefit of isolation as susceptibles decline.", "perturbed_statement": "Figure 2 demonstrates that optimal quarantine controls for groups 1 and 2 exhibit a bang–bang structure, staying at the minimum level throughout most of the 60–120 day horizon, whereas group 3’s control linearly increases, reflecting its higher cost weight and the increasing marginal benefit of isolation as susceptibles decline.", "perturbed_explanation": "The perturbed statement is incorrect because in Figure 2, controls u₁ and u₂ remain at their maximum (1) for most of the time, not at a minimum. Also, u₃ actually decays over time rather than increases, and group 3 has a lower cost weight, not a higher one.", "claim": "Figure 2 demonstrates that optimal quarantine controls for groups 1 and 2 exhibit a bang–bang structure, staying at the maximum level throughout most of the 60–120 day horizon, whereas group 3’s control linearly decays, reflecting its lower cost weight and the diminishing marginal benefit of isolation as susceptibles decline.", "label": true }, { "paperid": "2409.05371v1", "paper_path": "./SciVer/papers/2409.05371v1.json", "claim_type": "analytical", "type": "chart", "item": "2(b)", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.05371v1_figure_2(b).png", "request_id": 38, "origin_statement": "In the strong deflection limit for LQG black holes, the deflection angle α(b) increases as the polymeric parameter P increases, showing that quantum corrections amplify spacetime curvature near the photon sphere, leading to stronger lensing compared to the classical Schwarzschild solution.", "perturbed_statement": "In the strong deflection limit for LQG black holes, the deflection angle α(b) decreases as the polymeric parameter P increases, indicating that quantum corrections weaken spacetime curvature near the photon sphere, leading to weaker lensing compared to the classical Schwarzschild solution.", "perturbed_explanation": "This claim contradicts the context and Figure 2, which show that as P grows (from 0.02 to 0.04), α(b) actually increases relative to Schwarzschild. 
The statement that α(b) decreases with increasing P is therefore incorrect.", "claim": "In the strong deflection limit for LQG black holes, the deflection angle α(b) increases as the polymeric parameter P increases, showing that quantum corrections amplify spacetime curvature near the photon sphere, leading to stronger lensing compared to the classical Schwarzschild solution.", "label": true }, { "paperid": "2409.08851v1", "paper_path": "./SciVer/papers/2409.08851v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2409.08851v1-Table1-1.png", "request_id": 39, "origin_statement": "The explicit 3p5q(4) Runge-Kutta scheme combines four stages with temporal accuracy of order 3 and pseudo-symplectic order 5, effectively nullifying spurious kinetic energy production up to fifth order and ensuring energy decay arises solely from physical viscous dissipation in homogeneous periodic flows.", "perturbed_statement": "The explicit 3p5q(4) Runge-Kutta scheme combines five stages with temporal accuracy of order 3 and pseudo-symplectic order 4, effectively nullifying spurious kinetic energy production up to fourth order and ensuring energy decay arises solely from physical viscous dissipation in homogeneous periodic flows.", "perturbed_explanation": "The 3p5q(4) scheme actually uses four stages (not five) and has pseudo-symplectic order 5 (not 4), as given in Table 1. These incorrect stage count and order values contradict the provided Butcher tableau coefficients and scheme notation.", "claim": "The explicit 3p5q(4) Runge-Kutta scheme combines four stages with temporal accuracy of order 3 and pseudo-symplectic order 5, effectively nullifying spurious kinetic energy production up to fifth order and ensuring energy decay arises solely from physical viscous dissipation in homogeneous periodic flows.", "label": true }, { "paperid": "2410.03091v1", "paper_path": "./SciVer/papers/2410.03091v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "5" ], "image_path": "./SciVer/images/2410.03091v1_figure_3.png", "request_id": 44, "origin_statement": "The proposed estimator accounts for informative shorter CGM follow-up by down-weighting early high-glucose periods, thus producing systematically higher mean TIR 70–180 mg/dL compared to the naive estimator; this bias correction magnifies over longer durations as patient glucose stabilizes during hospitalization.", "perturbed_statement": "By adjusting for informative follow-up, the proposed method yields systematically lower mean TIR 70–180 mmol/L than the naive estimator; this discrepancy diminishes over longer durations as inpatient glucose variability increases.", "perturbed_explanation": "The perturbed statement is incorrect because TIR is reported in mg/dL, not mmol/L, and the proposed method actually produces higher—not lower—mean TIR 70–180 mg/dL compared to the naive estimator. 
Additionally, the discrepancy increases over time as glucose stabilizes, not diminishes.", "claim": "The proposed estimator accounts for informative shorter CGM follow-up by down-weighting early high-glucose periods, thus producing systematically higher mean TIR 70–180 mg/dL compared to the naive estimator; this bias correction magnifies over longer durations as patient glucose stabilizes during hospitalization.", "label": true }, { "paperid": "2410.19472v1", "paper_path": "./SciVer/papers/2410.19472v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2.4" ], "image_path": "./SciVer/images/2410.19472v1-Table1-1.png", "request_id": 45, "origin_statement": "The low flow split of 6.7% in the left common carotid (Γ2) is consistent with its high resistance ratio (Ri/R4 = 8.6), illustrating that, per Poiseuille’s law, increased outlet resistance reduces volumetric flow, while the abdominal aorta’s large capacitance (C = 12×10⁻¹⁰ m³/Pa) buffers pulsatile pressure.", "perturbed_statement": "The low flow split of 6.7% in the left common carotid (Γ2) is consistent with its resistance ratio (Ri/R4 = 3.5), illustrating that, per Poiseuille’s law, increased outlet resistance reduces volumetric flow, while the abdominal aorta’s capacitance (C = 8×10⁻¹⁰ m³/Pa) buffers pulsatile pressure.", "perturbed_explanation": "The statement is incorrect because the left common carotid’s resistance ratio is actually Ri/R4 = 8.6 (not 3.5), and the abdominal aorta’s capacitance is 12×10⁻¹⁰ m³/Pa (not 8×10⁻¹⁰ m³/Pa), as given in Table 1.", "claim": "The low flow split of 6.7% in the left common carotid (Γ2) is consistent with its high resistance ratio (Ri/R4 = 8.6), illustrating that, per Poiseuille’s law, increased outlet resistance reduces volumetric flow, while the abdominal aorta’s large capacitance (C = 12×10⁻¹⁰ m³/Pa) buffers pulsatile pressure.", "label": true }, { "paperid": "2410.13376v1", "paper_path": "./SciVer/papers/2410.13376v1.json", "claim_type": "analytical", "type": "chart", "item": "5(a)", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.13376v1_figure_5(a).png", "request_id": 46, "origin_statement": "Figure 5 shows that at ε* = 0.0151 and 0.0352, the CAE-FFNN’s predicted v(0,ε*,t) and w(0,ε*,t) trajectories nearly coincide with the true nonlinear cubic limit cycles during the extrapolated time span t∈[1,2], confirming its robust capture of excitable membrane dynamics beyond training data.", "perturbed_statement": "Figure 5 indicates that at ε* = 0.0252 and 0.0452, the CAE-FFNN predictions for v(0,ε*,t) and w(0,ε*,t) align with reference solutions over the training interval t∈[0,1], demonstrating its ability to generalize nonlinear limit cycle behavior within the trained time domain.", "perturbed_explanation": "The statement incorrectly lists ε* as 0.0252 and 0.0452, whereas Figure 5 uses ε* = 0.0151 and 0.0352. 
It also misidentifies the interval of strong agreement: the predictions coincide over the extrapolated window t ∈ [1,2], not within the training interval t ∈ [0,1].", "claim": "Figure 5 shows that at ε* = 0.0151 and 0.0352, the CAE-FFNN’s predicted v(0,ε*,t) and w(0,ε*,t) trajectories nearly coincide with the true nonlinear cubic limit cycles during the extrapolated time span t∈[1,2], confirming its robust capture of excitable membrane dynamics beyond training data.", "label": true }, { "paperid": "2409.10343v1", "paper_path": "./SciVer/papers/2409.10343v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "7.2" ], "image_path": "./SciVer/images/2409.10343v1-Table4-1.png", "request_id": 47, "origin_statement": "Across Amazon-books, Yelp, and Steam, increasing ε_l^max from 0.05 to 0.10 at constant ε_v=0.5 raises T_score tokens by roughly 100% (11→21 m, 16→30 m, 35→62 m), reflecting that larger noise scales heighten sample diversity and retention during scoring.", "perturbed_statement": "Across Amazon-books, Yelp, and Steam, decreasing ε_v from 0.5 to 0.3 at constant ε_l^max=0.05 nearly doubles T_score tokens (11→21 m, 16→30 m, 35→62 m), showing that lower confidence thresholds amplify sample retention during scoring.", "perturbed_explanation": "This is incorrect because Table 4 shows that reducing ε_v from 0.5 to 0.3 at ε_l^max=0.05 actually decreases T_score tokens to 7 m (Amazon-books), 10 m (Yelp), and 20 m (Steam), rather than increasing them to 21 m, 30 m, and 62 m.", "claim": "Across Amazon-books, Yelp, and Steam, increasing ε_l^max from 0.05 to 0.10 at constant ε_v=0.5 raises T_score tokens by roughly 100% (11→21 m, 16→30 m, 35→62 m), reflecting that larger noise scales heighten sample diversity and retention during scoring.", "label": true }, { "paperid": "2410.22387v1", "paper_path": "./SciVer/papers/2410.22387v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.7" ], "image_path": "./SciVer/images/2410.22387v1-Table3-1.png", "request_id": 49, "origin_statement": "Elevated enrichment scores (NES >2.0, FDR <0.1) for FOXM1- and PLK1-pathways in Gleason 9 versus 7 indicate accelerated mitotic entry and chromatid segregation, suggesting that enhanced CDK1-driven phosphorylation elevates chromosomal instability and contributes to aggressive prostate cancer progression.", "perturbed_statement": "Despite an NES >2.5 (FDR <0.01) reported for the PID E2F pathway in Gleason 9, its enrichment erroneously suggests transcriptional silencing of S-phase genes and reduced proliferation, contradicting the typical role of E2F in promoting cell-cycle progression and proliferation in high-grade tumors.", "perturbed_explanation": "The perturbed statement is incorrect because the table shows the PID E2F Pathway has an NES of 1.8485 (not >2.5) and an FDR q-value of 0.0987 (not <0.01). 
Moreover, E2F enrichment corresponds to S-phase gene activation and proliferation, not transcriptional silencing.", "claim": "Elevated enrichment scores (NES >2.0, FDR <0.1) for FOXM1- and PLK1-pathways in Gleason 9 versus 7 indicate accelerated mitotic entry and chromatid segregation, suggesting that enhanced CDK1-driven phosphorylation elevates chromosomal instability and contributes to aggressive prostate cancer progression.", "label": true }, { "paperid": "2411.01074v1", "paper_path": "./SciVer/papers/2411.01074v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.01074v1-Table3-1.png", "request_id": 51, "origin_statement": "Incorporating compactness loss during modular training reduced MODA’s average module size by 24.23% and average weight overlap by 21.80%, while incurring only a 0.27% average drop in reuse accuracy, demonstrating that penalizing parameter dispersion effectively sparsifies modules without notable performance loss.", "perturbed_statement": "Incorporating compactness loss during modular training reduced MODA’s average module size by 33.48% and average weight overlap by 30.12%, while incurring only a 0.27% average drop in reuse accuracy, demonstrating that penalizing parameter dispersion effectively sparsifies modules without notable performance loss.", "perturbed_explanation": "The perturbed statement incorrectly reports the average reductions. Table 3 shows a 24.23% average module size reduction (not 33.48%) and a 21.80% average overlap reduction (not 30.12%); the larger values correspond only to specific model–dataset pairs, not the overall average.", "claim": "Incorporating compactness loss during modular training reduced MODA’s average module size by 24.23% and average weight overlap by 21.80%, while incurring only a 0.27% average drop in reuse accuracy, demonstrating that penalizing parameter dispersion effectively sparsifies modules without notable performance loss.", "label": true }, { "paperid": "2410.21813v1", "paper_path": "./SciVer/papers/2410.21813v1.json", "claim_type": "analytical", "type": "table", "item": "7", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.21813v1-Table7-1.png", "request_id": 53, "origin_statement": "SAM2’s 34.6% relative IoU improvement over UNet (63.63% vs 47.27%) arises from its multi-scale feature fusion, which enhances boundary refinement and reduces false negatives at lesion edges, demonstrating the critical role of scale-aware context integration for precise laryngo-pharyngeal tumor segmentation.", "perturbed_statement": "SAM2’s 23.2% relative IoU improvement over UNet (63.63% vs 47.27%) arises from its morphological dilation post-processing, which enhances boundary smoothing but may not significantly reduce false negatives, highlighting the role of post-processing for tumor segmentation.", "perturbed_explanation": "The perturbed statement misstates the relative IoU improvement as 23.2% instead of the correct ~34.6% ((63.63−47.27)/47.27×100). 
It also wrongly attributes gains to morphological dilation post-processing, whereas the context credits SAM2’s multi-scale feature fusion for boundary refinement.", "claim": "SAM2’s 34.6% relative IoU improvement over UNet (63.63% vs 47.27%) arises from its multi-scale feature fusion, which enhances boundary refinement and reduces false negatives at lesion edges, demonstrating the critical role of scale-aware context integration for precise laryngo-pharyngeal tumor segmentation.", "label": true }, { "paperid": "2411.10399v1", "paper_path": "./SciVer/papers/2411.10399v1.json", "claim_type": "analytical", "type": "chart", "item": "9", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.10399v1_figure_9.png", "request_id": 56, "origin_statement": "In E30, Nash equilibrium actions allocate liquidity across roughly ten atomic price ranges (median ~10 positions), whereas 65% of real LPs open a single broad-range position, suggesting practitioners trade off fee maximization for reduced gas costs and risk diversification.", "perturbed_statement": "In E30, Nash equilibrium actions allocate liquidity across roughly fifteen atomic price ranges (median ~15 positions), whereas 75% of real LPs open a single broad-range position, suggesting practitioners trade off fee maximization for reduced gas costs and risk diversification.", "perturbed_explanation": "The perturbed statement is incorrect because the CDF shows the Nash equilibrium median number of positions is around ten, not fifteen, and only about 65% (not 75%) of real LPs open exactly one position, contradicting the original data.", "claim": "In E30, Nash equilibrium actions allocate liquidity across roughly ten atomic price ranges (median ~10 positions), whereas 65% of real LPs open a single broad-range position, suggesting practitioners trade off fee maximization for reduced gas costs and risk diversification.", "label": true }, { "paperid": "2410.12049v1", "paper_path": "./SciVer/papers/2410.12049v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "2.6" ], "image_path": "./SciVer/images/2410.12049v1-Table5-1.png", "request_id": 57, "origin_statement": "Leveraging inverse weighting of environment difficulty, Sabiá-3’s top web-shopping score (62.5) disproportionately boosts its weighted average (OA 2.9), indicating that targeted improvements in harder web-grounded tasks yield greater gains in agentic performance than uniform enhancements across environments.", "perturbed_statement": "Leveraging inverse weighting, Sabiá-3’s web-shopping score of 72.5 disproportionately boosts its weighted average to OA 3.9, suggesting targeted web-grounded improvements yield gains exceeding those from uniform environment enhancements.", "perturbed_explanation": "The perturbed statement is incorrect because Table 5 reports Sabiá-3’s web-shopping (WS) score as 62.5, not 72.5, and its overall average (OA) as 2.9, not 3.9, contradicting the context.", "claim": "Leveraging inverse weighting of environment difficulty, Sabiá-3’s top web-shopping score (62.5) disproportionately boosts its weighted average (OA 2.9), indicating that targeted improvements in harder web-grounded tasks yield greater gains in agentic performance than uniform enhancements across environments.", "label": true }, { "paperid": "2409.10031v1", "paper_path": "./SciVer/papers/2409.10031v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "3.1", "4.1", "4.3" ], "image_path": "./SciVer/images/2409.10031v1_figure_2(a).png", "request_id": 
58, "origin_statement": "The concentration of sanctioned BTC entities in China (13) and Russia (10) indicates that illicit crypto flows are geographically clustered, supporting network centrality theory: these hubs likely act as primary nodes in laundering networks, amplifying the systemic risk of sanctions evasion through high node degree and address reuse.", "perturbed_statement": "The concentration of sanctioned BTC entities in China (13) and Russia (15) indicates that illicit crypto flows are geographically clustered, supporting network centrality theory: these hubs likely act as primary nodes in laundering networks, amplifying the systemic risk of sanctions evasion through high node degree and address reuse.", "perturbed_explanation": "The perturbed statement incorrectly lists 15 BTC-related sanctioned entities for Russia, whereas the context and Figure 2a specify only 10 sanctioned entities in Russia. This misrepresents the actual count and contradicts the provided data.", "claim": "The concentration of sanctioned BTC entities in China (13) and Russia (10) indicates that illicit crypto flows are geographically clustered, supporting network centrality theory: these hubs likely act as primary nodes in laundering networks, amplifying the systemic risk of sanctions evasion through high node degree and address reuse.", "label": true }, { "paperid": "2411.07533v1", "paper_path": "./SciVer/papers/2411.07533v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.2", "5.3" ], "image_path": "./SciVer/images/2411.07533v1-Table2-1.png", "request_id": 59, "origin_statement": "The table shows Chinese resources are mid for English-centric LLMs but high for Qwen, while form F1 consistently exceeds meaning F1. This implies structural patterns generalize with less data, whereas semantic comprehension demands richer multilingual corpora, consistent with distributional semantics principles.", "perturbed_statement": "The table indicates Chinese resources are high for both Llama2 and Llama3, while form F1 consistently exceeds meaning F1. This implies structural patterns generalize with less data, whereas semantic comprehension demands richer multilingual corpora, consistent with distributional semantics principles.", "perturbed_explanation": "Table 2 classifies Chinese as a 'mid' resource for both Llama2 and Llama3, not 'high'. Therefore stating that Llama2 and Llama3 have high Chinese resources contradicts the provided context.", "claim": "The table shows Chinese resources are mid for English-centric LLMs but high for Qwen, while form F1 consistently exceeds meaning F1. 
This implies structural patterns generalize with less data, whereas semantic comprehension demands richer multilingual corpora, consistent with distributional semantics principles.", "label": true }, { "paperid": "2411.06018v1", "paper_path": "./SciVer/papers/2411.06018v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "6.2" ], "image_path": "./SciVer/images/2411.06018v1_figure_8.png", "request_id": 68, "origin_statement": "The mel spectrogram maps frequency from -5.1 to 505.1 Hz over a 2-second window using a colorbar spanning -60 to 0 dB, enhancing RCW's ability to emphasize transient high-frequency features (e.g., peaks above 300 Hz) due to stark decibel-level contrasts.", "perturbed_statement": "The mel spectrogram maps frequency from -50 to 1000 Hz over a 2-second window using a colorbar spanning -100 to +20 dB, enhancing RCW's ability to emphasize transient high-frequency features (e.g., peaks above 300 Hz) due to stark decibel-level contrasts.", "perturbed_explanation": "This statement is incorrect because the actual figure displays frequency from about -5.1 to 505.1 Hz (not -50 to 1000 Hz) and a colorbar range from -60 to 0 dB (not -100 to +20 dB).", "claim": "The mel spectrogram maps frequency from -5.1 to 505.1 Hz over a 2-second window using a colorbar spanning -60 to 0 dB, enhancing RCW's ability to emphasize transient high-frequency features (e.g., peaks above 300 Hz) due to stark decibel-level contrasts.", "label": true }, { "paperid": "2411.10924v1", "paper_path": "./SciVer/papers/2411.10924v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.10924v1_figure_2.png", "request_id": 72, "origin_statement": "The heatmap reveals that spectral channels near indices 50–60 exhibit elevated attention weights (approx. 0.65) across multiple grain classes, suggesting these bands capture key moisture absorption features in hyperspectral data, which ResNet-18 leverages to enhance distinction among similar cereal types.", "perturbed_statement": "The heatmap reveals that spectral channels near indices 150–160 exhibit elevated attention weights (approx. 0.85) across multiple grain classes, suggesting these bands capture key moisture absorption features in hyperspectral data, which ResNet-18 leverages to enhance distinction among similar cereal types.", "perturbed_explanation": "This statement is wrong because Figure 2 shows the highest attention weights (~0.70) around channels 50–60, not channels 150–160, and no weights reach as high as 0.85 in the heatmap.", "claim": "The heatmap reveals that spectral channels near indices 50–60 exhibit elevated attention weights (approx. 
0.65) across multiple grain classes, suggesting these bands capture key moisture absorption features in hyperspectral data, which ResNet-18 leverages to enhance distinction among similar cereal types.", "label": true }, { "paperid": "2409.01988v1", "paper_path": "./SciVer/papers/2409.01988v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.01988v1-Table3-1.png", "request_id": 77, "origin_statement": "At compression ratio R=0.25 on OV-MNIST, both GloCal variants achieve over 88% test accuracy—approximately 10% higher than Zisselman-E2E—reflecting how binary-structured measurements capture salient sparse features more effectively than unstructured linear projections.", "perturbed_statement": "At compression ratio R=0.1 on OV-MNIST, adding the recovery loss increases test accuracy from 83.19% to 84.11%, demonstrating a 0.92% robustness gain under high compression.", "perturbed_explanation": "This is incorrect because according to Table 3, the GloCal-with-rec-loss model test accuracy at R=0.1 is actually 81.41%, not 84.11%, and thus underperforms the GloCal-without-rec-loss accuracy of 83.19%.", "claim": "At compression ratio R=0.25 on OV-MNIST, both GloCal variants achieve over 88% test accuracy—approximately 10% higher than Zisselman-E2E—reflecting how binary-structured measurements capture salient sparse features more effectively than unstructured linear projections.", "label": true }, { "paperid": "2409.18903v2", "paper_path": "./SciVer/papers/2409.18903v2.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.18903v2_figure_1.png", "request_id": 80, "origin_statement": "Owing to the nonlocal source term’s smoothing effect, the piecewise linear solution u(t,x) remains Hölder continuous, yet its spatial derivative u_x diverges at t≈1.95 at x=1, causing the absolutely continuous energy distribution on [0,1] to collapse into a Dirac mass δ₁ in F as t→1.95.", "perturbed_statement": "Owing to the nonlocal source term’s smoothing effect, the piecewise linear solution u(t,x) remains Hölder continuous, yet its spatial derivative u_x diverges at t≈1.95 at x=0.5, causing the absolutely continuous energy distribution on [0,1] to collapse into a Dirac mass δ₀.₅ in F as t→1.95.", "perturbed_explanation": "This is incorrect because, in Example 1.1 and Figure 1, the interval shrinks to the point x=1, and F(t,x) converges to δ₁. 
Wave breaking and energy concentration occur at x=1, not at x=0.5.", "claim": "Owing to the nonlocal source term’s smoothing effect, the piecewise linear solution u(t,x) remains Hölder continuous, yet its spatial derivative u_x diverges at t≈1.95 at x=1, causing the absolutely continuous energy distribution on [0,1] to collapse into a Dirac mass δ₁ in F as t→1.95.", "label": true }, { "paperid": "2409.15951v1", "paper_path": "./SciVer/papers/2409.15951v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2409.15951v1-Table2-1.png", "request_id": 83, "origin_statement": "The close agreement between the fitted limb darkening coefficients (u1=0.349, u2=0.256) and published values indicates that quadratic limb-darkening accurately approximates the radiative transfer-based light curves, implying that transit depth estimation through PySME/StarRotator yields minimal model-dependent biases even for fast rotators.", "perturbed_statement": "The close agreement between the fitted limb darkening coefficients (u1=0.459, u2=0.356) and published values indicates that quadratic limb-darkening accurately approximates the radiative transfer-based light curves, implying that transit depth estimation through PySME/StarRotator yields minimal model-dependent biases even for fast rotators.", "perturbed_explanation": "The perturbed statement misquotes the fitted limb darkening coefficients. Table 2 reports u1=0.349 and u2=0.256, not u1=0.459 and u2=0.356, so the values no longer match the context.", "claim": "The close agreement between the fitted limb darkening coefficients (u1=0.349, u2=0.256) and published values indicates that quadratic limb-darkening accurately approximates the radiative transfer-based light curves, implying that transit depth estimation through PySME/StarRotator yields minimal model-dependent biases even for fast rotators.", "label": true }, { "paperid": "2409.00614v1", "paper_path": "./SciVer/papers/2409.00614v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "6.3" ], "image_path": "./SciVer/images/2409.00614v1_figure_2.png", "request_id": 84, "origin_statement": "By employing Bayesian Optimization to tune local aggregation weights (BOLA), DAMe adaptively adjusts client contributions, mitigating non-IID heterogeneity; this yields an average accuracy increase of 2.5 percentage points over the no-BOLA variant, with the largest gain (8.8 points) observed on Arabic Twitter.", "perturbed_statement": "By employing Bayesian Optimization to tune local aggregation weights (BOLA), DAMe adaptively adjusts client contributions, mitigating non-IID heterogeneity; this yields an average accuracy increase of 5.6 percentage points over the no-BOLA variant, with the largest gain (11.2 points) observed on German Twitter.", "perturbed_explanation": "The perturbed statement is incorrect because the average performance gain of BOLA across datasets is approximately 2.5 percentage points (not 5.6), and the largest single-dataset improvement occurs on Arabic Twitter (8.8 points), not on German Twitter (which only shows a 1.6-point gain).", "claim": "By employing Bayesian Optimization to tune local aggregation weights (BOLA), DAMe adaptively adjusts client contributions, mitigating non-IID heterogeneity; this yields an average accuracy increase of 2.5 percentage points over the no-BOLA variant, with the largest gain (8.8 points) observed on Arabic Twitter.", "label": true }, { "paperid": "2410.20797v1", "paper_path": 
"./SciVer/papers/2410.20797v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.20797v1-Table1-1.png", "request_id": 87, "origin_statement": "Rplg obtains 40.74% accuracy on TinyImageNet, surpassing the next best Sdct by 4.24%. This suggests its instance-dependent dynamic label disambiguation adapts better to the high intra-class variance, improving robustness on complex datasets compared to static candidate pruning approaches.", "perturbed_statement": "Rplg obtains 42.74% accuracy on TinyImageNet, surpassing the next best Sdct by 2.24%. This suggests its instance-dependent dynamic label disambiguation adapts better to the high intra-class variance, improving robustness on complex datasets compared to static candidate pruning approaches.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 shows Rplg’s TinyImageNet accuracy is 40.74%, not 42.74%, and the margin over Sdct (36.50%) is 4.24%, not 2.24%.", "claim": "Rplg obtains 40.74% accuracy on TinyImageNet, surpassing the next best Sdct by 4.24%. This suggests its instance-dependent dynamic label disambiguation adapts better to the high intra-class variance, improving robustness on complex datasets compared to static candidate pruning approaches.", "label": true }, { "paperid": "2410.21705v1", "paper_path": "./SciVer/papers/2410.21705v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1", "4.2" ], "image_path": "./SciVer/images/2410.21705v1-Table1-1.png", "request_id": 89, "origin_statement": "AdaptGCD's use of eight experts with half dedicated to new classes allows dynamic feature specialization, yielding 0.8%, 3.9%, and 3.1% “All” accuracy gains over SimGCD on CIFAR10, CIFAR100, and ImageNet100 by mitigating class imbalance and overfitting.", "perturbed_statement": "AdaptGCD's use of four experts with two dedicated to new classes allows dynamic feature specialization, yielding 1.5%, 2.9%, and 4.1% “All” accuracy gains over SimGCD on CIFAR10, CIFAR100, and ImageNet100 by mitigating class imbalance and overfitting.", "perturbed_explanation": "The perturbation is incorrect because AdaptGCD employs eight experts (not four) and the actual improvements over SimGCD on the “All” accuracy are 0.8%, 3.9%, and 3.1% (not 1.5%, 2.9%, and 4.1%).", "claim": "AdaptGCD's use of eight experts with half dedicated to new classes allows dynamic feature specialization, yielding 0.8%, 3.9%, and 3.1% “All” accuracy gains over SimGCD on CIFAR10, CIFAR100, and ImageNet100 by mitigating class imbalance and overfitting.", "label": true }, { "paperid": "2411.01006v2", "paper_path": "./SciVer/papers/2411.01006v2.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.01006v2_figure_8.png", "request_id": 96, "origin_statement": "The t-SNE visualization of VQShape’s discrete codes reveals localized clusters of points, indicating that subsequence shape primitives are effectively grouped by similarity in latent space, thanks to vector quantization and entropy regularization.", "perturbed_statement": "The t-SNE plot of VQShape codes preserves global pairwise distances, so the largest separations between clusters directly reflect the most dissimilar shape prototypes in the high-dimensional codebook due to the β weighting in the vector-quantization loss.", "perturbed_explanation": "This statement is wrong because t-SNE is designed to preserve only local neighbor relations, not 
global distances, so large separations do not quantitatively indicate the most dissimilar shapes. Additionally, VQShape’s pre-training uses hyperparameters α and γ for loss weighting, not β.", "claim": "The t-SNE visualization of VQShape’s discrete codes reveals localized clusters of points, indicating that subsequence shape primitives are effectively grouped by similarity in latent space, thanks to vector quantization and entropy regularization.", "label": true }, { "paperid": "2411.07253v1", "paper_path": "./SciVer/papers/2411.07253v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "7.1" ], "image_path": "./SciVer/images/2411.07253v1-Table2-1.png", "request_id": 101, "origin_statement": "Leveraging the Barzilai–Borwein adaptive step-size, SPGMO reduces average iterations by over 98% on Imbalance2 versus PGMO by compensating for gradient scaling disparities in imbalanced objectives.", "perturbed_statement": "By incorporating Nesterov’s acceleration, SPGMO reduces average iterations by over 98% on Imbalance2 versus PGMO, owing to momentum-based smoothing of oscillatory gradients.", "perturbed_explanation": "This claim is incorrect because SPGMO does not use Nesterov’s acceleration; it relies on the Barzilai–Borwein adaptive step-size without any momentum term. Nesterov’s acceleration is only applied in APGMO/ASPGMO, not in SPGMO, so the stated mechanism contradicts the context.", "claim": "Leveraging the Barzilai–Borwein adaptive step-size, SPGMO reduces average iterations by over 98% on Imbalance2 versus PGMO by compensating for gradient scaling disparities in imbalanced objectives.", "label": true }, { "paperid": "2411.13607v2", "paper_path": "./SciVer/papers/2411.13607v2.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.13607v2_figure_6.png", "request_id": 102, "origin_statement": "In Fig.6, VioPose’s hierarchical structure and bidirectional temporal mixing enable predicted 3D keypoints to closely track ground truth, reducing red-circled misestimations—especially in rapid, subtle wrist motions—compared to other SoTA methods, demonstrating superior fine-grained motion fidelity.", "perturbed_statement": "In Fig.6, VioPose’s hierarchical structure and unidirectional temporal mixing enable predicted 3D keypoints to closely track ground truth, reducing red-circled misestimations—especially in rapid, subtle elbow motions—compared to other SoTA methods, demonstrating superior fine-grained motion fidelity.", "perturbed_explanation": "The statement is wrong because VioPose uses bidirectional temporal mixing modules (section 3.4), not unidirectional ones. 
It also misattributes the reduction of misestimations to elbow motions, whereas the context and Table 3 emphasize gains in subtle wrist articulations (right and left wrists).", "claim": "In Fig.6, VioPose’s hierarchical structure and bidirectional temporal mixing enable predicted 3D keypoints to closely track ground truth, reducing red-circled misestimations—especially in rapid, subtle wrist motions—compared to other SoTA methods, demonstrating superior fine-grained motion fidelity.", "label": true }, { "paperid": "2410.07970v1", "paper_path": "./SciVer/papers/2410.07970v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "2.4.4" ], "image_path": "./SciVer/images/2410.07970v1-Table5-1.png", "request_id": 103, "origin_statement": "The predominance of buy-side firms (over 60%) combined with their under-40% workforce share implies lean, specialized asset management operations, whereas sell-side firms, though fewer (<33%), employ over 60% of staff, reflecting labor-intensive trading and client advisory services.", "perturbed_statement": "The predominance of buy-side firms (over 60%) combined with their under-40% workforce share implies their operations are labor-intensive requiring large teams, whereas sell-side firms, though fewer (<33%), employ only 40% of staff, suggesting streamlined, automated trading services.", "perturbed_explanation": "It is incorrect to describe buy-side operations as labor-intensive with large teams because the context specifies they employ less than 40% of the workforce, indicating lean staffing. It is also wrong to state sell-side firms employ only 40% of staff; the context shows they employ over 60% of the total workforce.", "claim": "The predominance of buy-side firms (over 60%) combined with their under-40% workforce share implies lean, specialized asset management operations, whereas sell-side firms, though fewer (<33%), employ over 60% of staff, reflecting labor-intensive trading and client advisory services.", "label": true }, { "paperid": "2411.00513v1", "paper_path": "./SciVer/papers/2411.00513v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.00513v1-Table3-1.png", "request_id": 105, "origin_statement": "In System C, the larger Si core mass of 1.58 M☉ corresponds to the highest predicted SN kinetic energy of 0.93×10^51 erg, suggesting that an increasing Si core mass strengthens the explosion shock, boosting NS natal kick velocities due to more energetic ejecta.", "perturbed_statement": "In System B, the larger Si core mass of 1.58 M☉ corresponds to the highest predicted SN kinetic energy of 0.93×10^51 erg, suggesting that an increasing Si core mass strengthens the explosion shock, boosting NS natal kick velocities due to more energetic ejecta.", "perturbed_explanation": "This is incorrect because System B’s Si core mass is actually 1.45 M☉ (not 1.58 M☉) and its SN kinetic energy is only 0.70×10^51 erg (not 0.93×10^51 erg).", "claim": "In System C, the larger Si core mass of 1.58 M☉ corresponds to the highest predicted SN kinetic energy of 0.93×10^51 erg, suggesting that an increasing Si core mass strengthens the explosion shock, boosting NS natal kick velocities due to more energetic ejecta.", "label": true }, { "paperid": "2411.00049v1", "paper_path": "./SciVer/papers/2411.00049v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.00049v1-Table1-1.png", "request_id": 111, 
"origin_statement": "The iterative Ripper approach allocates rules in chunks, reducing Hatespeech memory use from 30.54 GiB to 13.45 GiB (∼56%) while raising classification accuracy from 89.30% to 92.64%, demonstrating that iterative constraint application can simultaneously improve resource efficiency and predictive performance.", "perturbed_statement": "The iterative Ripper approach allocates rules in chunks, reducing Hatespeech memory use by only ∼25% while boosting classification accuracy by over 6%, suggesting that iterative constraint application significantly enhances both resource efficiency and predictive power.", "perturbed_explanation": "The statement is wrong because memory consumption on Hatespeech drops from 30.54 GiB to 13.45 GiB, a reduction of about 55.9%, not 25%. Additionally, accuracy increases from 89.30% to 92.64%, a gain of only ~3.3%, not over 6%.", "claim": "The iterative Ripper approach allocates rules in chunks, reducing Hatespeech memory use from 30.54 GiB to 13.45 GiB (∼56%) while raising classification accuracy from 89.30% to 92.64%, demonstrating that iterative constraint application can simultaneously improve resource efficiency and predictive performance.", "label": true }, { "paperid": "2411.16114v1", "paper_path": "./SciVer/papers/2411.16114v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.16114v1_figure_2(a).png", "request_id": 112, "origin_statement": "In a Milky Way-like halo (Mhalo≈1×10^12 M⊙) with an FDM particle of mass m=10−22 eV/c2, the Newtonian potential |Φ|/c2 surpasses the dominant gravitomagnetic potential |Φ_m1|/c2 by ∼10^9 over 10^−5–10 kpc, rendering Lense–Thirring frame-dragging negligible for galactic-scale stellar orbits.", "perturbed_statement": "In a Milky Way-like halo (Mhalo≈1×10^12 M⊙) with an FDM particle of mass m=10−21 eV/c2, the Newtonian potential |Φ|/c2 surpasses the dominant gravitomagnetic potential |Φ_m1|/c2 by ∼10^7 over 10^−5–10 kpc, rendering Lense–Thirring frame-dragging negligible for galactic-scale stellar orbits.", "perturbed_explanation": "This is incorrect because the context specifies the FDM particle mass as m=10−22 eV/c2, not 10−21 eV/c2, and the plotted values show |Φ|/c2 ≈10^−5 versus |Φ_m1|/c2 ≈10^−15, a difference of ∼10^10 (≈10^9), not ∼10^7.", "claim": "In a Milky Way-like halo (Mhalo≈1×10^12 M⊙) with an FDM particle of mass m=10−22 eV/c2, the Newtonian potential |Φ|/c2 surpasses the dominant gravitomagnetic potential |Φ_m1|/c2 by ∼10^9 over 10^−5–10 kpc, rendering Lense–Thirring frame-dragging negligible for galactic-scale stellar orbits.", "label": true }, { "paperid": "2410.22378v1", "paper_path": "./SciVer/papers/2410.22378v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2410.22378v1_figure_3.png", "request_id": 120, "origin_statement": "Figure 3a shows that η monotonically decreases with increasing Dₑ for all a; moreover, larger a (stiffer Morse potentials) yield higher baseline η at low Dₑ, indicating that steeper potentials amplify zero-point vibrational amplitude and thus enhance quantum correction to the vibrational Stark slope.", "perturbed_statement": "Figure 3a shows that η monotonically increases with increasing Dₑ for all a; moreover, larger a (stiffer Morse potentials) yield higher baseline η at low Dₑ, indicating that steeper potentials amplify zero-point vibrational amplitude and thus enhance quantum correction to the vibrational Stark 
slope.", "perturbed_explanation": "The perturbation incorrectly states that η increases with increasing Dₑ, but Figure 3a clearly shows η decreasing as Dₑ increases for all values of a, contradicting the plotted downward-sloping curves.", "claim": "Figure 3a shows that η monotonically decreases with increasing Dₑ for all a; moreover, larger a (stiffer Morse potentials) yield higher baseline η at low Dₑ, indicating that steeper potentials amplify zero-point vibrational amplitude and thus enhance quantum correction to the vibrational Stark slope.", "label": true }, { "paperid": "2411.06184v1", "paper_path": "./SciVer/papers/2411.06184v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.06184v1-Table1-1.png", "request_id": 123, "origin_statement": "Utilizing nine intensity discretization strategies enhances the sensitivity of GLCM-derived contrast and entropy, as these 23 second-order features capture subtle spatial heterogeneity, thereby improving differentiation of aggressive tumor phenotypes under varying ROI segmentations.", "perturbed_statement": "Utilizing seven intensity discretization strategies enhances the sensitivity of GLCM-derived contrast and entropy, as these 30 second-order features capture subtle spatial heterogeneity, thereby improving differentiation of aggressive tumor phenotypes under varying ROI segmentations.", "perturbed_explanation": "The perturbed statement is incorrect because the study generated nine discretization strategies (not seven) and extracted 23 GLCM features (not 30), as specified in the context and Table 1.", "claim": "Utilizing nine intensity discretization strategies enhances the sensitivity of GLCM-derived contrast and entropy, as these 23 second-order features capture subtle spatial heterogeneity, thereby improving differentiation of aggressive tumor phenotypes under varying ROI segmentations.", "label": true }, { "paperid": "2410.15592v2", "paper_path": "./SciVer/papers/2410.15592v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.15592v2-Table1-1.png", "request_id": 125, "origin_statement": "Under the dataset’s 40% identity constraint, ColabFold-based AlphaFold2 predictions yield an average pLDDT of 92.4%, 9.7% higher than OmegaFold’s 82.7%, underscoring MSA-driven transformers’ enhanced fold accuracy over single-sequence models.", "perturbed_statement": "Under the dataset’s 30% identity constraint, ColabFold-based AlphaFold2 predictions yield an average pLDDT of 92.4%, 9.7% higher than OmegaFold’s 82.7%, underscoring MSA-driven transformers’ enhanced fold accuracy over single-sequence models.", "perturbed_explanation": "The statement incorrectly claims a 30% sequence identity filtering threshold, whereas the context specifies a 40% threshold for the non-redundant CATH dataset. 
This discrepancy makes the perturbed statement factually wrong.", "claim": "Under the dataset’s 40% identity constraint, ColabFold-based AlphaFold2 predictions yield an average pLDDT of 92.4%, 9.7% higher than OmegaFold’s 82.7%, underscoring MSA-driven transformers’ enhanced fold accuracy over single-sequence models.", "label": true }, { "paperid": "2411.18243v1", "paper_path": "./SciVer/papers/2411.18243v1.json", "claim_type": "analytical", "type": "chart", "item": "1(b)", "section": [ "2.1" ], "image_path": "./SciVer/images/2411.18243v1_figure_1(b).png", "request_id": 126, "origin_statement": "The detection of two low-frequency QPOs at identical centroid frequencies in both Fermi/GBM (8-800 keV) and ASIM/HED (0.3-20 MeV) PDS with Leahy normalization and Q-factors of ~12 and ~9 suggests a common, coherent magnetohydrodynamic oscillation mechanism in the GRB central engine, consistent with jet precession models.", "perturbed_statement": "The detection of two low-frequency QPOs at identical centroid frequencies in both Fermi/GBM (8-800 keV) and ASIM/HED (0.3-10 MeV) PDS with RMS normalization and Q-factors of ~5 and ~4 suggests a common, coherent magnetohydrodynamic oscillation mechanism in the GRB central engine, consistent with jet precession models.", "perturbed_explanation": "The perturbed statement incorrectly lists the ASIM/HED energy band as 0.3–10 MeV instead of the correct 0.3–20 MeV, misidentifies the PDS normalization as RMS rather than Leahy normalization, and underestimates the Q-factors (true values ~12 and ~9).", "claim": "The detection of two low-frequency QPOs at identical centroid frequencies in both Fermi/GBM (8-800 keV) and ASIM/HED (0.3-20 MeV) PDS with Leahy normalization and Q-factors of ~12 and ~9 suggests a common, coherent magnetohydrodynamic oscillation mechanism in the GRB central engine, consistent with jet precession models.", "label": true }, { "paperid": "2409.07796v1", "paper_path": "./SciVer/papers/2409.07796v1.json", "claim_type": "analytical", "type": "chart", "item": "2(b)", "section": [ "1" ], "image_path": "./SciVer/images/2409.07796v1_figure_2(b).png", "request_id": 132, "origin_statement": "After fine-tuning, EfficientNets exhibit diminishing returns: the 0.4 GFLOPs B0 yields only a 1% gain, whereas the 2.3 GFLOPs B2 sees a 4% boost, highlighting that models with richer feature representations benefit more from domain-specific adaptation.", "perturbed_statement": "After fine-tuning, EfficientNet-B0 gains 3% accuracy while the mid-complexity B2 model (2.3 GFLOPs) only improves by 2%, implying that larger models suffer diminishing adaptation benefits due to overfitting to source-domain features.", "perturbed_explanation": "The perturbation misstates the observed accuracy gains: according to the figure, B0’s improvement is about 1% (from ~68% to ~69%), not 3%, and B2’s gain is about 4% (from ~74% to ~78%), not 2%, reversing both the magnitudes and the trend.", "claim": "After fine-tuning, EfficientNets exhibit diminishing returns: the 0.4 GFLOPs B0 yields only a 1% gain, whereas the 2.3 GFLOPs B2 sees a 4% boost, highlighting that models with richer feature representations benefit more from domain-specific adaptation.", "label": true }, { "paperid": "2411.05689v1", "paper_path": "./SciVer/papers/2411.05689v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.05689v1-Table1-1.png", "request_id": 133, "origin_statement": "deg represents the maximum total degree of the polynomial, 
computed as the highest sum of exponents in any row of the powers matrix. Thus, adjusting coefs alone cannot alter deg unless new monomials with larger exponent sums are added to powers.", "perturbed_statement": "deg represents the maximum total degree of the polynomial, computed as the highest product of exponents in any row of the powers matrix. Thus, increasing any coefficient above 1 in coefs will proportionally raise deg.", "perturbed_explanation": "The statement is incorrect because, according to Table 1, deg is defined as the maximum sum of the powers in each row, not the product of exponents. Additionally, deg depends solely on the powers matrix, so changing coefficients in coefs cannot alter the polynomial’s degree.", "claim": "deg represents the maximum total degree of the polynomial, computed as the highest sum of exponents in any row of the powers matrix. Thus, adjusting coefs alone cannot alter deg unless new monomials with larger exponent sums are added to powers.", "label": true }, { "paperid": "2411.07664v1", "paper_path": "./SciVer/papers/2411.07664v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4", "4.1", "4.2" ], "image_path": "./SciVer/images/2411.07664v1_figure_4.png", "request_id": 138, "origin_statement": "The A rating frequency mirrors transformer-based model depth: GPT-4 (82.6%) outperforms Mixtral (79%) and Llama2 (50%), implying that richer self-attention layers improve encoder’s capacity for spatial relation mapping, consistent with positional encoding principles enhancing relative placement inference.", "perturbed_statement": "The A rating frequency mirrors transformer-based model depth: GPT-4 (92.6%) outperforms Mixtral (79%) and Llama2 (50%), implying that richer self-attention layers improve encoder’s capacity for spatial relation mapping, consistent with positional encoding principles enhancing relative placement inference.", "perturbed_explanation": "The perturbed statement incorrectly claims that GPT-4 achieved a 92.6% A rating. According to Figure 4 and the accompanying text, GPT-4’s actual A rating (accuracy) is 82.6%, so the 92.6% figure contradicts the reported result.", "claim": "The A rating frequency mirrors transformer-based model depth: GPT-4 (82.6%) outperforms Mixtral (79%) and Llama2 (50%), implying that richer self-attention layers improve encoder’s capacity for spatial relation mapping, consistent with positional encoding principles enhancing relative placement inference.", "label": true }, { "paperid": "2410.13114v1", "paper_path": "./SciVer/papers/2410.13114v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.13114v1-Table2-1.png", "request_id": 143, "origin_statement": "All seven audited audio datasets omit documentation of sexual orientation, and six omit both race/ethnicity and disability, indicating systemic underreporting of protected classes. 
This lack of demographic metadata undermines fairness audits and may exacerbate biases in voice-based AI systems unable to correct unobserved disparities.", "perturbed_statement": "Six of the seven audited audio datasets omit documentation of sexual orientation, yet two record race/ethnicity and disability, reflecting partial attention to demographic representativeness that nonetheless leaves significant fairness gaps.", "perturbed_explanation": "This statement is incorrect because Table 2 shows that all seven datasets lack sexual orientation metadata (not six), and none of the datasets document race/ethnicity or disability (not two), directly contradicting the claimed counts.", "claim": "All seven audited audio datasets omit documentation of sexual orientation, and six omit both race/ethnicity and disability, indicating systemic underreporting of protected classes. This lack of demographic metadata undermines fairness audits and may exacerbate biases in voice-based AI systems unable to correct unobserved disparities.", "label": true }, { "paperid": "2411.17640v1", "paper_path": "./SciVer/papers/2411.17640v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "3.4" ], "image_path": "./SciVer/images/2411.17640v1-Table4-1.png", "request_id": 145, "origin_statement": "The 318±2-minute TTV detected for TOI-5552.01 indicates a >10σ deviation, consistent with strong gravitational perturbations from a near-resonant companion, which could induce radial-velocity signals of ∼10 m/s, detectable with high-precision spectrographs given the host’s T-mag≈12 brightness.", "perturbed_statement": "The 318±2-second TTV detected for TOI-5552.01 indicates a ~3σ deviation, suggesting moderate gravitational perturbations and expected ∼10 m/s radial-velocity signals, feasible for follow-up given the star’s T-mag≈12 brightness.", "perturbed_explanation": "This statement is wrong because Table 4 reports a TTV amplitude of 318±2 minutes (not seconds) and a significance of σ=10.5 (>10σ), not ~3σ. 
These details contradict the context’s measured units and reported detection significance.", "claim": "The 318±2-minute TTV detected for TOI-5552.01 indicates a >10σ deviation, consistent with strong gravitational perturbations from a near-resonant companion, which could induce radial-velocity signals of ∼10 m/s, detectable with high-precision spectrographs given the host’s T-mag≈12 brightness.", "label": true }, { "paperid": "2411.18328v1", "paper_path": "./SciVer/papers/2411.18328v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.18328v1-Table2-1.png", "request_id": 151, "origin_statement": "Incorporating the Event-Point Encoder into the contrastive learning baseline raises SeAct Top-1 accuracy by 4.31% (66.37→70.68), and further integrating the Spiking-like Contextual Learner adds another 1.72% (70.68→72.41), indicating synergistic enhancement of event-point representation.", "perturbed_statement": "Incorporating the Event-Point Encoder into the contrastive learning baseline raises SeAct Top-1 accuracy by 5.31% (66.37→70.68), and further integrating the Spiking-like Contextual Learner adds another 0.98% (70.68→72.41), indicating synergistic enhancement of event-point representation.", "perturbed_explanation": "The perturbed statement misreports the performance gains: the SeAct Top-1 jump from 66.37 to 70.68 is a 4.31% increase (not 5.31%), and adding SCL raises accuracy from 70.68 to 72.41 by 1.73% (not 0.98%), contradicting the table values.", "claim": "Incorporating the Event-Point Encoder into the contrastive learning baseline raises SeAct Top-1 accuracy by 4.31% (66.37→70.68), and further integrating the Spiking-like Contextual Learner adds another 1.72% (70.68→72.41), indicating synergistic enhancement of event-point representation.", "label": true }, { "paperid": "2411.01960v1", "paper_path": "./SciVer/papers/2411.01960v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6" ], "image_path": "./SciVer/images/2411.01960v1-Table3-1.png", "request_id": 153, "origin_statement": "Magnetic energy in HH 211 MMS derived via DCF (~1.8×10^43 erg) exceeds its gravitational binding (~1.1×10^43 erg), implying magnetic pressure can counteract collapse; conversely, in IC 348 MMS gravity (~1.0×10^43 erg) dominates over DCF magnetic energy (~8.8×10^42 erg), promoting contraction.", "perturbed_statement": "Magnetic energy in HH 211 MMS via DCF (~1.8×10^42 erg) is lower than its gravitational binding (~1.1×10^43 erg), indicating magnetic pressure is insufficient to prevent collapse; conversely, in IC 348 MMS DCF magnetic energy (~8.8×10^43 erg) exceeds gravity (~1.0×10^43 erg), promoting magnetic support.", "perturbed_explanation": "The perturbed statement misstates the DCF magnetic energies by an order of magnitude. Table 3 gives EB,DCF=18×10^42 erg (1.8×10^43 erg) for HH 211 MMS, not 1.8×10^42 erg, and EB,DCF=8.8×10^42 erg (8.8×10^42 erg) for IC 348 MMS, not 8.8×10^43 erg. 
These incorrect values contradict the table's units and entries.", "claim": "Magnetic energy in HH 211 MMS derived via DCF (~1.8×10^43 erg) exceeds its gravitational binding (~1.1×10^43 erg), implying magnetic pressure can counteract collapse; conversely, in IC 348 MMS gravity (~1.0×10^43 erg) dominates over DCF magnetic energy (~8.8×10^42 erg), promoting contraction.", "label": true }, { "paperid": "2411.01543v1", "paper_path": "./SciVer/papers/2411.01543v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.01543v1-Table2-1.png", "request_id": 155, "origin_statement": "By reducing the split filter NRMSE from 32.7% to 2.8%, A-SWIFT’s enhanced modeling of angular‐dependent beam modulation effectively corrects partial photon penetration errors at mixed material boundaries, indicating that accurate penumbra spectrum reconstruction critically depends on accounting for submodule edge scattering effects.", "perturbed_statement": "By reducing the split filter NRMSE from 32.7% to 8.6%, A-SWIFT’s enhanced modeling of the beam’s energy distribution effectively corrects Compton scattering–induced artifacts, indicating that accurate penumbra spectrum reconstruction critically depends on accounting for photoelectric absorption effects.", "perturbed_explanation": "This statement is incorrect because A-SWIFT reduces the split filter NRMSE to 2.8%, not 8.6%, and its main improvement addresses submodule edge scattering in the penumbra, rather than correcting Compton scattering or relying primarily on photoelectric absorption effects.", "claim": "By reducing the split filter NRMSE from 32.7% to 2.8%, A-SWIFT’s enhanced modeling of angular‐dependent beam modulation effectively corrects partial photon penetration errors at mixed material boundaries, indicating that accurate penumbra spectrum reconstruction critically depends on accounting for submodule edge scattering effects.", "label": true }, { "paperid": "2409.19148v1", "paper_path": "./SciVer/papers/2409.19148v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.19148v1-Table4-1.png", "request_id": 157, "origin_statement": "GPT-4 dramatically overpredicts persuasive devices: while gold labels mark 59% of texts as non-persuasive, GPT-4 assigns 'None' only 12.3%, inflating total predictions to 24,209 vs. 7,465 gold. Confidence thresholding corrects this class prior mismatch, raising binary F1 to 0.608.", "perturbed_statement": "GPT-4 dramatically underpredicts persuasive devices: while gold labels mark 59% of texts as non-persuasive, GPT-4 assigns 'None' 46.7%, inflating total predictions to 24,209 vs. 7,465 gold. Confidence thresholding corrects this class prior mismatch, raising binary F1 to 0.72.", "perturbed_explanation": "The perturbed statement is incorrect because GPT-4 actually assigns 'None' to only 12.3% of texts, not 46.7% (Table 4). It also misreports the optimized F1 score as 0.72, whereas the true maximum F1 after thresholding is 0.608. Additionally, the model overpredicts persuasive instances, not underpredicts them.", "claim": "GPT-4 dramatically overpredicts persuasive devices: while gold labels mark 59% of texts as non-persuasive, GPT-4 assigns 'None' only 12.3%, inflating total predictions to 24,209 vs. 7,465 gold. 
Confidence thresholding corrects this class prior mismatch, raising binary F1 to 0.608.", "label": true }, { "paperid": "2409.20332v1", "paper_path": "./SciVer/papers/2409.20332v1.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "4.2", "4.2.2" ], "image_path": "./SciVer/images/2409.20332v1_figure_4(a).png", "request_id": 158, "origin_statement": "In the MDS projection, the closest alignment between Lad’s synthetic volumes and real data ellipses suggests that its generated samples exhibit minimal pairwise feature dissimilarities, implying superior anatomical fidelity relative to Medical Diffusion and HA-GAN as predicted by classical MDS preserving Euclidean proximities.", "perturbed_statement": "In the MDS projection, Medical Diffusion’s embedding ellipse most closely aligns with the real data, indicating that Medical Diffusion’s synthetic volumes exhibit minimal pairwise feature dissimilarities, implying superior anatomical fidelity compared to Lad and HA-GAN, as predicted by classical MDS preserving Euclidean proximities.", "perturbed_explanation": "The perturbed statement wrongly attributes the closest overlap to Medical Diffusion. According to Figure 4 and the context, it is Lad whose embedding ellipse shows the highest degree of overlap with the real data distribution.", "claim": "In the MDS projection, the closest alignment between Lad’s synthetic volumes and real data ellipses suggests that its generated samples exhibit minimal pairwise feature dissimilarities, implying superior anatomical fidelity relative to Medical Diffusion and HA-GAN as predicted by classical MDS preserving Euclidean proximities.", "label": true }, { "paperid": "2411.12846v1", "paper_path": "./SciVer/papers/2411.12846v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.12846v1-Table1-1.png", "request_id": 159, "origin_statement": "With just 3 of 18 datasets reporting Fitzpatrick skin type and only 7 including ethnicity, most melanoma image collections lack demographic diversity; PAD-UFES-20 and DDI exemplify how integrating detailed skin tone and lesion metadata can enhance AI model robustness, fairness, and generalizability across diverse patient populations.", "perturbed_statement": "With just 3 of 18 datasets reporting Fitzpatrick skin type and only 2 including ethnicity, most melanoma image collections lack demographic diversity; PAD-UFES-20 and DDI exemplify how integrating detailed skin tone and lesion metadata can enhance AI model robustness, fairness, and generalizability across diverse patient populations.", "perturbed_explanation": "The perturbed statement incorrectly claims that only 2 out of 18 datasets include ethnicity, while the context specifies that 7 datasets provide ethnicity in their metadata.", "claim": "With just 3 of 18 datasets reporting Fitzpatrick skin type and only 7 including ethnicity, most melanoma image collections lack demographic diversity; PAD-UFES-20 and DDI exemplify how integrating detailed skin tone and lesion metadata can enhance AI model robustness, fairness, and generalizability across diverse patient populations.", "label": true }, { "paperid": "2410.02674v1", "paper_path": "./SciVer/papers/2410.02674v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.02674v1-Table2-1.png", "request_id": 161, "origin_statement": "CANINE-s relative clusters 3 and 7 group AA and WS tokens primarily via 'r'-related 
grapheme transformations (er→ah, er→a, r→’), yielding low word-level semantic coherence (0.25 and 0.27), which indicates clustering is driven by phonological edit patterns rather than lexical semantics.", "perturbed_statement": "CANINE-s relative clusters 3 and 7 group AA and WS tokens primarily via 'r'-related grapheme transformations (er→ah, er→a, r→’), yielding high word-level semantic coherence (0.75 and 0.78), which indicates clustering is driven by phonological edit patterns rather than lexical semantics.", "perturbed_explanation": "The perturbation incorrectly states that clusters 3 and 7 have high word-level semantic coherence of 0.75 and 0.78. The context specifies these clusters actually exhibit low coherence scores of 0.25 and 0.27, so the claimed values contradict the documented data.", "claim": "CANINE-s relative clusters 3 and 7 group AA and WS tokens primarily via 'r'-related grapheme transformations (er→ah, er→a, r→’), yielding low word-level semantic coherence (0.25 and 0.27), which indicates clustering is driven by phonological edit patterns rather than lexical semantics.", "label": true }, { "paperid": "2410.02001v2", "paper_path": "./SciVer/papers/2410.02001v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2410.02001v2-Table3-1.png", "request_id": 163, "origin_statement": "By excluding noisy spectral bands above the SNR threshold, CFBS0.98 reduces misclassified objects from 318 (FBS) to 124—a 61% decrease—demonstrating that noise-driven filter selection enhances classification accuracy by focusing on high-information wavelengths with a compact filter set.", "perturbed_statement": "By excluding noisy spectral bands above the SNR threshold, CFBS0.95 reduces misclassified objects from 318 (FBS) to 174—a 75% decrease—demonstrating that noise-driven filter selection enhances classification accuracy by focusing on high-information wavelengths with a compact filter set.", "perturbed_explanation": "The statement is incorrect because CFBS₀.₉₅ actually reduces the misclassifications from 318 to 174, which corresponds to only a ~45% decrease, not 75% as claimed (Table 3 shows 318 for FBS vs. 174 for CFBS₀.₉₅).", "claim": "By excluding noisy spectral bands above the SNR threshold, CFBS0.98 reduces misclassified objects from 318 (FBS) to 124—a 61% decrease—demonstrating that noise-driven filter selection enhances classification accuracy by focusing on high-information wavelengths with a compact filter set.", "label": true }, { "paperid": "2410.05046v1", "paper_path": "./SciVer/papers/2410.05046v1.json", "claim_type": "analytical", "type": "chart", "item": "1(b)", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.05046v1_figure_1(b).png", "request_id": 164, "origin_statement": "The relatively narrow interquartile range (~70–75%) for DRUG entities in the span-based metric indicates high annotation consistency, likely reflecting the use of standardized terminologies like RxNorm and consistent dosage expressions across clinical records.", "perturbed_statement": "The relatively narrow interquartile range (~70–75%) for GENE VARIANT entities in the span-based metric indicates high annotation consistency, likely reflecting the use of standardized terminologies like HGVS and consistent variant notation across clinical records.", "perturbed_explanation": "Figure 1 shows that gene variant entities actually have a wider interquartile range (approximately 35–65%) with a median around 50%, not ~70–75%. 
This contradicts the claim that gene variant annotations are as consistent as drug entities.", "claim": "The relatively narrow interquartile range (~70–75%) for DRUG entities in the span-based metric indicates high annotation consistency, likely reflecting the use of standardized terminologies like RxNorm and consistent dosage expressions across clinical records.", "label": true }, { "paperid": "2411.17459v1", "paper_path": "./SciVer/papers/2411.17459v1.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "6" ], "image_path": "./SciVer/images/2411.17459v1-Table6-1.png", "request_id": 169, "origin_statement": "By applying a 3D wavelet transform WT(·) to volumetric data and reconstructing via IWT(·), one can decompose the signal into subbands S^abc_l capturing directional details; combining all subbands W^(l) at each scale preserves total signal energy due to orthonormal filter banks used in the transform.", "perturbed_statement": "By applying a 2D wavelet transform WT(·) to volumetric data and reconstructing via IWT(·), one can decompose the signal into subbands S^abc_l capturing directional details; combining all subbands W^(l) at each scale doubles the total signal energy due to redundant filter banks used in the transform.", "perturbed_explanation": "This statement is incorrect because the transform WT(·) in the context is a 3D wavelet transform, not 2D, and the set W^(l) of subbands in each layer preserves the original signal energy rather than doubling it. Additionally, the filter banks are orthonormal (non-redundant), so they do not introduce extra energy.", "claim": "By applying a 3D wavelet transform WT(·) to volumetric data and reconstructing via IWT(·), one can decompose the signal into subbands S^abc_l capturing directional details; combining all subbands W^(l) at each scale preserves total signal energy due to orthonormal filter banks used in the transform.", "label": true }, { "paperid": "2411.00249v1", "paper_path": "./SciVer/papers/2411.00249v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "6.2", "6.4", "7" ], "image_path": "./SciVer/images/2411.00249v1-Table2-1.png", "request_id": 171, "origin_statement": "GraphC exhibits sub-linear split scaling: Chess (55,779 edges) requires 41 Harary splits (≈1,360 edges/split), while WikiConflict (2,025,910 edges) needs 32 splits (≈63,309 edges/split), implying that in denser larger graphs each split partitions more edges, aligning with spectral cut principles.", "perturbed_statement": "GraphC exhibits sub-linear split scaling: Chess (55,779 edges) requires 41 Harary splits (≈1,360 edges/split), while WikiConflict (2,025,910 edges) needs 50 splits (≈40,518 edges/split), implying that in denser larger graphs each split partitions more edges, aligning with spectral cut principles.", "perturbed_explanation": "The perturbed statement wrongly states that WikiConflict uses 50 Harary splits and ≈40,518 edges per split. 
Table 2 shows WikiConflict actually uses 32 splits, yielding ≈63,309 edges per split, so both the split count and edges/split figure are incorrect.", "claim": "GraphC exhibits sub-linear split scaling: Chess (55,779 edges) requires 41 Harary splits (≈1,360 edges/split), while WikiConflict (2,025,910 edges) needs 32 splits (≈63,309 edges/split), implying that in denser larger graphs each split partitions more edges, aligning with spectral cut principles.", "label": true }, { "paperid": "2409.12428v1", "paper_path": "./SciVer/papers/2409.12428v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.2", "5.3" ], "image_path": "./SciVer/images/2409.12428v1_figure_6.png", "request_id": 172, "origin_statement": "On the BAF dataset, the PR in-processing algorithm maintained equalized odds within ±0.01 of the ideal fairness line (EO scores 0.99–1.00) with under 2% drop in weighted F1 across covariate drift (t0 to t2), indicating its integrated fairness penalty effectively regularizes decision boundaries against distribution shifts.", "perturbed_statement": "On the BAF dataset, the PR in-processing algorithm maintained equalized odds within ±0.1 of the ideal fairness line (EO scores 0.90–1.00) with under 10% drop in weighted F1 across covariate drift (t0 to t3), indicating its integrated fairness penalty effectively regularizes decision boundaries against distribution shifts.", "perturbed_explanation": "This statement incorrectly expands the EO deviation to ±0.1 and EO scores as low as 0.90, whereas Figure 6 shows EO scores strictly between 0.99 and 1.00 (±0.01). It also misstates the F1 drop as under 10% and refers to a shift from t0 to t3; the actual F1 decline is under 2% across t0 to t2 only.", "claim": "On the BAF dataset, the PR in-processing algorithm maintained equalized odds within ±0.01 of the ideal fairness line (EO scores 0.99–1.00) with under 2% drop in weighted F1 across covariate drift (t0 to t2), indicating its integrated fairness penalty effectively regularizes decision boundaries against distribution shifts.", "label": true }, { "paperid": "2411.17977v1", "paper_path": "./SciVer/papers/2411.17977v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2411.17977v1-Table2-1.png", "request_id": 173, "origin_statement": "The power-law form yields the lowest AIC (30.20) and BIC (25.78) with ΔAIC>8.8 relative to other shapes, corresponding to >98% likelihood as the best model. This favors a pure power-law electron spectrum consistent with stochastic Fermi-II acceleration of cosmic rays by ICM turbulence.", "perturbed_statement": "The power-law form yields the lowest AIC (30.20) and BIC (25.78) with ΔAIC>2 relative to other shapes, indicating >95% probability as the best model. This supports a simple power-law electron spectrum consistent with first-order Fermi-I acceleration by ICM turbulence.", "perturbed_explanation": "The statement misstates the AIC difference threshold: the actual ΔAIC is >8.8, not >2, so the >95% probability claim is incorrect. It also incorrectly attributes the spectrum to first-order Fermi-I acceleration, whereas turbulent ICM processes correspond to second-order Fermi-II acceleration.", "claim": "The power-law form yields the lowest AIC (30.20) and BIC (25.78) with ΔAIC>8.8 relative to other shapes, corresponding to >98% likelihood as the best model. 
This favors a pure power-law electron spectrum consistent with stochastic Fermi-II acceleration of cosmic rays by ICM turbulence.", "label": true }, { "paperid": "2411.06665v1", "paper_path": "./SciVer/papers/2411.06665v1.json", "claim_type": "analytical", "type": "chart", "item": "3(c)", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.06665v1_figure_3(c).png", "request_id": 182, "origin_statement": "In the Office-Home P→C SSDA scenario, varying the pseudo-reconstruction loss weight λ_pr reveals a non-monotonic influence on model accuracy: moderate settings (λ_pr=3 and 9) peak at ~82.4%, indicating that an optimal balance of reconstruction regularization enhances feature alignment by preventing overfitting to noisy pseudo-labels.", "perturbed_statement": "In the Office-Home P→C SSDA scenario, increasing λ_pr monotonically improves accuracy, reaching a maximum of ~84.5% at λ_pr=9, suggesting stronger pseudo-reconstruction weighting consistently benefits cross-domain feature alignment by reinforcing noise-robust regularization.", "perturbed_explanation": "This statement is incorrect because accuracy does not rise monotonically with λ_pr—the observed values (82.2%, 82.4%, 82.0%, 82.0%, 82.4%) form a non-monotonic curve. Moreover, the true maximum accuracy is ~82.4%, not ~84.5% as claimed.", "claim": "In the Office-Home P→C SSDA scenario, varying the pseudo-reconstruction loss weight λ_pr reveals a non-monotonic influence on model accuracy: moderate settings (λ_pr=3 and 9) peak at ~82.4%, indicating that an optimal balance of reconstruction regularization enhances feature alignment by preventing overfitting to noisy pseudo-labels.", "label": true }, { "paperid": "2409.13394v1", "paper_path": "./SciVer/papers/2409.13394v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.13394v1-Table1-1.png", "request_id": 185, "origin_statement": "Integrating microfluidic valves with PDMS enabled eight solid-state nanopores to be addressed using only three pneumatic channels via binary multiplexing, reducing external fluidic connections from eight to three and illustrating that fluidic addressing scales as the binary logarithm of the pore count.", "perturbed_statement": "Integrating microfluidic valves with PDMS enabled eight solid-state nanopores to be addressed using only four pneumatic channels via binary multiplexing, reducing external fluidic connections from eight to four and illustrating that fluidic addressing scales as the binary logarithm of the pore count.", "perturbed_explanation": "The perturbed statement incorrectly states four pneumatic channels are required. According to Jain et al. 
(2017), a three-bit multiplexer addresses eight channels, using three pneumatic channels (log2(8)=3), and reduces connections to three, not four, as shown in the context.", "claim": "Integrating microfluidic valves with PDMS enabled eight solid-state nanopores to be addressed using only three pneumatic channels via binary multiplexing, reducing external fluidic connections from eight to three and illustrating that fluidic addressing scales as the binary logarithm of the pore count.", "label": true }, { "paperid": "2411.14318v1", "paper_path": "./SciVer/papers/2411.14318v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.5" ], "image_path": "./SciVer/images/2411.14318v1_figure_4.png", "request_id": 188, "origin_statement": "Figure 4 shows that without target loss, Velocitune re-scaling drives GenLanguage’s weight from ~0.2 to ≈1, while other domains’ weights drop below 0.01 by 12000 steps. This imbalance reflects an implicit zero-target assumption causing domain collapse and likely catastrophic forgetting across tasks.", "perturbed_statement": "Figure 4 shows that without target loss, Velocitune re-scaling drives GenCode’s weight from ~0.2 to ≈1, while other domains’ weights drop below 0.05 by 12000 steps. This imbalance reflects an implicit zero-target assumption causing domain collapse and likely catastrophic forgetting across tasks.", "perturbed_explanation": "The perturbation incorrectly assigns the rising weight to GenCode instead of GenLanguage: the yellow line (GenLanguage) actually increases to near 1, whereas the grey GenCode weight declines. It also misstates that other domains drop below 0.05, while the chart shows they fall below 0.01.", "claim": "Figure 4 shows that without target loss, Velocitune re-scaling drives GenLanguage’s weight from ~0.2 to ≈1, while other domains’ weights drop below 0.01 by 12000 steps. This imbalance reflects an implicit zero-target assumption causing domain collapse and likely catastrophic forgetting across tasks.", "label": true }, { "paperid": "2411.11677v1", "paper_path": "./SciVer/papers/2411.11677v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "5.5" ], "image_path": "./SciVer/images/2411.11677v1_figure_5.png", "request_id": 192, "origin_statement": "The surrogate models yield marginally diminishing returns in N@10 and Agr@10 beyond a black-box list length of ~100, suggesting that additional recommendation candidates contribute less novel user-item interactions and thus saturate the few-shot extraction performance.", "perturbed_statement": "At a black-box list length of 50, SASRec achieves N@10 of 0.63 and Agr@10 of 0.75, matching NARM and showing that model distinction vanishes even with minimal list sizes.", "perturbed_explanation": "This is incorrect because at length 50, SASRec’s actual N@10 (~0.60) and Agr@10 (~0.72) do not match NARM’s values (0.61 and 0.72). 
The claimed metrics (0.63 and 0.75) are only observed at length 100, not 50.", "claim": "The surrogate models yield marginally diminishing returns in N@10 and Agr@10 beyond a black-box list length of ~100, suggesting that additional recommendation candidates contribute less novel user-item interactions and thus saturate the few-shot extraction performance.", "label": true }, { "paperid": "2410.00679v1", "paper_path": "./SciVer/papers/2410.00679v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2410.00679v1_figure_1.png", "request_id": 196, "origin_statement": "The downward trend of ^1S0 phase shifts from 60° to −5° across 0–300 MeV reflects increasing dominance of the short-range repulsive core over the long-range one-pion exchange attraction as incident energy rises.", "perturbed_statement": "The downward trend of ^1S0 phase shifts from 60° to −15° across 0–300 MeV reflects increasing dominance of the short-range repulsive core over the long-range two-pion exchange attraction as incident energy rises.", "perturbed_explanation": "This is incorrect because the figure shows the ^1S0 phase shift decreases to about −5° at 300 MeV, not −15°. Moreover, the long-range attraction in S-wave scattering is dominated by one-pion exchange rather than two-pion exchange at these energies.", "claim": "The downward trend of ^1S0 phase shifts from 60° to −5° across 0–300 MeV reflects increasing dominance of the short-range repulsive core over the long-range one-pion exchange attraction as incident energy rises.", "label": true }, { "paperid": "2410.22785v1", "paper_path": "./SciVer/papers/2410.22785v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.2.2" ], "image_path": "./SciVer/images/2410.22785v1_figure_5.png", "request_id": 198, "origin_statement": "For an ionized region with n∝r^−2, free-free emission scales as Sν∝ν^0.6 and angular size as θ∝ν^−0.7; thus NVSS J174218–301526’s 10.2 mJy at 1.4 GHz predicts ≈80 mJy and a ∼4″ source at 43 GHz, inconsistent with our non-detection, ruling out a nascent PN.", "perturbed_statement": "For an ionized region with n∝r^−2, free-free emission scales as Sν∝ν^1.2 and angular size as θ∝ν^−0.5; thus NVSS J174218–301526’s 10.2 mJy at 1.4 GHz predicts ≈700 mJy and a ∼2″ source at 43 GHz, inconsistent with our non-detection, ruling out a nascent PN.", "perturbed_explanation": "The perturbed statement misuses the frequency scaling exponents: theoretical models for a radial density profile n∝r^−2 predict Sν∝ν^0.6 (not ν^1.2) and θ∝ν^−0.7 (not ν^−0.5). 
Therefore, the extrapolated 700 mJy and 2″ size at 43 GHz contradict both the context and free-free emission theory.", "claim": "For an ionized region with n∝r^−2, free-free emission scales as Sν∝ν^0.6 and angular size as θ∝ν^−0.7; thus NVSS J174218–301526’s 10.2 mJy at 1.4 GHz predicts ≈80 mJy and a ∼4″ source at 43 GHz, inconsistent with our non-detection, ruling out a nascent PN.", "label": true }, { "paperid": "2411.09585v1", "paper_path": "./SciVer/papers/2411.09585v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.09585v1_figure_3.png", "request_id": 200, "origin_statement": "D3 consistently imposes minimal extra training cost, increasing execution time by only around 10–20% relative to vanilla fine-tuning across CIFAR-10, GTSRB, and Tiny ImageNet, indicating its weight-distance regularization scales efficiently with model size.", "perturbed_statement": "D3 accelerates training by roughly 20% compared to vanilla fine-tuning across all datasets, evidencing that its weight-distance term optimizes parameter updates in sub-linear time.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 3 shows D3’s execution time bars slightly above the baseline (scaled value ≈1.1–1.2), indicating a 10–20% slowdown, not a speedup. Moreover, the weight-distance computation remains a linear rather than sub-linear complexity operation.", "claim": "D3 consistently imposes minimal extra training cost, increasing execution time by only around 10–20% relative to vanilla fine-tuning across CIFAR-10, GTSRB, and Tiny ImageNet, indicating its weight-distance regularization scales efficiently with model size.", "label": true }, { "paperid": "2411.12355v1", "paper_path": "./SciVer/papers/2411.12355v1.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "4.5" ], "image_path": "./SciVer/images/2411.12355v1-Table6-1.png", "request_id": 203, "origin_statement": "Filtering 40 tokens (with Ub_t0=0) yields 63.7% accuracy, marginally below the 65.6% at 256 tokens, demonstrating that compressing to ~15% of tokens preserves over 97% of accuracy, thus validating efficient prototype compression with minimal QA performance loss.", "perturbed_statement": "Filtering 40 tokens (with Ub_t0=0) yields 67.9% accuracy, exceeding the 65.6% at 256 tokens, demonstrating that compressing to ~15% of tokens actually improves accuracy by 2.3%, thus validating efficient prototype compression with performance gain.", "perturbed_explanation": "Table 6 reports an accuracy of 63.7% for (Ub_t0=0, Ub_t1=40), not 67.9%, so the perturbed statement misreports the 40-token result. 
Moreover, compression reduces accuracy from 65.6% to 63.7% (a ~1.9% drop), rather than improving it by 2.3%.", "claim": "Filtering 40 tokens (with Ub_t0=0) yields 63.7% accuracy, marginally below the 65.6% at 256 tokens, demonstrating that compressing to ~15% of tokens preserves over 97% of accuracy, thus validating efficient prototype compression with minimal QA performance loss.", "label": true }, { "paperid": "2411.17181v2", "paper_path": "./SciVer/papers/2411.17181v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "1", "4.6", "5" ], "image_path": "./SciVer/images/2411.17181v2-Table4-1.png", "request_id": 205, "origin_statement": "The WGSS model achieves a higher Rouge-1 score on Marathi (0.50) than on Turkish (0.48) and Hindi (0.40), indicating its clustering-based Gaussian similarity approach effectively captures semantic cohesion even in highly inflected agglutinative languages, reducing redundancy and improving topic coverage.", "perturbed_statement": "The WGSS model achieves a higher Rouge-1 score on Turkish (0.52) than on Marathi (0.50) and Hindi (0.40), indicating its clustering-based Manhattan distance approach effectively captures semantic cohesion even in highly inflected languages.", "perturbed_explanation": "The perturbed statement is incorrect because Table 4 lists Turkish’s Rouge-1 as 0.48, not 0.52, and the WGSS method uses a Gaussian kernel to compute word similarities, not Manhattan distance.", "claim": "The WGSS model achieves a higher Rouge-1 score on Marathi (0.50) than on Turkish (0.48) and Hindi (0.40), indicating its clustering-based Gaussian similarity approach effectively captures semantic cohesion even in highly inflected agglutinative languages, reducing redundancy and improving topic coverage.", "label": true }, { "paperid": "2411.17593v1", "paper_path": "./SciVer/papers/2411.17593v1.json", "claim_type": "analytical", "type": "chart", "item": "1(b)", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.17593v1_figure_1(b).png", "request_id": 206, "origin_statement": "Prior to balancing, the dataset showed only 41 books at KS2 but over triple that at KS4 (134), reflecting progressive Lexile complexity requirements; such imbalance could skew a transformer classifier towards more advanced texts unless resampling enforces equal class representation.", "perturbed_statement": "Prior to balancing, the dataset showed only 55 books at KS2 but over double that at KS4 (112), reflecting progressive Lexile complexity requirements; such imbalance could skew a transformer classifier towards more advanced texts unless resampling enforces equal class representation.", "perturbed_explanation": "The perturbed statement is incorrect because the context and Figure 1 report 41 books for KS2 and 134 books for KS4, not 55 and 112 respectively. 
These altered counts contradict the actual data distribution.", "claim": "Prior to balancing, the dataset showed only 41 books at KS2 but over triple that at KS4 (134), reflecting progressive Lexile complexity requirements; such imbalance could skew a transformer classifier towards more advanced texts unless resampling enforces equal class representation.", "label": true }, { "paperid": "2409.05633v1", "paper_path": "./SciVer/papers/2409.05633v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2409.05633v1_figure_4.png", "request_id": 212, "origin_statement": "The larger relative NDCG@10 drop upon removing semantic relevance sampling (Shared-C and Shared-T) versus neighbor perturbations demonstrates that preserving code-level semantic alignment in contrastive views causally underpins CoGCL’s improved recommendation robustness.", "perturbed_statement": "The most significant performance degradation on Instrument and Office occurs when the ‘add’ operator is removed, dropping NDCG@10 by over 0.05, indicating that simple neighbor addition is the primary driver of CoGCL’s contrastive learning gains.", "perturbed_explanation": "This statement is incorrect because Figure 4 shows that removing semantic relevance sampling (Shared-C and Shared-T) causes a larger NDCG@10 drop than removing the ‘add’ operator, and the actual decrease from CoGCL’s 0.0435 to w/o Add’s 0.0429 is only 0.0006, not over 0.05.", "claim": "The larger relative NDCG@10 drop upon removing semantic relevance sampling (Shared-C and Shared-T) versus neighbor perturbations demonstrates that preserving code-level semantic alignment in contrastive views causally underpins CoGCL’s improved recommendation robustness.", "label": true }, { "paperid": "2411.07825v1", "paper_path": "./SciVer/papers/2411.07825v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5" ], "image_path": "./SciVer/images/2411.07825v1-Table2-1.png", "request_id": 215, "origin_statement": "After iteration 2, the spectral radius ρ(A₂) falls below unity, satisfying the Schur stability criterion; this, combined with rising minimal singular values of Q_i, implies improved robustness and faster convergence in the data-driven SPI algorithm.", "perturbed_statement": "After iteration 1, the spectral radius ρ(A₁) falls below unity, satisfying the Schur stability criterion; this, combined with rising minimal singular values of Q_i, implies improved robustness and faster convergence in the data-driven SPI algorithm.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 shows ρ(A₁)=1.0059, which exceeds unity; thus, the spectral radius remains above 1 at iteration 1, and Schur stability is not achieved until iteration 2.", "claim": "After iteration 2, the spectral radius ρ(A₂) falls below unity, satisfying the Schur stability criterion; this, combined with rising minimal singular values of Q_i, implies improved robustness and faster convergence in the data-driven SPI algorithm.", "label": true }, { "paperid": "2409.08522v1", "paper_path": "./SciVer/papers/2409.08522v1.json", "claim_type": "analytical", "type": "chart", "item": "4(c)", "section": [ "5" ], "image_path": "./SciVer/images/2409.08522v1_figure_4(c).png", "request_id": 220, "origin_statement": "MAPX_DAPA maintains F1 scores above 0.32 over 1 to 64 hours, outperforming static average aggregation whose F1 dips to ~0.26 at 16h, illustrating DAPA’s temporal adaptability to sparse features.", 
"perturbed_statement": "MAPX_DAPA’s F1 rises from 0.33 at 1h to 0.50 at 64h, highlighting its superior temporal learning, whereas simple averaging remains below 0.30, indicating its inability to adapt as user interactions accumulate.", "perturbed_explanation": "The perturbed statement falsely claims an F1 of 0.50 for MAPX_DAPA at 64h; Figure 4 shows its F1 at 64 hours is about 0.35, not 0.50, contradicting the plotted values.", "claim": "MAPX_DAPA maintains F1 scores above 0.32 over 1 to 64 hours, outperforming static average aggregation whose F1 dips to ~0.26 at 16h, illustrating DAPA’s temporal adaptability to sparse features.", "label": true }, { "paperid": "2411.06965v1", "paper_path": "./SciVer/papers/2411.06965v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.06965v1_figure_4.png", "request_id": 224, "origin_statement": "In Humanoid, measure conditioning in mCWAE-WGAIL-Bonus concentrates exploration into regions with the highest reward gradients, yielding superior average cumulative rewards compared to WAE-WGAIL-Bonus, despite slightly reduced overall coverage.", "perturbed_statement": "In Humanoid, measure conditioning in mCWAE-WGAIL-Bonus concentrates exploration into regions with the highest reward gradients, yielding superior average cumulative rewards and significantly increased overall coverage compared to WAE-WGAIL-Bonus.", "perturbed_explanation": "The perturbed claim incorrectly asserts that mCWAE-WGAIL-Bonus has significantly increased overall coverage. The context and Figure 4 state that mCWAE-WGAIL-Bonus actually explores a slightly smaller archive region than WAE-WGAIL-Bonus, not a larger one.", "claim": "In Humanoid, measure conditioning in mCWAE-WGAIL-Bonus concentrates exploration into regions with the highest reward gradients, yielding superior average cumulative rewards compared to WAE-WGAIL-Bonus, despite slightly reduced overall coverage.", "label": true }, { "paperid": "2411.16516v1", "paper_path": "./SciVer/papers/2411.16516v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2411.16516v1-Table3-1.png", "request_id": 225, "origin_statement": "False positive regions for both benchmark and adapted DP mechanisms coincide with parameters θ that simultaneously satisfy ε*(θ)>ε (violating the nominal DP claim per Eq. (3)) and ξ*(θ)≤ε (passing the audit per Eq. (4)), highlighting empirical underestimation of privacy loss.", "perturbed_statement": "False positive regions for both benchmark and adapted DP mechanisms coincide with parameters θ that simultaneously satisfy ε*(θ)<ε (falsely suggesting DP compliance) and ξ*(θ)≥ε (indicating audit rejection), highlighting empirical overestimation of privacy loss.", "perturbed_explanation": "This statement contradicts the defined roadmap: R1 requires ε*(θ)>ε (not ε*(θ)<ε) to violate the DP claim, and R2 requires ξ*(θ)≤ε (not ξ*(θ)≥ε) to pass the auditing tool, as specified in steps R1 and R2 respectively.", "claim": "False positive regions for both benchmark and adapted DP mechanisms coincide with parameters θ that simultaneously satisfy ε*(θ)>ε (violating the nominal DP claim per Eq. (3)) and ξ*(θ)≤ε (passing the audit per Eq. 
(4)), highlighting empirical underestimation of privacy loss.", "label": true }, { "paperid": "2410.09850v1", "paper_path": "./SciVer/papers/2410.09850v1.json", "claim_type": "analytical", "type": "chart", "item": "6(a)", "section": [ "4.3.2" ], "image_path": "./SciVer/images/2410.09850v1_figure_6(a).png", "request_id": 226, "origin_statement": "The GAN’s generator matches the original 0.8993 correlation to within 0.0744, yet its circular scatter arises from isotropic noise overriding anisotropic covariance, indicating the discriminator’s joint feedback fails to enforce alignment along the principal component axis.", "perturbed_statement": "The GAN’s generator matches the original 0.8993 correlation to within 0.17, yet its circular scatter arises from isotropic noise overriding anisotropic covariance, indicating the discriminator’s joint feedback fails to enforce alignment along the principal component axis.", "perturbed_explanation": "The perturbation is incorrect because the reported difference between the original (0.8993) and generated (0.8249) correlations is 0.0744, not 0.17, contradicting the context’s stated correlation deviation.", "claim": "The GAN’s generator matches the original 0.8993 correlation to within 0.0744, yet its circular scatter arises from isotropic noise overriding anisotropic covariance, indicating the discriminator’s joint feedback fails to enforce alignment along the principal component axis.", "label": true }, { "paperid": "2410.14148v3", "paper_path": "./SciVer/papers/2410.14148v3.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.14148v3-Table4-1.png", "request_id": 233, "origin_statement": "Integrating fine-grained token-level rewards with PPO training in FiSAO increases MME^P from 1509.3 to 1522.6 and GQA accuracy from 60.9% to 62.0%, illustrating that token-level feedback stabilizes gradient signals and enhances multimodal alignment.", "perturbed_statement": "Integrating fine-grained token-level rewards with PPO training in FiSAO increases MME^P by 5% (from 1509.3 to 1522.6) and GQA accuracy by 4% (from 60.9% to 62.0%), illustrating that token-level feedback stabilizes gradient signals and enhances multimodal alignment.", "perturbed_explanation": "The perturbation misstates the percentage improvements: MME^P’s gain from 1509.3 to 1522.6 is only ~0.9%, not 5%, and GQA’s rise from 60.9% to 62.0% is ~1.8%, not 4%, contradicting Table 4.", "claim": "Integrating fine-grained token-level rewards with PPO training in FiSAO increases MME^P from 1509.3 to 1522.6 and GQA accuracy from 60.9% to 62.0%, illustrating that token-level feedback stabilizes gradient signals and enhances multimodal alignment.", "label": true }, { "paperid": "2409.06103v1", "paper_path": "./SciVer/papers/2409.06103v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.2.1", "3.2.2", "3.2.3" ], "image_path": "./SciVer/images/2409.06103v1_figure_4.png", "request_id": 234, "origin_statement": "The MoSiC-CGenFF-NTOID force field predicts that branched peptoid homotrimers (e.g., NTbu) have ΔGcis/trans around -2.3 kcal/mol, indicating cis isomer predominance by nearly an order of magnitude, whereas nucleophilic analogs (e.g., NGlu) exhibit ΔGcis/trans of ~2.6 kcal/mol, favoring trans isomers due to disrupted n→π* interactions.", "perturbed_statement": "The MoSiC-CGenFF-NTOID force field predicts that branched peptoid homotrimers (e.g., NTbu) have ΔGcis/trans around +2.3 kcal/mol, indicating trans 
isomer predominance, whereas nucleophilic analogs (e.g., NGlu) exhibit ΔGcis/trans of -2.6 kcal/mol, favoring cis isomers due to enhanced hydrogen bonding.", "perturbed_explanation": "The perturbed statement incorrectly assigns NTbu a positive ΔGcis/trans (+2.3 kcal/mol) and NGlu a negative ΔGcis/trans (-2.6 kcal/mol), contradicting Figure 4B, which shows NTbu at ≈ -2.3 kcal/mol and NGlu at ≈ +2.6 kcal/mol. Moreover, cis/trans preferences are driven by n→π* and steric effects, not enhanced hydrogen bonding.", "claim": "The MoSiC-CGenFF-NTOID force field predicts that branched peptoid homotrimers (e.g., NTbu) have ΔGcis/trans around -2.3 kcal/mol, indicating cis isomer predominance by nearly an order of magnitude, whereas nucleophilic analogs (e.g., NGlu) exhibit ΔGcis/trans of ~2.6 kcal/mol, favoring trans isomers due to disrupted n→π* interactions.", "label": true }, { "paperid": "2409.10604v4", "paper_path": "./SciVer/papers/2409.10604v4.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.2", "3.3", "3.4" ], "image_path": "./SciVer/images/2409.10604v4-Table2-1.png", "request_id": 261, "origin_statement": "In TLAC shell models, higher H I column densities log(N_HI/cm^2)>19.6 lead to blue–red peak separations increasing above ~400 km s⁻¹, as more frequent resonant scatterings broaden the Ly profiles and shift photons out of resonance before escape.", "perturbed_statement": "In TLAC shell models, H I column densities above log(N_HI/cm^2)>18.0 yield blue–red peak separations exceeding ~600 km s⁻¹ due to Doppler broadening from expansion velocities >500 km s⁻¹.", "perturbed_explanation": "This statement is incorrect because (1) the threshold log(N_HI/cm^2)>18.0 is too low: significant peak separations >400 km s⁻¹ require log(N_HI/cm^2)>19.6, and (2) TLAC’s grid only samples expansion velocities up to ±300 km s⁻¹, so velocities >500 km s⁻¹ lie outside the modeled parameter range.", "claim": "In TLAC shell models, higher H I column densities log(N_HI/cm^2)>19.6 lead to blue–red peak separations increasing above ~400 km s⁻¹, as more frequent resonant scatterings broaden the Ly profiles and shift photons out of resonance before escape.", "label": true }, { "paperid": "2410.09635v1", "paper_path": "./SciVer/papers/2410.09635v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.09635v1_figure_6.png", "request_id": 262, "origin_statement": "At a 0.2 false positive rate, AIMEN achieves roughly 0.62 sensitivity, implying an AUC above 0.75 consistent with moderate discrimination; increasing the decision threshold beyond the default 0.5 drops sensitivity below 60%, illustrating the conservative bias introduced by stricter abnormal criteria.", "perturbed_statement": "At a 0.2 false positive rate, AIMEN achieves roughly 0.72 sensitivity, implying an AUC above 0.85 consistent with strong discrimination; increasing the decision threshold beyond the default 0.5 drops specificity below 60%, illustrating the conservative bias introduced by stricter abnormal criteria.", "perturbed_explanation": "The perturbed statement incorrectly claims a sensitivity of 0.72 at FPR=0.2, but Fig. 6(a) shows sensitivity is about 0.62 at that FPR. It also misstates that specificity drops below 60% beyond threshold 0.5; Fig. 
6(b) indicates it is sensitivity, not specificity, that declines.", "claim": "At a 0.2 false positive rate, AIMEN achieves roughly 0.62 sensitivity, implying an AUC above 0.75 consistent with moderate discrimination; increasing the decision threshold beyond the default 0.5 drops sensitivity below 60%, illustrating the conservative bias introduced by stricter abnormal criteria.", "label": true }, { "paperid": "2409.01887v1", "paper_path": "./SciVer/papers/2409.01887v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.01887v1-Table2-1.png", "request_id": 265, "origin_statement": "By matching CNAME records against curated CDN-assigned subdomain patterns (e.g., .fastly.net, .cloudflare.net) and verifying HTTP response headers, the CDN Checker exploits DNS-based redirection and HTTP fingerprinting to reliably distinguish CDN-hosted domains from self-hosted ones, reducing false positives.", "perturbed_statement": "By matching A records against curated CDN-assigned subdomain patterns (e.g., .fastly.net, .cloudflare.net) and verifying HTTP status codes, the CDN Checker exploits DNS-based redirection and HTTP fingerprinting to reliably distinguish CDN-hosted domains from self-hosted ones.", "perturbed_explanation": "The perturbed statement is incorrect because the CDN Checker matches CNAME records—not A records—to identify CDN assignments, and it verifies HTTP response headers (e.g., “Server: cloudflare”) rather than HTTP status codes to confirm CDN hosting.", "claim": "By matching CNAME records against curated CDN-assigned subdomain patterns (e.g., .fastly.net, .cloudflare.net) and verifying HTTP response headers, the CDN Checker exploits DNS-based redirection and HTTP fingerprinting to reliably distinguish CDN-hosted domains from self-hosted ones, reducing false positives.", "label": true }, { "paperid": "2411.14914v1", "paper_path": "./SciVer/papers/2411.14914v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.14914v1_figure_3.png", "request_id": 268, "origin_statement": "GPT-3.5 models achieve the highest median precision and exhibit the greatest inter-seed variance among models on q4 and q5, indicating that their stochastic decoding parameters likely amplify precision fluctuation across random seeds.", "perturbed_statement": "GPT-3.5 models achieve the highest median precision but exhibit the lowest inter-seed variance among models on q4 and q5, indicating that their stochastic decoding parameters stabilize precision across random seeds.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 3 shows that GPT-3.5 models have the largest inter-seed variance (widest precision spread and IQR) on both queries, not the lowest.", "claim": "GPT-3.5 models achieve the highest median precision and exhibit the greatest inter-seed variance among models on q4 and q5, indicating that their stochastic decoding parameters likely amplify precision fluctuation across random seeds.", "label": true }, { "paperid": "2411.15173v1", "paper_path": "./SciVer/papers/2411.15173v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5" ], "image_path": "./SciVer/images/2411.15173v1-Table1-1.png", "request_id": 271, "origin_statement": "FreDA’s decentralized approach cuts the average classification error on CIFAR-10-C to just 22.9%, compared to over 33% for other adaptive methods, by sequentially isolating covariate from label 
shifts, validating that mixing corruptions at severity level five accentuates centralized adaptation failures.", "perturbed_statement": "FreDA’s decentralized approach cuts the average classification error on CIFAR-10-C to just 42.9%, compared to over 33% for other adaptive methods, by sequentially isolating covariate from label shifts, validating that mixing corruptions at severity level six accentuates centralized adaptation failures.", "perturbed_explanation": "The perturbed statement incorrectly reports FreDA’s average error on CIFAR-10-C as 42.9%, whereas Table 1 shows it is 22.9%. It also references severity level six, but the experiments use severity level five, making both details inconsistent with the context.", "claim": "FreDA’s decentralized approach cuts the average classification error on CIFAR-10-C to just 22.9%, compared to over 33% for other adaptive methods, by sequentially isolating covariate from label shifts, validating that mixing corruptions at severity level five accentuates centralized adaptation failures.", "label": true }, { "paperid": "2410.21088v1", "paper_path": "./SciVer/papers/2410.21088v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.21088v1-Table3-1.png", "request_id": 273, "origin_statement": "Injecting watermarks into high-frequency bands using a circular mask with values drawn from a Gaussian distribution boosts PSNR to 42.3 dB and SSIM to 0.98 by exploiting the lower spectral energy of high-frequency components, thereby reducing perceptible artifacts while retaining watermark detectability.", "perturbed_statement": "Injecting watermarks into high-frequency bands using a circular mask with values drawn from a Gaussian distribution boosts PSNR to 32.3 dB and SSIM to 0.98 by exploiting the lower spectral energy of high-frequency components, thereby reducing perceptible artifacts while retaining watermark detectability.", "perturbed_explanation": "The perturbed statement incorrectly reports the PSNR value. Table 3 shows the PSNR for the high-frequency circular Gaussian watermark is 42.3 dB, not 32.3 dB.", "claim": "Injecting watermarks into high-frequency bands using a circular mask with values drawn from a Gaussian distribution boosts PSNR to 42.3 dB and SSIM to 0.98 by exploiting the lower spectral energy of high-frequency components, thereby reducing perceptible artifacts while retaining watermark detectability.", "label": true }, { "paperid": "2409.01490v1", "paper_path": "./SciVer/papers/2409.01490v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.01490v1_figure_8.png", "request_id": 276, "origin_statement": "As the smoothing parameter ρ decreases from 1.0 to 0.0001, throttle transitions sharpen from sinusoidal pulses to near-instantaneous on/off segments, reflecting Pontryagin’s minimum principle: lower smoothing yields bang-bang control, minimizing fuel consumption by maximizing thrust during active arcs and coasting with zero thrust.", "perturbed_statement": "Contrary to Pontryagin’s minimum principle, only the highest smoothing value (ρ=1.0) exhibits bang-bang control; all profiles for ρ≤0.1 remain smooth and continuous without reaching exact maximum or zero thrust, indicating that reducing smoothing fails to produce piecewise-constant throttle segments.", "perturbed_explanation": "Fig. 
8 clearly shows that for ρ=0.01, 0.001, and 0.0001 the throttle profiles reach both full throttle and zero thrust with abrupt switches, demonstrating bang-bang behavior. Therefore the claim that only ρ=1.0 yields bang-bang control and that smaller ρ values stay smooth is false.", "claim": "As the smoothing parameter ρ decreases from 1.0 to 0.0001, throttle transitions sharpen from sinusoidal pulses to near-instantaneous on/off segments, reflecting Pontryagin’s minimum principle: lower smoothing yields bang-bang control, minimizing fuel consumption by maximizing thrust during active arcs and coasting with zero thrust.", "label": true }, { "paperid": "2410.21350v1", "paper_path": "./SciVer/papers/2410.21350v1.json", "claim_type": "analytical", "type": "table", "item": "7", "section": [ "4.5" ], "image_path": "./SciVer/images/2410.21350v1-Table7-1.png", "request_id": 277, "origin_statement": "Using a Kriging-based root-finding, Enhanced SDIS at n=10 trims expected limit-state evaluations by ~36% versus standard SDIS, achieving 170.8 relEff. As n increases to 1000, surrogate inaccuracies double the coefficient of variation from 0.24 to 0.51, dropping relEff to 16.09.", "perturbed_statement": "Using a Kriging-based root-finding, Enhanced SDIS at n=10 trims expected limit-state evaluations by ~64% versus standard SDIS, achieving 170.8 relEff. As n increases to 1000, surrogate inaccuracies double the coefficient of variation from 0.24 to 0.51, dropping relEff to 16.09.", "perturbed_explanation": "The claimed ~64% reduction in expected limit-state evaluations at n=10 is incorrect. Table 7 shows E(N_t) drops from 3254 (standard SDIS) to 2070 (Enhanced SDIS), which corresponds to approximately a 36% reduction, not 64%.", "claim": "Using a Kriging-based root-finding, Enhanced SDIS at n=10 trims expected limit-state evaluations by ~36% versus standard SDIS, achieving 170.8 relEff. 
As n increases to 1000, surrogate inaccuracies double the coefficient of variation from 0.24 to 0.51, dropping relEff to 16.09.", "label": true }, { "paperid": "2409.08158v1", "paper_path": "./SciVer/papers/2409.08158v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2409.08158v1_figure_1.png", "request_id": 280, "origin_statement": "Between 2010 and 2015, the annual number of estimates grows at twice the pace of papers, reflecting methodological advances—like Monte Carlo sensitivity tests—enabled by faster computing, which has led authors to report broader uncertainty ranges per paper.", "perturbed_statement": "Between 2005 and 2010, the annual number of estimates grows at three times the pace of papers, reflecting methodological advances—like Monte Carlo sensitivity tests—enabled by faster computing, which has led authors to report broader uncertainty ranges per paper.", "perturbed_explanation": "The perturbation is incorrect because Figure 1 shows the estimate‐to‐paper growth acceleration occurs after 2010 rather than between 2005 and 2010, and the ratio roughly doubles (2×) by 2015 rather than tripling (3×) by 2010.", "claim": "Between 2010 and 2015, the annual number of estimates grows at twice the pace of papers, reflecting methodological advances—like Monte Carlo sensitivity tests—enabled by faster computing, which has led authors to report broader uncertainty ranges per paper.", "label": true }, { "paperid": "2409.03044v1", "paper_path": "./SciVer/papers/2409.03044v1.json", "claim_type": "analytical", "type": "chart", "item": "2(b)", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.03044v1_figure_2(b).png", "request_id": 282, "origin_statement": "The prominent mouse-based authentication (n=572) implies broad use of on-screen keyboards, aligning with Fitts’ Law: smaller targets on remote GUIs increase selection time and error rates, suggesting mouse-driven password entry is slower and more error-prone than tactile PIN pads.", "perturbed_statement": "The prominent mouse-based authentication (n=5,720) implies broad use of on-screen keyboards, contradicting Fitts’ Law by suggesting smaller targets on remote GUIs decrease selection time and error rates, making mouse-driven entry faster than tactile PIN pads.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 2 reports n=572 mouse-based credential entries, not 5,720. 
Moreover, Fitts’ Law states that smaller targets increase selection time and error rates, whereas the perturbed claim wrongly asserts they decrease those factors.", "claim": "The prominent mouse-based authentication (n=572) implies broad use of on-screen keyboards, aligning with Fitts’ Law: smaller targets on remote GUIs increase selection time and error rates, suggesting mouse-driven password entry is slower and more error-prone than tactile PIN pads.", "label": true }, { "paperid": "2411.06286v1", "paper_path": "./SciVer/papers/2411.06286v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.06286v1-Table1-1.png", "request_id": 287, "origin_statement": "SPIKAN’s separable architecture transforms the 2D input into two 1D networks, reducing per-iteration floating-point operations and achieving up to 287× training speedup (at 200^2 collocation points) while lowering L2 error from 228.84% to 2.20%, illustrating dimensionality reduction’s efficiency in solving Helmholtz problems.", "perturbed_statement": "SPIKAN’s separable architecture transforms the 2D input into two 1D networks, reducing per-iteration floating-point operations and achieving up to 28.7× training speedup (at 200^2 collocation points) while lowering L2 error from 228.84% to 22.0%, illustrating dimensionality reduction’s efficiency in solving Helmholtz problems.", "perturbed_explanation": "This statement is incorrect because Table 1 shows a 287.0× speedup for the SPIKAN (c) case, not 28.7×, and the L2 error for SPIKAN (c) is 2.20%, not 22.0%.", "claim": "SPIKAN’s separable architecture transforms the 2D input into two 1D networks, reducing per-iteration floating-point operations and achieving up to 287× training speedup (at 200^2 collocation points) while lowering L2 error from 228.84% to 2.20%, illustrating dimensionality reduction’s efficiency in solving Helmholtz problems.", "label": true }, { "paperid": "2410.11566v2", "paper_path": "./SciVer/papers/2410.11566v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.11566v2-Table3-1.png", "request_id": 289, "origin_statement": "In Table 3, under large isotropic measurement disturbance and high angular velocity noise, the proposed BE delivers a steady-state attitude error of 13.19°, which is 2.9% lower than NormBE’s 13.59°, and converges over 10% faster than MEKF by more rapidly discounting error propagation from the initial state.", "perturbed_statement": "In Table 3, under large isotropic measurement disturbance and high angular velocity noise, the proposed BE delivers a steady-state attitude error of 13.19°, which is 4.5% lower than NormBE’s 13.59°, and converges over 10% faster than MEKF by more rapidly discounting error propagation from the initial state.", "perturbed_explanation": "The perturbed statement incorrectly claims a 4.5% reduction. 
According to Table 3, the actual reduction is (13.59°–13.19°)/13.59° ≈ 2.94%, not 4.5%, so the percentage figure contradicts the data.", "claim": "In Table 3, under large isotropic measurement disturbance and high angular velocity noise, the proposed BE delivers a steady-state attitude error of 13.19°, which is 2.9% lower than NormBE’s 13.59°, and converges over 10% faster than MEKF by more rapidly discounting error propagation from the initial state.", "label": true }, { "paperid": "2409.12052v3", "paper_path": "./SciVer/papers/2409.12052v3.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.2.1", "3.2.2", "3.2.3" ], "image_path": "./SciVer/images/2409.12052v3_figure_2.png", "request_id": 292, "origin_statement": "The colored Δ and λ distributions on Zr2NCl2 surfaces reveal that regions near K points, which exhibit enhanced electron-phonon coupling due to strain-induced band flattening, correspond to larger superconducting gaps; this anisotropy boost explains the Tc increase from ~3 K (no strain) to ~5 K under 6% in-plane strain.", "perturbed_statement": "The Δ and λ distributions on Zr2NCl2 surfaces indicate that regions near M points, which exhibit suppressed electron-phonon coupling due to strain-induced band flattening, correspond to larger superconducting gaps; this anisotropy boost explains the Tc increase from ~8 K to ~10 K under 6% in-plane strain.", "perturbed_explanation": "This statement is incorrect because the original data show enhanced electron-phonon coupling and larger gap regions at the K points, not M points, under strain. Moreover, the superconducting critical temperature (Tc) rises from ~3 K to ~5 K with 6% strain, not from ~8 K to ~10 K.", "claim": "The colored Δ and λ distributions on Zr2NCl2 surfaces reveal that regions near K points, which exhibit enhanced electron-phonon coupling due to strain-induced band flattening, correspond to larger superconducting gaps; this anisotropy boost explains the Tc increase from ~3 K (no strain) to ~5 K under 6% in-plane strain.", "label": true }, { "paperid": "2411.03363v1", "paper_path": "./SciVer/papers/2411.03363v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "3.2", "3.3.1" ], "image_path": "./SciVer/images/2411.03363v1-Table4-1.png", "request_id": 299, "origin_statement": "Model-based TDD methods, exemplified by Model-loss and Model-lira, consistently achieve average AUROC above 0.68 across image, tabular, and text datasets, demonstrating that leveraging ensembles of reference models enhances detection robustness compared to metric-based or learning-based approaches.", "perturbed_statement": "Metric-based TDD methods, such as Metric-ment and Metric-loss, achieve average AUROC above 0.67 across all modalities, outperforming model-based algorithms that average only around 0.61, implying that simple distance metrics yield superior detection accuracy.", "perturbed_explanation": "The perturbation incorrectly states that metric-based methods average above 0.67 and outperform model-based ones averaging around 0.61. 
In Table 4, metric-based methods have an average AUROC of about 0.590, while model-based algorithms average around 0.684, contradicting the claim.", "claim": "Model-based TDD methods, exemplified by Model-loss and Model-lira, consistently achieve average AUROC above 0.68 across image, tabular, and text datasets, demonstrating that leveraging ensembles of reference models enhances detection robustness compared to metric-based or learning-based approaches.", "label": true }, { "paperid": "2411.00311v1", "paper_path": "./SciVer/papers/2411.00311v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "2.2", "5.5" ], "image_path": "./SciVer/images/2411.00311v1_figure_2(a).png", "request_id": 300, "origin_statement": "Despite fine-tuning only 0.028% of parameters, C2A achieves 84.6% test accuracy, outperforming other PEFT baselines by up to 18.8% under heterogeneous federated data. This shows client-tailored modules effectively stabilize convergence and mitigate non-IID drift compared to one-size-fits-all adaptations.", "perturbed_statement": "Despite fine-tuning only 0.18% of parameters, C2A achieves 79.1% test accuracy, outperforming other PEFT baselines by up to 6.2% under heterogeneous federated data. This shows client-tailored modules moderately stabilize convergence and partially mitigate non-IID drift compared to one-size-fits-all adaptations.", "perturbed_explanation": "The perturbed statement incorrectly reports C2A as using 0.18% of parameters and achieving 79.1% accuracy. In reality, the table shows C2A uses only 0.028% of parameters and reaches 84.6% test accuracy, so both the parameter percentage and accuracy are wrong.", "claim": "Despite fine-tuning only 0.028% of parameters, C2A achieves 84.6% test accuracy, outperforming other PEFT baselines by up to 18.8% under heterogeneous federated data. This shows client-tailored modules effectively stabilize convergence and mitigate non-IID drift compared to one-size-fits-all adaptations.", "label": true }, { "paperid": "2411.02264v1", "paper_path": "./SciVer/papers/2411.02264v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.02264v1_figure_5.png", "request_id": 306, "origin_statement": "Fig. 5 shows the 44Q quasi-normal mode amplitude distribution peaking near |A₄₄Q|≈0.12, roughly four times the (440) linear mode peak at |A₄₄₀|≈0.03, confirming that spin-induced non-linear couplings enhance the 44Q overtone relative to higher-order linear modes.", "perturbed_statement": "Fig. 5 shows the 44Q quasi-normal mode amplitude distribution peaking near |A₄₄Q|≈0.20, almost ten times higher than the (440) linear mode peak at |A₄₄₀|≈0.02, demonstrating a tenfold amplification due to second-order non-linear effects.", "perturbed_explanation": "The perturbed statement incorrectly states the 44Q peak at |A₄₄Q|≈0.20 (actual peak ≈0.12) and the (440) linear peak at |A₄₄₀|≈0.02 (actual peak ≈0.03). The resulting tenfold amplification contradicts the roughly fourfold ratio evident in Fig. 5.", "claim": "Fig. 
5 shows the 44Q quasi-normal mode amplitude distribution peaking near |A₄₄Q|≈0.12, roughly four times the (440) linear mode peak at |A₄₄₀|≈0.03, confirming that spin-induced non-linear couplings enhance the 44Q overtone relative to higher-order linear modes.", "label": true }, { "paperid": "2411.05322v1", "paper_path": "./SciVer/papers/2411.05322v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.05322v1-Table2-1.png", "request_id": 309, "origin_statement": "At training views, ReRF encodes scenes at 33.60 dB PSNR and 0.111 LPIPS in 221.9 KB, whereas our Ours(Low) uses only 55.7 KB to achieve 36.50 dB and 0.090 LPIPS, indicating a 3 dB uplift and ~19% perceptual error reduction for a quarter of the storage.", "perturbed_statement": "At training views, ReRF encodes scenes at 33.60 dB PSNR and 0.111 LPIPS in 221.9 KB, whereas our Ours(Low) uses only 70 KB to achieve 34.50 dB and 0.085 LPIPS, indicating a 1 dB uplift and ~23% perceptual error reduction for a third of the storage.", "perturbed_explanation": "This statement is incorrect because Ours(Low) actually uses 55.72 KB (not 70 KB), achieves 36.50 dB PSNR (not 34.50 dB), and 0.090 LPIPS (not 0.085). Consequently, the claimed 1 dB uplift, ~23% error reduction, and ‘third of the storage’ are inconsistent with the reported results.", "claim": "At training views, ReRF encodes scenes at 33.60 dB PSNR and 0.111 LPIPS in 221.9 KB, whereas our Ours(Low) uses only 55.7 KB to achieve 36.50 dB and 0.090 LPIPS, indicating a 3 dB uplift and ~19% perceptual error reduction for a quarter of the storage.", "label": true }, { "paperid": "2411.07273v2", "paper_path": "./SciVer/papers/2411.07273v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.07273v2-Table3-1.png", "request_id": 319, "origin_statement": "Table 3 indicates that when Breakthrough boards reach a height of at least six, all configurations up to width four become first-player wins, reflecting the inability of the second player to block multiple pawn advances across the longer distance.", "perturbed_statement": "Table 3 indicates that when Breakthrough boards reach a height of at least five, all configurations up to width four become first-player wins, reflecting the inability of the second player to block multiple pawn advances across the longer distance.", "perturbed_explanation": "This is incorrect because at height 5 (rows labeled 5) for widths 2, 3, and 4, Table 3 shows P2 (second-player wins), not P1, so the first-player win threshold does not actually start at height 5.", "claim": "Table 3 indicates that when Breakthrough boards reach a height of at least six, all configurations up to width four become first-player wins, reflecting the inability of the second player to block multiple pawn advances across the longer distance.", "label": true }, { "paperid": "2411.15633v1", "paper_path": "./SciVer/papers/2411.15633v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.15633v1-Table1-1.png", "request_id": 323, "origin_statement": "By sampling 32 frames per video and leveraging CLIP ViT-L/14 with only 0.19M parameters, our method achieves an average AUC of 0.917 in cross-dataset evaluations, outperforming 87M-parameter CDFA by ~4%, demonstrating that frame-level temporal aggregation with minimal parameters suffices for robust deepfake detection.", "perturbed_statement": "By 
sampling 64 frames per video and leveraging CLIP ViT-L/14 with only 0.19B parameters, our method achieves an average AUC of 0.987 in cross-dataset evaluations, outperforming 133M-parameter LSDA by ~12%, demonstrating that extensive frame sampling with minimal parameters suffices for robust deepfake detection.", "perturbed_explanation": "The perturbed statement contradicts the context: the model uses 0.19M (not 0.19B) trainable parameters and samples 32 frames per video (not 64). Its reported average cross-dataset AUC is 0.917 (not 0.987), and LSDA is a 133M-parameter model with an average AUC of 0.794, not the claimed values.", "claim": "By sampling 32 frames per video and leveraging CLIP ViT-L/14 with only 0.19M parameters, our method achieves an average AUC of 0.917 in cross-dataset evaluations, outperforming 87M-parameter CDFA by ~4%, demonstrating that frame-level temporal aggregation with minimal parameters suffices for robust deepfake detection.", "label": true }, { "paperid": "2410.03705v2", "paper_path": "./SciVer/papers/2410.03705v2.json", "claim_type": "analytical", "type": "chart", "item": "1(b)", "section": [ "5" ], "image_path": "./SciVer/images/2410.03705v2_figure_1(b).png", "request_id": 324, "origin_statement": "On the EEG Eye State dataset, ensemble-based GBDT models like XGBoost and LightGBM reach near 98% ROC AUC with under 5 seconds of training by capturing non-linear feature interactions, while Random Forest matches this accuracy in ~3 seconds through bagging that reduces overfitting in continuous EEG features.", "perturbed_statement": "On the EEG Eye State dataset, CatBoost achieves over 99% ROC AUC in under 1 second of training by leveraging ordered boosting to capture high-frequency EEG feature correlations, while Random Forest requires around 30 seconds to reach the same accuracy due to ensemble variance reduction overhead.", "perturbed_explanation": "The perturbed statement wrongly claims that CatBoost achieves over 99% ROC AUC in under 1 second: Figure 1(b) shows CatBoost peaks at ~97% ROC AUC and takes ~30 seconds to train. 
It also misstates Random Forest’s training time: Random Forest actually requires ~3 seconds, not 30 seconds, to reach near 98% AUC.", "claim": "On the EEG Eye State dataset, ensemble-based GBDT models like XGBoost and LightGBM reach near 98% ROC AUC with under 5 seconds of training by capturing non-linear feature interactions, while Random Forest matches this accuracy in ~3 seconds through bagging that reduces overfitting in continuous EEG features.", "label": true }, { "paperid": "2409.00511v1", "paper_path": "./SciVer/papers/2409.00511v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.00511v1-Table2-1.png", "request_id": 325, "origin_statement": "The RevCD diffusion model’s seen accuracy on AWA2 (82.4%) exceeds f-CLSWGAN’s 68.9% by 13.5%, reflecting its ability to capture low-dimensional global attribute distributions more robustly and mitigate mode collapse during iterative denoising.", "perturbed_statement": "The RevCD diffusion model achieves a 22.5% higher seen accuracy on AWA2 (82.4%) than GAN-based f-CLSWGAN (68.9%), and its unseen accuracy on CUB (40.2%) surpasses CADA-VAE’s 51.6%, illustrating superior density estimation even in high-dimensional semantic spaces.", "perturbed_explanation": "The perturbed statement is incorrect in two ways: (1) the improvement on AWA2 is actually 13.5% (82.4%−68.9%), not 22.5%; (2) RevCD’s unseen accuracy on CUB (40.2%) is lower than CADA-VAE’s 51.6%, so it does not surpass CADA-VAE in that metric.", "claim": "The RevCD diffusion model’s seen accuracy on AWA2 (82.4%) exceeds f-CLSWGAN’s 68.9% by 13.5%, reflecting its ability to capture low-dimensional global attribute distributions more robustly and mitigate mode collapse during iterative denoising.", "label": true }, { "paperid": "2409.01466v1", "paper_path": "./SciVer/papers/2409.01466v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.01466v1_figure_3.png", "request_id": 326, "origin_statement": "Applying chain-of-thought prompting combined with a GPT-4-turbo judge model improved zero-shot politics F1 by ~0.10, demonstrating that explicit intermediate reasoning and adjudication can systematically refine decision boundaries in complex, imbalanced multi-class text classification.", "perturbed_statement": "Applying chain-of-thought prompting combined with a BERT-base judge model improved zero-shot entertainment F1 by ~0.20, demonstrating that explicit intermediate reasoning and adjudication can systematically refine decision boundaries in complex, imbalanced multi-class text classification.", "perturbed_explanation": "The perturbed statement is wrong because the judge model used in the study was GPT-4-turbo, not BERT-base. 
It was the politics category F1 that improved by approximately 0.10, not the entertainment category by ~0.20, as shown in Figure 3 and described in the context.", "claim": "Applying chain-of-thought prompting combined with a GPT-4-turbo judge model improved zero-shot politics F1 by ~0.10, demonstrating that explicit intermediate reasoning and adjudication can systematically refine decision boundaries in complex, imbalanced multi-class text classification.", "label": true }, { "paperid": "2410.05935v1", "paper_path": "./SciVer/papers/2410.05935v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2410.05935v1-Table3-1.png", "request_id": 327, "origin_statement": "By optimizing variance at each feature channel, the Channel-wise method achieved consistent gains on both seen and unseen classes across all thresholds, indicating that per-channel variance adaptation effectively diversifies query features to model pose and expression variations without over-parameterization.", "perturbed_statement": "By optimizing variance at each spatial position, the Position-wise method achieved consistent gains on seen classes across all thresholds, indicating that per-position variance adaptation effectively diversifies query features to model pose and expression variations.", "perturbed_explanation": "The perturbation is incorrect because Table 3 shows the Position-wise method actually decreases performance on seen classes at all thresholds (0.184 vs. 0.183 at thr=0; 0.193 vs. 0.201 at thr=80; 0.235 vs. 0.259 at thr=160; 0.274 vs. 0.319 at thr=320), so it does not yield gains on seen classes.", "claim": "By optimizing variance at each feature channel, the Channel-wise method achieved consistent gains on both seen and unseen classes across all thresholds, indicating that per-channel variance adaptation effectively diversifies query features to model pose and expression variations without over-parameterization.", "label": true }, { "paperid": "2410.20483v2", "paper_path": "./SciVer/papers/2410.20483v2.json", "claim_type": "analytical", "type": "chart", "item": "5(a)", "section": [ "6.1", "6.2" ], "image_path": "./SciVer/images/2410.20483v2_figure_5(a).png", "request_id": 328, "origin_statement": "Anchoring explanations at cluster centroids, SEV© reduces ℓ∞ distances by an average of 0.4 and increases log-likelihood by 0.5, demonstrating that references in high-density regions enhance both closeness and credibility.", "perturbed_statement": "Anchoring explanations at cluster centroids, SEV© increases ℓ∞ distances by an average of 0.4 and decreases log-likelihood by 0.5, demonstrating that references in high-density regions worsen both closeness and credibility.", "perturbed_explanation": "This statement is incorrect because the context (Figure 5) shows that SEV© actually reduces ℓ∞ distances (improving closeness) and increases log-likelihood (improving credibility).
Claiming that ℓ∞ increases by 0.4 and log-likelihood decreases by 0.5 directly contradicts the observed metrics.", "claim": "Anchoring explanations at cluster centroids, SEV© reduces ℓ∞ distances by an average of 0.4 and increases log-likelihood by 0.5, demonstrating that references in high-density regions enhance both closeness and credibility.", "label": true }, { "paperid": "2409.16657v1", "paper_path": "./SciVer/papers/2409.16657v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.16657v1_figure_2.png", "request_id": 330, "origin_statement": "Hybrid HSE06 band structures show CaSnS3 and SrSnS3 have direct band gaps at Γ, facilitating strong optical transitions, whereas Ba-based perovskites exhibit indirect gaps between U and X, reducing photon absorption efficiency. Additionally, enhanced Se–Sn p–d hybridization in ASnSe3 lowers the band gap relative to ASnS3, improving visible light harvesting.", "perturbed_statement": "Hybrid HSE06 band structures show BaSnS3 has a direct band gap at Γ, facilitating strong optical transitions, whereas Ca- and Sr-based perovskites exhibit indirect gaps between U and X, reducing photon absorption efficiency. Additionally, enhanced S–Sn p–d hybridization in ASnS3 lowers the band gap relative to ASnSe3, improving light harvesting.", "perturbed_explanation": "The perturbation is incorrect because the context and Figure 2 report that BaSnS3 has an indirect band gap (valence maximum at S and conduction minimum between Γ and X), not a direct gap at Γ. Also, the text specifies Se–Sn hybridization is stronger than S–Sn, so ASnSe3 has a lower band gap than ASnS3, not the reverse.", "claim": "Hybrid HSE06 band structures show CaSnS3 and SrSnS3 have direct band gaps at Γ, facilitating strong optical transitions, whereas Ba-based perovskites exhibit indirect gaps between U and X, reducing photon absorption efficiency.
Additionally, enhanced Se–Sn p–d hybridization in ASnSe3 lowers the band gap relative to ASnS3, improving visible light harvesting.", "label": true }, { "paperid": "2409.07162v2", "paper_path": "./SciVer/papers/2409.07162v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "5.1.2" ], "image_path": "./SciVer/images/2409.07162v2_figure_2.png", "request_id": 332, "origin_statement": "The L-prompt’s inclusion of explicit sentiment instructions likely calibrates model confidence scores, mitigating class imbalance seen in partial matches and boosting neutral F1 by up to 19% for Llama-2-70B due to refined context comprehension.", "perturbed_statement": "The S-prompt’s simplified phrasing inadvertently boosted neutral F1 by up to 19% for LLama-7B.", "perturbed_explanation": "This is incorrect because Figure 2 shows that neutral F1 improvements come from the L-prompt, not the S-prompt, and LLama-7B is the only model that did *not* improve neutral performance.", "claim": "The L-prompt’s inclusion of explicit sentiment instructions likely calibrates model confidence scores, mitigating class imbalance seen in partial matches and boosting neutral F1 by up to 19% for Llama-2-70B due to refined context comprehension.", "label": true }, { "paperid": "2409.11599v1", "paper_path": "./SciVer/papers/2409.11599v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.11599v1-Table2-1.png", "request_id": 341, "origin_statement": "Significant lower gaze duration variance on the electronic micrometer (t = 3.506; p = 0.0025) indicates experts' streamlined perceptual processing, reducing cognitive load. Non-significant differences at the height gauge (t = 1.633; p = 0.120) suggest that dual-instrument complexity neutralizes expertise advantages due to increased working memory demands.", "perturbed_statement": "Significant lower gaze duration variance on the electronic micrometer (t = 3.506; p = 0.05) indicates experts' streamlined perceptual processing, reducing cognitive load. Non-significant differences at the height gauge (t = 1.633; p = 0.120) suggest that dual-instrument complexity neutralizes expertise advantages due to increased working memory demands.", "perturbed_explanation": "The p-value for the electronic micrometer is incorrectly reported as 0.05 instead of the actual 0.0025 given in Table 2, making the statistical detail verifiably inaccurate.", "claim": "Significant lower gaze duration variance on the electronic micrometer (t = 3.506; p = 0.0025) indicates experts' streamlined perceptual processing, reducing cognitive load. 
Non-significant differences at the height gauge (t = 1.633; p = 0.120) suggest that dual-instrument complexity neutralizes expertise advantages due to increased working memory demands.", "label": true }, { "paperid": "2411.03500v1", "paper_path": "./SciVer/papers/2411.03500v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.03500v1-Table1-1.png", "request_id": 343, "origin_statement": "The ILP formulation models join selection as an NP-hard knapsack problem, using constraint R_{c1,c2} ≤ L_{c1} to prevent orphan right-side columns and R_{c1,c2}+R_{c2,c1}<2 to halve symmetric join cases, thus pruning the search space for efficient branch-and-bound solving under token budgets.", "perturbed_statement": "The ILP formulation models join selection as a polynomial-time solvable knapsack variant, using constraint R_{c1,c2} ≥ L_{c1} to prevent orphan right-side columns and R_{c1,c2}+R_{c2,c1}>2 to eliminate symmetric join cases efficiently under token budgets.", "perturbed_explanation": "The perturbation is incorrect because the original constraint is R_{c1,c2} ≤ L_{c1}, not ≥. Additionally, the symmetry constraint is R_{c1,c2}+R_{c2,c1}<2, not >2. Lastly, the problem remains NP-hard rather than polynomial-time solvable.", "claim": "The ILP formulation models join selection as an NP-hard knapsack problem, using constraint R_{c1,c2} ≤ L_{c1} to prevent orphan right-side columns and R_{c1,c2}+R_{c2,c1}<2 to halve symmetric join cases, thus pruning the search space for efficient branch-and-bound solving under token budgets.", "label": true }, { "paperid": "2409.02399v1", "paper_path": "./SciVer/papers/2409.02399v1.json", "claim_type": "analytical", "type": "chart", "item": "1(b)", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.02399v1_figure_1(b).png", "request_id": 344, "origin_statement": "In the 5-dimensional linear Gaussian model, TPPF(RE) halves the variance of the log Z estimator compared to BPF by leveraging recursive Gaussian twisting functions that approximate the optimal importance weights for linear state-space transitions.", "perturbed_statement": "In the 20-dimensional linear Gaussian model, TPPF(RE) halves the variance of the log Z estimator compared to BPF by leveraging recursive Gaussian twisting functions that approximate the optimal importance weights for linear state-space transitions.", "perturbed_explanation": "The perturbed statement incorrectly claims this variance reduction at d=20. 
According to the context, TPPF(RE) suffers from numerical instability and the curse of dimensionality in high dimensions (e.g., d=20) and thus does not outperform BPF or halve its estimator variance.", "claim": "In the 5-dimensional linear Gaussian model, TPPF(RE) halves the variance of the log Z estimator compared to BPF by leveraging recursive Gaussian twisting functions that approximate the optimal importance weights for linear state-space transitions.", "label": true }, { "paperid": "2410.03577v1", "paper_path": "./SciVer/papers/2410.03577v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.03577v1-Table3-1.png", "request_id": 349, "origin_statement": "By leveraging uncertainty-based retracing, MemVR notably improves object-level hallucination metrics, boosting the Existence score by 14.3 and Count by 30.3, indicating that selective reprocessing of uncertain visual features effectively mitigates false object addition and omission.", "perturbed_statement": "By leveraging uncertainty-based retracing, MemVR notably improves object-level hallucination metrics, boosting the Existence score by 15.3 and Count by 20.3, indicating that selective reprocessing of uncertain visual features effectively mitigates false object addition and omission.", "perturbed_explanation": "The perturbation is incorrect because Table 3 reports an Existence improvement of +14.3 and a Count improvement of +30.3 for MemVR, not +15.3 or +20.3.", "claim": "By leveraging uncertainty-based retracing, MemVR notably improves object-level hallucination metrics, boosting the Existence score by 14.3 and Count by 30.3, indicating that selective reprocessing of uncertain visual features effectively mitigates false object addition and omission.", "label": true }, { "paperid": "2410.13693v1", "paper_path": "./SciVer/papers/2410.13693v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.13693v1_figure_3.png", "request_id": 350, "origin_statement": "The Bumps heat map displays pronounced localized peaks requiring multiscale decomposition to resolve, in contrast to the smooth Doppler pattern whose gradual radial gradient suggests low-pass filtering suffices; thus, adaptive wavelet denoising with scale-dependent thresholds optimally balances bias-variance trade-off across these functions.", "perturbed_statement": "The Bumps heat map displays pronounced localized peaks requiring a single global threshold in wavelet denoising to resolve, in contrast to the smooth Doppler pattern whose gradual radial gradient suggests low-pass filtering suffices; thus, uniform threshold selection optimally balances bias-variance trade-off across these functions.", "perturbed_explanation": "This is incorrect because the Bumps function contains spikes at multiple scales and cannot be effectively captured with a single global threshold. 
It requires adaptive, scale-dependent thresholds—in a wavelet framework—to avoid overshrinking fine features or undershrinking coarse noise, contrary to the uniform-threshold claim.", "claim": "The Bumps heat map displays pronounced localized peaks requiring multiscale decomposition to resolve, in contrast to the smooth Doppler pattern whose gradual radial gradient suggests low-pass filtering suffices; thus, adaptive wavelet denoising with scale-dependent thresholds optimally balances bias-variance trade-off across these functions.", "label": true }, { "paperid": "2411.14533v1", "paper_path": "./SciVer/papers/2411.14533v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.14533v1-Table3-1.png", "request_id": 351, "origin_statement": "BRKGA+R+LS achieves a 2.38% average improvement in coloring bipartite graphs versus 0.23% for geometric instances, reflecting that bipartiteness reduces constraint conflicts and enhances local search efficacy through larger independent sets.", "perturbed_statement": "BRKGA+R+LS achieves a 2.38% average improvement in coloring bipartite graphs versus 1.23% for geometric instances, reflecting that bipartiteness reduces constraint conflicts and enhances local search efficacy through larger independent sets.", "perturbed_explanation": "The perturbed statement incorrectly reports the geometric class improvement as 1.23%. According to Table 3 and the context, the actual average improvement for geometric instances is 0.23%, not 1.23%.", "claim": "BRKGA+R+LS achieves a 2.38% average improvement in coloring bipartite graphs versus 0.23% for geometric instances, reflecting that bipartiteness reduces constraint conflicts and enhances local search efficacy through larger independent sets.", "label": true }, { "paperid": "2409.12887v2", "paper_path": "./SciVer/papers/2409.12887v2.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.12887v2_figure_7.png", "request_id": 354, "origin_statement": "Applying the Gaussian-decayed function shifts predictions for STS-B dev-set pairs with gold similarity ≥4 toward higher values, reducing false-negative overlap. This aligns with contrastive learning principles, as down-weighting hard negatives preserves embedding uniformity and enhances model performance.", "perturbed_statement": "Applying the Gaussian-decayed function shifts predictions for STS-B dev-set pairs with gold similarity ≤3 toward higher values, increasing false-negative overlap. This contradicts contrastive learning principles, as down-weighting hard negatives distorts embedding uniformity and degrades model performance.", "perturbed_explanation": "The perturbation is incorrect because Figure 7 shows the shift occurs for pairs with gold labels ≥4 (not ≤3), and the Gaussian-decayed function actually reduces false-negative overlap (rather than increasing it). Furthermore, the down-weighting of hard negatives improves, not degrades, embedding uniformity and overall performance.", "claim": "Applying the Gaussian-decayed function shifts predictions for STS-B dev-set pairs with gold similarity ≥4 toward higher values, reducing false-negative overlap. 
This aligns with contrastive learning principles, as down-weighting hard negatives preserves embedding uniformity and enhances model performance.", "label": true }, { "paperid": "2409.09549v1", "paper_path": "./SciVer/papers/2409.09549v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.09549v1-Table1-1.png", "request_id": 355, "origin_statement": "Integrating the Empatica E4’s high-frequency blood volume pulse (64 Hz) with its 4 Hz galvanic skin response enables concurrent cardiovascular and sympathetic arousal monitoring, facilitating precise heart rate variability and stress analysis in diabetic individuals within ambulatory settings.", "perturbed_statement": "Integrating the Empatica E4’s 64 MHz blood volume pulse sampling with its 8 Hz galvanic skin response enables concurrent cardiovascular and sympathetic arousal monitoring, facilitating precise heart rate variability and stress analysis in diabetic individuals within ambulatory settings.", "perturbed_explanation": "The perturbed statement misreports the sampling rates: Table 1 specifies the blood volume pulse is sampled at 64 Hz (not 64 MHz) and the galvanic skin response at 4 Hz (not 8 Hz), so both details contradict the context.", "claim": "Integrating the Empatica E4’s high-frequency blood volume pulse (64 Hz) with its 4 Hz galvanic skin response enables concurrent cardiovascular and sympathetic arousal monitoring, facilitating precise heart rate variability and stress analysis in diabetic individuals within ambulatory settings.", "label": true }, { "paperid": "2409.05314v2", "paper_path": "./SciVer/papers/2409.05314v2.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "8.3" ], "image_path": "./SciVer/images/2409.05314v2_figure_8.png", "request_id": 356, "origin_statement": "During the first 0.1 epoch, Gemma-2B-Tele’s loss plummets by approximately 50% (1.8 to 0.9), whereas Tinyllama-1.1B-Tele only achieves a ~15% reduction (0.85 to 0.72). This indicates superior gradient signal strength in Gemma’s architecture under telecom instruction fine-tuning.", "perturbed_statement": "During the first 0.1 epoch, Gemma-2B-Tele’s loss plummets by approximately 70% (1.8 to 0.5), whereas Tinyllama-1.1B-Tele only achieves a ~30% reduction (0.85 to 0.6). This indicates superior gradient signal strength in Gemma’s architecture under telecom instruction fine-tuning.", "perturbed_explanation": "The perturbed statement misrepresents the actual training curves: at 0.1 epoch, Gemma-2B-Tele’s loss is about 1.0 (≈44% reduction from 1.8), not 0.5 (70% reduction). Likewise, Tinyllama-1.1B-Tele’s loss is ≈0.72 (≈15% reduction), not 0.6 (30%).", "claim": "During the first 0.1 epoch, Gemma-2B-Tele’s loss plummets by approximately 50% (1.8 to 0.9), whereas Tinyllama-1.1B-Tele only achieves a ~15% reduction (0.85 to 0.72). 
This indicates superior gradient signal strength in Gemma’s architecture under telecom instruction fine-tuning.", "label": true }, { "paperid": "2409.14032v1", "paper_path": "./SciVer/papers/2409.14032v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2409.14032v1-Table1-1.png", "request_id": 375, "origin_statement": "The SCAD estimator's non-convex penalty yields lower MSD than OLS at n=550 by effectively shrinking irrelevant coefficients, but as n increases (10^5, 10^6), variance reduction becomes negligible and both estimators converge in accuracy, illustrating diminishing marginal gain from penalization in large samples.", "perturbed_statement": "At n=10^5, the SCAD estimator exhibits a 10% higher MSD than OLS (approximately 3.2×10⁻⁵ vs 2.9×10⁻⁵), reflecting increased bias from over-penalization in moderate samples, despite being computationally twice as fast.", "perturbed_explanation": "The statement erroneously claims SCAD’s MSD at n=10⁵ is higher and around 3.2×10⁻⁵; the table shows SCAD’s MSD is actually 2.833×10⁻⁵, slightly lower than OLS’s 2.884×10⁻⁵. It also incorrectly asserts SCAD is twice as fast, whereas SCAD’s CPU time (253.384 s) is much slower than OLS’s (20.820 s).", "claim": "The SCAD estimator's non-convex penalty yields lower MSD than OLS at n=550 by effectively shrinking irrelevant coefficients, but as n increases (10^5, 10^6), variance reduction becomes negligible and both estimators converge in accuracy, illustrating diminishing marginal gain from penalization in large samples.", "label": true }, { "paperid": "2409.05878v2", "paper_path": "./SciVer/papers/2409.05878v2.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.05878v2_figure_1.png", "request_id": 390, "origin_statement": "By leveraging edge-specific parameterization from the Kolmogorov–Arnold representation theorem, CF-KAN attains 0.27 Recall@20 in under one minute on the Anime dataset, reflecting both superior nonlinear modeling and robust mitigation of catastrophic forgetting compared to MLP-based autoencoders.", "perturbed_statement": "By leveraging node-specific parameterization from the Kolmogorov–Arnold representation theorem, CF-KAN attains 0.30 Recall@20 in under five minutes on the Anime dataset, reflecting both superior nonlinear modeling and robust catastrophic forgetting mitigation compared to MLP-based autoencoders.", "perturbed_explanation": "The perturbation is incorrect because CF-KAN uses learnable activation on edges rather than nodes (edge-specific, not node-specific). 
It achieves approximately 0.27 Recall@20 (not 0.30) and trains in under one minute (not under five minutes) on the Anime dataset, per Figure 1.", "claim": "By leveraging edge-specific parameterization from the Kolmogorov–Arnold representation theorem, CF-KAN attains 0.27 Recall@20 in under one minute on the Anime dataset, reflecting both superior nonlinear modeling and robust mitigation of catastrophic forgetting compared to MLP-based autoencoders.", "label": true }, { "paperid": "2411.07140v2", "paper_path": "./SciVer/papers/2411.07140v2.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "3.3.4" ], "image_path": "./SciVer/images/2411.07140v2_figure_6.png", "request_id": 394, "origin_statement": "Alignment via RLHF introduces a trade-off between safety and factual accuracy, disproportionately affecting mid-scale architectures: Baichuan2-7B’s F-score drops 47%, whereas InternLM2.5-7B declines just 2%, implying parameter scale mediates susceptibility to alignment tax.", "perturbed_statement": "Alignment via RLHF introduces a trade-off between safety and factual accuracy, disproportionately affecting mid-scale architectures: Baichuan2-7B’s F-score drops 27%, whereas InternLM2.5-7B declines by 5%, implying parameter scale mediates susceptibility to alignment tax.", "perturbed_explanation": "The perturbed statement incorrectly reports Baichuan2-7B’s F-score drop as 27% instead of the actual 47%, and it misstates InternLM2.5-7B’s decline as 5% rather than the observed 2% reduction.", "claim": "Alignment via RLHF introduces a trade-off between safety and factual accuracy, disproportionately affecting mid-scale architectures: Baichuan2-7B’s F-score drops 47%, whereas InternLM2.5-7B declines just 2%, implying parameter scale mediates susceptibility to alignment tax.", "label": true }, { "paperid": "2411.00744v1", "paper_path": "./SciVer/papers/2411.00744v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.3.1" ], "image_path": "./SciVer/images/2411.00744v1-Table3-1.png", "request_id": 395, "origin_statement": "CORAG’s ROUGE-1 score increases from 0.411 to 0.427 as budget grows from 1024 to 8192 tokens, indicating that its cost-aware agent effectively prioritizes high-utility chunks. This reflects diminishing marginal returns: each budget increment yields smaller performance gains as token cost management optimally allocates resources.", "perturbed_statement": "CORAG’s ROUGE-1 score increases from 0.390 to 0.427 as budget grows from 1024 to 8192 tokens, indicating that its cost-aware agent effectively prioritizes high-utility chunks. This reflects increasing marginal returns: each budget increment yields progressively larger performance gains as token cost management optimally allocates resources.", "perturbed_explanation": "The perturbed statement is incorrect because CORAG’s actual ROUGE-1 score at a 1024-token budget is 0.411, not 0.390. Moreover, the performance gains from budget increases (0.411→0.425→0.427) demonstrate diminishing, not increasing, marginal returns.", "claim": "CORAG’s ROUGE-1 score increases from 0.411 to 0.427 as budget grows from 1024 to 8192 tokens, indicating that its cost-aware agent effectively prioritizes high-utility chunks. 
This reflects diminishing marginal returns: each budget increment yields smaller performance gains as token cost management optimally allocates resources.", "label": true }, { "paperid": "2410.22046v2", "paper_path": "./SciVer/papers/2410.22046v2.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.2.1" ], "image_path": "./SciVer/images/2410.22046v2_figure_4.png", "request_id": 396, "origin_statement": "The prominence of G (13.8%), C (11.4%), and D (10.4%) chords underscores the centrality of the I–IV–V triads in Western tonal harmony, reflecting the dataset’s bias towards common popular-song progressions and simplified user transcriptions.", "perturbed_statement": "The prominence of G (19.8%), C (15.4%), and D (18.4%) chords underscores the centrality of the I–IV–V triads in Western tonal harmony, reflecting the dataset’s bias towards common popular-song progressions and simplified user transcriptions.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 4 reports G at 13.8%, C at 11.4%, and D at 10.4%, not 19.8%, 15.4%, and 18.4% respectively, so the stated chord proportions do not match the dataset distribution.", "claim": "The prominence of G (13.8%), C (11.4%), and D (10.4%) chords underscores the centrality of the I–IV–V triads in Western tonal harmony, reflecting the dataset’s bias towards common popular-song progressions and simplified user transcriptions.", "label": true }, { "paperid": "2410.23296v1", "paper_path": "./SciVer/papers/2410.23296v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.23296v1_figure_4.png", "request_id": 398, "origin_statement": "In Figure 4, the qLSTM PDF on synthetic normally distributed data exhibits slight skewness relative to LQR's near-symmetric peak, reflecting its capacity to model deviations from Gaussianity, whereas LQR’s linear approach remains optimal under the true symmetric distribution, minimizing quantile loss.", "perturbed_statement": "In Figure 4, the qLSTM PDF on synthetic normally distributed data shows an almost perfect symmetric Gaussian peak and outperforms LQR by roughly 20% in quantile loss, highlighting qLSTM’s superior efficiency for symmetric distributions.", "perturbed_explanation": "This is incorrect because, according to the context, LQR actually outperforms qLSTM on normal distributions by 39%, and Figure 4 indicates that qLSTM’s PDF retains slight asymmetry rather than an almost perfect Gaussian symmetry.", "claim": "In Figure 4, the qLSTM PDF on synthetic normally distributed data exhibits slight skewness relative to LQR's near-symmetric peak, reflecting its capacity to model deviations from Gaussianity, whereas LQR’s linear approach remains optimal under the true symmetric distribution, minimizing quantile loss.", "label": true }, { "paperid": "2409.04290v1", "paper_path": "./SciVer/papers/2409.04290v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "4.2.1" ], "image_path": "./SciVer/images/2409.04290v1_figure_7.png", "request_id": 404, "origin_statement": "The auto_symbolic method yields a flattened hazard contribution plateau above neuron values (x1,2) of 0.5, discarding the secondary decline and rise seen in the B-spline, whereas PySR’s symbolic regression preserves both the peak at ≈0.8 and the subsequent trough, capturing the full non-monotonic curvature essential for interaction modelling in CoxKAN.", "perturbed_statement": "The auto_symbolic fit closely 
matches the original B-spline's non-monotonic curvature, including the peak at ≈0.8 and trough at ≈0.2, whereas PySR simplifies the curve into a constant plateau beyond x1,2 >0.5, failing to represent the secondary decline.", "perturbed_explanation": "This statement is incorrect because, in Figure 7, auto_symbolic (left) actually flattens the activation above x1,2 ≈0.5—losing the non-monotonic trough and rise—while PySR (right) retains both the peak (≈0.8) and trough (around Δθ≈0.4), rather than collapsing into a constant plateau.", "claim": "The auto_symbolic method yields a flattened hazard contribution plateau above neuron values (x1,2) of 0.5, discarding the secondary decline and rise seen in the B-spline, whereas PySR’s symbolic regression preserves both the peak at ≈0.8 and the subsequent trough, capturing the full non-monotonic curvature essential for interaction modelling in CoxKAN.", "label": true }, { "paperid": "2409.07019v1", "paper_path": "./SciVer/papers/2409.07019v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "2.7", "4.1" ], "image_path": "./SciVer/images/2409.07019v1_figure_2.png", "request_id": 406, "origin_statement": "The quadratic RV curvature (0.00008 m/s/day^2) measured by HARPS-N indicates a companion orbiting beyond ~3 AU if it's a Jupiter-mass body, aligning with the non-detection of stellar companions inside 0.5″ (~40 AU).", "perturbed_statement": "The quadratic RV curvature (0.08 m/s/year^2) measured by HARPS-N indicates a companion orbiting beyond ~1 AU if it's a Jupiter-mass body, aligning with the non-detection of stellar companions inside 0.05″ (~20 AU).", "perturbed_explanation": "The perturbed statement misuses the acceleration units and value: the true quadratic term is ~0.00008 m/s/day^2, not 0.08 m/s/year^2. It also misstates the companion distance threshold (~3 AU in reality, not 1 AU) and the imaging limit (0.5″, not 0.05″).", "claim": "The quadratic RV curvature (0.00008 m/s/day^2) measured by HARPS-N indicates a companion orbiting beyond ~3 AU if it's a Jupiter-mass body, aligning with the non-detection of stellar companions inside 0.5″ (~40 AU).", "label": true }, { "paperid": "2409.00163v1", "paper_path": "./SciVer/papers/2409.00163v1.json", "claim_type": "analytical", "type": "chart", "item": "3(c)", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.00163v1_figure_3(c).png", "request_id": 408, "origin_statement": "The near-linear decline in DeepHit's predicted survival functions for diverse patients suggests the model overly smooths individual hazard variability, potentially due to its discrete time-to-event parameterization, which reduces patient-specific risk differentiation and impairs calibration despite preserved ranking performance.", "perturbed_statement": "DeepHit's predicted survival curves for the five patients exhibit an exponential early decay, indicating the model substantially exaggerates initial hazard peaks and yields superior calibration through sharper separation of patient risks.", "perturbed_explanation": "This statement is incorrect because Figure 3 shows survival probabilities declining almost linearly from 1.0 to 0.0 over time—not an exponential decay. 
Moreover, the context explicitly notes that DeepHit demonstrates poorer calibration, not superior calibration, compared to CoxPH and DeepSurv.", "claim": "DeepHit's predicted survival curves for the five patients exhibit an exponential early decay, indicating the model substantially exaggerates initial hazard peaks and yields superior calibration through sharper separation of patient risks.", "label": false }, { "paperid": "2411.01739v2", "paper_path": "./SciVer/papers/2411.01739v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.4" ], "image_path": "./SciVer/images/2411.01739v2-Table3-1.png", "request_id": 409, "origin_statement": "On Split-UT-Zappos 10 tasks, CompILer achieves a harmonic mean of 57.69%, representing only a 13.1% relative drop from its 5-task HM, suggesting it maintains balanced performance despite increased tasks, thanks to its fixed-parameter design preserving both plasticity and stability.", "perturbed_statement": "On Split-UT-Zappos 10 tasks, CompILer achieves a harmonic mean of 57.69%, representing only a 5% relative drop from its 5-task HM, suggesting it maintains balanced performance despite increased tasks, thanks to its fixed-parameter design preserving both plasticity and stability.", "perturbed_explanation": "The perturbation incorrectly states a 5% relative drop. In fact, the HM decreases from 66.31% on 5 tasks to 57.69% on 10 tasks, which is an 8.62-point (≈13.0%) drop, not 5%.", "claim": "On Split-UT-Zappos 10 tasks, CompILer achieves a harmonic mean of 57.69%, representing only a 5% relative drop from its 5-task HM, suggesting it maintains balanced performance despite increased tasks, thanks to its fixed-parameter design preserving both plasticity and stability.", "label": false }, { "paperid": "2411.01537v1", "paper_path": "./SciVer/papers/2411.01537v1.json", "claim_type": "analytical", "type": "chart", "item": "5(b)", "section": [ "4.6" ], "image_path": "./SciVer/images/2411.01537v1_figure_5(b).png", "request_id": 410, "origin_statement": "In SASRec’s second layer, attention intensities peak at two intermediate lags (~11 and ~22), indicating that the model disproportionately emphasizes mid-sequence items due to its fixed positional embeddings, thus forming periodic intermediate-range dependencies while underweighting both very recent and distant interactions.", "perturbed_statement": "In SASRec’s second layer, attention intensities peak at the first and last items, indicating the model disproportionately emphasizes sequence boundaries due to its positional bias, thus forming boundary-specific dependencies while underweighting intermediate and mid-range interactions.", "perturbed_explanation": "The perturbed statement is wrong because the heatmap shows the highest attention scores at positions around 11 and 22 (mid-sequence), not at the first (position 0) or last items. 
SASRec layer 2 thus focuses on intermediate items rather than sequence boundaries as claimed.", "claim": "In SASRec’s second layer, attention intensities peak at the first and last items, indicating the model disproportionately emphasizes sequence boundaries due to its positional bias, thus forming boundary-specific dependencies while underweighting intermediate and mid-range interactions.", "label": false }, { "paperid": "2410.10672v1", "paper_path": "./SciVer/papers/2410.10672v1.json", "claim_type": "analytical", "type": "table", "item": "11", "section": [ "5.2.2" ], "image_path": "./SciVer/images/2410.10672v1-Table11-1.png", "request_id": 411, "origin_statement": "The progressive reduction in matrix nuclear norm across increasing GPT model sizes indicates that larger models develop lower-rank state matrices, improving data compression through capturing principal singular value structures more efficiently, consistent across Dolly-15K, Wikipedia, OpenWebText2, and HH-RLHF datasets.", "perturbed_statement": "The progressive increase in matrix nuclear norm across increasing GPT sizes implies that larger models develop higher-rank internal representations, reducing data compression efficiency by spreading information across more singular components in Dolly-15K, Wikipedia, OpenWebText2, and HH-RLHF.", "perturbed_explanation": "The perturbed statement is incorrect because Table 13 shows the matrix nuclear norm decreases rather than increases as model size grows. For example, on Dolly-15K the norm falls from 0.6207 at 111M to 0.4859 at 13B, and similar decreases occur across all datasets, contradicting the claim of an increase and higher-rank representations.", "claim": "The progressive increase in matrix nuclear norm across increasing GPT sizes implies that larger models develop higher-rank internal representations, reducing data compression efficiency by spreading information across more singular components in Dolly-15K, Wikipedia, OpenWebText2, and HH-RLHF.", "label": false }, { "paperid": "2410.11716v1", "paper_path": "./SciVer/papers/2410.11716v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.11716v1_figure_3.png", "request_id": 414, "origin_statement": "Randomization tests using residual-based methods or the MCP-Mod statistic with penalized MLE produce nearly uniform p-value distributions under the null (ensuring exact type-I error control), while the population-based test’s histogram is skewed towards higher p-values, indicating conservative bias.", "perturbed_statement": "In Figure 3, the population-based test exhibits an excess of p-values below 0.1, implying anti-conservative type-I error due to variance underestimation; by contrast, the residual-based randomization tests show a marked right-skew with many p-values above 0.8, indicating conservative bias.", "perturbed_explanation": "The perturbation is incorrect: the population-based test histogram actually shows fewer p-values below 0.1 (not an excess), reflecting conservative type-I error, and the residual-based tests display an approximately uniform distribution rather than a pronounced right-skew with surplus high p-values.", "claim": "In Figure 3, the population-based test exhibits an excess of p-values below 0.1, implying anti-conservative type-I error due to variance underestimation; by contrast, the residual-based randomization tests show a marked right-skew with many p-values above 0.8, indicating conservative bias.", "label": false }, { "paperid": 
"2410.05522v1", "paper_path": "./SciVer/papers/2410.05522v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "3.4" ], "image_path": "./SciVer/images/2410.05522v1_figure_8.png", "request_id": 416, "origin_statement": "The 7% drop in R² from 0.939 on the coarse mesh to 0.874 on the fine mesh indicates the model’s nodal interpolation cannot capture high-frequency stress variations beyond its training resolution, reflecting a Nyquist-like sampling limitation as finer meshes introduce unresolved stress gradients.", "perturbed_statement": "The R² increases from 0.939 on the coarse mesh to 0.960 on the fine mesh (with max edge length reduced to 0.005m), demonstrating that the network’s implicit cubic interpolation outperforms the linear finite-element solution even at higher resolutions.", "perturbed_explanation": "This claim is incorrect because Figure 8 shows R² actually decreases from 0.939 on the coarse mesh to 0.874 on the fine mesh, not increases to 0.960. The fine mesh maximum edge length was 0.01 m (not 0.005 m), and the model employs linear nodal interpolation rather than cubic interpolation.", "claim": "The R² increases from 0.939 on the coarse mesh to 0.960 on the fine mesh (with max edge length reduced to 0.005m), demonstrating that the network’s implicit cubic interpolation outperforms the linear finite-element solution even at higher resolutions.", "label": false }, { "paperid": "2409.17201v2", "paper_path": "./SciVer/papers/2409.17201v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.1" ], "image_path": "./SciVer/images/2409.17201v2-Table3-1.png", "request_id": 417, "origin_statement": "Across all models, immersion coding increases parameter counts by fewer than 0.15%—e.g., CNN’s ñ is just 129 parameters above n—thus adding negligible overhead while doubling total dimensions to guarantee invertibility and enable noise removal for high-level DP preservation without degrading capacity.", "perturbed_statement": "Across all models, immersion coding increases parameter counts by around 5%—e.g., CNN’s ñ is 60,000 parameters above n—thus adding moderate overhead while doubling total dimensions to guarantee invertibility and enable noise removal for DP.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 shows CNN’s ñ is 1,200,011 versus n=1,199,882, an increase of only 129 parameters (≈0.01%), not 60,000 parameters (≈5%). 
The claimed ~5% overhead contradicts the actual <0.015% expansion.", "claim": "Across all models, immersion coding increases parameter counts by around 5%—e.g., CNN’s ñ is 60,000 parameters above n—thus adding moderate overhead while doubling total dimensions to guarantee invertibility and enable noise removal for DP.", "label": false }, { "paperid": "2410.14508v1", "paper_path": "./SciVer/papers/2410.14508v1.json", "claim_type": "analytical", "type": "chart", "item": "3(c)", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.14508v1_figure_3(c).png", "request_id": 418, "origin_statement": "The majority of participants (29/40, 72.5%) reported moderate to high animation familiarity (scores ≥3), indicating sufficient domain expertise to yield reliable subjective ratings of motion realism.", "perturbed_statement": "The vast majority of participants (35/40, 87.5%) scored their animation familiarity as ≥3, ensuring expert-driven evaluations of motion realism.", "perturbed_explanation": "The perturbed statement is incorrect because only 29 participants rated their familiarity as ≥3 (scores 3, 4, or 5), not 35. Therefore, the correct percentage is 72.5%, not 87.5%, contradicting the context data.", "claim": "The vast majority of participants (35/40, 87.5%) scored their animation familiarity as ≥3, ensuring expert-driven evaluations of motion realism.", "label": false }, { "paperid": "2409.02137v1", "paper_path": "./SciVer/papers/2409.02137v1.json", "claim_type": "analytical", "type": "table", "item": "7", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.02137v1-Table7-1.png", "request_id": 421, "origin_statement": "Biased exploration's predicate weighting effectively increases the probability of sampling rare protocol transition states (e.g. log restoration and follower election), thus exposing all seven crash bugs and three safety violations by systematically exploring corner-case state configurations more frequently than pure random policies.", "perturbed_statement": "Biased exploration's predicate weighting effectively increases the probability of sampling rare protocol transition states (e.g. log restoration and follower election), thus exposing only five crash bugs and one safety violation by exploring corner-case state configurations less frequently than pure random policies.", "perturbed_explanation": "The perturbation incorrectly states that biased exploration exposes only five crash bugs and one safety violation and explores less frequently than pure random policies. In fact, Table 7 shows there are seven crash bugs and three safety violations, and the context specifies biased exploration uncovers all bugs more frequently.", "claim": "Biased exploration's predicate weighting effectively increases the probability of sampling rare protocol transition states (e.g. 
log restoration and follower election), thus exposing only five crash bugs and one safety violation by exploring corner-case state configurations less frequently than pure random policies.", "label": false }, { "paperid": "2410.02398v1", "paper_path": "./SciVer/papers/2410.02398v1.json", "claim_type": "analytical", "type": "chart", "item": "9", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.02398v1_figure_9.png", "request_id": 422, "origin_statement": "The heatmap in Figure 9 shows two 36×36 blocks of FETs segregated by S₃×S₃ parity, confirming that parity conservation forbids adjacency between even and odd automorphisms under two-component disorder models.", "perturbed_statement": "Figure 9’s heatmap displays three 24×24 blocks of FETs grouped by even, odd, and neutral parity under the S₃×S₃ subgroup, implying two-component disorder models permit connections through a neutral parity sector between even and odd automorphisms.", "perturbed_explanation": "This statement is incorrect because Figure 9 only shows two connected components (each 36×36), not three 24×24 blocks. There is no neutral parity sector in the S₃×S₃ subgroup: only even and odd parity classes exist, and they remain disconnected.", "claim": "Figure 9’s heatmap displays three 24×24 blocks of FETs grouped by even, odd, and neutral parity under the S₃×S₃ subgroup, implying two-component disorder models permit connections through a neutral parity sector between even and odd automorphisms.", "label": false }, { "paperid": "2409.17608v1", "paper_path": "./SciVer/papers/2409.17608v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.17608v1-Table3-1.png", "request_id": 427, "origin_statement": "By exploiting SHT’s extensive anomalous variety and motion-driven adaptation, the flow-based method attains state-of-the-art cross-domain AUCs: 97.53% on Ped2 and 87.47% on Avenue, demonstrating robust generalization through elimination of background interference and focus on dynamic features.", "perturbed_statement": "By leveraging SHT’s anomalous diversity and motion-focused adaptation, the flow-based method achieves state-of-the-art cross-domain AUCs: 98.53% on Ped2 and 87.47% on Avenue, indicating exceptional generalization by isolating dynamic motion cues from background noise.", "perturbed_explanation": "The perturbed statement is incorrect because the Ped2 AUC is misstated as 98.53%; according to Table 3, the actual flow-based method’s AUC on Ped2 is 97.53%.", "claim": "By leveraging SHT’s anomalous diversity and motion-focused adaptation, the flow-based method achieves state-of-the-art cross-domain AUCs: 98.53% on Ped2 and 87.47% on Avenue, indicating exceptional generalization by isolating dynamic motion cues from background noise.", "label": false }, { "paperid": "2411.14034v1", "paper_path": "./SciVer/papers/2411.14034v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.14034v1_figure_6.png", "request_id": 430, "origin_statement": "CrabNet's 90% FDR in predicting high σ demonstrates that its attention-based architecture effectively captures the non-linear dependence of log10(σ) on stoichiometric variations, enabling robust identification of materials exceeding 10^2 S/cm despite out-of-distribution compositions, unlike RF's underestimated modeling.", "perturbed_statement": "CrabNet's 90% FDR in predicting high σ demonstrates that its attention-based architecture effectively 
captures the linear dependence of log10(σ) on stoichiometric variations, enabling robust identification of materials exceeding 10^3 S/cm despite out-of-distribution compositions, unlike RF's underestimated modeling.", "perturbed_explanation": "The perturbation misstates two details: the model captures non-linear (not linear) relationships between stoichiometry and log10(σ), and the threshold for high conductivity is 10^2 S/cm (log10(σ)>2), not 10^3 S/cm (log10(σ)>3), contradicting the context.", "claim": "CrabNet's 90% FDR in predicting high σ demonstrates that its attention-based architecture effectively captures the linear dependence of log10(σ) on stoichiometric variations, enabling robust identification of materials exceeding 10^3 S/cm despite out-of-distribution compositions, unlike RF's underestimated modeling.", "label": false }, { "paperid": "2411.11706v1", "paper_path": "./SciVer/papers/2411.11706v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4" ], "image_path": "./SciVer/images/2411.11706v1-Table1-1.png", "request_id": 431, "origin_statement": "MC-LLaVA’s integration of multi-concept images annotated by both GPT-4o and humans likely improves VLM personalization by offering richer semantic diversity and reducing ambiguity, supporting more robust concept disambiguation compared to single-concept datasets.", "perturbed_statement": "MC-LLaVA’s integration of multi-concept images annotated exclusively by GPT-3.5 across 2.6K images likely improves VLM personalization by offering richer semantic diversity and reducing ambiguity, supporting more robust concept disambiguation compared to single-concept datasets.", "perturbed_explanation": "The perturbed statement is incorrect because the context specifies that MC-LLaVA’s captions are produced by GPT-4o and humans, not GPT-3.5 alone. Additionally, MC-LLaVA contains 1.6K samples, not 2.6K images.", "claim": "MC-LLaVA’s integration of multi-concept images annotated exclusively by GPT-3.5 across 2.6K images likely improves VLM personalization by offering richer semantic diversity and reducing ambiguity, supporting more robust concept disambiguation compared to single-concept datasets.", "label": false }, { "paperid": "2409.19028v1", "paper_path": "./SciVer/papers/2409.19028v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.19028v1-Table2-1.png", "request_id": 433, "origin_statement": "Although Llama2’s mean explanation score (2.78) only slightly exceeds GPT-3.5’s (2.50), the Bonferroni-corrected p=0.02 and Cliff’s δ=−0.18 indicate a statistically significant but small effect size, implying Llama2’s explanations are systematically higher yet with limited practical difference.", "perturbed_statement": "Although Llama2’s mean explanation score (2.78) only slightly exceeds GPT-3.5’s (2.50), the Bonferroni-corrected p=0.20 and Cliff’s δ=−0.18 indicate no statistical significance, implying Llama2’s explanations are not reliably better.", "perturbed_explanation": "The actual Bonferroni-corrected p-value for GPT-3.5 vs. 
Llama2 in Table 2 is 0.02 (not 0.20) and is below the 0.05 threshold, so the difference is statistically significant; misreporting it as 0.20 falsely negates the observed significance.", "claim": "Although Llama2’s mean explanation score (2.78) only slightly exceeds GPT-3.5’s (2.50), the Bonferroni-corrected p=0.20 and Cliff’s δ=−0.18 indicate no statistical significance, implying Llama2’s explanations are not reliably better.", "label": false }, { "paperid": "2411.01076v2", "paper_path": "./SciVer/papers/2411.01076v2.json", "claim_type": "analytical", "type": "chart", "item": "1(a)", "section": [ "1" ], "image_path": "./SciVer/images/2411.01076v2_figure_1(a).png", "request_id": 436, "origin_statement": "By measuring per-iteration packet sizes – where correct speculative tokens yield multi-token bursts and mis-speculations emit single tokens – a network adversary can accurately fingerprint user inputs, reconstructing private prompts with over 90% accuracy, leveraging side-channel packet-size variations inherent in streaming LLM speculative decoding.", "perturbed_statement": "By measuring per-iteration packet sizes – where correct speculative tokens yield single-token emissions and mis-speculations emit multi-token bursts – a network adversary can accurately fingerprint user inputs, reconstructing private prompts with over 90% accuracy, leveraging side-channel packet-size variations inherent in streaming LLM speculative decoding.", "perturbed_explanation": "The perturbed statement reverses the token patterns: in the context (and Figure 1b), mis-speculations produce one token per iteration and correct speculations produce multi-token bursts. The perturbation incorrectly claims that correct speculations yield single tokens and mis-speculations yield multi-token bursts.", "claim": "By measuring per-iteration packet sizes – where correct speculative tokens yield single-token emissions and mis-speculations emit multi-token bursts – a network adversary can accurately fingerprint user inputs, reconstructing private prompts with over 90% accuracy, leveraging side-channel packet-size variations inherent in streaming LLM speculative decoding.", "label": false }, { "paperid": "2410.23893v3", "paper_path": "./SciVer/papers/2410.23893v3.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.23893v3-Table2-1.png", "request_id": 437, "origin_statement": "As EOL threshold decreases from 90% to 60%, mean RMSE values rise approximately linearly from 1.19 to 3.16, at roughly 0.65 RMSE increase per 10% reduction, reflecting increased prediction error due to less available discharge capacity for model fitting under deeper aging conditions.", "perturbed_statement": "As EOL threshold decreases from 90% to 60%, mean RMSE values rise approximately linearly from 1.19 to 4.16, at roughly 1.0 RMSE increase per 10% reduction, reflecting increased prediction error due to less available discharge capacity.", "perturbed_explanation": "The perturbed statement incorrectly reports the RMSE at 60% EOL as 4.16 (actual is 3.16) and misstates the average RMSE increase per 10% drop as 1.0 (actual is ~0.65), contradicting the reported mean RMSE values and slope in Table 2.", "claim": "As EOL threshold decreases from 90% to 60%, mean RMSE values rise approximately linearly from 1.19 to 4.16, at roughly 1.0 RMSE increase per 10% reduction, reflecting increased prediction error due to less available discharge capacity.", "label": false }, { "paperid": "2410.22551v2", 
"paper_path": "./SciVer/papers/2410.22551v2.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.6" ], "image_path": "./SciVer/images/2410.22551v2-Table5-1.png", "request_id": 449, "origin_statement": "By leveraging contrast-aware diffusion, FairSkin-S images present sharper lesion border delineation and more accurate color heterogeneity, facilitating differential diagnosis. For example, in African people allergic contact dermatitis, FairSkin-S better captures vesicular morphology essential for clinical grading compared to Vanilla outputs.", "perturbed_statement": "By leveraging contrast-aware diffusion, FairSkin-S images present less pronounced lesion border delineation and reduced color heterogeneity, hindering differential diagnosis. For example, in African people allergic contact dermatitis, FairSkin-S fails to capture vesicular morphology essential for clinical grading compared to Vanilla outputs.", "perturbed_explanation": "The context in Table 5 explicitly states that FairSkin-S produces superior image generation with more detailed lesion depiction and pronounced disease representations. Thus the claim that it presents less pronounced borders and reduced color heterogeneity contradicts the demonstrated improvements over Vanilla.", "claim": "By leveraging contrast-aware diffusion, FairSkin-S images present less pronounced lesion border delineation and reduced color heterogeneity, hindering differential diagnosis. For example, in African people allergic contact dermatitis, FairSkin-S fails to capture vesicular morphology essential for clinical grading compared to Vanilla outputs.", "label": false }, { "paperid": "2410.06905v2", "paper_path": "./SciVer/papers/2410.06905v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.06905v2-Table2-1.png", "request_id": 451, "origin_statement": "The model's S68 remains at 0.4 m²/s with an average reliability Ravg of 95.6%, indicating narrow 68% confidence intervals that align with theoretical Gaussian uncertainty propagation, demonstrating well-calibrated probabilistic human trajectory predictions.", "perturbed_statement": "The model's S68 remains at 0.04 m²/s with an average reliability Ravg of 92.5%, indicating unusually narrow 68% confidence intervals that align with theoretical uniform uncertainty propagation, demonstrating well-calibrated probabilistic human trajectory predictions.", "perturbed_explanation": "The table reports S68 as 0.4 m²/s (not 0.04 m²/s) and Ravg as 95.6% (not 92.5%). 
Moreover, the model’s calibration follows Gaussian uncertainty propagation, not uniform propagation, making both the numeric and methodological claims incorrect.", "claim": "The model's S68 remains at 0.04 m²/s with an average reliability Ravg of 92.5%, indicating unusually narrow 68% confidence intervals that align with theoretical uniform uncertainty propagation, demonstrating well-calibrated probabilistic human trajectory predictions.", "label": false }, { "paperid": "2409.11088v1", "paper_path": "./SciVer/papers/2409.11088v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.11088v1_figure_3.png", "request_id": 452, "origin_statement": "Despite the monotonic decrease in HI column density with redshift, rising filament gas temperature enhances collisional excitation coefficients—steeply increasing q(T) beyond 10^5 K—driving higher Lyα surface brightness at lower redshifts than density scaling alone would predict.", "perturbed_statement": "Despite the monotonic increase in HI column density with redshift, rising filament gas temperature enhances collisional excitation coefficients—peaking q(T) around 10^4 K—driving higher Lyα surface brightness at lower redshifts than density scaling alone would predict.", "perturbed_explanation": "The perturbed statement is incorrect because the HI column density actually decreases with redshift, not increases, as shown in the first panel of Fig. 3. Additionally, q(T) sharply rises between ~10^4.5 K and 10^6.5 K and peaks near 10^6.5 K, not at 10^4 K.", "claim": "Despite the monotonic increase in HI column density with redshift, rising filament gas temperature enhances collisional excitation coefficients—peaking q(T) around 10^4 K—driving higher Lyα surface brightness at lower redshifts than density scaling alone would predict.", "label": false }, { "paperid": "2410.20731v2", "paper_path": "./SciVer/papers/2410.20731v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.20731v2-Table4-1.png", "request_id": 459, "origin_statement": "Applying the adjustment method to Pavllo et al.’s model halves its FPS from 958 to 435 while improving MPJPE by 1.2 mm, illustrating that incorporating bone-length refinement incurs acceptable computational overhead in exchange for enhanced 3D pose estimation accuracy.", "perturbed_statement": "Applying the adjustment method to Pavllo et al.’s model reduces its FPS from 958 to 200 while improving MPJPE by 2.5 mm, demonstrating that bone-length refinement achieves substantial accuracy gains at minimal computational cost.", "perturbed_explanation": "According to Table 4, the FPS for Pavllo et al. 
with adjustment is 435, not 200, and the MPJPE improvement is 46.8−45.6 = 1.2 mm, not 2.5 mm.", "claim": "Applying the adjustment method to Pavllo et al.’s model reduces its FPS from 958 to 200 while improving MPJPE by 2.5 mm, demonstrating that bone-length refinement achieves substantial accuracy gains at minimal computational cost.", "label": false }, { "paperid": "2410.22938v2", "paper_path": "./SciVer/papers/2410.22938v2.json", "claim_type": "analytical", "type": "chart", "item": "3(d)", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.22938v2_figure_3(d).png", "request_id": 462, "origin_statement": "DiffLight trained at a 25% missing rate achieves a 1.03 relative generalization score when tested at 8.33% missing, suggesting that learning from extremely sparse data enhances low-missing-rate imputation robustness through richer pattern representation.", "perturbed_statement": "DiffLight trained at a 25% missing rate achieves a 0.87 relative generalization score when tested at 8.33% missing, suggesting that learning from extremely sparse data reduces low-missing-rate imputation robustness.", "perturbed_explanation": "The heatmap in Figure 3 shows the relative generalization score for training at 25% and testing at 8.33% is actually 1.03, not 0.87. Therefore, the perturbed value contradicts the reported performance and misrepresents the model’s robustness.", "claim": "DiffLight trained at a 25% missing rate achieves a 0.87 relative generalization score when tested at 8.33% missing, suggesting that learning from extremely sparse data reduces low-missing-rate imputation robustness.", "label": false }, { "paperid": "2410.06765v1", "paper_path": "./SciVer/papers/2410.06765v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.4" ], "image_path": "./SciVer/images/2410.06765v1-Table2-1.png", "request_id": 465, "origin_statement": "The table indicates that average pooling outperforms Q-Former by 4.67% on coarse-grained perception and by 12.01% on fine-grained tasks, suggesting that minimizing positional disruption through simple aggregation preserves spatial cues, whereas Q-Former’s self-attention likely distorts fine-detail information.", "perturbed_statement": "The table indicates that average pooling outperforms Q-Former by 5.5% on coarse-grained perception and by 14.3% on fine-grained tasks, suggesting that simple aggregation avoids spectral noise amplification inherent in self-attention, which otherwise degrades channel responses.", "perturbed_explanation": "The stated improvement percentages (5.5% and 14.3%) are incorrect: the table reports 4.67% and 12.01%. 
Additionally, the context attributes Q-Former’s weaknesses to positional disruption, not to spectral noise amplification, so the mechanism described contradicts the documented rationale.", "claim": "The table indicates that average pooling outperforms Q-Former by 5.5% on coarse-grained perception and by 14.3% on fine-grained tasks, suggesting that simple aggregation avoids spectral noise amplification inherent in self-attention, which otherwise degrades channel responses.", "label": false }, { "paperid": "2411.09410v2", "paper_path": "./SciVer/papers/2411.09410v2.json", "claim_type": "analytical", "type": "chart", "item": "3(b)", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.09410v2_figure_3(b).png", "request_id": 466, "origin_statement": "On the Grocery dataset, removing the semantic prediction auxiliary task reduces NDCG@20 by approximately 2.5% relative to the complete EIMF, indicating that in homogenous, goal-driven purchase behaviors, explicit semantic cues extracted via prediction tasks more effectively reveal latent interests than cross-modal alignment alone.", "perturbed_statement": "On the Grocery dataset, omitting the modal alignment auxiliary task decreases Recall@20 by about 5% compared to the complete EIMF, suggesting that cross-modal alignment is more important than semantic prediction in revealing latent interests for uniform, goal-oriented purchases.", "perturbed_explanation": "This statement is incorrect because Figure 3(b) shows that in the Grocery dataset, removing modal alignment only lowers Recall@20 from 0.176 to 0.172 (a 2.3% drop), whereas removing semantic prediction drops it further to 0.170 (a 3.4% drop). Therefore, semantic prediction, not alignment, contributes more to performance.", "claim": "On the Grocery dataset, omitting the modal alignment auxiliary task decreases Recall@20 by about 5% compared to the complete EIMF, suggesting that cross-modal alignment is more important than semantic prediction in revealing latent interests for uniform, goal-oriented purchases.", "label": false }, { "paperid": "2409.10132v1", "paper_path": "./SciVer/papers/2409.10132v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.10132v1_figure_5.png", "request_id": 468, "origin_statement": "In multi-hop QA, StruEdit’s structured knowledge graph integration mitigates cascading hallucinations, leading to only a 38% average accuracy drop per additional reasoning hop, compared to 57% and 56% declines for ME and ICE, respectively, across both open- and closed-source LLMs.", "perturbed_statement": "In multi-hop QA, StruEdit’s structured knowledge graph integration mitigates cascading hallucinations, leading to only a 28% average accuracy drop per additional reasoning hop, compared to 47% and 46% declines for ME and ICE, respectively, across both open- and closed-source LLMs.", "perturbed_explanation": "The perturbed statement misreports the performance declines: StruEdit’s drop is incorrectly given as 28% instead of 38%, and ME and ICE declines are incorrectly stated as 47% and 46% rather than the actual 57% and 56% observed in the context.", "claim": "In multi-hop QA, StruEdit’s structured knowledge graph integration mitigates cascading hallucinations, leading to only a 28% average accuracy drop per additional reasoning hop, compared to 47% and 46% declines for ME and ICE, respectively, across both open- and closed-source LLMs.", "label": false }, { "paperid": "2410.17831v1", "paper_path": 
"./SciVer/papers/2410.17831v1.json", "claim_type": "analytical", "type": "chart", "item": "5(a)", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.17831v1_figure_5(a).png", "request_id": 472, "origin_statement": "By modeling the ground as a mixture of two Gaussian Process Distance Fields, CHOMP precisely adheres to step elevations, enabling continuous ground contact and feasible traversal of multi-level subterranean terrains, unlike PRM or single-GPDF CHOMP, which violate height constraints by averaging over discontinuities.", "perturbed_statement": "By modeling the ground as a mixture of three Gaussian Process Distance Fields, CHOMP precisely adheres to step elevations, enabling continuous ground contact and feasible traversal of multi-level subterranean terrains, unlike PRM or single-GPDF CHOMP, which violate height constraints by averaging over discontinuities.", "perturbed_explanation": "The perturbation incorrectly states that CHOMP uses three Gaussian Process Distance Fields, whereas the context and figure explicitly describe CHOMP employing only two GPDFs. No mention or evidence of a three-GPDF configuration exists in the provided description.", "claim": "By modeling the ground as a mixture of three Gaussian Process Distance Fields, CHOMP precisely adheres to step elevations, enabling continuous ground contact and feasible traversal of multi-level subterranean terrains, unlike PRM or single-GPDF CHOMP, which violate height constraints by averaging over discontinuities.", "label": false }, { "paperid": "2409.00140v1", "paper_path": "./SciVer/papers/2409.00140v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.00140v1-Table3-1.png", "request_id": 475, "origin_statement": "Pooled data shows the FQReLU-QIP-1-2 variant achieves accuracy statistically equivalent to FQReLU-QIP-1 while using only a quarter of the parameters, demonstrating quaternion inner products' efficient four-dimensional weight sharing.", "perturbed_statement": "Pooled data shows the FQReLU-QIP-1-2 variant achieves accuracy statistically equivalent to FQReLU-QIP-1 while using only half of the parameters, demonstrating quaternion inner products' efficient four-dimensional weight sharing.", "perturbed_explanation": "The perturbed statement misstates the parameter reduction: Table 3 shows FQReLU-QIP-1-2 has 20,640 parameters—which is exactly 25% (one quarter) of FQReLU-QIP-1's 79,680 parameters—not 50%.", "claim": "Pooled data shows the FQReLU-QIP-1-2 variant achieves accuracy statistically equivalent to FQReLU-QIP-1 while using only half of the parameters, demonstrating quaternion inner products' efficient four-dimensional weight sharing.", "label": false }, { "paperid": "2410.05817v3", "paper_path": "./SciVer/papers/2410.05817v3.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "6" ], "image_path": "./SciVer/images/2410.05817v3_figure_6.png", "request_id": 476, "origin_statement": "Across Phi-1.5, Llama3-8B, and Mistral-7B, PK outputs show significantly higher median subject frequencies than CK and ND (p<0.01), suggesting that repeated corpus exposure enhances embedding salience and biases exemplar selection; Pythia-1.4B breaks this pattern, with PK higher only relative to CK.", "perturbed_statement": "Wilcoxon signed-rank tests indicate that CK outputs have significantly higher median subject frequencies than PK and ND in Phi-1.5 (p<0.05), implying novel topics dominate exemplar selection; additionally, ND 
surpasses PK in Mistral-7B, contradicting frequency-driven retrieval.", "perturbed_explanation": "The perturbation is incorrect because the original analysis used a Mann-Whitney U test, not a Wilcoxon signed-rank test. In the actual data, PK outputs have higher, not lower, subject frequencies than CK in Phi-1.5, and ND does not surpass PK in Mistral-7B.", "claim": "Wilcoxon signed-rank tests indicate that CK outputs have significantly higher median subject frequencies than PK and ND in Phi-1.5 (p<0.05), implying novel topics dominate exemplar selection; additionally, ND surpasses PK in Mistral-7B, contradicting frequency-driven retrieval.", "label": false }, { "paperid": "2409.00935v1", "paper_path": "./SciVer/papers/2409.00935v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "6" ], "image_path": "./SciVer/images/2409.00935v1_figure_5.png", "request_id": 478, "origin_statement": "Over half (54.1%) of the 5.7 million instructions are common everyday queries; this imbalance likely biases models to excel at generic advice tasks while underrepresenting specialized academic and coding challenges.", "perturbed_statement": "Over half (54.1%) of the 5.7 million instructions are coding tasks; this skew likely biases models to excel at debugging and code generation while underrepresenting common and academic competencies.", "perturbed_explanation": "The perturbed claim is incorrect because coding instructions make up only 31.5% of the dataset, not 54.1% as stated. In reality, common everyday queries account for 54.1%, so the bias and category proportions are misrepresented.", "claim": "Over half (54.1%) of the 5.7 million instructions are coding tasks; this skew likely biases models to excel at debugging and code generation while underrepresenting common and academic competencies.", "label": false }, { "paperid": "2409.00119v2", "paper_path": "./SciVer/papers/2409.00119v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.00119v2-Table2-1.png", "request_id": 483, "origin_statement": "On RoBERTa-base, RoAd1 achieves a 64.4 CoLA score—2 points above full fine-tuning's 62.4—despite using only 0.07% trainable parameters. This suggests that 2D rotational adaptations can more effectively capture syntactic acceptability patterns than dense updates.", "perturbed_statement": "On RoBERTa-base, RoAd1 achieves a 65.4 CoLA score—3 points above full fine-tuning's 62.4—despite using only 0.007% trainable parameters. This suggests that 2D rotational adaptations can more effectively capture syntactic acceptability patterns than dense updates.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 reports RoAd1’s CoLA score as 64.4 (not 65.4) and its percentage of trainable parameters as 0.07% (not 0.007%).", "claim": "On RoBERTa-base, RoAd1 achieves a 65.4 CoLA score—3 points above full fine-tuning's 62.4—despite using only 0.007% trainable parameters. 
This suggests that 2D rotational adaptations can more effectively capture syntactic acceptability patterns than dense updates.", "label": false }, { "paperid": "2410.05729v1", "paper_path": "./SciVer/papers/2410.05729v1.json", "claim_type": "analytical", "type": "chart", "item": "6(b)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.05729v1_figure_6(b).png", "request_id": 484, "origin_statement": "Because the output dimension m is at least twice the rank r, increasing r from 35 to 131 boosts the LRFT module’s memory footprint quadratically—from 41.8 MB to 53.4 MB—since parameter count scales with r×m.", "perturbed_statement": "Assuming output dimension equals the rank (m=r), halving r from 259 to 131 reduces the LRFT module’s memory from 53 GB to 42 GB, indicating a strictly linear scaling in parameters with r.", "perturbed_explanation": "This is incorrect because the output dimension is defined as m ≥ 2r, not m=r, and the observed memory sizes are in megabytes (≈53 MB to 43.9 MB) rather than gigabytes. Thus, both the scaling relationship and units are misrepresented.", "claim": "Assuming output dimension equals the rank (m=r), halving r from 259 to 131 reduces the LRFT module’s memory from 53 GB to 42 GB, indicating a strictly linear scaling in parameters with r.", "label": false }, { "paperid": "2410.22187v1", "paper_path": "./SciVer/papers/2410.22187v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "6.1" ], "image_path": "./SciVer/images/2410.22187v1_figure_6.png", "request_id": 486, "origin_statement": "ResNet-50 CoOp-based AL achieves diminishing accuracy gains over six cycles, rising from 46.2% at cycle 1 to 55.2% at cycle 2 but only 64.6% at cycle 5, illustrating diminishing returns in active learning as the model uncertainty decreases with additional labeled samples.", "perturbed_statement": "ResNet-50 CoOp-based AL achieves accelerating accuracy gains over six cycles, rising from 46.2% at cycle 1 to 55.2% at cycle 2 and skyrocketing to 82.5% at cycle 5, illustrating increasing returns in active learning as later labels boost performance more.", "perturbed_explanation": "The perturbed statement incorrectly reports the cycle 5 accuracy as 82.5%—the actual average is 64.6%—and mischaracterizes the trend as accelerating (increasing returns), whereas the data show diminishing gains across the AL cycles.", "claim": "ResNet-50 CoOp-based AL achieves accelerating accuracy gains over six cycles, rising from 46.2% at cycle 1 to 55.2% at cycle 2 and skyrocketing to 82.5% at cycle 5, illustrating increasing returns in active learning as later labels boost performance more.", "label": false }, { "paperid": "2411.05009v1", "paper_path": "./SciVer/papers/2411.05009v1.json", "claim_type": "analytical", "type": "chart", "item": "2(b)", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.05009v1_figure_2(b).png", "request_id": 490, "origin_statement": "BS-SOLCTRA’s near-linear time-to-solution with particle count across GPUs, and superior throughput on H100 and MI300A relative to A100 and MI210, highlight its compute-bound scaling in line with each GPU’s peak TFLOP/s. The V100’s better small-size performance stems from under-saturating more powerful devices for the smallest workload.", "perturbed_statement": "BS-SOLCTRA’s near-linear time-to-solution with particle count across GPUs, and superior throughput on A100 and MI210 relative to H100 and MI300A, highlight its memory-bound scaling in line with each GPU’s peak TFLOP/s. 
The V100’s better small-size performance stems from more effective cache utilization.", "perturbed_explanation": "The perturbed statement incorrectly reverses the observed performance hierarchy: the context clearly states H100 and MI300A outperform A100 and MI210. It also misattributes BS-SOLCTRA’s behavior as memory-bound, whereas the text explicitly links its scaling to compute-bound performance governed by peak TFLOP/s.", "claim": "BS-SOLCTRA’s near-linear time-to-solution with particle count across GPUs, and superior throughput on A100 and MI210 relative to H100 and MI300A, highlight its memory-bound scaling in line with each GPU’s peak TFLOP/s. The V100’s better small-size performance stems from more effective cache utilization.", "label": false }, { "paperid": "2411.17502v1", "paper_path": "./SciVer/papers/2411.17502v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.17502v1-Table4-1.png", "request_id": 497, "origin_statement": "Numerical embeddings improve DL-based model accuracy by up to 3.8% overall and by 11–12% for External Shifts, underscoring how representation learning mitigates rare-event class imbalance to reduce misrouted loads in tactical load-planning.", "perturbed_statement": "Numerical embeddings improve DL-based model accuracy by up to 3.8% overall and by 8–9% for External Shifts, highlighting how representation learning reduces misrouted loads under class imbalance.", "perturbed_explanation": "The perturbation is incorrect because Table 4 and the text report accuracy improvements of 11% for MLP and 12% for ResNet on External Shifts due to numerical embeddings, not 8–9%.", "claim": "Numerical embeddings improve DL-based model accuracy by up to 3.8% overall and by 8–9% for External Shifts, highlighting how representation learning reduces misrouted loads under class imbalance.", "label": false }, { "paperid": "2409.15313v1", "paper_path": "./SciVer/papers/2409.15313v1.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "4.4.1" ], "image_path": "./SciVer/images/2409.15313v1-Table6-1.png", "request_id": 501, "origin_statement": "The combination of 8 attention heads with a 32-dimensional projection and a patch size of 6 on 72×72 images balances representational granularity and computational overhead, enabling stable gradient optimization via Adam at a 0.001 learning rate with 1e-4 weight decay across 10 epochs.", "perturbed_statement": "The combination of 8 attention heads with a 32-dimensional projection and a patch size of 6 on 72×72 images balances representational granularity and computational overhead, enabling stable gradient optimization via Adam at a 0.01 learning rate with 1e-3 weight decay across 10 epochs.", "perturbed_explanation": "The perturbation is incorrect because Table 6 specifies a learning rate of 0.001 (not 0.01) and a weight decay of 0.0001 (not 0.001). 
These discrepancies contradict the context’s listed hyperparameters.", "claim": "The combination of 8 attention heads with a 32-dimensional projection and a patch size of 6 on 72×72 images balances representational granularity and computational overhead, enabling stable gradient optimization via Adam at a 0.01 learning rate with 1e-3 weight decay across 10 epochs.", "label": false }, { "paperid": "2409.16745v1", "paper_path": "./SciVer/papers/2409.16745v1.json", "claim_type": "analytical", "type": "chart", "item": "1(c)", "section": [ "3.1", "3.2" ], "image_path": "./SciVer/images/2409.16745v1_figure_1(c).png", "request_id": 502, "origin_statement": "The similar ∼8–9% X-ray polarization across 2–8 keV, contrasted with the rising optical polarization from 3.13% in R to 4.27% in B, indicates a uniform magnetic field structure at X-ray–emitting shock fronts, while energy-dependent turbulence decay enhances ordering in the optical synchrotron region.", "perturbed_statement": "The X-ray polarization degree increases from 5% at 2–3 keV to 14% at 5–8 keV, consistent with inverse Compton emission dominating at higher energies, whereas optical bands exhibit negligible polarization (<1%) due to strong Faraday depolarization.", "perturbed_explanation": "The perturbed statement is incorrect because the measured X-ray polarization remains roughly constant at ∼8% across 2–8 keV (not rising from 5% to 14%), and optical polarization degrees are 3–4% (not <1%). Moreover, the emission is synchrotron‐dominated, not inverse Compton.", "claim": "The X-ray polarization degree increases from 5% at 2–3 keV to 14% at 5–8 keV, consistent with inverse Compton emission dominating at higher energies, whereas optical bands exhibit negligible polarization (<1%) due to strong Faraday depolarization.", "label": false }, { "paperid": "2411.03788v1", "paper_path": "./SciVer/papers/2411.03788v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.03788v1-Table3-1.png", "request_id": 505, "origin_statement": "Incorporating diffusion increases C₁ concentration by a factor of ~5 (from 3.40×10⁻⁵ to 1.73×10⁻⁴), reflecting that slower-decaying precursors diffuse farther, thus enhancing spatial homogenization and amplifying delayed source smoothing relative to advection-only predictions.", "perturbed_statement": "Incorporating diffusion increases C₁ concentration by a factor of ~2 (from 3.40×10⁻⁵ to 1.73×10⁻⁴), reflecting that slower-decaying precursors diffuse farther, thus enhancing spatial homogenization and amplifying delayed source smoothing relative to advection-only predictions.", "perturbed_explanation": "The perturbation miscalculates the concentration increase: the ratio 1.73×10⁻⁴/3.40×10⁻⁵ ≈5.09, not ~2, so claiming a twofold change contradicts the table data.", "claim": "Incorporating diffusion increases C₁ concentration by a factor of ~2 (from 3.40×10⁻⁵ to 1.73×10⁻⁴), reflecting that slower-decaying precursors diffuse farther, thus enhancing spatial homogenization and amplifying delayed source smoothing relative to advection-only predictions.", "label": false }, { "paperid": "2409.20002v2", "paper_path": "./SciVer/papers/2409.20002v2.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.20002v2_figure_3.png", "request_id": 508, "origin_statement": "ROC AUC rises monotonically from 0.64 at 350 words to 1.00 for 500- and 800-word documents, implying that longer document inputs amplify 
response-time differentials between cache hits and misses and thus enable increasingly precise timing-based inference via KV cache side channels.", "perturbed_statement": "ROC AUC rises monotonically from 0.64 at 350 words to 0.90 for 500- and 800-word documents, implying that longer document inputs reduce response-time differentials between cache hits and misses and thus degrade timing-based inference via KV cache side channels.", "perturbed_explanation": "The perturbation is wrong because Figure 3 shows AUC values of 1.00—not 0.90—for both 500- and 800-word documents. Moreover, the original data indicate larger, not smaller, latency gaps between cache hits and misses as document length increases, improving rather than degrading inference accuracy.", "claim": "ROC AUC rises monotonically from 0.64 at 350 words to 0.90 for 500- and 800-word documents, implying that longer document inputs reduce response-time differentials between cache hits and misses and thus degrade timing-based inference via KV cache side channels.", "label": false }, { "paperid": "2410.20742v1", "paper_path": "./SciVer/papers/2410.20742v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "5.5" ], "image_path": "./SciVer/images/2410.20742v1_figure_3.png", "request_id": 512, "origin_statement": "Objective a yields both the highest WER (110%) and elevated MCD (~10.7) compared to other losses, implying that emphasizing high spectral distortion in perturbation disrupts TTS reconstruction by increasing both mel‐cepstral deviation and word error rate, in line with adversarial perturbation theory targeting feature representations.", "perturbed_statement": "Objective a yields both the lowest WER (20%) and minimal MCD (~5.8) compared to other losses, suggesting that minimizing spectral distortion in perturbation enhances TTS reconstruction by reducing both mel‐cepstral deviation and word error rate, contradicting expectations from adversarial perturbation theory.", "perturbed_explanation": "The perturbed statement misattributes objective a with a 20% WER and ~5.8 MCD, but Figure 3 shows that these values correspond to objective c. 
In reality, objective a has the highest WER (~110%) and an MCD of ~10.7, not the minimal metrics claimed.", "claim": "Objective a yields both the lowest WER (20%) and minimal MCD (~5.8) compared to other losses, suggesting that minimizing spectral distortion in perturbation enhances TTS reconstruction by reducing both mel‐cepstral deviation and word error rate, contradicting expectations from adversarial perturbation theory.", "label": false }, { "paperid": "2410.02799v1", "paper_path": "./SciVer/papers/2410.02799v1.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "4.1", "4.3" ], "image_path": "./SciVer/images/2410.02799v1-Table6-1.png", "request_id": 513, "origin_statement": "The 95% conditional PIs for Asian patients spans 0.227–0.926 (width 0.699), over twice the White span of 0.398–0.689 (0.291), indicating greater heterogeneity in input–output efficiency, likely driven by variability in KDPI scores and waitlist durations, reflecting less standardized pre-transplant protocols.", "perturbed_statement": "At the 90% coverage level, the conditional PI for White patients spans 0.398–0.689 (width 0.699), which is over twice the Asian span of 0.227–0.926 (0.291), suggesting greater variability in graft survival outcomes driven by differences in donor blood type distribution.", "perturbed_explanation": "The statement is incorrect because the prediction intervals were computed at a 95% (not 90%) coverage level. Also, the interval widths are reversed: the White interval (0.398–0.689) has width 0.291, and the Asian interval (0.227–0.926) has width 0.699. Finally, donor blood type distribution was not cited; observed variability arises from KDPI and waitlist duration heterogeneity.", "claim": "At the 90% coverage level, the conditional PI for White patients spans 0.398–0.689 (width 0.699), which is over twice the Asian span of 0.227–0.926 (0.291), suggesting greater variability in graft survival outcomes driven by differences in donor blood type distribution.", "label": false }, { "paperid": "2411.06175v1", "paper_path": "./SciVer/papers/2411.06175v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "6" ], "image_path": "./SciVer/images/2411.06175v1_figure_2(a).png", "request_id": 522, "origin_statement": "For tail categories with original log10(size+1) below 1, RAG augmentation raises their log counts to around 1.5–2.0, substantially mitigating long-tail imbalance and enriching underrepresented classes.", "perturbed_statement": "For tail categories with original log10(size+1) below 1, RAG augmentation raises their log counts uniformly to around 3.0, completely eliminating long-tail imbalance.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 2 shows that RAG augmentation lifts tail category log10(size+1) values only to about 1.5–2.0, not to 3.0. 
Moreover, the increase is not uniform across all tail categories, and many very small classes remain below a log count of 3.0.", "claim": "For tail categories with original log10(size+1) below 1, RAG augmentation raises their log counts uniformly to around 3.0, completely eliminating long-tail imbalance.", "label": false }, { "paperid": "2410.00454v1", "paper_path": "./SciVer/papers/2410.00454v1.json", "claim_type": "analytical", "type": "chart", "item": "3(c)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.00454v1_figure_3(c).png", "request_id": 524, "origin_statement": "Increasing the router threshold ε from 0.1 to 0.6 sharply boosts locality accuracy from about 0.1 to nearly 1.0 by filtering out unrelated inputs, while reliability remains above 0.95. This demonstrates that stricter routing thresholds enhance edit containment without compromising overall model fidelity.", "perturbed_statement": "Increasing ε from 0.1 to 0.6 elevates locality from 0.1 to 1.0 while reliability climbs above 0.99 across all thresholds, indicating that stricter gating flawlessly constrains edits at negligible cost to model integrity.", "perturbed_explanation": "The perturbation is incorrect because reliability never consistently exceeds 0.99 in Figure 3; it stays around 0.95–0.98 for most ε values. Thus, the claim that reliability climbs above 0.99 across all thresholds contradicts the reported reliability metrics.", "claim": "Increasing ε from 0.1 to 0.6 elevates locality from 0.1 to 1.0 while reliability climbs above 0.99 across all thresholds, indicating that stricter gating flawlessly constrains edits at negligible cost to model integrity.", "label": false }, { "paperid": "2409.05653v3", "paper_path": "./SciVer/papers/2409.05653v3.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.05653v3-Table4-1.png", "request_id": 527, "origin_statement": "SpanBERT-large shows a positive co-reference bias for \"hairdresser\" when resolving nominative and accusative female pronouns (\"she\", \"her\") but a negative bias for the same occupation with the gender-neutral pronoun \"they\", suggesting that grammatical case and pronoun gender jointly influence resolution likelihood beyond mere gender stereotypes.", "perturbed_statement": "SpanBERT-large shows a positive co-reference bias for \"hairdresser\" when resolving both the female pronoun \"she\" and the gender-neutral pronoun \"they\", indicating that grammatical case has negligible effect compared to overarching gender stereotypes.", "perturbed_explanation": "This statement is incorrect because Table 4 indicates that SpanBERT-large has a negative (or no) bias for \"hairdresser\" with the pronoun \"they/them/their\". 
Thus it does not show a positive bias with the gender-neutral pronoun, and grammatical case does influence resolution biases.", "claim": "SpanBERT-large shows a positive co-reference bias for \"hairdresser\" when resolving both the female pronoun \"she\" and the gender-neutral pronoun \"they\", indicating that grammatical case has negligible effect compared to overarching gender stereotypes.", "label": false }, { "paperid": "2409.00768v1", "paper_path": "./SciVer/papers/2409.00768v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.00768v1_figure_5.png", "request_id": 534, "origin_statement": "Although training with 75% quality images maintains PSNR gains until ~300k iterations, severe JPEG artifacts at 50% quality cause PSNR to fall below its 10k-iteration value by 800k. This implies that compression-induced noise accumulation destabilizes gradient descent, outweighing the benefits of larger sample diversity.", "perturbed_statement": "Although training with 80% quality images maintains PSNR gains until ~200k iterations, moderate JPEG artifacts at 60% quality cause PSNR to fall below its 5k-iteration value by 900k. This implies that compression-induced noise accumulation reinforces gradient descent, outweighing sample diversity.", "perturbed_explanation": "The perturbed statement is incorrect because the experiments only use JPEG qualities of 100%, 90%, 75%, 50%, and 10%—80% and 60% are not tested. PSNR actually maintains gains until ~300k iterations for 75% quality (not 200k), and the drop at 50% quality falls below its 10k-iteration value by 800k (not below 5k at 900k). Moreover, compression-induced noise destabilizes rather than reinforces gradient descent.", "claim": "Although training with 80% quality images maintains PSNR gains until ~200k iterations, moderate JPEG artifacts at 60% quality cause PSNR to fall below its 5k-iteration value by 900k. This implies that compression-induced noise accumulation reinforces gradient descent, outweighing sample diversity.", "label": false }, { "paperid": "2409.10695v2", "paper_path": "./SciVer/papers/2409.10695v2.json", "claim_type": "analytical", "type": "chart", "item": "18", "section": [ "7" ], "image_path": "./SciVer/images/2409.10695v2_figure_18.png", "request_id": 538, "origin_statement": "PG Captioner outperforms GPT-4o and Claude-3.5 Sonnet in entity recognition (~90% vs. ~85% and ~82%), but underperforms in artifact detection (~15% vs. ~20% and ~25%), suggesting its semantic focus comes at the expense of addressing low-level visual noise.", "perturbed_statement": "GPT-4o outperforms PG Captioner in artifact detection (~85% vs. ~15%), indicating its superior low-level noise modeling. Furthermore, GPT-4o’s entity recognition score is only ~45%, far below PG Captioner’s ~90%, highlighting its semantic weaknesses despite strong artifact performance.", "perturbed_explanation": "This statement is wrong because GPT-4o’s artifact detection score on the chart is about ~20%, not ~85%, and its entity recognition score is around ~85%, not ~45%. Both values directly contradict the plotted ‘artifacts’ and ‘entity’ percentages in the context.", "claim": "GPT-4o outperforms PG Captioner in artifact detection (~85% vs. ~15%), indicating its superior low-level noise modeling. 
Furthermore, GPT-4o’s entity recognition score is only ~45%, far below PG Captioner’s ~90%, highlighting its semantic weaknesses despite strong artifact performance.", "label": false }, { "paperid": "2409.08980v1", "paper_path": "./SciVer/papers/2409.08980v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2409.08980v1_figure_4.png", "request_id": 544, "origin_statement": "Mean SHAP scores show Overall Risk-Benefit, Ease of Use, and Reduce Accidents dominate trust predictions, implying that user trust in AVs is heavily influenced by cognitive assessments of usability and safety, aligning with expectancy-value theory where perceived benefits surpass risks to increase acceptance.", "perturbed_statement": "Mean SHAP scores show Overall Risk-Benefit, Ease of Use, and Reduce Accidents dominate trust predictions, implying that user trust in AVs is heavily influenced by cognitive assessments of usability and safety; notably, higher 'Losing Control' ratings also positively correlated with trust.", "perturbed_explanation": "'Losing Control' actually has a negative association with trust in the SHAP analysis (its high scores indicate a contribution toward low trust), so it cannot be said to positively correlate with trust.", "claim": "Mean SHAP scores show Overall Risk-Benefit, Ease of Use, and Reduce Accidents dominate trust predictions, implying that user trust in AVs is heavily influenced by cognitive assessments of usability and safety; notably, higher 'Losing Control' ratings also positively correlated with trust.", "label": false }, { "paperid": "2410.23300v2", "paper_path": "./SciVer/papers/2410.23300v2.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "6.1.1", "6.2" ], "image_path": "./SciVer/images/2410.23300v2-Table5-1.png", "request_id": 547, "origin_statement": "AmazonBook’s extremely low density (0.06%) exacerbates rank collapse in MF embeddings by increasing underdetermined user-item relationships, so initializing with stable rank regularization reduces necessary epochs by up to 65.9%, exploiting orthogonality constraints to maintain uniform representation despite sparse interactions.", "perturbed_statement": "AmazonBook’s extremely low density (0.6%) exacerbates rank collapse in MF embeddings by increasing underdetermined user-item relationships, so initializing with stable rank regularization reduces necessary epochs by up to 75%, exploiting orthogonality constraints to maintain uniform representation despite sparse interactions.", "perturbed_explanation": "The perturbed statement is incorrect because the actual density of AmazonBook is 0.06%, not 0.6%, and the maximum epoch reduction achieved by stable rank warm-start on AmazonBook is 65.9%, not 75%.", "claim": "AmazonBook’s extremely low density (0.6%) exacerbates rank collapse in MF embeddings by increasing underdetermined user-item relationships, so initializing with stable rank regularization reduces necessary epochs by up to 75%, exploiting orthogonality constraints to maintain uniform representation despite sparse interactions.", "label": false }, { "paperid": "2409.19589v1", "paper_path": "./SciVer/papers/2409.19589v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.19589v1_figure_1.png", "request_id": 548, "origin_statement": "DiT-SR, a U-shaped isotropic diffusion transformer, achieves a CLIPIQA of 0.72 with only ~100 M parameters, outperforming other 
diffusion-based SR methods by ~20%, implying that isotropic channel allocation within a multi-scale framework efficiently recovers high-frequency details.", "perturbed_statement": "DiT-SR, a U-shaped isotropic diffusion transformer, achieves a CLIPIQA of 0.82 with only ~10 M parameters, outperforming other diffusion-based SR methods by ~30%, implying that isotropic channel allocation within a multi-scale framework efficiently recovers high-frequency details.", "perturbed_explanation": "The perturbed statement misstates two key details. First, Figure 1 shows DiT-SR achieves a CLIPIQA of about 0.72, not 0.82. Second, the parameter count for DiT-SR is around 100 M, not 10 M. Both discrepancies contradict the reported values in the context.", "claim": "DiT-SR, a U-shaped isotropic diffusion transformer, achieves a CLIPIQA of 0.82 with only ~10 M parameters, outperforming other diffusion-based SR methods by ~30%, implying that isotropic channel allocation within a multi-scale framework efficiently recovers high-frequency details.", "label": false }, { "paperid": "2411.16273v1", "paper_path": "./SciVer/papers/2411.16273v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "2.2", "4.6" ], "image_path": "./SciVer/images/2411.16273v1_figure_3.png", "request_id": 552, "origin_statement": "By fusing EMG and IMU inputs, the CNN model leverages complementary spatiotemporal and kinematic cues, achieving ~98% accuracy (∼2% higher than the LSTM) and reducing confusion between symmetric motions—owing to convolutional filters’ effective encoding of localized activation patterns.", "perturbed_statement": "By fusing EMG and IMU inputs, the CNN model leverages complementary spatiotemporal and kinematic cues, achieving ~99.8% accuracy (∼5% higher than the LSTM) and completely eliminating confusion between symmetric motions—owing to convolutional filters’ lossless encoding of activation patterns.", "perturbed_explanation": "The perturbation is incorrect because the CNN’s test accuracy was ~98%, not 99.8%, and its advantage over the LSTM was ~2%, not 5%. 
Additionally, the confusion matrix and text report residual misclassifications between symmetric motions, so confusion was not “completely eliminated,” and convolutional encoding involves pooling (not lossless).", "claim": "By fusing EMG and IMU inputs, the CNN model leverages complementary spatiotemporal and kinematic cues, achieving ~99.8% accuracy (∼5% higher than the LSTM) and completely eliminating confusion between symmetric motions—owing to convolutional filters’ lossless encoding of activation patterns.", "label": false }, { "paperid": "2409.13873v1", "paper_path": "./SciVer/papers/2409.13873v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "7.3" ], "image_path": "./SciVer/images/2409.13873v1-Table2-1.png", "request_id": 555, "origin_statement": "In Table 2 with ~20% censoring, increasing sample size from 100 to 500 for the joint model reduces MSE for β1 from 0.018 to 0.008 and improves CI coverage from 95.6% to 95.1%, reflecting asymptotic variance reduction in Bayesian hierarchical models.", "perturbed_statement": "In Table 2 with ~20% censoring, increasing sample size from 100 to 500 for the joint model reduces MSE for β1 from 0.018 to 0.010 and improves CI coverage from 95.6% to 96.3%, reflecting asymptotic variance reduction in Bayesian hierarchical models.", "perturbed_explanation": "The table reports that MSE for β1 under the joint model drops from 0.018 (n=100) to 0.008 (n=500), not to 0.010. Likewise, the 95% credible interval coverage changes from 95.6% to 95.1%, not to 96.3%.", "claim": "In Table 2 with ~20% censoring, increasing sample size from 100 to 500 for the joint model reduces MSE for β1 from 0.018 to 0.010 and improves CI coverage from 95.6% to 96.3%, reflecting asymptotic variance reduction in Bayesian hierarchical models.", "label": false }, { "paperid": "2410.21563v1", "paper_path": "./SciVer/papers/2410.21563v1.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", "section": [ "6", "7" ], "image_path": "./SciVer/images/2410.21563v1_figure_3(a).png", "request_id": 556, "origin_statement": "The p_T,H distribution shows that for p_T,H > 340 GeV, dimension-eight operators c^{(3)}_{q^4H^2} and c^{(3)}_{q^2H^2D^3} yield a 15–25% enhancement relative to the SM, reflecting constructive interference that scales as E^4/Λ^4, thus dominating over dim-6 effects constrained by LEP EWPO.", "perturbed_statement": "The p_T,H distribution shows that for p_T,H > 300 GeV, the dim-6 operator c^{(3)}_{Hq} yields a 20–30% enhancement relative to the SM due to constructive interference scaling as E^4, thus dominating over dim-8 effects constrained by unitarity.", "perturbed_explanation": "This is incorrect because the green (dim-6) curve in the lower panel never rises by 20–30% above the SM at 300 GeV—instead it stays within ±5% and even dips slightly negative. 
Additionally, dim-6 terms scale like E^2/Λ^2 (not E^4), and the large high-p_T enhancement comes from the red and blue dimension-eight operators above 340 GeV, not the dim-6 c^{(3)}_{Hq}.", "claim": "The p_T,H distribution shows that for p_T,H > 300 GeV, the dim-6 operator c^{(3)}_{Hq} yields a 20–30% enhancement relative to the SM due to constructive interference scaling as E^4, thus dominating over dim-8 effects constrained by unitarity.", "label": false }, { "paperid": "2409.09586v1", "paper_path": "./SciVer/papers/2409.09586v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4", "4.3.3", "5.3", "5.3.4" ], "image_path": "./SciVer/images/2409.09586v1-Table2-1.png", "request_id": 557, "origin_statement": "In the healthcare vignette, although human responses exhibit substantial variability (SD=0.953), LLM outputs are more consistent (SD=0.799), and a strong Pearson correlation (GPT4-T = 0.799; overall = 0.790) with p = 0.402 indicates robust alignment, reflecting effective noise-tolerant calibration in value judgments.", "perturbed_statement": "In the healthcare vignette, although human responses exhibit substantial variability (SD=0.953), LLM outputs are more consistent (SD=0.799), and a strong Pearson correlation (GPT4-T = 0.879; overall = 0.790) with p = 0.052 indicates robust alignment, reflecting effective noise-tolerant calibration in value judgments.", "perturbed_explanation": "The perturbed statement misreports the GPT4-T Pearson correlation coefficient as 0.879 instead of the actual 0.799, and incorrectly lists the p-value as 0.052 rather than the table’s p = 0.402, making the claim factually inaccurate.", "claim": "In the healthcare vignette, although human responses exhibit substantial variability (SD=0.953), LLM outputs are more consistent (SD=0.799), and a strong Pearson correlation (GPT4-T = 0.879; overall = 0.790) with p = 0.052 indicates robust alignment, reflecting effective noise-tolerant calibration in value judgments.", "label": false }, { "paperid": "2411.09903v1", "paper_path": "./SciVer/papers/2411.09903v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "2.3", "4", "6" ], "image_path": "./SciVer/images/2411.09903v1_figure_5.png", "request_id": 562, "origin_statement": "Because ALFALFA and FASHI North probe larger volumes at log(M_HI/M⊙)>9, their HIMFs converge as cosmic variance diminishes; however, at log(M_HI/M⊙)<8.5 the limited FASHI detection volume magnifies Poisson noise, producing >0.5 dex scatter among surveys.", "perturbed_statement": "Because ALFALFA and FASHI North probe larger volumes at log(M_HI/M⊙)>8, their HIMFs converge as cosmic variance diminishes; however, at log(M_HI/M⊙)<9 the limited FASHI detection volume magnifies cosmic variance, producing >0.5 dex scatter among surveys.", "perturbed_explanation": "This statement misstates the mass thresholds and the dominant uncertainty. 
In the context, surveys converge above log(M_HI/M⊙)>9 (not >8), and at the low-mass end (log(M_HI/M⊙)<8.5) Poisson noise—rather than cosmic variance—drives the large field-to-field variations.", "claim": "Because ALFALFA and FASHI North probe larger volumes at log(M_HI/M⊙)>8, their HIMFs converge as cosmic variance diminishes; however, at log(M_HI/M⊙)<9 the limited FASHI detection volume magnifies cosmic variance, producing >0.5 dex scatter among surveys.", "label": false }, { "paperid": "2410.22451v1", "paper_path": "./SciVer/papers/2410.22451v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.3", "5.1" ], "image_path": "./SciVer/images/2410.22451v1_figure_4.png", "request_id": 568, "origin_statement": "In Figure 4, interjection frames exhibit L2 distances exceeding 460 and log MDR values above zero, whereas non-interjections remain below these thresholds. This separation indicates a nearly orthogonal decision boundary leveraging both short-term similarity and long-term maximum distance ratios to achieve perfect discrimination.", "perturbed_statement": "In Figure 4, interjection frames exhibit L2 distances exceeding 300 and log MDR values above one (implying MDR >10), while non-interjections remain below these thresholds, suggesting that a decision boundary at L2 >300 and MDR >10 perfectly separates both classes.", "perturbed_explanation": "The perturbed statement misstates two critical thresholds: interjection frames in the figure clearly cluster above an L2 distance of around 460 (not 300), and the decision boundary is drawn at log MDR = 0 (MDR >1), not at log MDR = 1 (MDR >10). These inaccuracies contradict the plotted boundaries.", "claim": "In Figure 4, interjection frames exhibit L2 distances exceeding 300 and log MDR values above one (implying MDR >10), while non-interjections remain below these thresholds, suggesting that a decision boundary at L2 >300 and MDR >10 perfectly separates both classes.", "label": false }, { "paperid": "2409.09506v1", "paper_path": "./SciVer/papers/2409.09506v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.09506v1-Table1-1.png", "request_id": 571, "origin_statement": "LoRA-based PEFT yields comparable dev and test WER improvements to full fine-tuning on clean and other subsets, indicating low-rank adaptation effectively leverages pretrained OWSM representations, while Lhotse's online augmentation slightly worsens WER likely because clean LibriSpeech data reduces beneficial noise diversity.", "perturbed_statement": "LoRA-based PEFT yields a 7% relative reduction in dev-other WER compared to full fine-tuning on OWSM, and Lhotse's online augmentation further lowers test-clean WER by 0.3 by adding noise diversity.", "perturbed_explanation": "The table shows full fine-tuning (ESPnet-EZ) dev-other WER at 8.5 and LoRA at 8.4, a ~1.2% relative reduction, not 7%. 
Additionally, Lhotse augmentation increases test-clean WER from 3.5 to 3.7, so it does not lower test-clean WER by 0.3.", "claim": "LoRA-based PEFT yields a 7% relative reduction in dev-other WER compared to full fine-tuning on OWSM, and Lhotse's online augmentation further lowers test-clean WER by 0.3 by adding noise diversity.", "label": false }, { "paperid": "2411.15224v1", "paper_path": "./SciVer/papers/2411.15224v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "4.2.1", "4.2.2" ], "image_path": "./SciVer/images/2411.15224v1_figure_2(a).png", "request_id": 572, "origin_statement": "Figure 2(a)'s dominant diagonal values (0.95–0.93) and negligible off-diagonals reveal that fine-tuned projector weights can be approximated by an identity-like linear mapping, implying that adaptation mainly requires individual feature re-scaling rather than inter-feature rotation or mixing.", "perturbed_statement": "Figure 2(a)'s dominant diagonal values (0.95–0.93) and negligible off-diagonals reveal that fine-tuned projector weights can be approximated by a pure rotation matrix, implying that adaptation requires significant inter-feature mixing rather than simple feature scaling.", "perturbed_explanation": "The perturbation is incorrect because T_det is almost diagonal—off-diagonal entries are near zero (~0.02)—so the mapping reflects per-feature scaling, not a rotation. Claiming a \"pure rotation\" and dominant inter-feature mixing contradicts the observed minimal off-diagonal values in Fig. 2(a).", "claim": "Figure 2(a)'s dominant diagonal values (0.95–0.93) and negligible off-diagonals reveal that fine-tuned projector weights can be approximated by a pure rotation matrix, implying that adaptation requires significant inter-feature mixing rather than simple feature scaling.", "label": false }, { "paperid": "2411.16012v1", "paper_path": "./SciVer/papers/2411.16012v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2.5" ], "image_path": "./SciVer/images/2411.16012v1-Table1-1.png", "request_id": 575, "origin_statement": "Applying an LKJ(η=1) prior yields a uniform distribution over correlation matrices, ensuring no a priori anisotropy, while Gamma(α=2,β) priors with modes at 10 pc (positions) and 2 km·s⁻¹ (velocities) anchor scale parameters, softly constraining spatial extent and kinematic dispersion in poorly sampled clusters.", "perturbed_statement": "Setting an LKJ(η=2) prior yields a uniform distribution over correlation matrices, ensuring no a priori anisotropy, while Gamma(α=2,β) priors with modes at 5 pc (positions) and 2 km·s⁻¹ (velocities) anchor scale parameters, softly constraining spatial extent and kinematic dispersion in poorly sampled clusters.", "perturbed_explanation": "The statement is incorrect because a uniform prior over correlations requires η=1, not η=2, as per the LKJ(η) behavior. 
Additionally, the mode of the Gamma(α=2,β) prior equals β, which is 10 pc for positions, not 5 pc according to the default hyperparameters.", "claim": "Setting an LKJ(η=2) prior yields a uniform distribution over correlation matrices, ensuring no a priori anisotropy, while Gamma(α=2,β) priors with modes at 5 pc (positions) and 2 km·s⁻¹ (velocities) anchor scale parameters, softly constraining spatial extent and kinematic dispersion in poorly sampled clusters.", "label": false }, { "paperid": "2411.18065v1", "paper_path": "./SciVer/papers/2411.18065v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.18065v1-Table3-1.png", "request_id": 577, "origin_statement": "Cloud-B's 16K PEs arranged in a 128×128 array, combined with 128 GB/s off-chip HBM and 32 MB weight buffer, enables up to 16× higher parallelism and on-chip data reuse than Mobile-A's 1K PE configuration, significantly reducing DRAM access stalls during LLM inference.", "perturbed_statement": "Cloud-B's 16K PEs arranged in a 128×128 array, combined with 64 GB/s off-chip HBM and 64 MB weight buffer, enables up to 16× higher parallelism and on-chip data reuse than Mobile-A's 1K PE configuration, significantly reducing DRAM access stalls during LLM inference.", "perturbed_explanation": "This statement is incorrect because Table 3 specifies that Cloud-B has 128 GB/s off-chip bandwidth (HBM) and a 32 MB weight global buffer, not 64 GB/s and 64 MB as claimed.", "claim": "Cloud-B's 16K PEs arranged in a 128×128 array, combined with 64 GB/s off-chip HBM and 64 MB weight buffer, enables up to 16× higher parallelism and on-chip data reuse than Mobile-A's 1K PE configuration, significantly reducing DRAM access stalls during LLM inference.", "label": false }, { "paperid": "2409.02418v1", "paper_path": "./SciVer/papers/2409.02418v1.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "4.3.2", "5.2" ], "image_path": "./SciVer/images/2409.02418v1_figure_4(a).png", "request_id": 580, "origin_statement": "By integrating language-informed anatomical priors, MOSMOS enhances CT duodenum segmentation with a 2.03% Dice increase, reflecting improved delineation of low-contrast tubular structures through contextualized text-image attention mechanisms.", "perturbed_statement": "By integrating language-informed anatomical priors, MOSMOS enhances CT duodenum segmentation with a 1.5% Dice increase, reflecting improved delineation of low-contrast tubular structures through contextualized text-image attention mechanisms.", "perturbed_explanation": "This statement is incorrect because the actual Dice improvement for CT duodenum segmentation is 2.03% (from 73.47% to 75.50%), not 1.5%, as shown in the context and Fig. 
4(a).", "claim": "By integrating language-informed anatomical priors, MOSMOS enhances CT duodenum segmentation with a 1.5% Dice increase, reflecting improved delineation of low-contrast tubular structures through contextualized text-image attention mechanisms.", "label": false }, { "paperid": "2410.12831v1", "paper_path": "./SciVer/papers/2410.12831v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.12831v1-Table3-1.png", "request_id": 581, "origin_statement": "FLanS, relying solely on positional and size descriptors in free-form text, attains a Dice score of 0.844 on the FLARE set and 0.851 on RAOS OOD, nearly matching bbox-prompted MedSAM (0.887 Dice on FLARE) and outperforming point-prompted SAM2, indicating robust morphology-agnostic generalization via spatial reasoning.", "perturbed_statement": "FLanS, relying solely on positional and size descriptors in free-form text, attains a Dice score of 0.914 on the FLARE set and 0.851 on RAOS OOD, surpassing bbox-prompted MedSAM (0.887 Dice on FLARE) and outperforming point-prompted SAM2, indicating robust morphology-agnostic generalization via spatial reasoning.", "perturbed_explanation": "The perturbed statement incorrectly reports FLanS’s Dice score on the FLARE set as 0.914. According to Table 3, FLanS actually achieves a Dice of 0.844 on FLARE, so it does not surpass MedSAM’s 0.887 score.", "claim": "FLanS, relying solely on positional and size descriptors in free-form text, attains a Dice score of 0.914 on the FLARE set and 0.851 on RAOS OOD, surpassing bbox-prompted MedSAM (0.887 Dice on FLARE) and outperforming point-prompted SAM2, indicating robust morphology-agnostic generalization via spatial reasoning.", "label": false }, { "paperid": "2411.13005v1", "paper_path": "./SciVer/papers/2411.13005v1.json", "claim_type": "analytical", "type": "chart", "item": "5(b)", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.13005v1_figure_5(b).png", "request_id": 582, "origin_statement": "DT-LSD achieves notably higher precision than LETR and L-CNN beyond a 0.1 recall threshold, reaching approximately 0.45 precision at 0.5 recall, which underscores its robustness in detecting urban line structures under high-recall demands.", "perturbed_statement": "DT-LSD achieves notably higher precision than LETR and L-CNN beyond a 0.3 recall threshold, reaching approximately 0.35 precision at 0.5 recall, which underscores its robustness in detecting urban line structures under high-recall demands.", "perturbed_explanation": "The recall threshold is misreported as 0.3 instead of 0.1, and the precision at 0.5 recall is actually about 0.45 (not 0.35) according to the DT-LSD curve in Figure 5.", "claim": "DT-LSD achieves notably higher precision than LETR and L-CNN beyond a 0.3 recall threshold, reaching approximately 0.35 precision at 0.5 recall, which underscores its robustness in detecting urban line structures under high-recall demands.", "label": false }, { "paperid": "2410.15553v2", "paper_path": "./SciVer/papers/2410.15553v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "9" ], "image_path": "./SciVer/images/2410.15553v2-Table4-1.png", "request_id": 583, "origin_statement": "After excluding false refusals, Gemini-1.5 Pro's Turn 1 average rose from 0.758 to 0.760, a marginal 0.2% gain, highlighting that false refusals only minimally impacted its relative performance deficit to GPT-4.", "perturbed_statement": "After excluding false refusals, 
Gemini-1.5 Pro's Turn 1 average rose from 0.758 to 0.818, a 6% improvement, surpassing GPT-4’s 0.813 due to the removal of refusal bias.", "perturbed_explanation": "This is incorrect because Table 4 shows the post-removal average for Gemini-1.5 Pro on Turn 1 is actually 0.760, not 0.818, which remains below GPT-4’s 0.813. The true improvement is only 0.002 (0.2%), not 0.06 (6%), and it does not surpass GPT-4.", "claim": "After excluding false refusals, Gemini-1.5 Pro's Turn 1 average rose from 0.758 to 0.818, a 6% improvement, surpassing GPT-4’s 0.813 due to the removal of refusal bias.", "label": false }, { "paperid": "2411.05966v1", "paper_path": "./SciVer/papers/2411.05966v1.json", "claim_type": "analytical", "type": "chart", "item": "6(d)", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.05966v1_figure_6(d).png", "request_id": 584, "origin_statement": "Figure 6 shows Phi 3’s loss decreasing from ~1.8 to ~1.2 over 50 k steps, converging 30% faster than Llama 3 due to its smaller parameter count, which reduces gradient noise and redundancy, facilitating more efficient protein sequence modeling under a constant learning rate of 5×10⁻⁵.", "perturbed_statement": "Figure 6 shows Phi 3’s loss decreasing from ~1.8 to ~1.1 over 50 k steps, converging 50% faster than Llama 3 thanks to its elevated LoRA rank, which reduces overfitting, under a constant learning rate of 4×10⁻⁵.", "perturbed_explanation": "The perturbation incorrectly states that Phi 3 converged 50% faster, while the context reports a 30% reduction. It misattributes efficiency to LoRA rank instead of parameter count, and misreports the learning rate as 4×10⁻⁵ instead of the stated 5×10⁻⁵. The final loss of ~1.1 also contradicts the observed ~1.2.", "claim": "Figure 6 shows Phi 3’s loss decreasing from ~1.8 to ~1.1 over 50 k steps, converging 50% faster than Llama 3 thanks to its elevated LoRA rank, which reduces overfitting, under a constant learning rate of 4×10⁻⁵.", "label": false }, { "paperid": "2410.06842v1", "paper_path": "./SciVer/papers/2410.06842v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.06842v1-Table5-1.png", "request_id": 585, "origin_statement": "By applying SCCT sampling, SACLoss reduces MAE on COD10K from 0.031 to 0.030—a 3.2% decrease—demonstrating that multi-layer contrastive boundary supervision sharpens object-background separation in camouflage detection.", "perturbed_statement": "By applying SCCT sampling, SACLoss achieves a 7.4% MAE reduction on the CHAMELEON dataset (from 0.027 to 0.025) over BCE-only, evidencing that multi-layer contrastive supervision at object boundaries refines foreground-background separation.", "perturbed_explanation": "This statement is incorrect because Table 5 shows that SCCT yields an MAE of 0.027 on CHAMELEON (row 4), not 0.025. The 0.025 MAE corresponds to the SubSample strategy (row 3). 
Therefore, both the claimed MAE value and the percentage reduction are unsupported by the context.", "claim": "By applying SCCT sampling, SACLoss achieves a 7.4% MAE reduction on the CHAMELEON dataset (from 0.027 to 0.025) over BCE-only, evidencing that multi-layer contrastive supervision at object boundaries refines foreground-background separation.", "label": false }, { "paperid": "2409.12210v1", "paper_path": "./SciVer/papers/2409.12210v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.12210v1-Table5-1.png", "request_id": 591, "origin_statement": "By allocating difficult tokens to appropriately sized experts, MoDSE achieves a 0.58 average CE loss reduction for tokens with >2.0 baseline loss versus only 0.18 for those exceeding 1.05, exemplifying a convex optimization behavior where higher initial error yields larger corrective updates.", "perturbed_statement": "By allocating difficult tokens to appropriately sized experts, MoDSE achieves a 0.78 average CE loss reduction for tokens with >2.0 baseline loss versus only 0.18 for those exceeding 1.05, exemplifying a convex optimization behavior where higher initial error yields larger corrective updates.", "perturbed_explanation": "The perturbed statement incorrectly reports the average CE loss reduction for tokens with >2.0 baseline loss as 0.78; Table 5 actually lists this value as 0.58, making the stated figure verifiably inaccurate.", "claim": "By allocating difficult tokens to appropriately sized experts, MoDSE achieves a 0.78 average CE loss reduction for tokens with >2.0 baseline loss versus only 0.18 for those exceeding 1.05, exemplifying a convex optimization behavior where higher initial error yields larger corrective updates.", "label": false }, { "paperid": "2410.16255v1", "paper_path": "./SciVer/papers/2410.16255v1.json", "claim_type": "analytical", "type": "chart", "item": "8(b)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.16255v1_figure_8(b).png", "request_id": 592, "origin_statement": "Pixel AUROC peaks at 81.08% for α=0.5 and β=0.95, indicating that lower quantile thresholds retain subtle score variations essential for distinguishing structural anomalies, whereas higher thresholds compress the anomaly distribution and reduce pixel-level sensitivity.", "perturbed_statement": "Pixel AUROC peaks at 82.10% for α=0.5 and β=0.98, indicating that mid-range quantile thresholds retain subtle score variations essential for distinguishing structural anomalies, whereas higher thresholds compress the anomaly distribution and reduce pixel-level sensitivity.", "perturbed_explanation": "The perturbed statement is incorrect because the actual maximum Pixel AUROC observed is 81.08% at α=0.5 and β=0.95, not 82.10% at β=0.98, according to Figure 8. 
The specified value and β setting do not match the reported results.", "claim": "Pixel AUROC peaks at 82.10% for α=0.5 and β=0.98, indicating that mid-range quantile thresholds retain subtle score variations essential for distinguishing structural anomalies, whereas higher thresholds compress the anomaly distribution and reduce pixel-level sensitivity.", "label": false }, { "paperid": "2409.14857v2", "paper_path": "./SciVer/papers/2409.14857v2.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "6.1" ], "image_path": "./SciVer/images/2409.14857v2_figure_3.png", "request_id": 594, "origin_statement": "Figure 3 shows that on KINSHIP, cubic embeddings (degree 3) yield the highest test MRR due to symmetric relation modeling, whereas on COUNTRIES, a 15-degree polynomial maximizes generalization, reflecting more complex, hierarchical geographic relationships requiring higher-order feature interactions.", "perturbed_statement": "Figure 3 shows that on KINSHIP, septenary embeddings (degree 7) yield the highest test MRR due to symmetric relation modeling, whereas on COUNTRIES, a 31-degree polynomial maximizes generalization, reflecting more complex, hierarchical geographic relationships requiring higher-order feature interactions.", "perturbed_explanation": "This is incorrect because Figure 3 actually shows the highest test MRR on KINSHIP at degree 3, not degree 7, and on COUNTRIES the optimal degree for peak test MRR is 15, not 31.", "claim": "Figure 3 shows that on KINSHIP, septenary embeddings (degree 7) yield the highest test MRR due to symmetric relation modeling, whereas on COUNTRIES, a 31-degree polynomial maximizes generalization, reflecting more complex, hierarchical geographic relationships requiring higher-order feature interactions.", "label": false }, { "paperid": "2410.20421v1", "paper_path": "./SciVer/papers/2410.20421v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.20421v1-Table3-1.png", "request_id": 597, "origin_statement": "Fine-tuning ProContEXT on attribute-focused NT-VOT211 subsets yields marginal AUC gains (e.g., a 5.1-point jump in 'distractors' and 2.3 in 'camera motion'), suggesting limited generalization of context-based adaptation under complex night-time dynamics.", "perturbed_statement": "Fine-tuning ProContEXT on attribute-focused NT-VOT211 subsets yields marginal AUC gains (e.g., a 10-point jump in 'tiny target' and 5.5 in 'motion blur'), suggesting limited generalization of context-based adaptation under complex night-time dynamics.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 shows the 'tiny target' AUC improves only from 29.1 to 29.7 (0.6 points), not 10 points, and 'motion blur' improves from 47.5 to 49.1 (1.6 points), not 5.5 points.", "claim": "Fine-tuning ProContEXT on attribute-focused NT-VOT211 subsets yields marginal AUC gains (e.g., a 10-point jump in 'tiny target' and 5.5 in 'motion blur'), suggesting limited generalization of context-based adaptation under complex night-time dynamics.", "label": false }, { "paperid": "2409.06538v1", "paper_path": "./SciVer/papers/2409.06538v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.06538v1_figure_6.png", "request_id": 602, "origin_statement": "At bond dimension D=16, the TNMC acceptance probability stays above ~0.8 for lattice sizes up to L=512, constraining autocorrelation times below 1.25 and thus 
preventing the exponential critical slowing down typical at T_c. This reflects the tensor network’s accurate conditional sampling even near gapless criticality.", "perturbed_statement": "At bond dimension D=16, the TNMC acceptance probability remains above ~0.9 for lattice sizes up to L=1024, constraining autocorrelation times below 1.1 and thus eliminating critical slowing down at T_c.", "perturbed_explanation": "The perturbed statement is incorrect because, according to Fig. 6(a), the acceptance probability p_a at L=1024 and D=16 drops to about 0.2, not above 0.9. Consequently, the autocorrelation time τ=1/p_a would exceed 4 rather than remaining below 1.1, so critical slowing down is not eliminated.", "claim": "At bond dimension D=16, the TNMC acceptance probability remains above ~0.9 for lattice sizes up to L=1024, constraining autocorrelation times below 1.1 and thus eliminating critical slowing down at T_c.", "label": false }, { "paperid": "2411.05867v1", "paper_path": "./SciVer/papers/2411.05867v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "3.1", "3.2.1" ], "image_path": "./SciVer/images/2411.05867v1_figure_8.png", "request_id": 604, "origin_statement": "The hybrid RC mitigates reservoir chaos induced by spectral radii above 1.0, using expert ODE corrections to restore memory capacity and recover low NMSE where the standard RC’s performance irreversibly degrades.", "perturbed_statement": "The hybrid RC mitigates reservoir chaos induced by spectral radii above 2.0, using expert ODE corrections to restore memory capacity and recover low NMSE where the standard RC’s performance irreversibly degrades.", "perturbed_explanation": "The perturbed statement incorrectly shifts the critical spectral radius threshold from 1.0 to 2.0. According to Figure 8 and the context, the standard RC begins to degrade as soon as the spectral radius exceeds 1.0, and the hybrid RC recovery is observed shortly above that value—not at 2.0.", "claim": "The hybrid RC mitigates reservoir chaos induced by spectral radii above 2.0, using expert ODE corrections to restore memory capacity and recover low NMSE where the standard RC’s performance irreversibly degrades.", "label": false }, { "paperid": "2410.04927v2", "paper_path": "./SciVer/papers/2410.04927v2.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "5.9" ], "image_path": "./SciVer/images/2410.04927v2-Table6-1.png", "request_id": 605, "origin_statement": "Compared to no embedding, Llama2’s extended contextual embeddings boost HR@20 by 18.4%, while Llama3.1’s further refined attention increases H@10 and N@10 by 55% and 72%, respectively, highlighting that longer attention mechanisms capture deeper user-item dependencies.", "perturbed_statement": "Compared to BERT embeddings, Llama2’s extended contextual embeddings boost HR@20 by 28.4%, while Llama3.1’s further refined attention increases H@10 and N@10 by 65% and 82%, respectively, highlighting that longer attention mechanisms capture deeper user-item dependencies.", "perturbed_explanation": "The perturbation misstates key improvement percentages and baseline comparisons. Llama2’s HR@20 rises from 0.05533 (BERT) to 0.06389, a ~15.6% gain, not 28.4%. 
Likewise, Llama3.1’s H@10 and N@10 improve by ~55% (0.03096→0.04808) and ~72% (0.01432→0.02465), not 65% and 82%.", "claim": "Compared to BERT embeddings, Llama2’s extended contextual embeddings boost HR@20 by 28.4%, while Llama3.1’s further refined attention increases H@10 and N@10 by 65% and 82%, respectively, highlighting that longer attention mechanisms capture deeper user-item dependencies.", "label": false }, { "paperid": "2411.15351v1", "paper_path": "./SciVer/papers/2411.15351v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2411.15351v1-Table2-1.png", "request_id": 617, "origin_statement": "ORB reproduces VASP formation energies of CuAu L10, Cu3Au L12, and CuAu3 L12 within 0.002 eV per atom, resulting in phase diagram transition temperature deviations under 30 K because 0.01 eV formation energy corresponds to ~116 K in thermal energy, preserving topology.", "perturbed_statement": "ORB reproduces VASP formation energies of CuAu L10, Cu3Au L12, and CuAu3 L12 within 0.02 eV per atom, resulting in phase diagram transition temperature deviations under 300 K because 0.01 eV formation energy corresponds to ~11.6 K in thermal energy, preserving topology.", "perturbed_explanation": "This statement is incorrect because Table 2 shows ORB errors are actually around 0.002 eV per atom for those intermetallics, not 0.02 eV. Moreover, 0.01 eV corresponds to about 116 K, not 11.6 K, so both the error magnitude and temperature conversion are wrong.", "claim": "ORB reproduces VASP formation energies of CuAu L10, Cu3Au L12, and CuAu3 L12 within 0.02 eV per atom, resulting in phase diagram transition temperature deviations under 300 K because 0.01 eV formation energy corresponds to ~11.6 K in thermal energy, preserving topology.", "label": false }, { "paperid": "2409.11040v1", "paper_path": "./SciVer/papers/2409.11040v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "4" ], "image_path": "./SciVer/images/2409.11040v1_figure_1.png", "request_id": 618, "origin_statement": "In ZIP simulations, leveraging within-unit autoregressive correlation (α) via the EM algorithm reduces mean absolute imputation error by up to 35% at moderate zero-inflation (π≈0.5), since stronger correlation enhances posterior expectation accuracy, unlike mode imputation which remains insensitive to α and dominated by zero prevalence.", "perturbed_statement": "In ZIP simulations, leveraging within-unit autoregressive correlation (α) via the EM algorithm reduces mean absolute imputation error by up to 50% at high zero-inflation (π≈0.9), since stronger correlation enhances posterior expectation accuracy, unlike mode imputation which remains sensitive to α and dominated by zero prevalence.", "perturbed_explanation": "The perturbed statement is incorrect on two counts: first, the maximal reduction in EM error at moderate zero-inflation (π≈0.5) is about 35%, not 50% at π≈0.9, as observed in Figure 1. 
Second, mode imputation’s error remains largely insensitive to changes in α (panel colors are nearly parallel), contradicting the claim that it is sensitive to α.", "claim": "In ZIP simulations, leveraging within-unit autoregressive correlation (α) via the EM algorithm reduces mean absolute imputation error by up to 50% at high zero-inflation (π≈0.9), since stronger correlation enhances posterior expectation accuracy, unlike mode imputation which remains sensitive to α and dominated by zero prevalence.", "label": false }, { "paperid": "2409.07942v1", "paper_path": "./SciVer/papers/2409.07942v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.5" ], "image_path": "./SciVer/images/2409.07942v1-Table3-1.png", "request_id": 619, "origin_statement": "For 1D toy data, combining NCL and DPM with the DTB module reduces interpolation MSE from 0.163 to 0.099 (a 39% decrease), indicating the Taylor-based heteroscedastic noise modeling effectively prevents overfitting compared to DTB alone.", "perturbed_statement": "For 1D toy data, combining NCL and DPM with the DTB module reduces interpolation MSE from 0.256 to 0.099 (a 61% decrease), indicating the Taylor-based heteroscedastic noise modeling effectively prevents overfitting compared to DTB alone.", "perturbed_explanation": "The perturbation misstates the baseline interpolation MSE as 0.256, but Table 3 reports a DTB base interpolation MSE of 0.163. Therefore both the starting value and the claimed percentage reduction are incorrect.", "claim": "For 1D toy data, combining NCL and DPM with the DTB module reduces interpolation MSE from 0.256 to 0.099 (a 61% decrease), indicating the Taylor-based heteroscedastic noise modeling effectively prevents overfitting compared to DTB alone.", "label": false }, { "paperid": "2410.10054v1", "paper_path": "./SciVer/papers/2410.10054v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.10054v1_figure_2.png", "request_id": 620, "origin_statement": "In LLaMA-7B, PL_Alpha_Hill values increase steadily from ~2.6 in early layers to above 4.0 after layer 16, indicating a shift from heavy-tailed to light-tailed ESDs in deeper layers; HT-SR suggests decayed learning rates there to avoid overfitting.", "perturbed_statement": "In LLaMA-7B, PL_Alpha_Hill values decline from ~2.6 in early layers to below 2.0 after layer 16, indicating decreased tail heaviness; HT-SR theory suggests decaying learning rates in these deeper layers to prevent overfitting.", "perturbed_explanation": "This is incorrect because Figure 2 shows PL_Alpha_Hill for LLaMA-7B actually rises after layer 16 to values above 4.0, not declines to below 2.0. 
The stated trend and threshold (<2.0) directly contradict the plotted data.", "claim": "In LLaMA-7B, PL_Alpha_Hill values decline from ~2.6 in early layers to below 2.0 after layer 16, indicating decreased tail heaviness; HT-SR theory suggests decaying learning rates in these deeper layers to prevent overfitting.", "label": false }, { "paperid": "2409.13990v2", "paper_path": "./SciVer/papers/2409.13990v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.13990v2-Table3-1.png", "request_id": 621, "origin_statement": "Table 3 shows that as α increases from 0.05 to 0.2, median coverage declines from 0.970 to 0.834 and quartile coverage from 0.972 to 0.814, illustrating the trade-off between tighter conformal intervals and empirical coverage in distribution-free quantile inference.", "perturbed_statement": "Table 3 shows that as α increases from 0.05 to 0.2, median coverage declines from 0.970 to 0.834 and quartile coverage from 0.954 to 0.814, illustrating the trade-off between tighter conformal intervals and empirical coverage in distribution-free quantile inference.", "perturbed_explanation": "The perturbed statement misreports the quartile coverage at α=0.05 as 0.954, but Table 3 actually shows a quartile coverage of 0.972 at α=0.05, making the initial figure incorrect.", "claim": "Table 3 shows that as α increases from 0.05 to 0.2, median coverage declines from 0.970 to 0.834 and quartile coverage from 0.954 to 0.814, illustrating the trade-off between tighter conformal intervals and empirical coverage in distribution-free quantile inference.", "label": false }, { "paperid": "2411.11449v1", "paper_path": "./SciVer/papers/2411.11449v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.1.4" ], "image_path": "./SciVer/images/2411.11449v1_figure_4.png", "request_id": 624, "origin_statement": "In single interviews, participants requested up to 9 system-detail explanations (in S3), compared to at most 6 in any focus group (Gr. 2), suggesting that individualized sessions enhance users’ demand for algorithmic transparency by reducing group-level information filtering.", "perturbed_statement": "In single interviews, participants requested up to 12 system-detail explanations (in S3), compared to at most 4 in any focus group (Gr. 2), suggesting that individualized sessions enhance users’ demand for algorithmic transparency by reducing group-level information filtering.", "perturbed_explanation": "The perturbation incorrectly states 12 system-detail explanations in S3 and 4 in Gr. 2. Figure 4 shows 9 system-detail requests in S3 (orange bar) and 6 in Gr. 2, not 12 or 4.", "claim": "In single interviews, participants requested up to 12 system-detail explanations (in S3), compared to at most 4 in any focus group (Gr. 
2), suggesting that individualized sessions enhance users’ demand for algorithmic transparency by reducing group-level information filtering.", "label": false }, { "paperid": "2409.19521v1", "paper_path": "./SciVer/papers/2409.19521v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.19521v1_figure_7.png", "request_id": 632, "origin_statement": "The minimal variance (<4%) in GenTel-Shield's accuracy across five languages suggests its underlying adversarial feature extractor effectively captures language-agnostic semantic structures, mitigating translation-induced noise.", "perturbed_statement": "The minimal variance (<2%) in GenTel-Shield's accuracy across five languages suggests its underlying adversarial feature extractor effectively captures language-agnostic semantic structures, mitigating translation-induced noise.", "perturbed_explanation": "The claim is incorrect because GenTel-Shield's highest accuracy (97.36% in Chinese) and lowest (93.84% in German) differ by about 3.52%, which exceeds the stated <2% variance.", "claim": "The minimal variance (<2%) in GenTel-Shield's accuracy across five languages suggests its underlying adversarial feature extractor effectively captures language-agnostic semantic structures, mitigating translation-induced noise.", "label": false }, { "paperid": "2409.13980v1", "paper_path": "./SciVer/papers/2409.13980v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.13980v1-Table1-1.png", "request_id": 635, "origin_statement": "CVR-LLM_Llama3's 88.7% SWOW accuracy on WinoGAViL surpasses BLIP2 by 17.1%, illustrating that integrating an LLM with a vision backbone enhances multimodal pronoun resolution through improved cross-modal attention.", "perturbed_statement": "CVR-LLM_Llama3's 88.7% SWOW accuracy on WinoGAViL only surpasses BLIP2 by 7.1%, illustrating that semantic segmentation integration enhances multimodal pronoun resolution through improved boundary detection.", "perturbed_explanation": "This is incorrect because CVR-LLM_Llama3 actually outperforms BLIP2 by 17.1% (88.7% vs. 71.6%), not 7.1%. 
Moreover, the model architecture relies on cross-modal attention fusion, not semantic segmentation integration.", "claim": "CVR-LLM_Llama3's 88.7% SWOW accuracy on WinoGAViL only surpasses BLIP2 by 7.1%, illustrating that semantic segmentation integration enhances multimodal pronoun resolution through improved boundary detection.", "label": false }, { "paperid": "2410.19599v2", "paper_path": "./SciVer/papers/2410.19599v2.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4" ], "image_path": "./SciVer/images/2410.19599v2_figure_5.png", "request_id": 636, "origin_statement": "The stark contrast between ~80–100% beauty contest instruction replication and near-zero replication on the 11–20 request game indicates LLMs preferentially memorize widely represented strategic game formats over rarely encountered numerical tasks, reflecting a training data bias toward competitive strategy descriptions.", "perturbed_statement": "LLMs replicate the 11–20 money request game instructions with 80–100% accuracy while achieving near-zero success on the beauty contest game, implying LLMs memorize simple numerical rules more reliably than strategic game formats.", "perturbed_explanation": "This is incorrect because Figure 5 shows essentially 0% replication success for the 11–20 money request game across all models (except a 2.9% outlier), and 75–100% replication success for the beauty contest game, not the reverse.", "claim": "LLMs replicate the 11–20 money request game instructions with 80–100% accuracy while achieving near-zero success on the beauty contest game, implying LLMs memorize simple numerical rules more reliably than strategic game formats.", "label": false }, { "paperid": "2411.18373v1", "paper_path": "./SciVer/papers/2411.18373v1.json", "claim_type": "analytical", "type": "chart", "item": "1(a)", "section": [ "3" ], "image_path": "./SciVer/images/2411.18373v1_figure_1(a).png", "request_id": 638, "origin_statement": "In the repulsive potential scenario (m1=1.5, m2=3.5), the total phase shift displays a sharp cusp exactly at the threshold momentum k_th≈√(m2^2−m1^2)≈3.16, marking the second channel’s opening; subsequent 2π– smoothing removes discontinuities, yielding a monotonic rise toward zero.", "perturbed_statement": "The total phase shift in the repulsive channel exhibits a sharp cusp exactly at the threshold momentum k_th≈√(m2^2−m1^2)≈2.5, marking the second channel’s opening; subsequent 2π– smoothing removes discontinuities, yielding a monotonic rise toward zero.", "perturbed_explanation": "The perturbed statement miscalculates the threshold momentum. 
With m2=3.5 and m1=1.5, k_th=√(3.5^2−1.5^2)=√10≈3.16, not 2.5, so the cusp cannot occur at k≈2.5 as claimed.", "claim": "The total phase shift in the repulsive channel exhibits a sharp cusp exactly at the threshold momentum k_th≈√(m2^2−m1^2)≈2.5, marking the second channel’s opening; subsequent 2π– smoothing removes discontinuities, yielding a monotonic rise toward zero.", "label": false }, { "paperid": "2410.19288v1", "paper_path": "./SciVer/papers/2410.19288v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "3.3.8" ], "image_path": "./SciVer/images/2410.19288v1_figure_8.png", "request_id": 642, "origin_statement": "Neuroradiologists rated over 96% of SR images as excellent (score 90), demonstrating that FTDDM effectively reconstructs high-frequency metabolite features, notably restoring gray–white matter contrast in NAA and Glu maps, aligning with known metabolite distributions and potentially improving lesion detection in MRSI.", "perturbed_statement": "Neuroradiologists rated over 75% of SR images as good (score 70), demonstrating that FTDDM effectively reconstructs high-frequency metabolite features, notably restoring gray–white matter contrast in NAA and Glu maps, aligning with known metabolite distributions and potentially improving lesion detection in MRSI.", "perturbed_explanation": "This statement is incorrect because the neuroradiologists actually rated over 96% of SR images as excellent (score 90), not 75% as good (score 70). The percentage and score threshold contradict the reported Figure 8 results.", "claim": "Neuroradiologists rated over 75% of SR images as good (score 70), demonstrating that FTDDM effectively reconstructs high-frequency metabolite features, notably restoring gray–white matter contrast in NAA and Glu maps, aligning with known metabolite distributions and potentially improving lesion detection in MRSI.", "label": false }, { "paperid": "2410.10652v1", "paper_path": "./SciVer/papers/2410.10652v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.10652v1-Table5-1.png", "request_id": 643, "origin_statement": "For Layer5_Layer6_100 niches, QueST’s 0.802 mean Pearson correlation outperforms the No-Pooling variant by 0.306, demonstrating that combining contrastive learning with subgraph pooling effectively preserves spatial topology in larger multicellular niches.", "perturbed_statement": "For Layer5_Layer6_100 niches, QueST’s 0.802 mean Pearson correlation outperforms the No-Pooling variant by 0.056, demonstrating that the batch discriminator primarily preserves spatial topology in larger multicellular niches.", "perturbed_explanation": "The perturbation is incorrect because the actual improvement over the No-Pooling variant is 0.802–0.496=0.306, not 0.056. 
Moreover, the batch discriminator has only a minor impact (No-Batch yields 0.647), whereas the main gain in topology preservation arises from contrastive learning combined with subgraph pooling.", "claim": "For Layer5_Layer6_100 niches, QueST’s 0.802 mean Pearson correlation outperforms the No-Pooling variant by 0.056, demonstrating that the batch discriminator primarily preserves spatial topology in larger multicellular niches.", "label": false }, { "paperid": "2411.13093v1", "paper_path": "./SciVer/papers/2411.13093v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.13093v1-Table1-1.png", "request_id": 645, "origin_statement": "Incorporating ~14 additional video frames (≈2.0K tokens, 144 tokens/frame) via Video-RAG yields an average 8.0% performance uplift on Video-MME, indicating that auxiliary text embeddings bridge visual–semantic alignment gaps in LVLMs primarily pretrained on text.", "perturbed_statement": "Incorporating ~14 additional video frames (≈2.0K tokens, 256 tokens/frame) via Video-RAG yields an average 8.0% performance uplift on Video-MME, indicating that auxiliary text embeddings bridge visual–semantic alignment gaps in LVLMs primarily pretrained on text.", "perturbed_explanation": "The perturbed statement incorrectly states 256 tokens per frame. Table 1 and the context specify each frame contributes approximately 144 tokens, not 256, to reach the total of ~2.0K tokens for 14 frames.", "claim": "Incorporating ~14 additional video frames (≈2.0K tokens, 256 tokens/frame) via Video-RAG yields an average 8.0% performance uplift on Video-MME, indicating that auxiliary text embeddings bridge visual–semantic alignment gaps in LVLMs primarily pretrained on text.", "label": false }, { "paperid": "2409.14128v2", "paper_path": "./SciVer/papers/2409.14128v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "5.3", "6.1" ], "image_path": "./SciVer/images/2409.14128v2_figure_2.png", "request_id": 646, "origin_statement": "Due to bilinear interpolation smoothing high-frequency tampering artifacts, SuSy maintains a stable combined recall (~140%) at 224–512 px, but experiences steep declines beyond 768 px as interpolation attenuates forensic traces.", "perturbed_statement": "Because bicubic interpolation enhances subtle high-frequency artifacts, SuSy’s combined recall actually peaks at the highest tested resolution (1440 px) at around 160%, while lower scales (224–512 px) only yield near 100% combined recall.", "perturbed_explanation": "The perturbation is incorrect because SuSy uses bilinear interpolation (not bicubic), its combined recall never reaches 160% at 1440 px (it drops to about 97.5%), and lower scales (224–512 px) achieve around 140% combined recall, not near 100%.", "claim": "Because bicubic interpolation enhances subtle high-frequency artifacts, SuSy’s combined recall actually peaks at the highest tested resolution (1440 px) at around 160%, while lower scales (224–512 px) only yield near 100% combined recall.", "label": false }, { "paperid": "2410.01727v1", "paper_path": "./SciVer/papers/2410.01727v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4" ], "image_path": "./SciVer/images/2410.01727v1-Table1-1.png", "request_id": 647, "origin_statement": "With 5.55M interactions across 7,652 questions, XES3G5M averages ~725 interactions/question—higher than Eedi’s ~580 (2.32M interactions/4,019 questions)—offering denser practice data that boosts the 
stability of skill-parameter estimates in KT models.", "perturbed_statement": "With 2.32M interactions across 7,652 questions, XES3G5M averages ~303 interactions/question—lower than Eedi’s ~580 (2.32M interactions/4,019 questions)—resulting in sparser practice data that undermines the stability of skill-parameter estimates in KT models.", "perturbed_explanation": "The perturbed statement incorrectly reports XES3G5M as having 2.32M interactions, whereas Table 1 lists 5,549,635 interactions. This misreported total leads to an invalid calculation of ~303 interactions per question.", "claim": "With 2.32M interactions across 7,652 questions, XES3G5M averages ~303 interactions/question—lower than Eedi’s ~580 (2.32M interactions/4,019 questions)—resulting in sparser practice data that undermines the stability of skill-parameter estimates in KT models.", "label": false }, { "paperid": "2411.10959v1", "paper_path": "./SciVer/papers/2411.10959v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.10959v1-Table3-1.png", "request_id": 657, "origin_statement": "Treatment-group shrids average 2,604 inhabitants, 21.6% higher than non-study shrids (2,143), and also have the highest urban land share (0.004). This suggests the Smartcard rollout initially targeted denser, more urbanized villages, potentially enhancing adoption through stronger social networks and infrastructure availability.", "perturbed_statement": "Treatment-group shrids average 2,604 inhabitants, 18% lower than non-study shrids (2,143), and have the lowest urban land share (0.001). This suggests the rollout focused on sparsely populated, rural villages, diminishing network effects.", "perturbed_explanation": "The perturbed statement incorrectly inverts the population gap and urbanization metric. Table 3 shows treatment shrids average 2,604 inhabitants, which is 21.6% higher (not 18% lower) than non-study shrids (2,143), and a 0.004 urban area share (not the lowest 0.001). Both details conflict with the table.", "claim": "Treatment-group shrids average 2,604 inhabitants, 18% lower than non-study shrids (2,143), and have the lowest urban land share (0.001). This suggests the rollout focused on sparsely populated, rural villages, diminishing network effects.", "label": false }, { "paperid": "2409.11736v1", "paper_path": "./SciVer/papers/2409.11736v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "3", "5.1", "5.1.1" ], "image_path": "./SciVer/images/2409.11736v1_figure_5.png", "request_id": 660, "origin_statement": "The increasing positive slope of the spectroscopic–seismic mass difference indicates that systematic Teff underestimations propagate through the ν_max-based scaling relation, causing increasing seismic mass underestimates at higher spectroscopic mass; Teff uncertainties of ~100 K hence induce mass-dependent offsets visible across sources.", "perturbed_statement": "The decreasing negative slope of the spectroscopic–seismic mass difference suggests that systematic Teff overestimations propagate through the Δν-based scaling relation, causing decreasing seismic mass overestimates at higher spectroscopic mass; Teff uncertainties of ~200 K hence induce mass-dependent offsets across sources.", "perturbed_explanation": "This is incorrect because Fig. 
5 uses a ν_max-based seismic mass (not a Δν-based relation), shows a positive (not negative) slope between 0.26 and 0.67, and the adopted Teff uncertainty is 100 K (not 200 K).", "claim": "The decreasing negative slope of the spectroscopic–seismic mass difference suggests that systematic Teff overestimations propagate through the Δν-based scaling relation, causing decreasing seismic mass overestimates at higher spectroscopic mass; Teff uncertainties of ~200 K hence induce mass-dependent offsets across sources.", "label": false }, { "paperid": "2411.03539v1", "paper_path": "./SciVer/papers/2411.03539v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.1.2" ], "image_path": "./SciVer/images/2411.03539v1_figure_4.png", "request_id": 662, "origin_statement": "By requiring a total deposited energy around 12.35 MeV, an absolute energy difference ΔE <1 MeV, and an inter-hit angle θ ≥25°, the secondary photons arrive delayed beyond the LaBr3 time resolution, fully rejecting γγ cascade events.", "perturbed_statement": "By requiring a total deposited energy around 12.35 MeV, an absolute energy difference ΔE <2 MeV, and an inter-hit angle θ ≥15°, the secondary photons arrive delayed beyond the LaBr3 time resolution, fully rejecting γγ cascade events.", "perturbed_explanation": "The statement is incorrect because the experiment uses ΔE <1 MeV (not <2 MeV) and a minimum correlation angle of θ ≥25° (not 15°). Using looser energy and angular cuts would not eliminate all γγ cascade events as described in the context.", "claim": "By requiring a total deposited energy around 12.35 MeV, an absolute energy difference ΔE <2 MeV, and an inter-hit angle θ ≥15°, the secondary photons arrive delayed beyond the LaBr3 time resolution, fully rejecting γγ cascade events.", "label": false }, { "paperid": "2410.20847v1", "paper_path": "./SciVer/papers/2410.20847v1.json", "claim_type": "analytical", "type": "chart", "item": "11", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.20847v1_figure_11.png", "request_id": 666, "origin_statement": "Spring season records the highest average daily radio visibility (~20 h/day) because the halo orbit's plane aligns optimally with Earth's orbital plane, minimizing Moon-induced occultations and maximizing ground station line-of-sight windows.", "perturbed_statement": "Summer season records the highest average daily radio visibility (~20 h/day) because the halo orbit's plane aligns optimally with Earth's orbital plane, minimizing Moon-induced occultations and maximizing ground station line-of-sight windows.", "perturbed_explanation": "The context specifies that spring—not summer—has the highest average daily radio visibility at about 20 h/day. 
Summer visibility averages only around 16 h/day, so attributing the peak 20 h/day to summer contradicts the given seasonal values.", "claim": "Summer season records the highest average daily radio visibility (~20 h/day) because the halo orbit's plane aligns optimally with Earth's orbital plane, minimizing Moon-induced occultations and maximizing ground station line-of-sight windows.", "label": false }, { "paperid": "2411.16312v1", "paper_path": "./SciVer/papers/2411.16312v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.16312v1-Table3-1.png", "request_id": 667, "origin_statement": "The content-aware sampling yields nearly identical ΔVMAF (±0.06) across 16×16 to 64×64 patches, highlighting that the method effectively prioritizes perceptual quality irrespective of spatial resolution by focusing on high-frequency informative regions.", "perturbed_statement": "The content-aware sampling yields nearly identical ΔVMAF (±0.6) across 16×16 to 64×64 patches, highlighting that the method effectively prioritizes perceptual quality irrespective of spatial resolution by focusing on high-frequency informative regions.", "perturbed_explanation": "The stated ±0.6 variation is incorrect: Table 3 shows ΔVMAF values of 2.66, 2.72, 2.69 for ×2 and 6.54, 6.65, 6.58 for ×4, so the actual variation is only ±0.06 for ×2 and ±0.055 for ×4, not ±0.6.", "claim": "The content-aware sampling yields nearly identical ΔVMAF (±0.6) across 16×16 to 64×64 patches, highlighting that the method effectively prioritizes perceptual quality irrespective of spatial resolution by focusing on high-frequency informative regions.", "label": false }, { "paperid": "2411.13545v2", "paper_path": "./SciVer/papers/2411.13545v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2411.13545v2-Table3-1.png", "request_id": 673, "origin_statement": "At 99.99% sparsity, DyReLU phasing preserves nonzero gradient flow, enabling ResNet50 to reach 53.32% test accuracy versus 10% with standard ReLU, by amplifying salient activations during early training and avoiding representational collapse at extreme sparsity.", "perturbed_statement": "At 99.95% sparsity, DyReLU phasing preserves nonzero gradient flow, enabling ResNet50 to reach 82.02% test accuracy versus 30.14% with standard ReLU, by amplifying salient activations during early training.", "perturbed_explanation": "Table 3 shows that at 99.95% sparsity, ResNet50 with DyReLU achieves 69.57% accuracy, not 82.02%. The 82.02% result actually corresponds to weight sharing (WS), not DyReLU. 
Thus the perturbed statement’s accuracy figure and method attribution are incorrect.", "claim": "At 99.95% sparsity, DyReLU phasing preserves nonzero gradient flow, enabling ResNet50 to reach 82.02% test accuracy versus 30.14% with standard ReLU, by amplifying salient activations during early training.", "label": false }, { "paperid": "2410.23828v2", "paper_path": "./SciVer/papers/2410.23828v2.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.23828v2_figure_7.png", "request_id": 674, "origin_statement": "Incrementally increasing the vision-language decoder from one to three layers enhances feature fusion and cross-modal reasoning, reaching maximum OA (71.6%) and oIoU (43.8%) at three layers; adding more layers yields marginal declines, likely due to heightened overfitting risks.", "perturbed_statement": "Performance increases through four layers, reaching highest OA (72.0%) and oIoU (44.3%) at n=4, and then marginally declines, implying deeper decoding layers continually enhance generalization and circumvent overfitting even beyond four layers.", "perturbed_explanation": "The chart shows the peak occurs at three layers (OA 71.6%, oIoU 43.8%), not four. At four layers, OA is about 71.3% and oIoU about 43.1%, contradicting the claimed 72.0% and 44.3%. Additionally, deeper layers beyond three tend to overfit, causing declines.", "claim": "Performance increases through four layers, reaching highest OA (72.0%) and oIoU (44.3%) at n=4, and then marginally declines, implying deeper decoding layers continually enhance generalization and circumvent overfitting even beyond four layers.", "label": false }, { "paperid": "2411.06866v1", "paper_path": "./SciVer/papers/2411.06866v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.06866v1-Table1-1.png", "request_id": 677, "origin_statement": "The CommonsenseQA official split has fewer training instances (9,741) than SocialIQA (33,410), suggesting that models may require stronger semantic reasoning biases to generalize effectively on smaller structured knowledge-based QA tasks compared to large social inference datasets.", "perturbed_statement": "The CommonsenseQA official split has more training instances (9,741) than SocialIQA (23,410), suggesting that models depend more on large structured knowledge-based QA datasets for robust semantic reasoning compared to social inference tasks.", "perturbed_explanation": "This statement is incorrect because SocialIQA actually contains 33,410 training examples (not 23,410), and CommonsenseQA’s official split (9,741) is smaller, not larger, than SocialIQA’s training size as shown in Table 1.", "claim": "The CommonsenseQA official split has more training instances (9,741) than SocialIQA (23,410), suggesting that models depend more on large structured knowledge-based QA datasets for robust semantic reasoning compared to social inference tasks.", "label": false }, { "paperid": "2409.01629v1", "paper_path": "./SciVer/papers/2409.01629v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2409.01629v1-Table1-1.png", "request_id": 679, "origin_statement": "The consistent use of a 1200 gr/mm grating at 345° with F/4 focuses yields a reciprocal dispersion of 10 Å/mm, producing a resolving power of ~5,000 over 4100–4900 Å; coupling with Pandol developer enhances low-contrast line detection in B–F type spectra.", 
"perturbed_statement": "The consistent use of a 1200 gr/mm grating at 345° with F/4 focuses yields a reciprocal dispersion of 5 Å/mm, producing a resolving power of ~10,000 over 4100–4900 Å; coupling with Pandol developer enhances low-contrast line detection in B–F type spectra.", "perturbed_explanation": "The context specifies a reciprocal dispersion of 10 Å/mm, not 5 Å/mm, so the resolving power is overestimated. Therefore both the altered dispersion and derived resolving power (~10,000) contradict the original data.", "claim": "The consistent use of a 1200 gr/mm grating at 345° with F/4 focuses yields a reciprocal dispersion of 5 Å/mm, producing a resolving power of ~10,000 over 4100–4900 Å; coupling with Pandol developer enhances low-contrast line detection in B–F type spectra.", "label": false }, { "paperid": "2411.07042v1", "paper_path": "./SciVer/papers/2411.07042v1.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.07042v1_figure_4(a).png", "request_id": 680, "origin_statement": "The predominance of user-driven 'Reason and Preach' (21.5%) and expert-driven 'Proposal' (19.3%) implies participants favored cognitively rich interventions over affective tactics, aligning with the Elaboration Likelihood Model's central route emphasis, contrasting with low reliance on emotive 'Anger Expression' (7.9%).", "perturbed_statement": "The predominance of user-driven 'Reason and Preach' (21.5%) and expert-driven 'Proposal' (19.3%) implies participants favored cognitively rich interventions over affective tactics, aligning with the Elaboration Likelihood Model's central route emphasis, contrasting with moderate reliance on emotive 'Anger Expression' (15%).", "perturbed_explanation": "The perturbed statement is incorrect because the pie chart and context report 'Anger Expression' at 7.9%, not 15%, thereby overstating its usage and misrepresenting the comparative reliance on affective tactics.", "claim": "The predominance of user-driven 'Reason and Preach' (21.5%) and expert-driven 'Proposal' (19.3%) implies participants favored cognitively rich interventions over affective tactics, aligning with the Elaboration Likelihood Model's central route emphasis, contrasting with moderate reliance on emotive 'Anger Expression' (15%).", "label": false }, { "paperid": "2409.12507v1", "paper_path": "./SciVer/papers/2409.12507v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "1", "5.5" ], "image_path": "./SciVer/images/2409.12507v1_figure_2(a).png", "request_id": 690, "origin_statement": "On CIFAR10-DVS at timestep 1, HSD preserves spatio-temporal coherence, achieving 75.8% accuracy—over 33% higher than TET (DTS)'s 42.2%—demonstrating that consistent time steps significantly mitigate information loss from event data sparsity.", "perturbed_statement": "On CIFAR10-DVS at timestep 1, HSD preserves spatio-temporal coherence, achieving 65.2% accuracy—only 5% higher than TET (DTS)'s 60.2%—implying that consistent time steps have negligible effect on information loss.", "perturbed_explanation": "This is incorrect because the actual HSD accuracy at timestep 1 is 75.81%, not 65.2%, and TET (DTS)'s accuracy is around 42.2%, not 60.2%, so the claimed 5% gap contradicts the data.", "claim": "On CIFAR10-DVS at timestep 1, HSD preserves spatio-temporal coherence, achieving 65.2% accuracy—only 5% higher than TET (DTS)'s 60.2%—implying that consistent time steps have negligible effect on information loss.", "label": 
false }, { "paperid": "2411.06609v1", "paper_path": "./SciVer/papers/2411.06609v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.06609v1-Table1-1.png", "request_id": 697, "origin_statement": "Increasing the excitation amplitude from I0=10^2 to 4·10^2 reduces the relative L2 reconstruction error by ~6.12% under weak damping (α=0.3) and ~8.29% under strong damping (α=0.8), demonstrating that higher energy enhances SNR and mitigates attenuation-induced information loss in bi-Laplacian regularization.", "perturbed_statement": "Increasing the excitation amplitude from I0=10^2 to 4·10^2 reduces the relative L2 reconstruction error by 15% under weak damping (α=0.3) and 20% under strong damping (α=0.8), showing that higher energy boosts SNR and counters attenuation in bi-Laplacian regularization.", "perturbed_explanation": "The stated reductions (15% and 20%) are incorrect. The table shows actual error drops from 35.66% to 29.54% (≈6.12%) for α=0.3 and from 58.19% to 49.90% (≈8.29%) for α=0.8, not 15% and 20%.", "claim": "Increasing the excitation amplitude from I0=10^2 to 4·10^2 reduces the relative L2 reconstruction error by 15% under weak damping (α=0.3) and 20% under strong damping (α=0.8), showing that higher energy boosts SNR and counters attenuation in bi-Laplacian regularization.", "label": false }, { "paperid": "2409.06439v1", "paper_path": "./SciVer/papers/2409.06439v1.json", "claim_type": "analytical", "type": "chart", "item": "3(b)", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.06439v1_figure_3(b).png", "request_id": 700, "origin_statement": "Because E2Tree’s dissimilarity matrix D=1−ĤO, derived from MSE‐weighted co‐occurrence counts, hierarchical clustering using complete linkage recovers the RF’s natural grouping with an FMI≈0.9, indicating near‐perfect preservation of the random forest’s structure in Ō_{ij}.", "perturbed_statement": "Because E2Tree’s dissimilarity matrix D=1−ĤO, derived from MSE‐weighted co‐occurrence counts, hierarchical clustering using complete linkage recovers the RF’s natural grouping with an FMI≈0.5, indicating near‐perfect preservation of the random forest’s structure in Ō_{ij}.", "perturbed_explanation": "The perturbation is incorrect because the context states the Fowlkes–Mallows index (FMI) is approximately 90%, not 50%, for the E2Tree estimation. Thus the claim of FMI≈0.5 contradicts the documented result of FMI≈0.9.", "claim": "Because E2Tree’s dissimilarity matrix D=1−ĤO, derived from MSE‐weighted co‐occurrence counts, hierarchical clustering using complete linkage recovers the RF’s natural grouping with an FMI≈0.5, indicating near‐perfect preservation of the random forest’s structure in Ō_{ij}.", "label": false }, { "paperid": "2411.02305v1", "paper_path": "./SciVer/papers/2411.02305v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.02305v1_figure_5.png", "request_id": 702, "origin_statement": "In Figure 5, pass^k decays concavely across trials, with function calling’s drop decreasing from 8% (k=1→2) to 2% (k=3→4). This diminishing marginal failure rate suggests positive intertrial correlations, contradicting a memoryless Bernoulli process assumption.", "perturbed_statement": "In Figure 5, pass^k decays convexly across trials, with function calling’s drop increasing from 8% (k=1→2) to 12% (k=3→4). 
This growing marginal failure rate suggests negative intertrial correlations, consistent with a hyper-sensitive error accumulation process.", "perturbed_explanation": "The perturbation is incorrect because pass^k actually exhibits a concave decline, not convex. Specifically, function calling’s drop from k=3 to k=4 is only about 2% (48%→46%), not 12%, and the marginal failure rate decreases rather than increases.", "claim": "In Figure 5, pass^k decays convexly across trials, with function calling’s drop increasing from 8% (k=1→2) to 12% (k=3→4). This growing marginal failure rate suggests negative intertrial correlations, consistent with a hyper-sensitive error accumulation process.", "label": false }, { "paperid": "2409.08519v1", "paper_path": "./SciVer/papers/2409.08519v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.08519v1_figure_7.png", "request_id": 704, "origin_statement": "The merge tree interleaving distance peaks when the moving Gaussian crosses the saddle-line between fixed clusters, indicating topological transitions; LSH-based d_R and d_S binary matrices reproduce the three clusters reported by Yan et al. [80], confirming LSH effectively approximates interleaving distance under persistence threshold τ=0.2.", "perturbed_statement": "The merge tree interleaving distance peaks when the moving Gaussian aligns with a global maximum, indicating topological transitions; LSH-based d_R and d_S binary matrices reproduce the four clusters reported by Yan et al. [80], confirming LSH effectively approximates interleaving distance under persistence threshold τ=0.05.", "perturbed_explanation": "This statement is incorrect because the interleaving distance actually reflects crossings of saddle connections, not alignments with a global maximum. Yan et al. [80] identified three clusters, not four, and the experiments used a persistence threshold of τ=0.2 rather than τ=0.05.", "claim": "The merge tree interleaving distance peaks when the moving Gaussian aligns with a global maximum, indicating topological transitions; LSH-based d_R and d_S binary matrices reproduce the four clusters reported by Yan et al. [80], confirming LSH effectively approximates interleaving distance under persistence threshold τ=0.05.", "label": false }, { "paperid": "2410.19231v1", "paper_path": "./SciVer/papers/2410.19231v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.19231v1-Table3-1.png", "request_id": 705, "origin_statement": "The fine-tuned Mistral7b model's extended session durations (274.6 s) and elevated Care rating (1.846) relative to the larger Mistral8x7b (98.9 s, 1.833) suggest a stronger scaffolded interaction strategy that aligns with Vygotsky's zone of proximal development, promoting deeper learner engagement.", "perturbed_statement": "The fine-tuned Mistral7b model's shorter average sessions (98.9 s vs. 274.6 s) and lower Care rating (1.833 vs. 1.846) relative to Mistral8x7b indicate a deficient scaffolded tutoring approach, which contradicts Vygotsky's zone of proximal development.", "perturbed_explanation": "The perturbed statement erroneously reverses both metrics: in reality, the fine-tuned Mistral7b averaged 274.61 s (not 98.9 s) and had a higher Care score of 1.846 (not 1.833) compared to the base model, invalidating the claimed deficiency.", "claim": "The fine-tuned Mistral7b model's shorter average sessions (98.9 s vs. 
274.6 s) and lower Care rating (1.833 vs. 1.846) relative to Mistral8x7b indicate a deficient scaffolded tutoring approach, which contradicts Vygotsky's zone of proximal development.", "label": false }, { "paperid": "2411.12115v1", "paper_path": "./SciVer/papers/2411.12115v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.12115v1_figure_7.png", "request_id": 706, "origin_statement": "At 3% dataset size, DM matches its full-dataset accuracy, suggesting its pruning prioritizes low-noise, information-rich samples that preserve gradient diversity and model convergence under extreme data reduction.", "perturbed_statement": "At 3% dataset size, MTT matches its full-dataset accuracy, suggesting its pruning prioritizes low-noise, information-rich samples that preserve gradient diversity and model convergence under extreme data reduction.", "perturbed_explanation": "Figure 7 shows that at 3% dataset size, MTT achieves only about 28.5% accuracy, which is well below its full-dataset baseline of approximately 32.3%. Therefore, MTT does not match full-dataset accuracy at 3%, contradicting the perturbed claim.", "claim": "At 3% dataset size, MTT matches its full-dataset accuracy, suggesting its pruning prioritizes low-noise, information-rich samples that preserve gradient diversity and model convergence under extreme data reduction.", "label": false }, { "paperid": "2409.08201v2", "paper_path": "./SciVer/papers/2409.08201v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.08201v2-Table1-1.png", "request_id": 707, "origin_statement": "By fixing the L2-norm δ between competing survival functions at 0.2, the study ensures that two-sample test power π remains between 0.1 and 0.9 for varying sample sizes and intersection patterns. This constraint standardizes effect size, allowing fair comparison of test performance under 0, 1, or 2 hazard crossings.", "perturbed_statement": "By fixing the L1-norm δ between competing survival functions at 0.2, the study ensures that two-sample test power π remains between 0.05 and 0.95 for varying sample sizes and intersection patterns. This constraint standardizes effect size, allowing fair comparison of test performance under 0, 1, or 2 hazard crossings.", "perturbed_explanation": "The perturbation is incorrect because the original design fixes the L2-norm (not the L1-norm) of the difference between survival functions, and it ensures test power π falls between 0.1 and 0.9 (not between 0.05 and 0.95), as stated in the context.", "claim": "By fixing the L1-norm δ between competing survival functions at 0.2, the study ensures that two-sample test power π remains between 0.05 and 0.95 for varying sample sizes and intersection patterns. 
This constraint standardizes effect size, allowing fair comparison of test performance under 0, 1, or 2 hazard crossings.", "label": false }, { "paperid": "2409.00471v2", "paper_path": "./SciVer/papers/2409.00471v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "2", "3.2", "4.2", "4.3.2" ], "image_path": "./SciVer/images/2409.00471v2-Table2-1.png", "request_id": 709, "origin_statement": "The posterior log stellar mass formed (11.00) and current stellar mass (10.80) imply ~37% mass loss, aligning with theoretical expectations (~30–40%) for 1 Gyr-old populations shedding mass via supernova and AGB winds.", "perturbed_statement": "The posterior log stellar mass formed (11.00) and current stellar mass (10.80) imply ~63% mass loss, aligning with theoretical expectations (~60%) for 1 Gyr-old populations shedding mass via supernova and AGB winds.", "perturbed_explanation": "Table 2 lists log M_*_formed = 11.00 and log M_*_current = 10.80, so the mass fraction retained is 10^10.80/10^11.00 ≃ 0.63, meaning ~37% is lost, not 63%. Moreover, models predict ~30–40% mass loss over 1 Gyr, not ~60%.", "claim": "The posterior log stellar mass formed (11.00) and current stellar mass (10.80) imply ~63% mass loss, aligning with theoretical expectations (~60%) for 1 Gyr-old populations shedding mass via supernova and AGB winds.", "label": false }, { "paperid": "2409.12946v1", "paper_path": "./SciVer/papers/2409.12946v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1", "4.2" ], "image_path": "./SciVer/images/2409.12946v1_figure_1.png", "request_id": 710, "origin_statement": "By using entropy-minimization to rectify noisy pseudo labels before adversarial fine-tuning, SNORD curtails error amplification, achieving within 1% of TRADES-AWP robustness on CIFAR-10 at just 0.2% labeling, suggesting that label-noise mitigation is crucial for SSL adversarial robustness.", "perturbed_statement": "By using entropy-minimization to rectify noisy pseudo labels before adversarial fine-tuning, SNORD curtails error amplification, achieving within 5% of TRADES robustness on CIFAR-100 at just 2% labeling, suggesting that label-noise mitigation is sufficient for SSL adversarial robustness.", "perturbed_explanation": "The perturbed statement is incorrect because SNORD’s reported result refers to CIFAR-10—not CIFAR-100—and it reaches within 1% of TRADES-AWP robustness (not 5% of TRADES). 
Additionally, it requires only 0.2% labeled data, not 2%.", "claim": "By using entropy-minimization to rectify noisy pseudo labels before adversarial fine-tuning, SNORD curtails error amplification, achieving within 5% of TRADES robustness on CIFAR-100 at just 2% labeling, suggesting that label-noise mitigation is sufficient for SSL adversarial robustness.", "label": false }, { "paperid": "2410.19277v2", "paper_path": "./SciVer/papers/2410.19277v2.json", "claim_type": "analytical", "type": "chart", "item": "5(b)", "section": [ "3.2.1" ], "image_path": "./SciVer/images/2410.19277v2_figure_5(b).png", "request_id": 714, "origin_statement": "In UC-2, GA-driven test generation starts matching RS in early evaluations but surpasses it by revealing ~6 failures at 200 evaluations, demonstrating that evolutionary search exploits accumulating diversity through crossover and mutation to uncover systematic vulnerabilities more effectively than random sampling.", "perturbed_statement": "In UC-2, GA-driven test generation starts matching RS in early evaluations but surpasses it by revealing ~8 failures at 200 evaluations, demonstrating that evolutionary search exploits accumulating diversity through crossover and mutation to uncover systematic vulnerabilities more effectively than random sampling.", "perturbed_explanation": "The perturbed statement incorrectly claims ~8 failures at 200 evaluations for GA in UC-2. Figure 5 shows GA reveals approximately 6–6.5 failures by that point, not 8, so the failure count detail is contradicted by the data.", "claim": "In UC-2, GA-driven test generation starts matching RS in early evaluations but surpasses it by revealing ~8 failures at 200 evaluations, demonstrating that evolutionary search exploits accumulating diversity through crossover and mutation to uncover systematic vulnerabilities more effectively than random sampling.", "label": false }, { "paperid": "2410.01912v1", "paper_path": "./SciVer/papers/2410.01912v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.01912v1-Table2-1.png", "request_id": 723, "origin_statement": "Given that FID quantifies the Fréchet distance between real and generated image features, the 2.5B DnD-Transformer-XXXL (cfg=2) achieves a 2.21 FID—nearly half of LlamaGen-3B's 4.21—demonstrating superior distribution alignment with shorter autoregressive sequences.", "perturbed_statement": "Given that FID quantifies structural similarity between real and generated images, the 2.5B DnD-Transformer-XXXL (cfg=2) achieves a 1.21 FID—nearly one quarter of LlamaGen-3B's 4.21—demonstrating superior distribution alignment with shorter autoregressive sequences.", "perturbed_explanation": "The table reports a 2.21 FID for the 2.5B DnD-Transformer-XXXL (cfg=2), not 1.21, and FID measures the Fréchet distance between Inception embeddings rather than structural similarity.", "claim": "Given that FID quantifies structural similarity between real and generated images, the 2.5B DnD-Transformer-XXXL (cfg=2) achieves a 1.21 FID—nearly one quarter of LlamaGen-3B's 4.21—demonstrating superior distribution alignment with shorter autoregressive sequences.", "label": false }, { "paperid": "2410.12968v1", "paper_path": "./SciVer/papers/2410.12968v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.12968v1_figure_3.png", "request_id": 724, "origin_statement": "Figure 3 shows that in the innermost 0–5% Lagrangian shell, 
second-generation stars >0.8 M⊙ begin with a normalized mixing ratio of ≈2.1, decreasing to ≈1.2 by 5 Gyr, demonstrating that two-body relaxation accelerates spatial mixing more effectively for lower-mass populations than for high-mass stars.", "perturbed_statement": "Figure 3 shows that in the innermost 0–5% Lagrangian shell, second-generation stars >0.8 M⊙ begin with a normalized mixing ratio of ≈2.1, decreasing to ≈0.8 by 5 Gyr, demonstrating that two-body relaxation accelerates spatial mixing more effectively for higher-mass populations than for low-mass stars.", "perturbed_explanation": "The perturbation is incorrect because Figure 3 indicates the ratio for >0.8 M⊙ stars never falls to 0.8—it remains above ≈1.0 at 5 Gyr—and two-body relaxation actually mixes low-mass stars faster, not high-mass ones, contradicting both the numerical value and the causal claim.", "claim": "Figure 3 shows that in the innermost 0–5% Lagrangian shell, second-generation stars >0.8 M⊙ begin with a normalized mixing ratio of ≈2.1, decreasing to ≈0.8 by 5 Gyr, demonstrating that two-body relaxation accelerates spatial mixing more effectively for higher-mass populations than for low-mass stars.", "label": false }, { "paperid": "2411.08229v1", "paper_path": "./SciVer/papers/2411.08229v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.3", "3.4" ], "image_path": "./SciVer/images/2411.08229v1-Table2-1.png", "request_id": 727, "origin_statement": "In iteration 2, setting a 0.8 m stride length with a 1 m/s advance speed produces a 0.8 s oscillation period (1.25 Hz), matching typical human head-bob frequencies (1–2 Hz) and thereby improving gait realism in VR.", "perturbed_statement": "In iteration 5, the 0.8 m stride length combined with a 1 m/s advance speed produces a 0.5 s oscillation period (2 Hz), doubling typical head-bob frequency and exaggerating gait cues in VR.", "perturbed_explanation": "The oscillation period is miscalculated: the correct period is stride length (0.8 m) divided by advance speed (1 m/s), which equals 0.8 s rather than the stated 0.5 s. 
Thus the claimed 2 Hz frequency is incorrect.", "claim": "In iteration 5, the 0.8 m stride length combined with a 1 m/s advance speed produces a 0.5 s oscillation period (2 Hz), doubling typical head-bob frequency and exaggerating gait cues in VR.", "label": false }, { "paperid": "2409.02251v1", "paper_path": "./SciVer/papers/2409.02251v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.02251v1-Table1-1.png", "request_id": 731, "origin_statement": "Despite training with WGN σ values of 5 and 10, peak AASR occurs at σ=13 for CIFAR-10 models, implying backdoored networks exploit higher-variance triggers to strengthen gradient-driven misclassification pathways, thus maintaining CA while enhancing attack sensitivity through variance-induced feature dominance.", "perturbed_statement": "The AEVC for CIFAR-10 ResNet50 is 0.8010, significantly lower than its CA of 0.9305, suggesting the WGN trigger broadly affects non-victim class predictions, indicating poor sample specificity.", "perturbed_explanation": "This statement incorrectly cites the AEVC for CIFAR-10 ResNet50 as 0.8010; according to Table 1 the correct AEVC is 0.9010, which is close to the CA of 0.9305, demonstrating the trigger remains sample-specific.", "claim": "The AEVC for CIFAR-10 ResNet50 is 0.8010, significantly lower than its CA of 0.9305, suggesting the WGN trigger broadly affects non-victim class predictions, indicating poor sample specificity.", "label": false }, { "paperid": "2409.08845v1", "paper_path": "./SciVer/papers/2409.08845v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3", "4.3" ], "image_path": "./SciVer/images/2409.08845v1-Table3-1.png", "request_id": 733, "origin_statement": "By weighting gradient updates according to the agreement margin between reference and reward models, AIPO on Mistral-7B increases AlpacaEval win rate by 7.6 pp over Snorkel-Mistral-PairRM-DPO and reduces average response length by 313 characters, demonstrating more efficient preference optimization.", "perturbed_statement": "By using a fixed reward margin in AIPO, Mistral-7B achieves a 12.3 pp win rate gain on AlpacaEval over Snorkel-Mistral-PairRM-DPO and reduces average response length by 150 tokens, showing more efficient preference optimization.", "perturbed_explanation": "The perturbation is incorrect because AIPO employs a dynamic target margin (λ) rather than a fixed margin. Table 3 reports a 7.6 percentage point improvement on AlpacaEval (37.8% vs. 30.2%), not 12.3. 
Additionally, the average response length reduction is 313 characters, not 150 tokens.", "claim": "By using a fixed reward margin in AIPO, Mistral-7B achieves a 12.3 pp win rate gain on AlpacaEval over Snorkel-Mistral-PairRM-DPO and reduces average response length by 150 tokens, showing more efficient preference optimization.", "label": false }, { "paperid": "2410.21759v2", "paper_path": "./SciVer/papers/2410.21759v2.json", "claim_type": "analytical", "type": "chart", "item": "12", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.21759v2_figure_12.png", "request_id": 736, "origin_statement": "The Laplace auxiliary matrix distribution’s light-tailed shape concentrates over 90% of samples within ±0.05, minimizing log2 quantization error and yielding a 1.5% accuracy gain over heavy-tailed Gaussian and Student-T in 4-bit IntLoRA experiments.", "perturbed_statement": "The Gaussian auxiliary matrix distribution’s light-tailed shape concentrates over 90% of samples within ±0.05, minimizing log2 quantization error and yielding a 1.5% accuracy gain over Laplace and Student-T in 4-bit IntLoRA experiments.", "perturbed_explanation": "This is incorrect because Tab. 5 and Fig. 12 demonstrate that the Laplace distribution, not Gaussian, clusters most auxiliary values near zero and achieves the highest performance under 4-bit quantization. Gaussian does not concentrate over 90% of samples within ±0.05 nor deliver the claimed 1.5% gain.", "claim": "The Gaussian auxiliary matrix distribution’s light-tailed shape concentrates over 90% of samples within ±0.05, minimizing log2 quantization error and yielding a 1.5% accuracy gain over Laplace and Student-T in 4-bit IntLoRA experiments.", "label": false }, { "paperid": "2409.01788v1", "paper_path": "./SciVer/papers/2409.01788v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.01788v1_figure_2.png", "request_id": 738, "origin_statement": "Guided and coverage-guided grey-box fuzzers (DogeFuzz-G and DogeFuzz-DG) achieve about 48% median code coverage on 409 complex Ethereum contracts within 15 minutes—roughly 20% higher than the 40% median of the black-box strategy—demonstrating that lightweight instrumentation-driven seed prioritization efficiently exercises deeper contract logic.", "perturbed_statement": "Guided and coverage-guided grey-box fuzzers (DogeFuzz-G and DogeFuzz-DG) achieve about 60% median code coverage on 409 complex Ethereum contracts within 30 minutes—roughly 20% higher than the 40% median of the black-box strategy—demonstrating that lightweight instrumentation-driven seed prioritization efficiently exercises deeper contract logic.", "perturbed_explanation": "The perturbed statement is incorrect because DogeFuzz-G and DogeFuzz-DG reach around 48% median code coverage (not 60%) in only 15 minutes (not 30), as reported for the 409 executable contracts. 
The context specifies a 15-minute campaign and ~48% median coverage.", "claim": "Guided and coverage-guided grey-box fuzzers (DogeFuzz-G and DogeFuzz-DG) achieve about 60% median code coverage on 409 complex Ethereum contracts within 30 minutes—roughly 20% higher than the 40% median of the black-box strategy—demonstrating that lightweight instrumentation-driven seed prioritization efficiently exercises deeper contract logic.", "label": false }, { "paperid": "2409.04557v1", "paper_path": "./SciVer/papers/2409.04557v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.04557v1_figure_1.png", "request_id": 740, "origin_statement": "Chi^2 rises by two orders of magnitude when the semi-major axis deviates by ∼−0.005 AU and ∼+0.001 AU, corresponding to first-order mean-motion resonances that amplify TTV amplitudes through enhanced resonant gravitational interactions, highlighting non-linear sensitivity of TTV signals to minute orbital changes.", "perturbed_statement": "Chi^2 peaks by two orders of magnitude at ∼+0.005 AU and ∼−0.002 AU offsets, corresponding to second-order secular resonances that amplify TTV amplitudes through long-term angular momentum exchange, highlighting linear sensitivity of TTV signals to small orbital changes.", "perturbed_explanation": "The perturbation is incorrect because Figure 1 shows the largest χ² spikes at offsets near −0.005 AU and +0.001 AU, not at +0.005 AU and −0.002 AU. Moreover, these features arise from first-order mean-motion resonances, not second-order secular resonances, and the sensitivity is strongly non-linear rather than linear.", "claim": "Chi^2 peaks by two orders of magnitude at ∼+0.005 AU and ∼−0.002 AU offsets, corresponding to second-order secular resonances that amplify TTV amplitudes through long-term angular momentum exchange, highlighting linear sensitivity of TTV signals to small orbital changes.", "label": false }, { "paperid": "2409.04728v1", "paper_path": "./SciVer/papers/2409.04728v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "4.6" ], "image_path": "./SciVer/images/2409.04728v1_figure_7.png", "request_id": 742, "origin_statement": "On September 23, 2017, Zurich’s primary Koopman modes display reduced amplitudes and roughly two-hour phase shifts compared to September 1, indicating suppressed oscillatory flow and delayed congestion peaks consistent with hysteretic unloading in the dynamic MFD, where reduced network storage leads to lower amplitude and delayed flow peaks.", "perturbed_statement": "On September 23, 2017, Zurich’s primary Koopman modes exhibit increased amplitudes and roughly three-hour phase leads compared to September 1, indicating enhanced oscillatory flow and earlier congestion peaks consistent with preloading behavior in the dynamic MFD, where increased network storage produces higher amplitude and advanced flow responses.", "perturbed_explanation": "This statement contradicts the context: the data show that on September 23 the mode amplitudes are lower (not higher) and their peaks occur later (not earlier), reflecting hysteretic unloading rather than preloading and advanced responses.", "claim": "On September 23, 2017, Zurich’s primary Koopman modes exhibit increased amplitudes and roughly three-hour phase leads compared to September 1, indicating enhanced oscillatory flow and earlier congestion peaks consistent with preloading behavior in the dynamic MFD, where increased network storage produces higher amplitude 
and advanced flow responses.", "label": false }, { "paperid": "2409.14781v4", "paper_path": "./SciVer/papers/2409.14781v4.json", "claim_type": "analytical", "type": "chart", "item": "3(b)", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.14781v4_figure_3(b).png", "request_id": 756, "origin_statement": "Figure 3(b) demonstrates DC-PDD’s AUC increasing monotonically from 0.85 at 64 tokens to 0.95 at 512 tokens, suggesting that detection performance improves with text length due to richer contextual cues amplifying memorization signals in the LLM’s pretraining distribution.", "perturbed_statement": "Figure 3(b) demonstrates DC-PDD’s AUC peaking at 0.98 at 256 tokens then dropping to 0.90 at 512 tokens, implying that excessively long texts flood the model with noise, undermining memorization signals and degrading detection accuracy.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 3(b) shows DC-PDD’s AUC rising steadily from about 0.85 at 64 tokens to about 0.95 at 512 tokens, with no decline or peak at 0.98. Therefore, both the claimed peak value and the downward trend at 512 tokens contradict the actual data.", "claim": "Figure 3(b) demonstrates DC-PDD’s AUC peaking at 0.98 at 256 tokens then dropping to 0.90 at 512 tokens, implying that excessively long texts flood the model with noise, undermining memorization signals and degrading detection accuracy.", "label": false }, { "paperid": "2409.19747v1", "paper_path": "./SciVer/papers/2409.19747v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.1.3" ], "image_path": "./SciVer/images/2409.19747v1_figure_3.png", "request_id": 758, "origin_statement": "By integrating scene graphs that capture precise visual attributes such as pixel-level coordinates and color encodings with standard data tables, VisText generates semantically nuanced captions that accurately describe chart trends, mitigating OCR-induced errors.", "perturbed_statement": "By integrating scene graphs that capture precise visual attributes such as pixel-level coordinates and color encodings with standard data tables, VisText generates semantically nuanced captions that accurately describe chart trends, mitigating OCR-induced errors. However, these scene graphs can only be recovered from rasterized PNG charts.", "perturbed_explanation": "This statement is incorrect because, according to the context, scene graphs can be accurately recovered from charts available in SVG format—not limited to rasterized PNG images.", "claim": "By integrating scene graphs that capture precise visual attributes such as pixel-level coordinates and color encodings with standard data tables, VisText generates semantically nuanced captions that accurately describe chart trends, mitigating OCR-induced errors. 
However, these scene graphs can only be recovered from rasterized PNG charts.", "label": false }, { "paperid": "2409.16670v1", "paper_path": "./SciVer/papers/2409.16670v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "5.5.1", "5.5.2" ], "image_path": "./SciVer/images/2409.16670v1_figure_4.png", "request_id": 760, "origin_statement": "GraphLoRA’s accuracy on Photo and Computer peaks at λ1≈0.5–1, implying moderate regularization avoids under- and overfitting; performance remains within ±1% across rank r from 2^0 to 2^8, demonstrating that low-rank adaptations minimally impact expressive capacity when r≤256.", "perturbed_statement": "GraphLoRA’s accuracy on Photo and Computer peaks at λ1≈2–5, implying strong regularization avoids under- and overfitting; performance remains within ±2% across rank r from 2^0 to 2^4, demonstrating that low-rank adaptations minimally impact capacity when r≤16.", "perturbed_explanation": "The perturbed statement is incorrect in two ways. First, Figure 4 shows accuracy on Photo and Computer actually peaks at λ1 around 0.5–1, not 2–5. Second, performance is stable within about ±1% from r=2^0 to r=2^8, not ±2% only up to r=2^4 or r≤16.", "claim": "GraphLoRA’s accuracy on Photo and Computer peaks at λ1≈2–5, implying strong regularization avoids under- and overfitting; performance remains within ±2% across rank r from 2^0 to 2^4, demonstrating that low-rank adaptations minimally impact capacity when r≤16.", "label": false }, { "paperid": "2409.19257v1", "paper_path": "./SciVer/papers/2409.19257v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5" ], "image_path": "./SciVer/images/2409.19257v1-Table2-1.png", "request_id": 761, "origin_statement": "The cluster-5 approach yields a peak AP of 0.7679 under LISTN-C, surpassing the 0.7620 from LISTN-CA. This indicates that modeling user-content interactions alone more effectively captures semantic clusters, and that author adjacency adds noise rather than enhancing lexicon induction.", "perturbed_statement": "Under LISTN-CA, the cluster-20 method produces an AP of 0.7554, outperforming LISTN-C’s cluster-5 AP of 0.7620, highlighting that author adjacency via shared threads significantly boosts lexicon induction precision.", "perturbed_explanation": "The perturbed statement is wrong because Table 2 shows LISTN-CA’s cluster-20 AP is 0.7069 (not 0.7554), and LISTN-C’s cluster-5 AP is 0.7679 (not 0.7620). 
The incorrect AP values and model comparisons contradict the reported results.", "claim": "Under LISTN-CA, the cluster-20 method produces an AP of 0.7554, outperforming LISTN-C’s cluster-5 AP of 0.7620, highlighting that author adjacency via shared threads significantly boosts lexicon induction precision.", "label": false }, { "paperid": "2409.10104v1", "paper_path": "./SciVer/papers/2409.10104v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1", "3.2", "3.3", "3.4", "4.2", "4.3" ], "image_path": "./SciVer/images/2409.10104v1-Table1-1.png", "request_id": 765, "origin_statement": "vit-base and vit-large use batch sizes of 32 and 64 with low learning rates (1.15E-05 and 2.78E-05), reflecting the principle that high-capacity transformer models require smaller step sizes for stable gradient descent and to avoid divergence during fine-tuning.", "perturbed_statement": "vit-base and vit-large use batch sizes of 64 and 64 with low learning rates (1.15E-05 and 2.78E-04), reflecting the principle that high-capacity transformer models require smaller step sizes for stable gradient descent and to avoid divergence during fine-tuning.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 shows vit-base uses a batch size of 32 (not 64), and vit-large’s learning rate is 2.78E-05 (not 2.78E-04). These values directly contradict the provided hyperparameter settings.", "claim": "vit-base and vit-large use batch sizes of 64 and 64 with low learning rates (1.15E-05 and 2.78E-04), reflecting the principle that high-capacity transformer models require smaller step sizes for stable gradient descent and to avoid divergence during fine-tuning.", "label": false }, { "paperid": "2410.14161v2", "paper_path": "./SciVer/papers/2410.14161v2.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "4.6" ], "image_path": "./SciVer/images/2410.14161v2_figure_7.png", "request_id": 776, "origin_statement": "The dynamic time warping alignment yields a peak score at action 4 (∼90), indicating near-perfect kinematic congruence to the stretching template, while the chest-expanding motion’s sub-60 score underscores substantial joint-angle divergence, demonstrating the method’s sensitivity to temporal shape variations.", "perturbed_statement": "The dynamic time warping alignment yields a peak score at action 5 (∼90), indicating near-perfect kinematic congruence to the stretching template, while the chest-expanding motion’s sub-50 score underscores substantial joint-angle divergence.", "perturbed_explanation": "This statement is incorrect because the highest score of approximately 90 actually occurs at action 4, not action 5, and the chest-expanding motion (action 2) scores around 57, not below 50, contradicting the specified values.", "claim": "The dynamic time warping alignment yields a peak score at action 5 (∼90), indicating near-perfect kinematic congruence to the stretching template, while the chest-expanding motion’s sub-50 score underscores substantial joint-angle divergence.", "label": false }, { "paperid": "2409.16914v1", "paper_path": "./SciVer/papers/2409.16914v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.16914v1_figure_4.png", "request_id": 780, "origin_statement": "Beyond 90 tokens, the human and LLM cohesiveness peaks diverge markedly (≈0.1 vs 0.4), indicating that longer passages amplify divergences in next-token probability distributions, improving 
zero-shot detection accuracy via reduced variance in BARTScore-based cohesiveness estimates.", "perturbed_statement": "At 45 tokens, two distinct cohesiveness peaks appear around 0.05 for human text and 0.5 for LLM outputs, showing that very short passages yield maximal detector accuracy due to strong variance amplification.", "perturbed_explanation": "Figure 4 at length 45 shows heavily overlapping cohesiveness distributions without clear peaks at 0.05 or 0.5, and the text states TOCSIN fails to improve detection at this length. Thus the claim of distinct peaks and maximal accuracy at 45 tokens contradicts both the figure and the reported detection performance.", "claim": "At 45 tokens, two distinct cohesiveness peaks appear around 0.05 for human text and 0.5 for LLM outputs, showing that very short passages yield maximal detector accuracy due to strong variance amplification.", "label": false }, { "paperid": "2411.15729v1", "paper_path": "./SciVer/papers/2411.15729v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2.1" ], "image_path": "./SciVer/images/2411.15729v1-Table1-1.png", "request_id": 785, "origin_statement": "OccludeNet-D’s use of dynamically tracked occluders with a 0–100% occlusion duration ratio enhances the model’s ability to learn temporal occlusion patterns, thereby improving robustness in single-view action recognition compared to synthetic geometric-only occlusions.", "perturbed_statement": "OccludeNet-D’s use of static occluders with a fixed 50% occlusion duration ratio enhances the model’s ability to learn temporal occlusion patterns, thereby improving robustness in single-view action recognition compared to synthetic geometric-only occlusions.", "perturbed_explanation": "The perturbation is incorrect because OccludeNet-D employs dynamically tracked occluders (not static), and its occlusion duration ratio spans 0–100% (not a fixed 50%), as stated in Table 1.", "claim": "OccludeNet-D’s use of static occluders with a fixed 50% occlusion duration ratio enhances the model’s ability to learn temporal occlusion patterns, thereby improving robustness in single-view action recognition compared to synthetic geometric-only occlusions.", "label": false }, { "paperid": "2410.13842v1", "paper_path": "./SciVer/papers/2410.13842v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.13842v1-Table1-1.png", "request_id": 789, "origin_statement": "After pretraining on Objects365, D-FINE-X’s AP jumps by 3.5% (from 55.8% to 59.3%) within 21 epochs, whereas YOLOv10-X gains only 0.5% over 300 epochs, showing DETR-based models exhibit more sample-efficient convergence due to their end-to-end attention architecture.", "perturbed_statement": "After pretraining on Objects365, D-FINE-X’s AP jumps by 3.5% (from 55.8% to 59.3%) within 210 epochs, whereas YOLOv10-X gains only 0.5% over 300 epochs, showing DETR-based models exhibit more sample-efficient convergence due to their end-to-end attention architecture.", "perturbed_explanation": "The perturbed statement incorrectly cites 210 pretraining epochs for D-FINE-X, but the context specifies it achieves its AP gain in only 21 epochs. This misstates the model’s demonstrated sample efficiency in pretraining.", "claim": "After pretraining on Objects365, D-FINE-X’s AP jumps by 3.5% (from 55.8% to 59.3%) within 210 epochs, whereas YOLOv10-X gains only 0.5% over 300 epochs, showing DETR-based models exhibit more sample-efficient convergence due to their end-to-end attention architecture.", "label": false }, { "paperid": "2411.11736v1", "paper_path": "./SciVer/papers/2411.11736v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.11736v1_figure_2.png", "request_id": 790, "origin_statement": "After multi-task fine-tuning, PCA of [CLS] embeddings reveals well-separated, domain-specific clusters, indicating orthogonalization of domain features and reduced interference, which suggests improved domain alignment and generalization compared to single-task fine-tuning where embeddings form a continuous manifold with overlapping domain representations.", "perturbed_statement": "In the single-task fine-tuned model, PCA of [CLS] embeddings reveals perfectly distinct domain clusters with no overlap, while the MTL-fine-tuned model still produces a continuous manifold lacking domain separation.", "perturbed_explanation": "The perturbed statement wrongly claims that single-task fine-tuning yields perfectly distinct, non-overlapping domain clusters; however, Figure 2(a) shows that domains overlap in the single-task embedding space. It also incorrectly asserts that MTL embeddings lack separation, contradicting the clear domain clusters in Figure 2(b).", "claim": "In the single-task fine-tuned model, PCA of [CLS] embeddings reveals perfectly distinct domain clusters with no overlap, while the MTL-fine-tuned model still produces a continuous manifold lacking domain separation.", "label": false }, { "paperid": "2411.02949v1", "paper_path": "./SciVer/papers/2411.02949v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.02949v1_figure_6.png", "request_id": 796, "origin_statement": "In chaotic systems, exponential trajectory divergence can paradoxically lower short-term mean squared prediction error for inaccurate reconstructions, yet collapse the attractor to a limit cycle, illustrating how prediction error metrics fail to capture essential invariant properties such as attractor geometry and long-term temporal statistics.", "perturbed_statement": "In chaotic systems, exponential trajectory divergence always increases short-term mean squared prediction error for inaccurate reconstructions and maintains chaotic dynamics, demonstrating that mean squared prediction error reliably captures attractor geometry.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 6B shows the inaccurate (green) reconstruction has a lower 10-step prediction error (PE₁₀ = 0.007) than the accurate (orange) one (PE₁₀ = 0.011), so divergence does not always increase PE. Moreover, the inaccurate reconstruction collapses into a limit cycle rather than maintaining chaotic dynamics, contradicting the claim that PE reliably captures attractor geometry.", "claim": "In chaotic systems, exponential trajectory divergence always increases short-term mean squared prediction error for inaccurate reconstructions and maintains chaotic dynamics, demonstrating that mean squared prediction error reliably captures attractor geometry.", "label": false }, { "paperid": "2409.10944v1", "paper_path": "./SciVer/papers/2409.10944v1.json", "claim_type": "analytical", "type": "chart", "item": "1(a)", "section": [ "1" ], "image_path": "./SciVer/images/2409.10944v1_figure_1(a).png", "request_id": 798, "origin_statement": "The t-SNE visualization of mean-pooled BOLD network features reveals distinct clustering by acquisition site, implying that scanner variability and site-specific protocols introduce dominant variance that can overshadow subtle disease-related connectivity patterns.", "perturbed_statement": "The t-SNE visualization of mean-pooled BOLD network features reveals distinct clustering by parcellation resolution, implying that employing a 300-ROI atlas introduces dominant variance that can overshadow subtle disease-related connectivity patterns.", "perturbed_explanation": "This statement is incorrect because Figure 1(a) and its caption indicate clustering by acquisition site (CALTECH, KKI, LEUVEN, MAX_MUN), not by parcellation resolution. Additionally, the ABIDE dataset uses a fixed ROI atlas for all subjects, so atlas resolution cannot drive the observed clusters.", "claim": "The t-SNE visualization of mean-pooled BOLD network features reveals distinct clustering by parcellation resolution, implying that employing a 300-ROI atlas introduces dominant variance that can overshadow subtle disease-related connectivity patterns.", "label": false }, { "paperid": "2411.01443v2", "paper_path": "./SciVer/papers/2411.01443v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "6.2" ], "image_path": "./SciVer/images/2411.01443v2-Table4-1.png", "request_id": 799, "origin_statement": "By integrating QKA-driven self-attention, the model consistently boosts localization recall across scenes, yielding a 5.1% gain at Cambridge (2m,5°) and a 3.6% improvement at 7Scenes (0.3m,10°), indicating enhanced multi-scene embedding discrimination.", "perturbed_statement": "By integrating QKA-driven self-attention, the model consistently boosts localization recall across scenes, yielding a 7.2% gain at Cambridge (1m,5°) and a 4.8% improvement at 7Scenes (0.2m,10°), indicating enhanced multi-scene embedding discrimination.", "perturbed_explanation": "The perturbed statement incorrectly reports a 7.2% gain at Cambridge (1m,5°) and a 4.8% improvement at 7Scenes (0.2m,10°). In the table, the actual improvements are 3.2% (32.6→35.8) at Cambridge (1m,5°) and 2.4% (50.2→52.6) at 7Scenes (0.2m,10°).", "claim": "By integrating QKA-driven self-attention, the model consistently boosts localization recall across scenes, yielding a 7.2% gain at Cambridge (1m,5°) and a 4.8% improvement at 7Scenes (0.2m,10°), indicating enhanced multi-scene embedding discrimination.", "label": false }, { "paperid": "2410.16215v1", "paper_path": "./SciVer/papers/2410.16215v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2410.16215v1_figure_1.png", "request_id": 800, "origin_statement": "PD* yields around 2.8%, 3.6%, and 4.3% absolute accuracy gains over LM-only training on 1.9B, 3.8B, and 6.8B models, respectively, indicating that enriched logits-based knowledge distillation functions as dynamic label smoothing, which increasingly benefits larger student LLMs by mitigating overfitting.", "perturbed_statement": "PD* yields around 5.8%, 6.6%, and 7.3% absolute accuracy gains over LM-only training on 1.9B, 3.8B, and 6.8B models, respectively, indicating that enriched logits-based knowledge distillation functions as dynamic weight decay, which increasingly benefits larger student LLMs by mitigating overfitting.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 1 shows PD* improvements of approximately 2.8%, 3.6%, and 4.3%—not 5.8%, 6.6%, and 7.3%. Additionally, knowledge distillation acts like dynamic label smoothing rather than dynamic weight decay, contradicting both the numerical gains and the described mechanism.", "claim": "PD* yields around 5.8%, 6.6%, and 7.3% absolute accuracy gains over LM-only training on 1.9B, 3.8B, and 6.8B models, respectively, indicating that enriched logits-based knowledge distillation functions as dynamic weight decay, which increasingly benefits larger student LLMs by mitigating overfitting.", "label": false }, { "paperid": "2409.06178v1", "paper_path": "./SciVer/papers/2409.06178v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "6.6", "9.5" ], "image_path": "./SciVer/images/2409.06178v1_figure_8.png", "request_id": 802, "origin_statement": "By externalizing intermediate query results and natural language steps, SQLucid fosters better metacognitive monitoring, reducing users’ cognitive load, as reflected in a 2.63-point higher average confidence level (6.42 vs. 3.79) compared to MISP, with p=1.53×10⁻¹¹ indicating strong significance.", "perturbed_statement": "By externalizing intermediate query results and natural language steps, SQLucid fosters better metacognitive monitoring, reducing users’ cognitive load, as reflected in a 1.13-point higher average confidence level (6.42 vs. 5.29) compared to MISP, with p=0.05 indicating marginal significance.", "perturbed_explanation": "The perturbed statement is incorrect because it misidentifies MISP’s average confidence score as 5.29 (the actual MISP value is 3.79) and misreports the p-value (the ANOVA result is p=1.53×10⁻¹¹, not p=0.05). These discrepancies contradict the context data.", "claim": "By externalizing intermediate query results and natural language steps, SQLucid fosters better metacognitive monitoring, reducing users’ cognitive load, as reflected in a 1.13-point higher average confidence level (6.42 vs. 5.29) compared to MISP, with p=0.05 indicating marginal significance.", "label": false }, { "paperid": "2410.11772v2", "paper_path": "./SciVer/papers/2410.11772v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.11772v2_figure_2.png", "request_id": 810, "origin_statement": "Greedy layer removal reveals inherent layer-wise sparsity in PEFT: gradient updates concentrate in a small subset of mid-level transformer blocks, such that tuning only the top 25% most important layers via LoRA yields perplexity <13, closely matching full fine-tuning while reducing parameters by 75%.", "perturbed_statement": "Greedy layer removal reveals inherent layer-wise sparsity in PEFT: gradient updates concentrate in a small subset of mid-level transformer blocks, such that tuning only the top 25% most important layers via LoRA yields perplexity <11, closely matching full fine-tuning while reducing parameters by 75%.", "perturbed_explanation": "The perturbed statement claims a perplexity below 11, but Figure 2(b) shows that even when tuning the top 25% of layers, the lowest perplexity achieved is around 12.3–12.5. There is no evidence of perplexity dropping below 11.", "claim": "Greedy layer removal reveals inherent layer-wise sparsity in PEFT: gradient updates concentrate in a small subset of mid-level transformer blocks, such that tuning only the top 25% most important layers via LoRA yields perplexity <11, closely matching full fine-tuning while reducing parameters by 75%.", "label": false }, { "paperid": "2410.04223v1", "paper_path": "./SciVer/papers/2410.04223v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.3.2" ], "image_path": "./SciVer/images/2410.04223v1-Table3-1.png", "request_id": 813, "origin_statement": "Allowing unlimited planning time roughly doubles Llama-3.1’s retrosynthetic success rate from 17.6% to 31.2%, demonstrating that expanding the A* search budget outweighs heuristic improvements in uncovering viable multi-step synthesis routes.", "perturbed_statement": "Allowing unlimited planning time roughly doubles Llama-3.1’s retrosynthetic success rate from 17.6% to 41.2%, demonstrating that expanding the A* search budget outweighs heuristic improvements in uncovering viable multi-step synthesis routes.", "perturbed_explanation": "The table reports that Llama-3.1’s success rate under unlimited time is 0.312 (31.2%), not 41.2%, so the perturbed percentage contradicts the actual data.", "claim": "Allowing unlimited planning time roughly doubles Llama-3.1’s retrosynthetic success rate from 17.6% to 41.2%, demonstrating that expanding the A* search budget outweighs heuristic improvements in uncovering viable multi-step synthesis routes.", "label": false }, { "paperid": "2411.11699v1", "paper_path": "./SciVer/papers/2411.11699v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "7.3" ], "image_path": "./SciVer/images/2411.11699v1-Table4-1.png", "request_id": 815, "origin_statement": "By integrating link parameters via self-attention modules, LiTformer reduces mean absolute error by over 80% compared to FNN and 84% compared to LSTM on TS1, while also achieving ~26% faster inference than LSTM, demonstrating attention’s efficiency in modeling non-sequential channel effects.", "perturbed_statement": "By integrating link parameters via self-attention modules, LiTformer reduces mean absolute error by over 50% compared to FNN and 60% compared to LSTM on TS1, while also achieving ~26% faster inference than LSTM, demonstrating attention’s efficiency in modeling non-sequential channel effects.", "perturbed_explanation": "The stated error reduction percentages are incorrect. According to Table 4, LiTformer’s AE of 4.20 mV versus FNN’s 21.0 mV corresponds to an ~80% reduction (not 50%), and versus LSTM’s 26.0 mV corresponds to ~84% reduction (not 60%).", "claim": "By integrating link parameters via self-attention modules, LiTformer reduces mean absolute error by over 50% compared to FNN and 60% compared to LSTM on TS1, while also achieving ~26% faster inference than LSTM, demonstrating attention’s efficiency in modeling non-sequential channel effects.", "label": false }, { "paperid": "2411.09694v1", "paper_path": "./SciVer/papers/2411.09694v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.5" ], "image_path": "./SciVer/images/2411.09694v1_figure_6.png", "request_id": 816, "origin_statement": "When increasing BayesOpt+GP batch size from 1 to 10, the candidate score initially drops by about 0.003, but this gap diminishes to near zero by roughly 80 CometKiwi runs, illustrating the classic tradeoff between evaluation parallelism and sequential update fidelity in Bayesian optimization.", "perturbed_statement": "When increasing BayesOpt+GP batch size from 1 to 20, the candidate score initially drops by about 0.005, but this gap diminishes to near zero by roughly 30 CometKiwi runs, illustrating the classic tradeoff between evaluation parallelism and sequential update fidelity in Bayesian optimization.", "perturbed_explanation": "Incorrect because the experiment only tested batch sizes up to 10, not 20; the observed maximum drop in candidate score is about 0.003 rather than 0.005; and performance differences only converge after ~80 runs, not after 30 runs.", "claim": "When increasing BayesOpt+GP batch size from 1 to 20, the candidate score initially drops by about 0.005, but this gap diminishes to near zero by roughly 30 CometKiwi runs, illustrating the classic tradeoff between evaluation parallelism and sequential update fidelity in Bayesian optimization.", "label": false }, { "paperid": "2410.00366v1", "paper_path": "./SciVer/papers/2410.00366v1.json", "claim_type": "analytical", "type": "chart", "item": "10", "section": [ "6.4" ], "image_path": "./SciVer/images/2410.00366v1_figure_10.png", "request_id": 820, "origin_statement": "Applying AFE-selected features in lung cancer classification increases Random Forest precision from approximately 95% (Permutation) to 97%, reducing false positives and enhancing diagnostic reliability by prioritizing features with higher signal-to-noise ratios critical for clinical decision support.", "perturbed_statement": "Applying AFE-selected features in lung cancer classification increases Random Forest precision from approximately 92% (Permutation) to 99%, reducing false negatives and enhancing diagnostic reliability by prioritizing features with lower signal-to-noise ratios critical for clinical decision support.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 10 shows Permutation precision at about 95%, not 92%, and AFE precision at roughly 97%, not 99%. Additionally, AFE features reduce false positives rather than false negatives and emphasize features with higher signal-to-noise ratios, not lower.", "claim": "Applying AFE-selected features in lung cancer classification increases Random Forest precision from approximately 92% (Permutation) to 99%, reducing false negatives and enhancing diagnostic reliability by prioritizing features with lower signal-to-noise ratios critical for clinical decision support.", "label": false }, { "paperid": "2411.00312v1", "paper_path": "./SciVer/papers/2411.00312v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2411.00312v1-Table1-1.png", "request_id": 35, "origin_statement": "The adult age group spans 40 years, which is twice the 20-year span of the young age group.", "perturbed_statement": "The adult age group spans 60 years, which is three times the 20-year span of the young age group.", "perturbed_explanation": "The table defines the adult group as aged 20 to 59, a span of 40 years, not 60 years, so it is twice, not three times, the 20-year span of the young group.", "claim": "The adult age group spans 40 years, which is twice the 20-year span of the young age group.", "label": true }, { "paperid": "2409.19148v1", "paper_path": "./SciVer/papers/2409.19148v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.19148v1-Table6-1.png", "request_id": 153, "origin_statement": "The persuasive text set (PTS) for sentence index 3 contains nine unique terms, which is three more than the six unique terms in the PTS for sentence index 2.", "perturbed_statement": "The persuasive text set (PTS) for sentence index 3 contains ten unique terms, which is four more than the six unique terms in the PTS for sentence index 2.", "perturbed_explanation": "This is incorrect because the table lists only nine terms for the PTS at sentence index 3 (incalculable, outraged, cultural tragedy, lobotomy, fire, loss, destroyed, ruins, threat) and six terms for sentence index 2, so there aren’t ten terms nor a four-term difference.", "claim": "The persuasive text set (PTS) for sentence index 3 contains nine unique terms, which is three more than the six unique terms in the PTS for sentence index 2.", "label": true }, { "paperid": "2410.02674v1", "paper_path": "./SciVer/papers/2410.02674v1.json", "claim_type": "direct", "type": "chart", "item": "1(b)", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.02674v1_figure_1(b).png", "request_id": 156, "origin_statement": "At k=4, BERT-forced_1 achieves approximately 0.90 average accuracy, fasttext_pretrained_1 around 0.85 and google/canine-s_1 about 0.62, while bert-large-uncased_1 (≈0.30), bert-base-uncased_1 (≈0.15) and google/canine-c_1 (≈0.48) all fall below 0.50.", "perturbed_statement": "At k=4, BERT-forced_1 achieves approximately 0.70 average accuracy, fasttext_pretrained_1 around 0.85 and google/canine-s_1 about 0.62, while bert-large-uncased_1 (≈0.30), bert-base-uncased_1 (≈0.15) and google/canine-c_1 (≈0.48) all fall below 0.50.", "perturbed_explanation": "The perturbed statement incorrectly states that BERT-forced_1 has around 0.70 accuracy at k=4, but the figure shows it actually has about 0.90 average accuracy at k=4.", "claim": "At k=4, BERT-forced_1 achieves approximately 0.90 average accuracy, fasttext_pretrained_1 around 0.85 and google/canine-s_1 about 0.62, while bert-large-uncased_1 (≈0.30), bert-base-uncased_1 (≈0.15) and google/canine-c_1 (≈0.48) all fall below 0.50.", "label": true }, { "paperid": "2411.14751v1", "paper_path": "./SciVer/papers/2411.14751v1.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.14751v1_figure_3(a).png", "request_id": 230, "origin_statement": "At noise level_3, the rot10_std5_prob0.5 model reaches an mAP of approximately 0.37, which is about 0.15 higher than the baseline’s mAP of around 0.22.", "perturbed_statement": "At noise level_3, the rot10_std5_prob0.5 model reaches an mAP of approximately 0.32, which is only about 0.10 higher than the baseline’s mAP of around 0.26.", "perturbed_explanation": "This is incorrect because in the figure, at level_3 the rot10_std5_prob0.5 curve is at about 0.37 mAP (not 0.32), and the baseline curve is at about 0.22 mAP (not 0.26).", "claim": "At noise level_3, the rot10_std5_prob0.5 model reaches an mAP of approximately 0.37, which is about 0.15 higher than the baseline’s mAP of around 0.22.", "label": true }, { "paperid": "2411.15633v1", "paper_path": "./SciVer/papers/2411.15633v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.15633v1-Table1-1.png", "request_id": 311, "origin_statement": "Effort yields a cross-method evaluation average AUC of 0.940, which is 0.075 higher than ProDet’s 0.867 and 0.105 higher than LSDA’s 0.835.", "perturbed_statement": "Effort yields a cross-method evaluation average AUC of 0.910, which is 0.043 higher than ProDet’s 0.867 and 0.075 higher than LSDA’s 0.835.", "perturbed_explanation": "The table reports Effort’s cross-method average AUC as 0.940, not 0.910, so both the AUC value and the stated performance differences relative to ProDet and LSDA are incorrect.", "claim": "Effort yields a cross-method evaluation average AUC of 0.940, which is 0.075 higher than ProDet’s 0.867 and 0.105 higher than LSDA’s 0.835.", "label": true }, { "paperid": "2410.20483v2", "paper_path": "./SciVer/papers/2410.20483v2.json", "claim_type": "direct", "type": "chart", "item": "6(b)", "section": [ "6.2", "6.3" ], "image_path": "./SciVer/images/2410.20483v2_figure_6(b).png", "request_id": 316, "origin_statement": "On the German dataset, DiCE has a median log-likelihood near 10^-3 with zero features changed, while Growing Sphere has a median log-likelihood near 10^0 with about 20 features changed.", "perturbed_statement": "On the German dataset, DiCE has a median log-likelihood near 10^-1 with zero features changed, while Growing Sphere has a median log-likelihood near 10^0 with about 15 features changed.", "perturbed_explanation": "The figure shows the DiCE point on the German dataset at a log-likelihood around 10^-3 (not 10^-1), and the Growing Sphere point is positioned at about 20 features changed (not ~15). These details contradict the perturbed statement.", "claim": "On the German dataset, DiCE has a median log-likelihood near 10^-3 with zero features changed, while Growing Sphere has a median log-likelihood near 10^0 with about 20 features changed.", "label": true }, { "paperid": "2410.03577v1", "paper_path": "./SciVer/papers/2410.03577v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "3.3", "3.4.2" ], "image_path": "./SciVer/images/2410.03577v1_figure_3.png", "request_id": 338, "origin_statement": "Layer 26 shows the lowest uncertainty (0.030) for 'wooden', while layer 10 shows the highest uncertainty (0.972), indicating uncertainty fluctuates across layers.", "perturbed_statement": "Layer 24 shows the lowest uncertainty (0.020) for 'wooden', while layer 12 shows the highest uncertainty (0.976), indicating strong fluctuation across layers.", "perturbed_explanation": "This is incorrect because the table records the lowest uncertainty for 'wooden' at layer 26 (0.030), not layer 24 (0.020), and the highest uncertainty at layer 10 (0.972), not layer 12 (0.976).", "claim": "Layer 26 shows the lowest uncertainty (0.030) for 'wooden', while layer 10 shows the highest uncertainty (0.972), indicating uncertainty fluctuates across layers.", "label": true }, { "paperid": "2410.22046v2", "paper_path": "./SciVer/papers/2410.22046v2.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.2.1" ], "image_path": "./SciVer/images/2410.22046v2_figure_4.png", "request_id": 378, "origin_statement": "G chords appear nearly twice as often as A chords, with G at 13.8% compared to A’s 7.6%.", "perturbed_statement": "G chords appear nearly twice as often as A chords, with G at 15.8% compared to A’s 7.6%.", "perturbed_explanation": "The dataset’s chart indicates G chords occur at 13.8%, not 15.8%, so the perturbed percentage for G is inconsistent with the provided distribution.", "claim": "G chords appear nearly twice as often as A chords, with G at 13.8% compared to A’s 7.6%.", "label": true }, { "paperid": "2410.23296v1", "paper_path": "./SciVer/papers/2410.23296v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.23296v1_figure_4.png", "request_id": 380, "origin_statement": "On the synthetic normal data slice, the qLSTM PDF peaks at approximately 17.5 density, about 34% higher than the LQR PDF’s peak density of around 13.1.", "perturbed_statement": "On the synthetic normal data slice, the qLSTM PDF peaks at approximately 20.3 density, about 55% higher than the LQR PDF’s peak density of around 15.2.", "perturbed_explanation": "This statement is incorrect because the actual peak densities shown are roughly 17.5 for qLSTM and about 13.1 for LQR, not 20.3 and 15.2 as claimed.", "claim": "On the synthetic normal data slice, the qLSTM PDF peaks at approximately 17.5 density, about 34% higher than the LQR PDF’s peak density of around 13.1.", "label": true }, { "paperid": "2411.16273v1", "paper_path": "./SciVer/papers/2411.16273v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.5" ], "image_path": "./SciVer/images/2411.16273v1_figure_5.png", "request_id": 534, "origin_statement": "The raw IMU signal in subplot (a) reaches a maximum of about 1.8 m/s² at around 900 ms, whereas the filtered IMU signal in subplot (b) peaks at about 1.1 m/s² at roughly the same time, indicating a reduction of approximately 0.7 m/s² from filtering.", "perturbed_statement": "The raw IMU signal in subplot (a) reaches a maximum of about 1.5 m/s² at around 900 ms, whereas the filtered IMU signal in subplot (b) peaks at about 0.9 m/s² at roughly the same time, indicating a reduction of approximately 0.6 m/s² from filtering.", "perturbed_explanation": "This is incorrect because the actual raw IMU peak in subplot (a) is around 1.8 m/s² (not 1.5 m/s²) and the filtered IMU peak in subplot (b) is around 1.1 m/s² (not 0.9 m/s²), so the reduction is about 0.7 m/s², not 0.6 m/s².", "claim": "The raw IMU signal in subplot (a) reaches a maximum of about 1.8 m/s² at around 900 ms, whereas the filtered IMU signal in subplot (b) peaks at about 1.1 m/s² at roughly the same time, indicating a reduction of approximately 0.7 m/s² from filtering.", "label": true }, { "paperid": "2409.13873v1", "paper_path": "./SciVer/papers/2409.13873v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "2", "8", "9" ], "image_path": "./SciVer/images/2409.13873v1_figure_1.png", "request_id": 538, "origin_statement": "All censored subjects (orange lines) maintain a negative percent change in tumor burden (PCHG < 0) throughout their observed days up to around 550 days.", "perturbed_statement": "All event subjects (blue lines) maintain a negative percent change in tumor burden (PCHG < 0) throughout their observed days up to around 550 days.", "perturbed_explanation": "The perturbed statement is false because multiple blue event trajectories exceed 0% PCHG after approximately 200 days, showing positive percent changes in tumor burden, which contradicts the claim that they all remain below zero.", "claim": "All censored subjects (orange lines) maintain a negative percent change in tumor burden (PCHG < 0) throughout their observed days up to around 550 days.", "label": true }, { "paperid": "2409.09506v1", "paper_path": "./SciVer/papers/2409.09506v1.json", "claim_type": "direct", "type": "chart", "item": "1(c)", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.09506v1_figure_1(c).png", "request_id": 558, "origin_statement": "ESPnet-EZ has roughly one-sixth the dependent code of ESPnet, with 40k lines versus 264k lines, indicating a reduction of over 85%.", "perturbed_statement": "ESPnet-EZ has roughly one-eighth the dependent code of ESPnet, with 30k lines versus 264k lines, indicating a reduction of nearly 90%.", "perturbed_explanation": "The figure shows ESPnet-EZ has 40k dependent code lines (not 30k), yielding about one-sixth the size of 264k lines rather than one-eighth, and the reduction is about 85%, not nearly 90%.", "claim": "ESPnet-EZ has roughly one-sixth the dependent code of ESPnet, with 40k lines versus 264k lines, indicating a reduction of over 85%.", "label": true }, { "paperid": "2411.05966v1", "paper_path": "./SciVer/papers/2411.05966v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.05966v1_figure_5.png", "request_id": 570, "origin_statement": "Across the four properties, Llama 3’s TM-Scores average 92.75, which is 8.25 points higher than Phi 3’s average of 84.5.", "perturbed_statement": "Across the four properties, Llama 3’s TM-Scores average 89, which is 5 points higher than Phi 3’s average of 84.", "perturbed_explanation": "The perturbed statement is incorrect because Llama 3’s actual TM-Scores are 82, 95, 98, and 96, averaging 92.75 (not 89). Phi 3’s actual scores are 78, 80, 85, and 95, averaging 84.5 (not 84), making the true difference 8.25 points rather than 5.", "claim": "Across the four properties, Llama 3’s TM-Scores average 92.75, which is 8.25 points higher than Phi 3’s average of 84.5.", "label": true }, { "paperid": "2410.04927v2", "paper_path": "./SciVer/papers/2410.04927v2.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.04927v2_figure_7.png", "request_id": 588, "origin_statement": "In the 1/ε=0.001 perturbed sequence, 3 out of 5 items retain the original brand names, while in the 1/ε=0.01 sequence, none of the 5 replacements share a brand with the originals.", "perturbed_statement": "In the 1/ε=0.001 perturbed sequence, 2 out of 5 items retain the original brand names, while in the 1/ε=0.01 sequence, none of the 5 replacements share a brand with the originals.", "perturbed_explanation": "The perturbed statement incorrectly states that only 2 items retain the original brand at 1/ε=0.001. In fact, items v1 (Avery), v2 (3M), and v5 (uni-ball) all keep the original brands, totaling 3 items, not 2.", "claim": "In the 1/ε=0.001 perturbed sequence, 3 out of 5 items retain the original brand names, while in the 1/ε=0.01 sequence, none of the 5 replacements share a brand with the originals.", "label": true }, { "paperid": "2411.11449v1", "paper_path": "./SciVer/papers/2411.11449v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.1.4" ], "image_path": "./SciVer/images/2411.11449v1_figure_4.png", "request_id": 606, "origin_statement": "Single interview participant S3 requested 25 explanations in total, 11 more than focus group Gr.8’s 14 explanations.", "perturbed_statement": "Single interview participant S3 requested 25 explanations in total, five more than focus group Gr.8’s 14 explanations.", "perturbed_explanation": "The perturbed statement understates the difference: S3 requested 25 explanations and Gr.8 requested 14, so the actual difference is 11 explanations, not five.", "claim": "Single interview participant S3 requested 25 explanations in total, 11 more than focus group Gr.8’s 14 explanations.", "label": true }, { "paperid": "2410.10652v1", "paper_path": "./SciVer/papers/2410.10652v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.10652v1-Table2-1.png", "request_id": 625, "origin_statement": "QueST achieves the lowest Subgraph Wasserstein distance in eight out of nine niches on the DLPFC dataset and has the lowest average distance (0.838) across all nine niches compared to GraphST, SLAT, and STAGATE.", "perturbed_statement": "QueST achieves the lowest Subgraph Wasserstein distance in all nine niches on the DLPFC dataset and has the lowest average distance (0.838) across all nine niches compared to GraphST, SLAT, and STAGATE.", "perturbed_explanation": "The perturbed claim is incorrect for the \"Layer3_Layer4_Layer5_100\" niche, where SLAT has a lower distance (0.809) than QueST (0.870), so QueST does not achieve the lowest distance in that niche.", "claim": "QueST achieves the lowest Subgraph Wasserstein distance in eight out of nine niches on the DLPFC dataset and has the lowest average distance (0.838) across all nine niches compared to GraphST, SLAT, and STAGATE.", "label": true }, { "paperid": "2411.06866v1", "paper_path": "./SciVer/papers/2411.06866v1.json", "claim_type": "direct", "type": "chart", "item": "3(b)", "section": [ "5.8" ],
"image_path": "./SciVer/images/2411.06866v1_figure_3(b).png", "request_id": 660, "origin_statement": "OpenBookQA accuracy peaks at 72.3% when n=100, which is 1.2 percentage points higher than the 71.1% at n=10.", "perturbed_statement": "OpenBookQA accuracy peaks at 72.3% when n=50, which is 1.2 percentage points higher than the 71.1% at n=10.", "perturbed_explanation": "The perturbed claim incorrectly states that the peak accuracy occurs at n=50. According to the chart, the 72.3% peak for OpenBookQA actually happens at n=100, not at n=50.", "claim": "OpenBookQA accuracy peaks at 72.3% when n=100, which is 1.2 percentage points higher than the 71.1% at n=10.", "label": true }, { "paperid": "2411.07042v1", "paper_path": "./SciVer/papers/2411.07042v1.json", "claim_type": "direct", "type": "chart", "item": "4(b)", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.07042v1_figure_4(b).png", "request_id": 662, "origin_statement": "User P19 used 67 strategies, which is the highest; P16 used the fewest with 12, marking a 55-strategy difference.", "perturbed_statement": "User P19 used 61 strategies, which is the highest; P16 used the fewest with 15, marking a 46-strategy difference.", "perturbed_explanation": "This is incorrect because the chart shows P19 at 67 strategies (not 61) and P16 at 12 (not 15), yielding a 55-strategy difference rather than 46.", "claim": "User P19 used 67 strategies, which is the highest; P16 used the fewest with 12, marking a 55-strategy difference.", "label": true }, { "paperid": "2409.19747v1", "paper_path": "./SciVer/papers/2409.19747v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.19747v1_figure_5.png", "request_id": 740, "origin_statement": "Between 1986 and 2014, Asian voter turnout fell by about 13.3 percentage points, roughly double the 4.9-point decline observed for white voters.", "perturbed_statement": "Between 1986 and 2014, Asian voter turnout fell by about 5 percentage points, roughly double the 4.9-point decline observed for white voters.", "perturbed_explanation": "This statement is incorrect because Asian turnout actually dropped from 40.2% in 1986 to 26.9% in 2014, a decrease of 13.3 points, not 5 points.", "claim": "Between 1986 and 2014, Asian voter turnout fell by about 13.3 percentage points, roughly double the 4.9-point decline observed for white voters.", "label": true }, { "paperid": "2410.11772v2", "paper_path": "./SciVer/papers/2410.11772v2.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.11772v2_figure_4.png", "request_id": 794, "origin_statement": "By iteration 1500 in the Arithmetic Reasoning task, Layer 6's importance score nears 0.9, while Layer 2's importance falls to approximately 0.2, demonstrating the model's emphasis shifting to deeper layers.", "perturbed_statement": "By iteration 500 in the Arithmetic Reasoning task, Layer 6's importance score nears 0.6, while Layer 2's importance falls to approximately 0.2, demonstrating the model's emphasis shifting to deeper layers.", "perturbed_explanation": "At iteration 500, the plot shows Layer 6's importance already around 0.75–0.8, not 0.6. 
The perturbed statement understates Layer 6’s actual importance score at that point, making it factually incorrect.", "claim": "By iteration 1500 in the Arithmetic Reasoning task, Layer 6's importance score nears 0.9, while Layer 2's importance falls to approximately 0.2, demonstrating the model's emphasis shifting to deeper layers.", "label": true }, { "paperid": "2410.12641v1", "paper_path": "./SciVer/papers/2410.12641v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.3", "2.5" ], "image_path": "./SciVer/images/2410.12641v1-Table1-1.png", "request_id": 905, "origin_statement": "The frequency of concentric alignment (56.1%) exceeds the highest osteophyte staging class frequency (36.1%) by 19.9 percentage points.", "perturbed_statement": "The frequency of concentric alignment (56.1%) exceeds the highest osteophyte staging class frequency (32.8%) by 23.3 percentage points.", "perturbed_explanation": "The table shows the highest osteophyte staging class frequency is 36.1%, not 32.8%, so the comparison and percentage-point difference are incorrect.", "claim": "The frequency of concentric alignment (56.1%) exceeds the highest osteophyte staging class frequency (36.1%) by 19.9 percentage points.", "label": true }, { "paperid": "2409.15044v3", "paper_path": "./SciVer/papers/2409.15044v3.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "4" ], "image_path": "./SciVer/images/2409.15044v3_figure_2.png", "request_id": 968, "origin_statement": "The D−→K+π−π− mode’s M_BC distribution peaks at about 19×10^4 events/(0.25 GeV/c^2), roughly 1×10^4 more than the ~18×10^4 peak of the D¯0→K+π−π0 mode and nearly ten times the ~2×10^4 peak of D−→K_S^0π−.", "perturbed_statement": "The D−→K+π−π− mode’s M_BC distribution peaks at about 17×10^4 events/(0.25 GeV/c^2), roughly 2×10^4 more than the ~15×10^4 peak of the D¯0→K+π−π0 mode and nearly five times the ~3×10^4 peak of D−→K_S^0π−.", "perturbed_explanation": "This is incorrect because the plot shows the D−→K+π−π− peak at about 19×10^4 events, not 17×10^4; the D¯0→K+π−π0 peak is around 18×10^4, not 15×10^4; and the D−→K_S^0π− peak is near 2×10^4, not 3×10^4.", "claim": "The D−→K+π−π− mode’s M_BC distribution peaks at about 19×10^4 events/(0.25 GeV/c^2), roughly 1×10^4 more than the ~18×10^4 peak of the D¯0→K+π−π0 mode and nearly ten times the ~2×10^4 peak of D−→K_S^0π−.", "label": true }, { "paperid": "2410.10630v1", "paper_path": "./SciVer/papers/2410.10630v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "3.3", "3.5" ], "image_path": "./SciVer/images/2410.10630v1-Table4-1.png", "request_id": 1071, "origin_statement": "By the 4th training iteration, the TPO model with the Armo judge on UltraFeedback data using the specific thought prompt reaches a win rate of 37.3%, surpassing its 33.2% win rate at the third iteration by 4.1%.", "perturbed_statement": "By the 4th training iteration, the TPO model with the Armo judge on UltraFeedback data using the specific thought prompt reaches a win rate of 35.3%, surpassing its 33.2% win rate at the third iteration by 2.1%.", "perturbed_explanation": "The table shows the actual win rate at iteration 4 for the TPO model with Armo judge on UltraFeedback using the specific prompt is 37.3%, not 35.3%, making both the stated win rate and the 2.1% difference incorrect.", "claim": "By the 4th training iteration, the TPO model with the Armo judge on UltraFeedback data using the specific thought prompt reaches a win rate of 37.3%, surpassing its 33.2% win 
rate at the third iteration by 4.1%.", "label": true }, { "paperid": "2410.18100v1", "paper_path": "./SciVer/papers/2410.18100v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "1.1" ], "image_path": "./SciVer/images/2410.18100v1-Table1-1.png", "request_id": 1111, "origin_statement": "Directly Mapped Cursor studies achieve an average WPM of 17.3, compared to an average of 11.4 WPM for Direct Touch studies.", "perturbed_statement": "Directly Mapped Cursor studies achieve an average WPM of 17.3, compared to an average of 12.8 WPM for Direct Touch studies.", "perturbed_explanation": "The perturbed statement incorrectly claims a 12.8 WPM average for Direct Touch studies. According to the table, the Direct Touch entry rates are 13.8, 11.9, 10.0, and 9.9 WPM, which average to (13.8+11.9+10.0+9.9)/4 = 11.4 WPM, not 12.8 WPM.", "claim": "Directly Mapped Cursor studies achieve an average WPM of 17.3, compared to an average of 11.4 WPM for Direct Touch studies.", "label": true }, { "paperid": "2410.13638v1", "paper_path": "./SciVer/papers/2410.13638v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "1", "2" ], "image_path": "./SciVer/images/2410.13638v1-Table1-1.png", "request_id": 1185, "origin_statement": "The LSM study recorded 40 million hours of data, which is over 2.5 times the 15.7 million hours in Yuan et al. (2024) and 200 times the 0.2 million hours in Adaimi et al. (2024).", "perturbed_statement": "The LSM study recorded 45 million hours of data, which is nearly three times the 15.7 million hours in Yuan et al. (2024) and over 200 times the 0.2 million hours in Adaimi et al. (2024).", "perturbed_explanation": "Table 1 shows LSM logged 40,000 (000s) hours, i.e. 40 million hours, not 45 million. Therefore claiming 45 million hours is factually incorrect.", "claim": "The LSM study recorded 40 million hours of data, which is over 2.5 times the 15.7 million hours in Yuan et al. (2024) and 200 times the 0.2 million hours in Adaimi et al. (2024).", "label": true }, { "paperid": "2411.02348v1", "paper_path": "./SciVer/papers/2411.02348v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "3.5", "3.5.2" ], "image_path": "./SciVer/images/2411.02348v1_figure_6.png", "request_id": 1210, "origin_statement": "Adults’ Other rule use is highest in the Greek alphabet (≈100%), which is roughly 40 percentage points higher than their Latin usage (≈60%) and about 10 points higher than their Symbol usage (≈90%).", "perturbed_statement": "Adults’ Other rule use is highest in the Greek alphabet (≈80%), which is roughly 20 percentage points higher than their Latin usage (≈60%) and about 10 points higher than their Symbol usage (≈90%).", "perturbed_explanation": "The Greek Other rule use for adults is shown at about 100% in the chart, not 80%. 
Thus stating it is ≈80% and only ≈20 points above Latin is incorrect.", "claim": "Adults’ Other rule use is highest in the Greek alphabet (≈100%), which is roughly 40 percentage points higher than their Latin usage (≈60%) and about 10 points higher than their Symbol usage (≈90%).", "label": true }, { "paperid": "2411.11374v1", "paper_path": "./SciVer/papers/2411.11374v1.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "4.5" ], "image_path": "./SciVer/images/2411.11374v1_figure_9.png", "request_id": 1414, "origin_statement": "At 10 hours of training, S+Ours achieves a PSNR of about 25.6 dB, which is approximately 0.3 dB higher than S+Grid’s 25.3 dB and 0.7 dB higher than S-NeRF’s 24.9 dB.", "perturbed_statement": "At 10 hours of training, S+Ours achieves a PSNR of about 26.5 dB, which is approximately 0.4 dB higher than S+Grid’s 26.1 dB and 1.0 dB higher than S-NeRF’s 25.5 dB.", "perturbed_explanation": "The perturbed statement is wrong because at the 10-hour mark the actual PSNR values from the figure are approximately 25.6 dB for S+Ours, 25.3 dB for S+Grid, and 24.9 dB for S-NeRF—not the inflated values of 26.5, 26.1, and 25.5 dB respectively.", "claim": "At 10 hours of training, S+Ours achieves a PSNR of about 25.6 dB, which is approximately 0.3 dB higher than S+Grid’s 25.3 dB and 0.7 dB higher than S-NeRF’s 24.9 dB.", "label": true }, { "paperid": "2410.11676v2", "paper_path": "./SciVer/papers/2410.11676v2.json", "claim_type": "direct", "type": "chart", "item": "1(b)", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.11676v2_figure_1(b).png", "request_id": 1426, "origin_statement": "Grad SR1 PQN reduces the gradient norm below 10⁻⁵ in about 0.02 time units, while Gradient Descent reaches that level only after roughly 0.3 time units.", "perturbed_statement": "Grad SR1 PQN reduces the gradient norm below 10⁻⁵ in about 0.2 time units, while Gradient Descent reaches that level only after roughly 1.0 time units.", "perturbed_explanation": "This is incorrect because the plot shows Grad SR1 PQN achieves a gradient norm below 10⁻⁵ at around 0.02 time units (not 0.2), and Gradient Descent reaches 10⁻⁵ around 0.3 time units (not 1.0).", "claim": "Grad SR1 PQN reduces the gradient norm below 10⁻⁵ in about 0.02 time units, while Gradient Descent reaches that level only after roughly 0.3 time units.", "label": true }, { "paperid": "2411.17076v1", "paper_path": "./SciVer/papers/2411.17076v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "3" ], "image_path": "./SciVer/images/2411.17076v1_figure_6.png", "request_id": 1494, "origin_statement": "At the rare-earth element region around mass number 160, the FRDM2012 model yields abundances approximately 30% lower than those predicted by the WS4 model in Figure 6.", "perturbed_statement": "At the rare-earth element region around mass number 160, the FRDM2012 model yields abundances approximately 30% higher than those predicted by the WS4 model in Figure 6.", "perturbed_explanation": "In Figure 6 at A≈160, the FRDM2012 curve (red) lies below the WS4 curve (blue) in all panels, showing lower abundances; thus it cannot be 30% higher, contradicting the perturbed statement.", "claim": "At the rare-earth element region around mass number 160, the FRDM2012 model yields abundances approximately 30% lower than those predicted by the WS4 model in Figure 6.", "label": true }, { "paperid": "2411.06018v1", "paper_path": "./SciVer/papers/2411.06018v1.json", "claim_type": "direct", "type": "chart", "item": "5", 
"section": [ "6.2" ], "image_path": "./SciVer/images/2411.06018v1_figure_5.png", "request_id": 64, "origin_statement": "On HAR, VL-Time\\TimeStamps scores approximately 32.8% accuracy, about 4.7 percentage points lower than the full VL-Time model’s 37.5%.", "perturbed_statement": "On HAR, VL-Time\\TimeStamps scores approximately 28.8% accuracy, about 8.7 percentage points lower than the full VL-Time model’s 37.5%.", "perturbed_explanation": "The chart shows VL-Time\\TimeStamps at about 32.8% on HAR, not 28.8%. Therefore, the actual gap to VL-Time’s 37.5% is 4.7 points, not 8.7.", "claim": "On HAR, VL-Time\\TimeStamps scores approximately 32.8% accuracy, about 4.7 percentage points lower than the full VL-Time model’s 37.5%.", "label": true }, { "paperid": "2411.10924v1", "paper_path": "./SciVer/papers/2411.10924v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.10924v1_figure_5.png", "request_id": 68, "origin_statement": "Figure 5 shows that across all classes our approach matches the baseline only for Rye (0.00% difference), whereas Oland suffers the largest accuracy drop (−3.09%), and all other classes see declines between −0.56% and −2.93%.", "perturbed_statement": "Figure 5 shows that our method's accuracy aligns with the baseline for WH 5 (0.00% difference), whereas Oland suffers the largest drop at −2.58%, and other classes decline by −0.56% to −2.93%.", "perturbed_explanation": "The perturbed claim is incorrect because the confusion matrix shows WH 5 has a −1.27% difference (not 0.00%), and Oland’s accuracy drop is −3.09% (not −2.58%).", "claim": "Figure 5 shows that across all classes our approach matches the baseline only for Rye (0.00% difference), whereas Oland suffers the largest accuracy drop (−3.09%), and all other classes see declines between −0.56% and −2.93%.", "label": true }, { "paperid": "2410.20797v1", "paper_path": "./SciVer/papers/2410.20797v1.json", "claim_type": "direct", "type": "chart", "item": "1(c)", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.20797v1_figure_1(c).png", "request_id": 82, "origin_statement": "On CIFAR-10, the Frobenius norm difference ||Q^(n) - Q*||_F decreases from approximately 42 at epoch 0 to under 5 by epoch 100, demonstrating rapid convergence within the first 100 epochs.", "perturbed_statement": "On CIFAR-10, the Frobenius norm difference ||Q^(n) - Q*||_F decreases from approximately 42 at epoch 0 to below 1 by epoch 100, demonstrating near-complete convergence within the first 100 epochs.", "perturbed_explanation": "The perturbed statement is incorrect because the plot shows that at epoch 100, ||Q^(n) - Q*||_F is around 4, not below 1 as claimed.", "claim": "On CIFAR-10, the Frobenius norm difference ||Q^(n) - Q*||_F decreases from approximately 42 at epoch 0 to under 5 by epoch 100, demonstrating rapid convergence within the first 100 epochs.", "label": true }, { "paperid": "2410.07970v1", "paper_path": "./SciVer/papers/2410.07970v1.json", "claim_type": "direct", "type": "chart", "item": "1(a)", "section": [ "2.3.2", "2.3.3" ], "image_path": "./SciVer/images/2410.07970v1_figure_1(a).png", "request_id": 96, "origin_statement": "Dealing in Securities licenses (180,000) are more than double those for Asset Management (80,000), representing an increase of 125%.", "perturbed_statement": "Dealing in Securities licenses (200,000) are more than double those for Asset Management (80,000), representing an increase of 150%.", "perturbed_explanation": "This is 
incorrect because the chart shows 180,000 licenses for Dealing in Securities, not 200,000, and the increase over 80,000 Asset Management licenses is 125%, not 150%.", "claim": "Dealing in Securities licenses (180,000) are more than double those for Asset Management (80,000), representing an increase of 125%.", "label": true }, { "paperid": "2411.07664v1", "paper_path": "./SciVer/papers/2411.07664v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4", "4.1", "4.2" ], "image_path": "./SciVer/images/2411.07664v1_figure_4.png", "request_id": 136, "origin_statement": "The average A rating across LLMs (Llama2, Gemini-pro, GPT-3.5-turbo, Mixtral, GPT-4) is approximately 71.4%, which is about 40.7 percentage points higher than the 30.7% average for T2I models (SDXL, DeepFloyd, DALL·E 3).", "perturbed_statement": "The average A rating across LLMs (Llama2, Gemini-pro, GPT-3.5-turbo, Mixtral, GPT-4) is approximately 71.4%, which is about 50.7 percentage points higher than the 20.7% average for T2I models (SDXL, DeepFloyd, DALL·E 3).", "perturbed_explanation": "The perturbed statement understates the T2I models' actual average A rating. The true average for SDXL (12%), DeepFloyd (38%), and DALL·E 3 (42%) is about 30.7%, not 20.7%, making the difference roughly 40.7 percentage points, not 50.7.", "claim": "The average A rating across LLMs (Llama2, Gemini-pro, GPT-3.5-turbo, Mixtral, GPT-4) is approximately 71.4%, which is about 40.7 percentage points higher than the 30.7% average for T2I models (SDXL, DeepFloyd, DALL·E 3).", "label": true }, { "paperid": "2410.05046v1", "paper_path": "./SciVer/papers/2410.05046v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.05046v1_figure_4.png", "request_id": 158, "origin_statement": "Gliner-multitask-large-v0.5’s span-based F1-score is approximately 2 percentage points higher than its token-based F1-score, increasing from about 61% to about 63%.", "perturbed_statement": "Gliner-multitask-large-v0.5’s span-based F1-score is approximately 5 percentage points higher than its token-based F1-score, increasing from about 60% to about 65%.", "perturbed_explanation": "The perturbed statement is incorrect because in the figure Gliner-multitask-large-v0.5 has a token-based F1 of about 61% and a span-based F1 of about 63%, not 60% and 65%, and the actual increase is about 2 percentage points rather than 5.", "claim": "Gliner-multitask-large-v0.5’s span-based F1-score is approximately 2 percentage points higher than its token-based F1-score, increasing from about 61% to about 63%.", "label": true }, { "paperid": "2411.09585v1", "paper_path": "./SciVer/papers/2411.09585v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.09585v1-Table5-1.png", "request_id": 199, "origin_statement": "For the WaNet attack, as the budget increases from 1.0 to 3.0, FT-SAM's attack success rate rises from 5.57% to 18.87%, whereas D3's ASR remains below 1.5% throughout.", "perturbed_statement": "For the WaNet attack, as the budget increases from 1.0 to 3.0, FT-SAM's attack success rate doubles from 5.57% to 11.14%, whereas D3's ASR remains under 1.5% throughout.", "perturbed_explanation": "The perturbed statement is wrong because Table 5 reports FT-SAM's ASR at budget 3.0 for WaNet as 18.87%, not 11.14%.", "claim": "For the WaNet attack, as the budget increases from 1.0 to 3.0, FT-SAM's attack success rate rises from 5.57% to 18.87%, 
whereas D3's ASR remains below 1.5% throughout.", "label": true }, { "paperid": "2410.09635v1", "paper_path": "./SciVer/papers/2410.09635v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.3", "4.4" ], "image_path": "./SciVer/images/2410.09635v1-Table2-1.png", "request_id": 255, "origin_statement": "MLP_v5 achieves an average F1 score of 0.737, which is 0.031 higher than the 0.706 recorded for MLP_v1.", "perturbed_statement": "MLP_v5 achieves an average F1 score of 0.747, which is 0.041 higher than the 0.706 recorded for MLP_v1.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 reports MLP_v5’s average F1 as 0.737 (not 0.747). Consequently, the stated difference of 0.041 is also false compared to the actual difference of 0.031.", "claim": "MLP_v5 achieves an average F1 score of 0.737, which is 0.031 higher than the 0.706 recorded for MLP_v1.", "label": true }, { "paperid": "2409.01887v1", "paper_path": "./SciVer/papers/2409.01887v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.01887v1-Table6-1.png", "request_id": 259, "origin_statement": "Among the 24 CDN providers shown, 17 (≈71%) support HTTPS borrowing of shared certificates, only 4 (≈17%) allow HTTPS wildcard certificate borrowing, and all 24 (100%) allow unencrypted HTTP connections without certificate validation.", "perturbed_statement": "Among the 24 CDN providers shown, 17 (≈71%) support HTTPS borrowing of shared certificates, 5 (≈21%) allow HTTPS wildcard certificate borrowing, and all 24 (100%) allow unencrypted HTTP connections without certificate validation.", "perturbed_explanation": "The perturbed statement incorrectly claims that 5 CDN providers support HTTPS wildcard certificate borrowing, but the table clearly shows only 4 providers (Cachefly, CDN77, Netlify, and StackPath) have a check under the wildcard certificate column.", "claim": "Among the 24 CDN providers shown, 17 (≈71%) support HTTPS borrowing of shared certificates, only 4 (≈17%) allow HTTPS wildcard certificate borrowing, and all 24 (100%) allow unencrypted HTTP connections without certificate validation.", "label": true }, { "paperid": "2410.21350v1", "paper_path": "./SciVer/papers/2410.21350v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.21350v1_figure_6.png", "request_id": 270, "origin_statement": "The first auxiliary limit state surface G(3u)=42.23 (green) lies about twice as close to the origin (approximately 2 units along u1) compared to the true failure surface G(u)=0 (blue), which is roughly 4 units away along u1.", "perturbed_statement": "The first auxiliary limit state surface G(3u)=42.23 lies about three units from the origin along u1, while the true failure surface G(u)=0 lies about six units along u1.", "perturbed_explanation": "In the figure, the green auxiliary surface clearly intersects the u1–axis at approximately 2 units, and the true blue failure surface intersects at about 4 units, not at 3 and 6 units as stated.", "claim": "The first auxiliary limit state surface G(3u)=42.23 (green) lies about twice as close to the origin (approximately 2 units along u1) compared to the true failure surface G(u)=0 (blue), which is roughly 4 units away along u1.", "label": true }, { "paperid": "2410.05935v1", "paper_path": "./SciVer/papers/2410.05935v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1.1" ], "image_path": 
"./SciVer/images/2410.05935v1-Table1-1.png", "request_id": 315, "origin_statement": "The average number of images per volume in the test split is approximately 99.4, about 2 images more than the train split’s average of 97.4.", "perturbed_statement": "The average number of images per volume in the test split is approximately 97.4, about 2 images fewer than the train split’s average of 99.4.", "perturbed_explanation": "The perturbed claim reverses the actual averages. According to the table, test volumes have 94, 97, 99, 95, and 112 images (average ≈99.4), while train volumes have 99, 97, 92, 91, and 108 images (average ≈97.4).", "claim": "The average number of images per volume in the test split is approximately 99.4, about 2 images more than the train split’s average of 97.4.", "label": true }, { "paperid": "2409.05306v2", "paper_path": "./SciVer/papers/2409.05306v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.05306v2-Table2-1.png", "request_id": 341, "origin_statement": "In the confusion matrix, the number of off-diagonal cells (FP and FN) equals the number of diagonal cells (TP and TN), both totaling two.", "perturbed_statement": "In the confusion matrix, the number of off-diagonal cells (FP and FN) is twice the number of diagonal cells (TP and TN), with off-diagonal totaling two and diagonal totaling one.", "perturbed_explanation": "This is incorrect because the confusion matrix template shows two diagonal cells (TP and TN) and two off-diagonal cells (FP and FN), so the off-diagonal count (2) equals, not doubles, the diagonal count (2).", "claim": "In the confusion matrix, the number of off-diagonal cells (FP and FN) equals the number of diagonal cells (TP and TN), both totaling two.", "label": true }, { "paperid": "2409.02137v1", "paper_path": "./SciVer/papers/2409.02137v1.json", "claim_type": "direct", "type": "chart", "item": "5(a)", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.02137v1_figure_5(a).png", "request_id": 404, "origin_statement": "At 100,000 time steps, the LogCommitDiff3[2] curve reaches around 14,000 unique states, approximately four times more than the roughly 3,500 states reached by the LogDiff1[1] curve.", "perturbed_statement": "At 100,000 time steps, the LogCommitDiff3[2] curve reaches around 8,000 unique states, only twice the roughly 4,000 states reached by the LogDiff1[1] curve.", "perturbed_explanation": "The perturbed statement is wrong because the chart shows LogCommitDiff3[2] near 14,000 unique states (not 8,000) at 100,000 steps and LogDiff1[1] near 3,500 states (not 4,000). 
Thus both the absolute values and ratio are incorrect.", "claim": "At 100,000 time steps, the LogCommitDiff3[2] curve reaches around 14,000 unique states, approximately four times more than the roughly 3,500 states reached by the LogDiff1[1] curve.", "label": true }, { "paperid": "2410.20731v2", "paper_path": "./SciVer/papers/2410.20731v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.20731v2-Table2-1.png", "request_id": 439, "origin_statement": "The base model with the highest bone length error (12.3 mm) experiences a 1.6 mm MPJPE decrease using Bi-GRU adjustment, compared to just a 0.5 mm reduction for the model with the lowest bone error (8.5 mm).", "perturbed_statement": "The base model with the highest bone length error (12.3 mm) experiences only a 0.3 mm MPJPE decrease using Bi-GRU adjustment, while the model with the lowest bone error (8.5 mm) achieves a 2.0 mm reduction.", "perturbed_explanation": "This is incorrect because Pavllo et al.’s model (12.3 mm error) actually drops MPJPE from 46.8 to 45.2 mm (1.6 mm reduction), not 0.3 mm, and Gong et al.’s model (8.5 mm error) drops MPJPE from 39.5 to 39.0 mm (0.5 mm reduction), not 2.0 mm.", "claim": "The base model with the highest bone length error (12.3 mm) experiences a 1.6 mm MPJPE decrease using Bi-GRU adjustment, compared to just a 0.5 mm reduction for the model with the lowest bone error (8.5 mm).", "label": true }, { "paperid": "2411.09410v2", "paper_path": "./SciVer/papers/2411.09410v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.2.2" ], "image_path": "./SciVer/images/2411.09410v2-Table4-1.png", "request_id": 445, "origin_statement": "On the Office dataset, adding EIMF to MIND yields a Recall@50 improvement of +26.91%, exceeding the +9.42% gain for Bert4Rec and +6.31% for SASRec.", "perturbed_statement": "On the Office dataset, adding EIMF to MIND yields a Recall@50 improvement of +6.31%, less than the +9.42% gain for Bert4Rec and +26.91% gain for SASRec.", "perturbed_explanation": "The perturbed statement misstates the Recall@50 improvements. 
The table shows MIND’s improvement is +26.91%, not +6.31%, and SASRec’s improvement is +6.31%, not +26.91%.", "claim": "On the Office dataset, adding EIMF to MIND yields a Recall@50 improvement of +26.91%, exceeding the +9.42% gain for Bert4Rec and +6.31% for SASRec.", "label": true }, { "paperid": "2409.10695v2", "paper_path": "./SciVer/papers/2409.10695v2.json", "claim_type": "direct", "type": "chart", "item": "14", "section": [ "6.1" ], "image_path": "./SciVer/images/2409.10695v2_figure_14.png", "request_id": 522, "origin_statement": "Playground v3 achieved its largest preference margin in the stickers category, with about 80% of users choosing it over 20% for human designers, while the t-shirt category had the closest result at roughly 55% versus 45%.", "perturbed_statement": "Playground v3 achieved its largest preference margin in the stickers category, with about 90% of users choosing it over 10% for human designers, while the t-shirt category had the closest result at roughly 65% versus 35%.", "perturbed_explanation": "This claim is incorrect because the chart shows Playground v3 was preferred in stickers by about 80% compared to 20% for human designers (not 90% vs 10%), and in the t-shirt category the actual split was roughly 55% vs 45% (not 65% vs 35%).", "claim": "Playground v3 achieved its largest preference margin in the stickers category, with about 80% of users choosing it over 20% for human designers, while the t-shirt category had the closest result at roughly 55% versus 45%.", "label": true }, { "paperid": "2411.16012v1", "paper_path": "./SciVer/papers/2411.16012v1.json", "claim_type": "direct", "type": "table", "item": "7", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.16012v1-Table7-1.png", "request_id": 561, "origin_statement": "The original Hao+2022 sample has a standard deviation in X of 5.40±0.32 pc, which is almost twice that of the Jadhav+2024 sample at 2.90±0.10 pc.", "perturbed_statement": "The original Hao+2022 sample has a standard deviation in X of 5.40±0.32 pc, which is almost triple that of the Jadhav+2024 sample at 2.90±0.10 pc.", "perturbed_explanation": "This statement is wrong because 5.40 pc is only about 1.86 times the 2.90 pc reported for the Jadhav+2024 sample, not nearly three times that value.", "claim": "The original Hao+2022 sample has a standard deviation in X of 5.40±0.32 pc, which is almost twice that of the Jadhav+2024 sample at 2.90±0.10 pc.", "label": true }, { "paperid": "2410.06842v1", "paper_path": "./SciVer/papers/2410.06842v1.json", "claim_type": "direct", "type": "chart", "item": "12", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.06842v1_figure_12.png", "request_id": 572, "origin_statement": "The MAE histogram shows density falls below 1 after MAE exceeds 0.2, indicating very few samples have MAE above 0.2.", "perturbed_statement": "The MAE histogram shows density falls below 1 after MAE exceeds 0.1, indicating very few samples have MAE above 0.1.", "perturbed_explanation": "This is incorrect because the histogram bars between MAE values of 0.1 and 0.2 clearly have densities above 1, so density does not drop below 1 until after 0.2.", "claim": "The MAE histogram shows density falls below 1 after MAE exceeds 0.2, indicating very few samples have MAE above 0.2.", "label": true }, { "paperid": "2409.15139v2", "paper_path": "./SciVer/papers/2409.15139v2.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.15139v2_figure_5.png", "request_id": 594, 
"origin_statement": "The initial straight-line string yields a minimum JP of nearly 0 at the central field (index ≈17); by iteration 4 the minimum JP has climbed to about 0.06, and by the final iteration (18) all 35 interpolated fields attain JP values above 0.98.", "perturbed_statement": "The initial straight-line string yields a minimum JP of about 0.2 at the central field; by iteration 4 the minimum JP has climbed to roughly 0.3, and by the final iteration (18) all 35 interpolated fields attain JP values above 0.8.", "perturbed_explanation": "The perturbation is incorrect because the plot shows the initial minimum JP at the central field is near 0 (not 0.2), iteration 4’s minimum JP is about 0.06 (not 0.3), and the final iteration’s JP values are nearly 1.0 (not just above 0.8).", "claim": "The initial straight-line string yields a minimum JP of nearly 0 at the central field (index ≈17); by iteration 4 the minimum JP has climbed to about 0.06, and by the final iteration (18) all 35 interpolated fields attain JP values above 0.98.", "label": true }, { "paperid": "2411.18373v1", "paper_path": "./SciVer/papers/2411.18373v1.json", "claim_type": "direct", "type": "chart", "item": "2(c)", "section": [ "4" ], "image_path": "./SciVer/images/2411.18373v1_figure_2(c).png", "request_id": 620, "origin_statement": "At x = 3, the fitted profile a(x) (red dashed) is approximately 0.02, about 33% higher than the DEQ solution’s value of around 0.015.", "perturbed_statement": "At x = 3, the fitted profile a(x) (red dashed) is approximately 0.03, roughly 100% higher than the DEQ solution’s value of about 0.015.", "perturbed_explanation": "The perturbed claim is incorrect because the fit profile at x = 3 is actually about 0.02, not 0.03, and this represents a 33% increase over the DEQ value of ~0.015, not a 100% increase.", "claim": "At x = 3, the fitted profile a(x) (red dashed) is approximately 0.02, about 33% higher than the DEQ solution’s value of around 0.015.", "label": true }, { "paperid": "2409.14128v2", "paper_path": "./SciVer/papers/2409.14128v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.3", "7" ], "image_path": "./SciVer/images/2409.14128v2-Table3-1.png", "request_id": 629, "origin_statement": "Training with ABlur yields the highest overall average accuracy (87.37%), surpassing the GBlur-trained model by 1.10 points; in contrast, the six-class model without augmentation scores only 54.73% on GaussianBlur, 36.17 points below its 90.90% performance on unaltered images.", "perturbed_statement": "Training with ABlur yields the highest overall average accuracy (86.37%), surpassing the GBlur-trained model by 1.10 points; in contrast, the six-class model without augmentation scores only 60.00% on GaussianBlur, 30.90 points below its 90.90% performance on unaltered images.", "perturbed_explanation": "Both figures are incorrect when compared to Table 3. 
The ABlur-trained model’s true average accuracy is 87.37% (not 86.37%), and the six-class model’s accuracy on GaussianBlur is 54.73% (not 60.00%), so the stated differences are false.", "claim": "Training with ABlur yields the highest overall average accuracy (87.37%), surpassing the GBlur-trained model by 1.10 points; in contrast, the six-class model without augmentation scores only 54.73% on GaussianBlur, 36.17 points below its 90.90% performance on unaltered images.", "label": true }, { "paperid": "2410.01727v1", "paper_path": "./SciVer/papers/2410.01727v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.01727v1-Table2-1.png", "request_id": 631, "origin_statement": "On the Eedi dataset, sparseKT model’s AUC increases by 3.98 points (from 74.98 to 78.96), representing a relative gain of 5.31%, the largest percentage improvement among all models.", "perturbed_statement": "On the Eedi dataset, sparseKT model’s AUC increases by 4.10 points (from 75.00 to 79.10), representing a relative gain of 5.31%, the largest percentage improvement among all models.", "perturbed_explanation": "The table shows sparseKT’s AUC rises from 74.98 to 78.96, a gain of 3.98 points, not from 75.00 to 79.10 (4.10 points). The baseline and improved values in the perturbed claim do not match the reported 74.98→78.96.", "claim": "On the Eedi dataset, sparseKT model’s AUC increases by 3.98 points (from 74.98 to 78.96), representing a relative gain of 5.31%, the largest percentage improvement among all models.", "label": true }, { "paperid": "2411.02305v1", "paper_path": "./SciVer/papers/2411.02305v1.json", "claim_type": "direct", "type": "table", "item": "15", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.02305v1-Table15-1.png", "request_id": 685, "origin_statement": "Half of the functions (50%) in Table 15 are dependent functions, with the Query-dependent and Calculation-dependent categories each contributing 25% of the total 16 functions.", "perturbed_statement": "Only 40% of the functions in Table 15 are dependent functions, with the Query-dependent and Calculation-dependent categories each contributing 20% of the total 16 functions.", "perturbed_explanation": "This is incorrect because Table 15 actually lists 16 functions, 8 of which are dependent (4 Query-dependent and 4 Calculation-dependent), making 50% overall and 25% per dependent category, not 40% and 20%.", "claim": "Half of the functions (50%) in Table 15 are dependent functions, with the Query-dependent and Calculation-dependent categories each contributing 25% of the total 16 functions.", "label": true }, { "paperid": "2411.12115v1", "paper_path": "./SciVer/papers/2411.12115v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.12115v1-Table2-1.png", "request_id": 687, "origin_statement": "Pruned LD3M with DM achieves a 3.3 percentage-point improvement on ImageNet-B under IPC=10, exceeding the 2.2-point gain of pruned GLaD with DC by 1.1 points.", "perturbed_statement": "Under IPC=10, pruning LD3M in DC yields a 3.3 percentage-point gain on ImageNet-B, surpassing pruned GLaD’s 2.2-point improvement on the same subset by 1.1 points.", "perturbed_explanation": "The perturbation is incorrect because the 3.3-point improvement on ImageNet-B is for LD3M with DM (not DC), and the actual gain for LD3M with DC in the table is only +1.7, not +3.3.", "claim": "Pruned LD3M with DM achieves a 3.3 percentage-point 
improvement on ImageNet-B under IPC=10, exceeding the 2.2-point gain of pruned GLaD with DC by 1.1 points.", "label": true }, { "paperid": "2409.12946v1", "paper_path": "./SciVer/papers/2409.12946v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.12946v1-Table1-1.png", "request_id": 691, "origin_statement": "At 0.1% labels on CIFAR-10, SNORD achieves 71.71% standard accuracy, surpassing DynACL++ by 7.37% (64.34%) and ACL by 23.69% (48.02%).", "perturbed_statement": "At 0.1% labels on CIFAR-10, SNORD achieves 65.00% standard accuracy, surpassing DynACL++ by 0.66% (64.34%) and ACL by 17.98% (48.02%).", "perturbed_explanation": "The perturbed statement is incorrect because SNORD’s standard accuracy at 0.1% labels is actually 71.71%, not 65.00%. Consequently, the reported margins over DynACL++ (64.34%) and ACL (48.02%) are misstated.", "claim": "At 0.1% labels on CIFAR-10, SNORD achieves 71.71% standard accuracy, surpassing DynACL++ by 7.37% (64.34%) and ACL by 23.69% (48.02%).", "label": true }, { "paperid": "2410.12968v1", "paper_path": "./SciVer/papers/2410.12968v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.12968v1_figure_1.png", "request_id": 706, "origin_statement": "At a projected radius of five times the half-light radius, the 0.5–0.8 M⊙ group has the lowest normalized second-generation fraction (~0.68), compared to ~0.8 for all stars.", "perturbed_statement": "At a projected radius of five times the half-light radius, the 0.8+ M⊙ group has the lowest normalized second-generation fraction (~0.68), compared to ~0.8 for all stars.", "perturbed_explanation": "The value of ~0.68 at R/Rₕₗ = 5 corresponds to the 0.5–0.8 M⊙ mass bin (blue line), not the 0.8+ M⊙ bin (purple line), which is actually at ~0.75.", "claim": "At a projected radius of five times the half-light radius, the 0.5–0.8 M⊙ group has the lowest normalized second-generation fraction (~0.68), compared to ~0.8 for all stars.", "label": true }, { "paperid": "2409.14781v4", "paper_path": "./SciVer/papers/2409.14781v4.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.14781v4-Table2-1.png", "request_id": 737, "origin_statement": "On the PatentMIA benchmark using Baichuan-13B, DC-PDD achieves an AUC of 0.699, which is 0.042 higher than the next best method, Small Ref, which scores 0.657.", "perturbed_statement": "On the PatentMIA benchmark using Baichuan-13B, DC-PDD achieves an AUC of 0.750, which is 0.093 higher than the next best method, Small Ref, which scores 0.657.", "perturbed_explanation": "This is incorrect because Table 2 shows DC-PDD’s AUC for Baichuan-13B as 0.699, not 0.750, and the actual difference over Small Ref’s 0.657 is 0.042, not 0.093.", "claim": "On the PatentMIA benchmark using Baichuan-13B, DC-PDD achieves an AUC of 0.699, which is 0.042 higher than the next best method, Small Ref, which scores 0.657.", "label": true }, { "paperid": "2409.19257v1", "paper_path": "./SciVer/papers/2409.19257v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5" ], "image_path": "./SciVer/images/2409.19257v1-Table2-1.png", "request_id": 743, "origin_statement": "LISTN-C’s Cluster-20 configuration achieves an AP score of 0.7554, outperforming LISTN-CA’s Cluster-20 score of 0.7069 by 0.0485, the largest AP improvement among corresponding clustering methods.", "perturbed_statement": "LISTN-C’s 
Cluster-20 configuration achieves an AP score of 0.7554, outperforming LISTN-CA’s Cluster-20 score of 0.7069 by 0.0585, the largest AP improvement among corresponding clustering methods.", "perturbed_explanation": "The stated improvement of 0.0585 is incorrect. According to the table, LISTN-C’s AP (0.7554) minus LISTN-CA’s AP (0.7069) equals 0.0485, not 0.0585.", "claim": "LISTN-C’s Cluster-20 configuration achieves an AP score of 0.7554, outperforming LISTN-CA’s Cluster-20 score of 0.7069 by 0.0485, the largest AP improvement among corresponding clustering methods.", "label": true }, { "paperid": "2410.13842v1", "paper_path": "./SciVer/papers/2410.13842v1.json", "claim_type": "direct", "type": "chart", "item": "4(f)", "section": [ "5.5" ], "image_path": "./SciVer/images/2410.13842v1_figure_4(f).png", "request_id": 772, "origin_statement": "In the unweighted distributions, the peak of the refined (green) distribution for the top edge reaches about 0.63, approximately 0.07 higher than the bottom edge peak of about 0.56—a 12.5% increase.", "perturbed_statement": "In the unweighted distributions, the peak of the refined (green) distribution for the top edge reaches about 0.63, roughly 0.17 higher than the bottom edge peak of about 0.46—a 37% increase.", "perturbed_explanation": "The perturbed statement incorrectly states the bottom edge peak as 0.46 and the difference as 0.17; however, the image shows the bottom refined peak at about 0.56 and a true difference of about 0.07.", "claim": "In the unweighted distributions, the peak of the refined (green) distribution for the top edge reaches about 0.63, approximately 0.07 higher than the bottom edge peak of about 0.56—a 12.5% increase.", "label": true }, { "paperid": "2411.06382v1", "paper_path": "./SciVer/papers/2411.06382v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5" ], "image_path": "./SciVer/images/2411.06382v1_figure_7.png", "request_id": 774, "origin_statement": "The altitude RMSE rises from ≈0.15 cm with 64-bit floats to ≈1.5 cm with 8-bit fixed point, a tenfold increase.", "perturbed_statement": "The altitude RMSE rises from ≈0.05 cm with 64-bit floats to ≈1.5 cm with 8-bit fixed point, a thirtyfold increase.", "perturbed_explanation": "This is incorrect because the bar plot shows the 64-bit floating-point altitude RMSE is about 0.15 cm, not 0.05 cm, and the change from 0.15 cm to 1.5 cm is a tenfold increase, not thirtyfold.", "claim": "The altitude RMSE rises from ≈0.15 cm with 64-bit floats to ≈1.5 cm with 8-bit fixed point, a tenfold increase.", "label": true }, { "paperid": "2409.01577v1", "paper_path": "./SciVer/papers/2409.01577v1.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "7.1.x" ], "image_path": "./SciVer/images/2409.01577v1_figure_8.png", "request_id": 832, "origin_statement": "Between 2006 and 2010, the Internet's share of news sources rose by 17 percentage points, the largest change among the four media types.", "perturbed_statement": "Between 2006 and 2010, the Internet's share of news sources rose by 17 percentage points, the smallest change among the four media types.", "perturbed_explanation": "The chart shows the Internet share increased from 24% in 2006 to 41% in 2010, a 17-point rise, which is the largest change; newspaper fell by 6 points, television by 3, and radio rose only by 2—so it cannot be the smallest change.", "claim": "Between 2006 and 2010, the Internet's share of news sources rose by 17 percentage points, the largest change among the four 
media types.", "label": true }, { "paperid": "2410.11315v1", "paper_path": "./SciVer/papers/2410.11315v1.json", "claim_type": "direct", "type": "chart", "item": "5(b)", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.11315v1_figure_5(b).png", "request_id": 840, "origin_statement": "At NSR=4, the base extractor’s silver faithfulness drops by about 12%, which is roughly twice the approximately 6% drop of the aligned extractor.", "perturbed_statement": "At NSR=4, the base extractor’s silver faithfulness drops by about 14%, which is roughly twice the approximately 6% drop of the aligned extractor.", "perturbed_explanation": "This is wrong because the chart shows the base extractor’s silver faithfulness drop at NSR=4 is about 12%, not 14%, so the stated 14% contradicts the actual value in the figure.", "claim": "At NSR=4, the base extractor’s silver faithfulness drops by about 12%, which is roughly twice the approximately 6% drop of the aligned extractor.", "label": true }, { "paperid": "2411.02824v1", "paper_path": "./SciVer/papers/2411.02824v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.4.1" ], "image_path": "./SciVer/images/2411.02824v1_figure_4.png", "request_id": 896, "origin_statement": "As pruning ratio increases from 0% to 80%, the maximum imaginary part of the poles rises from around 10 to around 80.", "perturbed_statement": "As pruning ratio increases from 0% to 80%, the maximum imaginary part of the poles rises from around 5 to around 40.", "perturbed_explanation": "The figure shows the highest imaginary values at 0% pruning near ±10, not ±5, and at 80% pruning near ±80, not ±40, so the stated maxima contradict the observed values.", "claim": "As pruning ratio increases from 0% to 80%, the maximum imaginary part of the poles rises from around 10 to around 80.", "label": true }, { "paperid": "2409.09166v1", "paper_path": "./SciVer/papers/2409.09166v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "1" ], "image_path": "./SciVer/images/2409.09166v1-Table3-1.png", "request_id": 907, "origin_statement": "Four of the eleven mission acronyms in Table 3 are exactly three letters long, accounting for about 36% of the total.", "perturbed_statement": "Three of the eleven mission acronyms in Table 3 are exactly three letters long, accounting for about 27% of the total.", "perturbed_explanation": "The table actually lists four three-letter acronyms—SDO, HMI, AIA, and EIS—so stating only three is incorrect.", "claim": "Four of the eleven mission acronyms in Table 3 are exactly three letters long, accounting for about 36% of the total.", "label": true }, { "paperid": "2409.14794v1", "paper_path": "./SciVer/papers/2409.14794v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.1" ], "image_path": "./SciVer/images/2409.14794v1-Table1-1.png", "request_id": 915, "origin_statement": "Abdurrahim and Fudholi's CNN-BiLSTM model achieved both an 85.0% F1 score and 85.0% accuracy, improving by 2.0 percentage points compared to the BiLSTM model's 83.0% in both metrics.", "perturbed_statement": "Abdurrahim and Fudholi's CNN-BiLSTM model achieved both an 88.0% F1 score and 88.0% accuracy, improving by 5.0 percentage points compared to the BiLSTM model's 83.0% in both metrics.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 reports the CNN-BiLSTM model actually achieved 85.0% F1 score and 85.0% accuracy (not 88.0%), and thus it improved by only 2.0 percentage points over the BiLSTM 
model’s 83.0% (not 5.0).", "claim": "Abdurrahim and Fudholi's CNN-BiLSTM model achieved both an 85.0% F1 score and 85.0% accuracy, improving by 2.0 percentage points compared to the BiLSTM model's 83.0% in both metrics.", "label": true }, { "paperid": "2410.03717v1", "paper_path": "./SciVer/papers/2410.03717v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.03717v1-Table3-1.png", "request_id": 935, "origin_statement": "The post-trained model achieves 32% accuracy on direct questions, four times the base model’s 8%, and 27% on multihop questions, over twice the base model’s 12%.", "perturbed_statement": "The post-trained model achieves 18% accuracy on direct questions, just over twice the base model’s 8%, and 27% on multihop questions, over twice the base model’s 12%.", "perturbed_explanation": "This statement is incorrect because Table 3 shows the post-trained model’s accuracy on direct questions is actually 32%, not 18%, making the claimed comparison to the base model’s 8% invalid.", "claim": "The post-trained model achieves 32% accuracy on direct questions, four times the base model’s 8%, and 27% on multihop questions, over twice the base model’s 12%.", "label": true }, { "paperid": "2410.20441v1", "paper_path": "./SciVer/papers/2410.20441v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4.2.2" ], "image_path": "./SciVer/images/2410.20441v1_figure_6.png", "request_id": 948, "origin_statement": "Increasing the number of solver iterations from 1×n_x to 2×n_x reduces the log10 L1 error ||C_err_33||_1 by only about 0.1 orders of magnitude, whereas increasing from 2×n_x to 3×n_x yields a larger reduction of approximately 0.65 orders of magnitude.", "perturbed_statement": "Increasing the number of solver iterations from 1×n_x to 2×n_x reduces the log10 L1 error ||C_err_33||_1 by about 0.5 orders of magnitude, whereas increasing from 2×n_x to 3×n_x yields a smaller reduction of approximately 0.2 orders of magnitude.", "perturbed_explanation": "This statement is wrong because the actual plot shows the error drops from around –11.5 to –11.6 (≈0.1 order) when going from 1×n_x to 2×n_x, and from –11.6 to –12.25 (≈0.65 order) from 2×n_x to 3×n_x, not 0.5 and 0.2 orders respectively.", "claim": "Increasing the number of solver iterations from 1×n_x to 2×n_x reduces the log10 L1 error ||C_err_33||_1 by only about 0.1 orders of magnitude, whereas increasing from 2×n_x to 3×n_x yields a larger reduction of approximately 0.65 orders of magnitude.", "label": true }, { "paperid": "2411.06324v1", "paper_path": "./SciVer/papers/2411.06324v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "5" ], "image_path": "./SciVer/images/2411.06324v1_figure_3.png", "request_id": 974, "origin_statement": "In the southernmost 20% of latitudes (y<0.2), the standardized residuals average around +0.2, whereas in the northernmost 20% (y>0.4) they average around -0.2.", "perturbed_statement": "In the southernmost 20% of latitudes (y<0.2), the standardized residuals average around +0.3, whereas in the northernmost 20% (y>0.4) they average around -0.3.", "perturbed_explanation": "The perturbed statement is incorrect because the actual average standardized residuals in the southernmost quintile are about +0.2 rather than +0.3, and in the northernmost quintile about -0.2 rather than -0.3, as visible on the color distribution in the figure.", "claim": "In the southernmost 20% of latitudes (y<0.2), the 
standardized residuals average around +0.2, whereas in the northernmost 20% (y>0.4) they average around -0.2.", "label": true }, { "paperid": "2409.17896v1", "paper_path": "./SciVer/papers/2409.17896v1.json", "claim_type": "direct", "type": "chart", "item": "5(b)", "section": [ "6.1.2" ], "image_path": "./SciVer/images/2409.17896v1_figure_5(b).png", "request_id": 982, "origin_statement": "In SAC+CAPS, the normalized elevator position command peaks at 1.0 around 25 timesteps then decays and stabilizes near 0.2 by 500 timesteps, indicating a fivefold reduction from peak to steady-state.", "perturbed_statement": "In SAC+CAPS, the normalized elevator position command peaks at 0.8 around 25 timesteps then decays and stabilizes near 0.1 by 500 timesteps, indicating an eightfold reduction from peak to steady-state.", "perturbed_explanation": "The perturbed claim is wrong because the actual elevator command reaches 1.0 at its peak (not 0.8) and settles around 0.2 (not 0.1), resulting in a fivefold reduction rather than eightfold.", "claim": "In SAC+CAPS, the normalized elevator position command peaks at 1.0 around 25 timesteps then decays and stabilizes near 0.2 by 500 timesteps, indicating a fivefold reduction from peak to steady-state.", "label": true }, { "paperid": "2410.17497v1", "paper_path": "./SciVer/papers/2410.17497v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2", "3.1" ], "image_path": "./SciVer/images/2410.17497v1-Table1-1.png", "request_id": 991, "origin_statement": "The average LSD SNR increased from 1504 in 2014 Oct to a peak of 2975 in 2016 Dec, nearly doubling in two years.", "perturbed_statement": "The average LSD SNR increased from 1504 in 2014 Oct to a peak of 3500 in 2016 Dec, more than doubling in two years.", "perturbed_explanation": "This is incorrect because Table 1 lists the average LSD SNR for the 2016 Dec 16–19 observing run as 2975, not 3500, so the claimed peak value contradicts the provided data.", "claim": "The average LSD SNR increased from 1504 in 2014 Oct to a peak of 2975 in 2016 Dec, nearly doubling in two years.", "label": true }, { "paperid": "2410.03505v1", "paper_path": "./SciVer/papers/2410.03505v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.03505v1-Table3-1.png", "request_id": 995, "origin_statement": "On CIFAR-10, removing BatchNorm from the joint GradResNet model reduces classification accuracy by 84.4 percentage points, from 96.3% to 11.9%.", "perturbed_statement": "On CIFAR-10, removing BatchNorm from the joint GradResNet model reduces classification accuracy by 2.3 percentage points, from 96.3% to 94.0%.", "perturbed_explanation": "Table 3 shows that the BatchNorm ablation yields an accuracy of 11.9%, representing an 84.4-point drop from 96.3%, not a 2.3-point drop to 94.0%.", "claim": "On CIFAR-10, removing BatchNorm from the joint GradResNet model reduces classification accuracy by 84.4 percentage points, from 96.3% to 11.9%.", "label": true }, { "paperid": "2411.14355v1", "paper_path": "./SciVer/papers/2411.14355v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.3", "4" ], "image_path": "./SciVer/images/2411.14355v1-Table1-1.png", "request_id": 1031, "origin_statement": "The KK capture mode accounts for 72.7%, which is more than three times the 19.7% KL fraction, exceeding it by 53.0 percentage points.", "perturbed_statement": "The KK capture mode accounts for 72.7%, which is less than twice the 19.7% KL fraction, 
differing by only 25.0 percentage points.", "perturbed_explanation": "Table 1 lists the KK fraction as 72.7% and the KL fraction as 19.7%. In reality, 72.7% is over three times 19.7%, and the difference is 72.7% − 19.7% = 53.0 percentage points, not 25.0, and it is not less than twice.", "claim": "The KK capture mode accounts for 72.7%, which is more than three times the 19.7% KL fraction, exceeding it by 53.0 percentage points.", "label": true }, { "paperid": "2410.01623v2", "paper_path": "./SciVer/papers/2410.01623v2.json", "claim_type": "direct", "type": "chart", "item": "3(c)", "section": [ "1", "4.1" ], "image_path": "./SciVer/images/2410.01623v2_figure_3(c).png", "request_id": 1034, "origin_statement": "During the 10K pre-training steps (logged every 20), weight matrix 6 has the highest average log scaling factor while matrix 3 has the lowest, with matrix 6’s values consistently near 6 compared to matrix 3’s around 2–3.", "perturbed_statement": "During the 10K pre-training steps (logged every 20), weight matrix 9 has the highest average log scaling factor while matrix 1 has the lowest, with matrix 9’s values consistently near 6 compared to matrix 1’s around 2–3.", "perturbed_explanation": "This is incorrect because the heatmap labels rank matrix 6 as having the highest average scaling factor (row 1 labeled 6) and matrix 3 as the lowest (row 10 labeled 3), not matrix 9 or matrix 1.", "claim": "During the 10K pre-training steps (logged every 20), weight matrix 6 has the highest average log scaling factor while matrix 3 has the lowest, with matrix 6’s values consistently near 6 compared to matrix 3’s around 2–3.", "label": true }, { "paperid": "2411.14572v1", "paper_path": "./SciVer/papers/2411.14572v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.14572v1-Table4-1.png", "request_id": 1051, "origin_statement": "The table presents six colored placeholders across its three rows: three blue positive labels (internal knowledge, helpful context, aligned context) and three red negative labels (no internal knowledge, unhelpful context, contradictory context), reflecting an equal 50% split between positive and negative scenarios.", "perturbed_statement": "The table presents seven colored placeholders across its three rows: four blue positive labels (internal knowledge, helpful context, aligned context, extra context) and three red negative labels (no internal knowledge, unhelpful context, contradictory context), reflecting a 57% positive emphasis.", "perturbed_explanation": "This is incorrect because the table actually contains only six placeholders, not seven. 
There are exactly three blue labels and three red labels, not four blue and three red, so the asserted 57% positive emphasis and extra context label do not exist in the source.", "claim": "The table presents six colored placeholders across its three rows: three blue positive labels (internal knowledge, helpful context, aligned context) and three red negative labels (no internal knowledge, unhelpful context, contradictory context), reflecting an equal 50% split between positive and negative scenarios.", "label": true }, { "paperid": "2410.03039v1", "paper_path": "./SciVer/papers/2410.03039v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.03039v1-Table1-1.png", "request_id": 1077, "origin_statement": "Under style-driven generation on the WikiArt dataset with DreamBooth fine-tuning, FineXtract's A-ESR_0.6 is exactly twice that of CFG+Clustering (0.22 vs 0.11), highlighting its improved extraction at the lower success threshold.", "perturbed_statement": "Under style-driven generation on the WikiArt dataset with DreamBooth fine-tuning, FineXtract's A-ESR_0.6 is three times that of CFG+Clustering (0.33 vs 0.11), highlighting its improved extraction at the lower success threshold.", "perturbed_explanation": "The perturbed statement erroneously reports FineXtract's A-ESR_0.6 as 0.33, but the table shows it is actually 0.22. Because the true value is 0.22, the claimed threefold increase (0.33 vs 0.11) is incorrect.", "claim": "Under style-driven generation on the WikiArt dataset with DreamBooth fine-tuning, FineXtract's A-ESR_0.6 is exactly twice that of CFG+Clustering (0.22 vs 0.11), highlighting its improved extraction at the lower success threshold.", "label": true }, { "paperid": "2409.17003v1", "paper_path": "./SciVer/papers/2409.17003v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "3.2", "3.4.1", "3.4.2" ], "image_path": "./SciVer/images/2409.17003v1_figure_6.png", "request_id": 1094, "origin_statement": "Between A_V = 0.1 mag and A_V = 2.0 mag, the gas temperature decreases by about 110 K, from ∼160 K to ∼50 K, in the outer PDR layer of Model 1027.", "perturbed_statement": "Between A_V = 0.1 mag and A_V = 2.0 mag, the gas temperature decreases by about 140 K, from ∼170 K to ∼30 K, in the outer PDR layer of Model 1027.", "perturbed_explanation": "This is wrong because the plot shows the temperature actually falls from about 160 K at A_V≈0.1 to about 50 K at A_V≈2.0 (a drop of ≈110 K), not from 170 K to 30 K.", "claim": "Between A_V = 0.1 mag and A_V = 2.0 mag, the gas temperature decreases by about 110 K, from ∼160 K to ∼50 K, in the outer PDR layer of Model 1027.", "label": true }, { "paperid": "2411.04941v1", "paper_path": "./SciVer/papers/2411.04941v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.04941v1-Table1-1.png", "request_id": 1107, "origin_statement": "The present study's dimensionless density difference Δ ranges from 0.018 to 0.021, which is at least 8.7% lower than the minimum 0.023 observed in previous miscible gate-type experiments.", "perturbed_statement": "The present study's dimensionless density difference Δ ranges from 0.018 to 0.022, which is at least 4.3% lower than the minimum 0.023 observed in previous miscible gate-type experiments.", "perturbed_explanation": "The upper bound of Δ is incorrectly stated as 0.022, but Table 1 lists a maximum Δ of 0.021 for the present study, so the perturbed 
statement contradicts the actual data.", "claim": "The present study's dimensionless density difference Δ ranges from 0.018 to 0.021, which is at least 8.7% lower than the minimum 0.023 observed in previous miscible gate-type experiments.", "label": true }, { "paperid": "2410.22142v1", "paper_path": "./SciVer/papers/2410.22142v1.json", "claim_type": "direct", "type": "chart", "item": "1(a)", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.22142v1_figure_1(a).png", "request_id": 1120, "origin_statement": "On Bobby Lawrence’s channel, the daily discussion messages peaked at around 620 in late April 2022, which is over 30 times the maximum daily broadcast messages (approximately 20) observed in the same timeframe.", "perturbed_statement": "On Bobby Lawrence’s channel, the daily discussion messages peaked at around 540 in mid-June 2022, which is over 25 times the maximum daily broadcast messages (approximately 20) observed in the same timeframe.", "perturbed_explanation": "This statement is incorrect because the chart shows the highest daily discussion count reached about 620 in late April 2022, not around 540 in mid-June 2022. The timing and peak value of the discussion messages are misstated.", "claim": "On Bobby Lawrence’s channel, the daily discussion messages peaked at around 620 in late April 2022, which is over 30 times the maximum daily broadcast messages (approximately 20) observed in the same timeframe.", "label": true }, { "paperid": "2409.15550v1", "paper_path": "./SciVer/papers/2409.15550v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.15550v1_figure_2(b).png", "request_id": 1126, "origin_statement": "GPT-4o’s affect empathy ratings exhibit a 0.329-point lower standard deviation (0.745 vs 1.074) than human ratings, indicating approximately 31% less variability.", "perturbed_statement": "GPT-4o’s affect empathy ratings exhibit a 0.329-point higher standard deviation (1.074 vs 0.745) than human ratings, indicating approximately 44% greater variability.", "perturbed_explanation": "This is incorrect because the chart shows GPT-4o’s affect empathy standard deviation is actually 0.745 and humans’ is 1.074; the values are swapped in the perturbed statement, so GPT-4o has about 31% less, not greater, variability.", "claim": "GPT-4o’s affect empathy ratings exhibit a 0.329-point lower standard deviation (0.745 vs 1.074) than human ratings, indicating approximately 31% less variability.", "label": true }, { "paperid": "2411.15211v1", "paper_path": "./SciVer/papers/2411.15211v1.json", "claim_type": "direct", "type": "chart", "item": "6(b)", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.15211v1_figure_6(b).png", "request_id": 1130, "origin_statement": "In the seen office environment, LightLLM achieves a 90th percentile localization error of approximately 0.7 m, which is about 0.25 m lower than the Iris system’s roughly 0.95 m error.", "perturbed_statement": "In the seen office environment, LightLLM achieves a 90th percentile localization error of approximately 1.2 m, which is 0.3 m lower than Iris’s roughly 1.5 m error.", "perturbed_explanation": "This statement is incorrect because the figure shows LightLLM’s 90th percentile error is about 0.7 m, not 1.2 m, and Iris’s error is around 0.95 m, not 1.5 m.", "claim": "In the seen office environment, LightLLM achieves a 90th percentile localization error of approximately 0.7 m, which is about 0.25 m lower than the Iris system’s roughly 0.95 m 
error.", "label": true }, { "paperid": "2411.12263v1", "paper_path": "./SciVer/papers/2411.12263v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "7.3" ], "image_path": "./SciVer/images/2411.12263v1_figure_7.png", "request_id": 1152, "origin_statement": "Increasing the number of AODs from 1 to 4 reduces the QFT benchmark’s execution time from approximately 125000 μs to about 45000 μs, a decrease of roughly 64%.", "perturbed_statement": "Increasing the number of AODs from 1 to 4 reduces the QFT benchmark’s execution time from approximately 125000 μs to about 60000 μs, a decrease of roughly 50%.", "perturbed_explanation": "The perturbed statement is incorrect because the chart shows the QFT execution time at 4 AODs is about 45000 μs, not 60000 μs, which corresponds to a ~64% reduction rather than 50%.", "claim": "Increasing the number of AODs from 1 to 4 reduces the QFT benchmark’s execution time from approximately 125000 μs to about 45000 μs, a decrease of roughly 64%.", "label": true }, { "paperid": "2409.17134v1", "paper_path": "./SciVer/papers/2409.17134v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.2", "3.3" ], "image_path": "./SciVer/images/2409.17134v1_figure_4.png", "request_id": 1168, "origin_statement": "FourierNet at ~0.95 bpp yields ~30.1 dB PSNR, approximately 2.5 dB lower than JPEG’s ~32.6 dB at the same bit-rate.", "perturbed_statement": "FourierNet at ~0.95 bpp yields ~32.5 dB PSNR, approximately 0.1 dB lower than JPEG’s ~32.6 dB at the same bit-rate.", "perturbed_explanation": "The figure shows that FourierNet at around 0.95 bpp achieves about 30.1 dB PSNR, not 32.5 dB. Therefore, the actual gap to JPEG’s ~32.6 dB is roughly 2.5 dB, not 0.1 dB.", "claim": "FourierNet at ~0.95 bpp yields ~30.1 dB PSNR, approximately 2.5 dB lower than JPEG’s ~32.6 dB at the same bit-rate.", "label": true }, { "paperid": "2410.07140v1", "paper_path": "./SciVer/papers/2410.07140v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.3.3" ], "image_path": "./SciVer/images/2410.07140v1_figure_5.png", "request_id": 1180, "origin_statement": "The horizontal distance between the centroids of the City town cluster (x≈−80) and the Place of birth cluster (x≈65) is about 145 units, indicating the widest horizontal separation among labeled relation types.", "perturbed_statement": "The horizontal distance between the centroids of the City town cluster (x≈−80) and the Place of birth cluster (x≈65) is about 100 units, indicating the widest horizontal separation among labeled relation types.", "perturbed_explanation": "This statement is incorrect because the actual x-coordinates for City town and Place of birth are approximately −80 and 65, respectively, which yields a horizontal separation of about 145 units, not 100 units.", "claim": "The horizontal distance between the centroids of the City town cluster (x≈−80) and the Place of birth cluster (x≈65) is about 145 units, indicating the widest horizontal separation among labeled relation types.", "label": true }, { "paperid": "2411.15893v1", "paper_path": "./SciVer/papers/2411.15893v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4.5" ], "image_path": "./SciVer/images/2411.15893v1_figure_6.png", "request_id": 1196, "origin_statement": "Increasing the number of dimensions from 0 to 8 reduces MAE from approximately 1.82 to 1.66 and reduces RMSE from around 3.47 to 3.29.", "perturbed_statement": "Increasing the number of dimensions from 0 to 8 reduces 
MAE from approximately 1.82 to 1.72 and reduces RMSE from around 3.47 to 3.39.", "perturbed_explanation": "This statement is wrong because in Figure 6(a), the MAE at dₘ=8 is about 1.66 (not 1.72) and the RMSE is about 3.29 (not 3.39).", "claim": "Increasing the number of dimensions from 0 to 8 reduces MAE from approximately 1.82 to 1.66 and reduces RMSE from around 3.47 to 3.29.", "label": true }, { "paperid": "2409.12320v1", "paper_path": "./SciVer/papers/2409.12320v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.12320v1_figure_5.png", "request_id": 4, "origin_statement": "According to Figure 5, the mean domain expert overall score for Article Psy increased by approximately 1.3 points after training (from about –2.3 to –1.0), whereas the mean score for Article GunViolence decreased by about 1.0 points (from roughly –0.1 to –1.1).", "perturbed_statement": "According to Figure 5, the mean domain expert overall score for Article Psy increased by approximately 1.3 points after training (from about –2.3 to –1.0), whereas the mean score for Article GunViolence decreased by about 2.0 points (from roughly –0.1 to –2.1).", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows Article GunViolence’s score dropped from about –0.1 to –1.1 (a decrease of ~1.0 point), not to –2.1 (which would imply a ~2.0 point decrease).", "claim": "According to Figure 5, the mean domain expert overall score for Article Psy increased by approximately 1.3 points after training (from about –2.3 to –1.0), whereas the mean score for Article GunViolence decreased by about 1.0 points (from roughly –0.1 to –1.1).", "label": true }, { "paperid": "2410.08207v1", "paper_path": "./SciVer/papers/2410.08207v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.1.2" ], "image_path": "./SciVer/images/2410.08207v1-Table2-1.png", "request_id": 7, "origin_statement": "The 'Ours + Paella' method achieves a structure distance of 11.34×10^3, which is approximately 84% lower than the continuous DDIM+SD1.4's 69.43×10^3.", "perturbed_statement": "The 'Ours + Paella' method achieves a structure distance of 13.34×10^3, which is approximately 90% lower than the continuous DDIM+SD1.4's 69.43×10^3.", "perturbed_explanation": "This statement is incorrect because Table 2 reports the 'Ours + Paella' structure distance as 11.34×10^3 (not 13.34×10^3), and the actual reduction from 69.43×10^3 is about 84%, not 90%.", "claim": "The 'Ours + Paella' method achieves a structure distance of 11.34×10^3, which is approximately 84% lower than the continuous DDIM+SD1.4's 69.43×10^3.", "label": true }, { "paperid": "2410.07073v2", "paper_path": "./SciVer/papers/2410.07073v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3", "4.1" ], "image_path": "./SciVer/images/2410.07073v2-Table2-1.png", "request_id": 9, "origin_statement": "Pixtral’s PDF page understanding score of 81.8 is 40.6 points higher than Qwen-VL 7B’s 41.2 score in the same category.", "perturbed_statement": "Pixtral’s PDF page understanding score of 81.8 is 30.6 points higher than Qwen-VL 7B’s 41.2 score in the same category.", "perturbed_explanation": "The actual difference between Pixtral’s PDF score (81.8) and Qwen-VL 7B’s (41.2) is 40.6 points, not 30.6.", "claim": "Pixtral’s PDF page understanding score of 81.8 is 40.6 points higher than Qwen-VL 7B’s 41.2 score in the same category.", "label": true }, { "paperid": "2410.16617v1", 
"paper_path": "./SciVer/papers/2410.16617v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.2", "5" ], "image_path": "./SciVer/images/2410.16617v1_figure_4.png", "request_id": 10, "origin_statement": "In panel (a) of Figure 4, the posterior probability of chikungunya presence remains below 0.05 until around bi-week 30, then climbs to 1.0 by bi-week 34, indicating a nearly complete increase in presence probability within four bi-weeks.", "perturbed_statement": "In panel (a) of Figure 4, the posterior probability of chikungunya presence remains at approximately 0.5 between bi-weeks 28 and 34, then increases to 1.0 by bi-week 36.", "perturbed_explanation": "Panel (a) actually shows the probability remains near zero until around bi-week 30 (not at 0.5 between bi-weeks 28–34) and it reaches 1.0 by bi-week 34 (not bi-week 36). These level and timing details contradict the chart.", "claim": "In panel (a) of Figure 4, the posterior probability of chikungunya presence remains below 0.05 until around bi-week 30, then climbs to 1.0 by bi-week 34, indicating a nearly complete increase in presence probability within four bi-weeks.", "label": true }, { "paperid": "2410.15939v1", "paper_path": "./SciVer/papers/2410.15939v1.json", "claim_type": "direct", "type": "chart", "item": "3(b)", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.15939v1_figure_3(b).png", "request_id": 12, "origin_statement": "Mistral’s performance on the Source query in the Alarm graph increases by approximately 0.47—from about 0.34 without context to about 0.81 with context.", "perturbed_statement": "Mistral’s performance on the Source query in the Alarm graph increases by approximately 0.29—from about 0.34 without context to about 0.63 with context.", "perturbed_explanation": "The perturbed statement is incorrect because the figure shows Mistral’s with-context performance on the Source query is roughly 0.81 (not 0.63), so the actual increase is about 0.47, not 0.29.", "claim": "Mistral’s performance on the Source query in the Alarm graph increases by approximately 0.47—from about 0.34 without context to about 0.81 with context.", "label": true }, { "paperid": "2410.09982v3", "paper_path": "./SciVer/papers/2410.09982v3.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2410.09982v3_figure_1.png", "request_id": 14, "origin_statement": "At a prune block size of 6, self-data distilled fine-tuning retains approximately 91.2% of the unpruned Llama3.1-8B Instruct model's quality, which is about 16.5 percentage points higher than no fine-tuning at around 74.7%.", "perturbed_statement": "At a prune block size of 6, self-data distilled fine-tuning retains approximately 88.2% of the unpruned Llama3.1-8B Instruct model's quality, which is about 14 percentage points higher than no fine-tuning at around 74.7%.", "perturbed_explanation": "This statement is wrong because the plot shows self-data distilled fine-tuning recovers 91.2% (not 88.2%) at block size 6, and the difference from no fine-tuning’s ~74.7% is about 16.5 percentage points, not 14.", "claim": "At a prune block size of 6, self-data distilled fine-tuning retains approximately 91.2% of the unpruned Llama3.1-8B Instruct model's quality, which is about 16.5 percentage points higher than no fine-tuning at around 74.7%.", "label": true }, { "paperid": "2409.01901v1", "paper_path": "./SciVer/papers/2409.01901v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.3" 
], "image_path": "./SciVer/images/2409.01901v1-Table2-1.png", "request_id": 21, "origin_statement": "The SEMLEX dataset provides 49,274 reference videos for 1,000 glosses, which is approximately 4.1 times more than WLASL’s 12,051 and about 49 times more than SB NGT’s 1,000.", "perturbed_statement": "The SEMLEX dataset provides 12,051 reference videos for 1,000 glosses, which is approximately 4.1 times more than WLASL’s 49,274 and about 49 times more than SB NGT’s 1,000.", "perturbed_explanation": "This statement is incorrect because the table shows SEMLEX actually has 49,274 reference videos and WLASL has 12,051; the numbers in the perturbed statement are swapped and contradict the source.", "claim": "The SEMLEX dataset provides 49,274 reference videos for 1,000 glosses, which is approximately 4.1 times more than WLASL’s 12,051 and about 49 times more than SB NGT’s 1,000.", "label": true }, { "paperid": "2409.05371v1", "paper_path": "./SciVer/papers/2409.05371v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.05371v1_figure_2(b).png", "request_id": 38, "origin_statement": "In the LQG strong field limit (blue curves), increasing the polymeric parameter P from 0.02 to 0.04 shifts the divergent impact parameter (where α diverges) from about b ≈ 5.19 to about b ≈ 5.16.", "perturbed_statement": "In the LQG strong field limit, raising P from 0.02 to 0.04 shifts the divergent impact parameter from approximately b ≈ 5.19 down to b ≈ 5.12.", "perturbed_explanation": "This is incorrect because the figure’s blue curves actually diverge at around b ≈ 5.19 for P=0.02 and around b ≈ 5.16 for P=0.04, not b ≈ 5.12.", "claim": "In the LQG strong field limit (blue curves), increasing the polymeric parameter P from 0.02 to 0.04 shifts the divergent impact parameter (where α diverges) from about b ≈ 5.19 to about b ≈ 5.16.", "label": true }, { "paperid": "2409.08851v1", "paper_path": "./SciVer/papers/2409.08851v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2409.08851v1-Table1-1.png", "request_id": 39, "origin_statement": "For the 4p7q(6) scheme, among its off-diagonal coefficients, a65 equals 2.92656837501595, which is the largest magnitude in this method, exceeding the next largest coefficient a42 = 1.89179076622108 by approximately 1.03477760879487.", "perturbed_statement": "In the 4p7q(6) scheme, the coefficient a42 = 1.89179076622108 is the largest off-diagonal value, exceeding a65 = 2.92656837501595 by approximately 1.03477760879487.", "perturbed_explanation": "This statement is incorrect because Table 1 shows that a65 (2.92656837501595) is larger than a42 (1.89179076622108), not the other way around.", "claim": "For the 4p7q(6) scheme, among its off-diagonal coefficients, a65 equals 2.92656837501595, which is the largest magnitude in this method, exceeding the next largest coefficient a42 = 1.89179076622108 by approximately 1.03477760879487.", "label": true }, { "paperid": "2410.03091v1", "paper_path": "./SciVer/papers/2410.03091v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "1", "2.2", "5" ], "image_path": "./SciVer/images/2410.03091v1_figure_2.png", "request_id": 44, "origin_statement": "In panel C, the median CGM follow-up duration (where CDF = 0.5) is about 3.5 days for the intervention group versus about 4 days for the control group, indicating shorter hospital stays under the intervention.", "perturbed_statement": "In panel C, 
the median CGM follow-up duration (where CDF = 0.5) is about 2 days for the intervention group versus about 3 days for the control group, indicating shorter hospital stays under the intervention.", "perturbed_explanation": "This is incorrect because panel C shows the empirical CDF curves reach 0.5 at around 3.5 days for the intervention group (magenta) and around 4 days for the control group (green), not at 2 and 3 days respectively.", "claim": "In panel C, the median CGM follow-up duration (where CDF = 0.5) is about 3.5 days for the intervention group versus about 4 days for the control group, indicating shorter hospital stays under the intervention.", "label": true }, { "paperid": "2410.19472v1", "paper_path": "./SciVer/papers/2410.19472v1.json", "claim_type": "direct", "type": "chart", "item": "7(b)", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.19472v1_figure_7(b).png", "request_id": 46, "origin_statement": "Between mesh levels l=0 and l=4, the relative error in the integrated wall shear stress norm decreases from 26.53% to 0.70%, representing a reduction by a factor of about 38.", "perturbed_statement": "Between mesh levels l=0 and l=4, the relative error in the integrated wall shear stress norm decreases from 26.53% to 1.5%, representing a reduction by a factor of about 18.", "perturbed_explanation": "The perturbed statement incorrectly reports the level-4 relative error as 1.5%, whereas the figure legend shows ε_rel = 0.0070 (0.70%) at l=4, not 1.5%.", "claim": "Between mesh levels l=0 and l=4, the relative error in the integrated wall shear stress norm decreases from 26.53% to 0.70%, representing a reduction by a factor of about 38.", "label": true }, { "paperid": "2410.13376v1", "paper_path": "./SciVer/papers/2410.13376v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.13376v1-Table3-1.png", "request_id": 47, "origin_statement": "The mean error ε_mean for v (8.148×10^-4) is approximately 1.82 times that for w (4.479×10^-4) according to Table 3.", "perturbed_statement": "The mean error ε_mean for v (8.148×10^-4) is approximately 2.5 times that for w (4.479×10^-4) according to Table 3.", "perturbed_explanation": "This is incorrect because Table 3 lists ε_mean for v as 8.148×10^-4 and for w as 4.479×10^-4; the actual ratio is 8.148×10^-4 ÷ 4.479×10^-4 ≈ 1.82, not 2.5.", "claim": "The mean error ε_mean for v (8.148×10^-4) is approximately 1.82 times that for w (4.479×10^-4) according to Table 3.", "label": true }, { "paperid": "2409.10343v1", "paper_path": "./SciVer/papers/2409.10343v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.4.2" ], "image_path": "./SciVer/images/2409.10343v1_figure_4.png", "request_id": 48, "origin_statement": "Recall@10 peaks at 0.10 when ε^max is 3%, rising from 0.095 at 1% and falling to 0.085 at 20%.", "perturbed_statement": "Recall@10 peaks at 0.11 when ε^max is 3%, rising from 0.095 at 1% and falling to 0.085 at 20%.", "perturbed_explanation": "In the ε^max subplot the orange circle at 3% corresponds to Recall@10≈0.10, not 0.11, so stating a peak of 0.11 contradicts the plotted value.", "claim": "Recall@10 peaks at 0.10 when ε^max is 3%, rising from 0.095 at 1% and falling to 0.085 at 20%.", "label": true }, { "paperid": "2410.22387v1", "paper_path": "./SciVer/papers/2410.22387v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "3.8", "4.3" ], "image_path": "./SciVer/images/2410.22387v1_figure_6.png", "request_id": 
50, "origin_statement": "In the heatmap, Gleason 9 shows more genes with z-scores ≥2 (more red bars) than Gleason 6, indicating more upregulated gene features at Gleason 9 compared to Gleason 6.", "perturbed_statement": "In the heatmap, Gleason 6 shows more genes with z-scores ≥2 (more red bars) than Gleason 9, indicating a greater number of upregulated gene features at Gleason 6 compared to Gleason 9.", "perturbed_explanation": "The heatmap clearly displays more red bars (z-scores ≥2) under the Gleason 9 column than under Gleason 6, so Gleason 9 actually has more upregulated genes, contradicting the perturbed claim.", "claim": "In the heatmap, Gleason 9 shows more genes with z-scores ≥2 (more red bars) than Gleason 6, indicating more upregulated gene features at Gleason 9 compared to Gleason 6.", "label": true }, { "paperid": "2411.01074v1", "paper_path": "./SciVer/papers/2411.01074v1.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.01074v1_figure_3(a).png", "request_id": 52, "origin_statement": "On the 4-class sub-task, MODA’s composed model achieves approximately 95.5% reuse accuracy, about 40.5 percentage points higher than MwT without fine-tuning’s roughly 55%.", "perturbed_statement": "On the 4-class sub-task, MODA’s composed model achieves approximately 85.5% reuse accuracy, about 30.5 percentage points higher than MwT without fine-tuning’s roughly 55%.", "perturbed_explanation": "Figure 3(a) shows that MODA’s reuse accuracy on the 4-class sub-task is about 95.5%, not 85.5%, so the perturbed value of 85.5% and the corresponding 30.5-point gap are incorrect.", "claim": "On the 4-class sub-task, MODA’s composed model achieves approximately 95.5% reuse accuracy, about 40.5 percentage points higher than MwT without fine-tuning’s roughly 55%.", "label": true }, { "paperid": "2410.21813v1", "paper_path": "./SciVer/papers/2410.21813v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.2.2" ], "image_path": "./SciVer/images/2410.21813v1_figure_4.png", "request_id": 54, "origin_statement": "The SAM-Swin model correctly identifies 3688 out of 3750 malignant cases (98.35% recall), outperforming the SAM-FNet model which correctly recognizes 3610 malignant cases (96.27% recall).", "perturbed_statement": "The SAM-Swin model correctly identifies 3745 out of 3750 malignant cases (99.86% recall), outperforming the SAM-FNet model which correctly recognizes 3650 malignant cases (97.33% recall).", "perturbed_explanation": "According to the confusion matrix (j), SAM-Swin correctly predicts 3688 malignant cases – not 3745 – out of a total of 3750. 
Likewise, SAM-FNet correctly predicts 3610 malignant cases – not 3650 – so the stated figures are inaccurate.", "claim": "The SAM-Swin model correctly identifies 3688 out of 3750 malignant cases (98.35% recall), outperforming the SAM-FNet model which correctly recognizes 3610 malignant cases (96.27% recall).", "label": true }, { "paperid": "2410.12049v1", "paper_path": "./SciVer/papers/2410.12049v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "2.2" ], "image_path": "./SciVer/images/2410.12049v1_figure_3.png", "request_id": 56, "origin_statement": "Llama-3.1 8B achieves a 44.2% win rate and an 8.1% loss rate against Sabiá-3, representing the highest win rate and lowest loss rate among the seven compared models.", "perturbed_statement": "Llama-3.1 8B achieves a 35.0% win rate and a 15.0% loss rate against Sabiá-3, representing the highest win rate and lowest loss rate among the seven compared models.", "perturbed_explanation": "This perturbed statement is incorrect because Figure 3 shows that Llama-3.1 8B’s actual win rate is 44.2% (not 35.0%) and its loss rate is 8.1% (not 15.0%).", "claim": "Llama-3.1 8B achieves a 44.2% win rate and an 8.1% loss rate against Sabiá-3, representing the highest win rate and lowest loss rate among the seven compared models.", "label": true }, { "paperid": "2409.10031v1", "paper_path": "./SciVer/papers/2409.10031v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.1", "4.3" ], "image_path": "./SciVer/images/2409.10031v1-Table2-1.png", "request_id": 57, "origin_statement": "The CYBER2 violation saw a 99.8% decrease in transaction count (from 153,000 to 305) and a 99.96% drop in USD volume (from $8.3 B to $3 M) following sanctions.", "perturbed_statement": "The CYBER2 violation saw a 99.8% decrease in transaction count (from 153,000 to 500) and a 99.88% drop in USD volume (from $8.3 B to $10 M) following sanctions.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 shows the post-sanction transaction count for CYBER2 is 305 (not 500) and the post-sanction USD volume is $3 M (not $10 M).", "claim": "The CYBER2 violation saw a 99.8% decrease in transaction count (from 153,000 to 305) and a 99.96% drop in USD volume (from $8.3 B to $3 M) following sanctions.", "label": true }, { "paperid": "2409.01988v1", "paper_path": "./SciVer/papers/2409.01988v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.01988v1-Table1-1.png", "request_id": 73, "origin_statement": "On n-MNIST AWGN, adding a recovery loss to GloCal improves accuracy by 4.92 percentage points, boosting it from 91.47% without recovery loss to 96.39% with recovery loss.", "perturbed_statement": "On n-MNIST AWGN, adding a recovery loss to GloCal improves accuracy by 6.92 percentage points, boosting it from 89.47% without recovery loss to 96.39% with recovery loss.", "perturbed_explanation": "The table lists the GloCal without recovery loss accuracy on n-MNIST AWGN as 91.47%, not 89.47%, and the actual improvement to 96.39% is 4.92 percentage points, not 6.92.", "claim": "On n-MNIST AWGN, adding a recovery loss to GloCal improves accuracy by 4.92 percentage points, boosting it from 91.47% without recovery loss to 96.39% with recovery loss.", "label": true }, { "paperid": "2409.18903v2", "paper_path": "./SciVer/papers/2409.18903v2.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "6" ], "image_path": 
"./SciVer/images/2409.18903v2_figure_5.png", "request_id": 76, "origin_statement": "In the top row of Figure 5, u at x=0 decreases from 1 at t=0 to approximately -2 at t=2π, and then to about -5 at t=4π, indicating a roughly linear decline of 3 units every 2π time interval.", "perturbed_statement": "In the top row of Figure 5, u at x=0 decreases from 2 at t=0 to approximately -1 at t=2π, and then to about -4 at t=4π, indicating a roughly linear decline of 3 units every 2π time interval.", "perturbed_explanation": "The perturbed statement misreports the values of u at x=0. The actual plot shows u≈1 at t=0 (not 2), u≈-2 at t=2π (not -1), and u≈-5 at t=4π (not -4), as seen on the y-axis of the top-row curves.", "claim": "In the top row of Figure 5, u at x=0 decreases from 1 at t=0 to approximately -2 at t=2π, and then to about -5 at t=4π, indicating a roughly linear decline of 3 units every 2π time interval.", "label": true }, { "paperid": "2410.23507v1", "paper_path": "./SciVer/papers/2410.23507v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5" ], "image_path": "./SciVer/images/2410.23507v1-Table4-1.png", "request_id": 79, "origin_statement": "On the CWEB-G and CWEB-S test sets, MoECE-ST-Base outperforms MoECE-GS-Base by 0.15 and 0.13 points respectively, indicating a slight advantage of ST routing at base scale.", "perturbed_statement": "On the CWEB-G and CWEB-S test sets, MoECE-ST-Base outperforms MoECE-GS-Base by 0.25 and 0.23 points respectively, indicating a slight advantage of ST routing at base scale.", "perturbed_explanation": "This claim is incorrect because the actual table shows MoECE-GS-Base scores of 39.22 on CWEB-G and 27.77 on CWEB-S, while MoECE-ST-Base scores are 39.37 and 27.90. The true improvements are 0.15 (39.37−39.22) and 0.13 (27.90−27.77), not 0.25 and 0.23.", "claim": "On the CWEB-G and CWEB-S test sets, MoECE-ST-Base outperforms MoECE-GS-Base by 0.15 and 0.13 points respectively, indicating a slight advantage of ST routing at base scale.", "label": true }, { "paperid": "2409.00614v1", "paper_path": "./SciVer/papers/2409.00614v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "6.4" ], "image_path": "./SciVer/images/2409.00614v1_figure_4.png", "request_id": 80, "origin_statement": "At communication round 50 on Arabic Twitter, DAMe achieves approximately 0.85 local NMI, about 0.13 higher than FedAvg’s local NMI of around 0.72.", "perturbed_statement": "At communication round 50 on Arabic Twitter, DAMe achieves approximately 0.80 local NMI, about 0.08 higher than FedAvg’s local NMI of around 0.72.", "perturbed_explanation": "This is incorrect because in Figure 4 DAMe’s local NMI at round 50 on Arabic Twitter is about 0.85, not 0.80, making its lead over FedAvg (~0.72) roughly 0.13 rather than 0.08.", "claim": "At communication round 50 on Arabic Twitter, DAMe achieves approximately 0.85 local NMI, about 0.13 higher than FedAvg’s local NMI of around 0.72.", "label": true }, { "paperid": "2410.21705v1", "paper_path": "./SciVer/papers/2410.21705v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1", "4.2" ], "image_path": "./SciVer/images/2410.21705v1-Table1-1.png", "request_id": 83, "origin_statement": "On the CIFAR100 dataset, AdaptGCD's \"All\" accuracy is 84.0%, which is 3.9 percentage points higher than SimGCD's 80.1%.", "perturbed_statement": "On the CIFAR100 dataset, AdaptGCD's \"All\" accuracy is 85.0%, which is 4.9 percentage points higher than SimGCD's 80.1%.", 
"perturbed_explanation": "The perturbed claim is wrong because the table shows AdaptGCD's \"All\" accuracy on CIFAR100 is actually 84.0%, not 85.0%. This makes the true improvement over SimGCD's 80.1% equal to 3.9 points, not 4.9 points.", "claim": "On the CIFAR100 dataset, AdaptGCD's \"All\" accuracy is 84.0%, which is 3.9 percentage points higher than SimGCD's 80.1%.", "label": true }, { "paperid": "2411.07253v1", "paper_path": "./SciVer/papers/2411.07253v1.json", "claim_type": "direct", "type": "chart", "item": "4(b)", "section": [ "7.2" ], "image_path": "./SciVer/images/2411.07253v1_figure_4(b).png", "request_id": 94, "origin_statement": "By iteration 500, ASPGMO-sc reduces ∥x^{k+1}−y^k∥ by about 8 orders of magnitude from roughly 10^{1} to 10^{−7}, while SPGMO only reduces by about 1 order from ∼10^{1} to ∼10^{0}.", "perturbed_statement": "By iteration 500, ASPGMO-sc reduces ∥x^{k+1}−y^k∥ by about 4 orders of magnitude from roughly 10^{1} to 10^{−3}, while SPGMO only reduces by about 1 order from ∼10^{1} to ∼10^{0}.", "perturbed_explanation": "This statement is incorrect because in Figure 4 the blue ASPGMO-sc curve at k=500 reaches approximately 10^{−7}, not 10^{−3}, so the actual reduction is about 8 orders of magnitude, not 4.", "claim": "By iteration 500, ASPGMO-sc reduces ∥x^{k+1}−y^k∥ by about 8 orders of magnitude from roughly 10^{1} to 10^{−7}, while SPGMO only reduces by about 1 order from ∼10^{1} to ∼10^{0}.", "label": true }, { "paperid": "2411.13607v2", "paper_path": "./SciVer/papers/2411.13607v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.13607v2-Table1-1.png", "request_id": 95, "origin_statement": "VioDat's 639 videos are nearly four times the 168 videos of HumanEva, yet still fewer than AIST++'s 1408 videos.", "perturbed_statement": "VioDat's 639 videos are nearly six times the 168 videos of HumanEva, yet still fewer than AIST++'s 1408 videos.", "perturbed_explanation": "The perturbed claim overstates the ratio: 639 videos divided by HumanEva's 168 videos equals about 3.8, which is closer to four times, not six. The table lists 168 videos for HumanEva, so 'nearly six times' is incorrect.", "claim": "VioDat's 639 videos are nearly four times the 168 videos of HumanEva, yet still fewer than AIST++'s 1408 videos.", "label": true }, { "paperid": "2411.00513v1", "paper_path": "./SciVer/papers/2411.00513v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2411.00513v1_figure_2.png", "request_id": 98, "origin_statement": "Shortly after Case BB RLO begins, the RLO mass loss rate peaks at ∼1×10⁻⁵ M⊙/yr, around twenty times the contemporaneous wind-driven mass loss rate of ∼5×10⁻⁷ M⊙/yr.", "perturbed_statement": "Shortly after Case BB RLO begins, the RLO mass loss rate peaks at ∼1×10⁻⁵ M⊙/yr, around two hundred times the wind-driven mass loss rate of ∼5×10⁻⁸ M⊙/yr.", "perturbed_explanation": "The figure shows the wind mass loss rate at that epoch is about 5×10⁻⁷ M⊙/yr, not 5×10⁻⁸ M⊙/yr. 
Thus the ratio to the RLO rate is ≈20, not ≈200, making the perturbed values inconsistent with the plotted data.", "claim": "Shortly after Case BB RLO begins, the RLO mass loss rate peaks at ∼1×10⁻⁵ M⊙/yr, around twenty times the contemporaneous wind-driven mass loss rate of ∼5×10⁻⁷ M⊙/yr.", "label": true }, { "paperid": "2411.00049v1", "paper_path": "./SciVer/papers/2411.00049v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5.4" ], "image_path": "./SciVer/images/2411.00049v1_figure_7.png", "request_id": 104, "origin_statement": "Between threshold t=0 and t=0.9, Hatespeech accuracy rises by 14.5 percentage points (84.5% to 99.0%), compared to a 2.9-point gain for Reuters (85.6% to 88.5%), indicating a much steeper improvement on the Hatespeech dataset.", "perturbed_statement": "Between threshold t=0 and t=0.9, Hatespeech accuracy rises by only 12.0 percentage points (84.5% to 99.0%), compared to a 2.9-point gain for Reuters (85.6% to 88.5%), indicating a much steeper improvement on the Hatespeech dataset.", "perturbed_explanation": "The increase from 84.5% to 99.0% on Hatespeech is actually 14.5 percentage points, not 12.0, so the perturbed statement understates the true improvement.", "claim": "Between threshold t=0 and t=0.9, Hatespeech accuracy rises by 14.5 percentage points (84.5% to 99.0%), compared to a 2.9-point gain for Reuters (85.6% to 88.5%), indicating a much steeper improvement on the Hatespeech dataset.", "label": true }, { "paperid": "2409.06166v1", "paper_path": "./SciVer/papers/2409.06166v1.json", "claim_type": "direct", "type": "chart", "item": "3(b)", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.06166v1_figure_3(b).png", "request_id": 112, "origin_statement": "RPP’s validation accuracy increases by 1.7% from 23.7% at 1-shot to 25.4% at 64-shot, whereas POMP’s validation accuracy remains nearly flat, rising only 0.6% from 23.3% to 23.9%.", "perturbed_statement": "RPP’s validation accuracy increases by 2.7% from 23.7% at 1-shot to 26.4% at 64-shot, whereas POMP’s validation accuracy remains nearly flat, rising only 0.6% from 23.3% to 23.9%.", "perturbed_explanation": "The perturbed statement incorrectly states RPP’s 64-shot validation accuracy as 26.4%, but the chart shows it is actually 25.4%, so the claimed 2.7% increase is unsupported.", "claim": "RPP’s validation accuracy increases by 1.7% from 23.7% at 1-shot to 25.4% at 64-shot, whereas POMP’s validation accuracy remains nearly flat, rising only 0.6% from 23.3% to 23.9%.", "label": true }, { "paperid": "2410.22378v1", "paper_path": "./SciVer/papers/2410.22378v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4" ], "image_path": "./SciVer/images/2410.22378v1_figure_4.png", "request_id": 114, "origin_statement": "At n=1, increasing A from 1 to 10 raises the quantum effect from approximately 0.35 to about 1.0, while at n=5 the same A increase only raises it from around 0.6 to roughly 0.85, indicating that sensitivity to A decreases as n increases.", "perturbed_statement": "At n=1, increasing A from 1 to 10 raises the quantum effect from approximately 0.5 to about 1.2, while at n=5 the same A increase only raises it from around 0.6 to roughly 0.85.", "perturbed_explanation": "The perturbation is incorrect because the figure shows that at n=1 the quantum effect actually increases from about 0.35 at A=1 to about 1.0 at A=10—not from 0.5 to 1.2 as stated, so the cited values contradict the plotted data.", "claim": "At n=1, increasing A from 1 to 10 raises 
the quantum effect from approximately 0.35 to about 1.0, while at n=5 the same A increase only raises it from around 0.6 to roughly 0.85, indicating that sensitivity to A decreases as n increases.", "label": true }, { "paperid": "2411.06184v1", "paper_path": "./SciVer/papers/2411.06184v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.06184v1-Table1-1.png", "request_id": 117, "origin_statement": "The GLCM feature set, with 23 features, is nearly three times larger than the shape feature set, which has 8 features.", "perturbed_statement": "The GLCM feature set, with 18 features, is almost three times larger than the shape feature set, which has 8 features.", "perturbed_explanation": "This is incorrect because Table 1 shows the GLCM feature set contains 23 features, not 18. Therefore, the perturbed count contradicts the source data.", "claim": "The GLCM feature set, with 23 features, is nearly three times larger than the shape feature set, which has 8 features.", "label": true }, { "paperid": "2410.15592v2", "paper_path": "./SciVer/papers/2410.15592v2.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "5" ], "image_path": "./SciVer/images/2410.15592v2_figure_3.png", "request_id": 118, "origin_statement": "On the test set, OMEGAFold's average pLDDT score is 81.2%, which is 5.2 percentage points higher than ESMfold's 76.0%.", "perturbed_statement": "On the test set, ESMfold's average pLDDT score is 81.2%, which is 5.2 percentage points higher than OMEGAFold's 76.0%.", "perturbed_explanation": "This statement is incorrect because the chart shows ESMfold achieves 76.0% and OMEGAFold achieves 81.2% on the test set, not the other way around.", "claim": "On the test set, OMEGAFold's average pLDDT score is 81.2%, which is 5.2 percentage points higher than ESMfold's 76.0%.", "label": true }, { "paperid": "2411.18243v1", "paper_path": "./SciVer/papers/2411.18243v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.1", "3" ], "image_path": "./SciVer/images/2411.18243v1-Table1-1.png", "request_id": 119, "origin_statement": "For GRB 211211A between 5.8 and 8.8 s, the QPO feature in the Fermi PDS has a FWHM of ∼0.005 Hz, making it about 50 times narrower than the ∼0.25 Hz FWHM measured in the ASIM PDS, indicating a much sharper peak in the Fermi data.", "perturbed_statement": "For GRB 211211A between 5.8 and 8.8 s, the QPO feature in the Fermi PDS has a FWHM of ∼0.25 Hz, making it about 50 times broader than the ∼0.005 Hz FWHM measured in the ASIM PDS, indicating a much broader peak in the Fermi data.", "perturbed_explanation": "This is incorrect because the table lists the Fermi QPO FWHM as (5^{+200}_{−3})×10^{−3} Hz (about 0.005 Hz), not ~0.25 Hz, and the ASIM QPO FWHM as 0.25^{+0.90}_{−0.24} Hz, not ~0.005 Hz.", "claim": "For GRB 211211A between 5.8 and 8.8 s, the QPO feature in the Fermi PDS has a FWHM of ∼0.005 Hz, making it about 50 times narrower than the ∼0.25 Hz FWHM measured in the ASIM PDS, indicating a much sharper peak in the Fermi data.", "label": true }, { "paperid": "2410.13995v2", "paper_path": "./SciVer/papers/2410.13995v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.13995v2-Table3-1.png", "request_id": 125, "origin_statement": "The Q-value of action a in the poisoned start state equals γ/(1−γ), which exceeds the fixed Q-value of 1 for action a+ whenever γ>0.5, both with and without action 
manipulation.", "perturbed_statement": "The Q-value of action a in the poisoned start state equals γ/(1+γ), which is always less than the fixed Q-value of 1 for action a+, both with and without action manipulation.", "perturbed_explanation": "Table 3 specifies that QπM′(δ(Start),a) is γ/(1−γ), not γ/(1+γ). The denominator in the perturbation is incorrect, making the perturbed claim inconsistent with the shown Q-value formula.", "claim": "The Q-value of action a in the poisoned start state equals γ/(1−γ), which exceeds the fixed Q-value of 1 for action a+ whenever γ>0.5, both with and without action manipulation.", "label": true }, { "paperid": "2411.18433v1", "paper_path": "./SciVer/papers/2411.18433v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.4", "5.1" ], "image_path": "./SciVer/images/2411.18433v1-Table1-1.png", "request_id": 127, "origin_statement": "In the proposed model, distance-dependent reciprocity is characterized by any non-zero φ, with φ>0 indicating increasing reciprocity with latent distance and φ<0 indicating decreasing reciprocity; homogeneous reciprocity arises when ρ≠0 but φ=0.", "perturbed_statement": "In the proposed model, distance-dependent reciprocity is characterized by any non-zero φ, with φ>0 indicating increasing reciprocity with latent distance and φ<0 indicating decreasing reciprocity; homogeneous reciprocity arises when ρ=0 but φ=0.", "perturbed_explanation": "The perturbed statement incorrectly identifies the parameter setting for homogeneous reciprocity. According to Table 1, homogeneous reciprocity requires ρ≠0 and φ=0, not ρ=0 and φ=0.", "claim": "In the proposed model, distance-dependent reciprocity is characterized by any non-zero φ, with φ>0 indicating increasing reciprocity with latent distance and φ<0 indicating decreasing reciprocity; homogeneous reciprocity arises when ρ≠0 but φ=0.", "label": true }, { "paperid": "2409.07796v1", "paper_path": "./SciVer/papers/2409.07796v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.07796v1_figure_5.png", "request_id": 130, "origin_statement": "For class 12, the frequency rises from roughly 40 in T1 to about 90 in T2 and reaches approximately 150 in T3, demonstrating a clear upward drift over time.", "perturbed_statement": "For class 12, the frequency rises from roughly 40 in T1 to about 90 in T2 and reaches approximately 200 in T3, demonstrating a clear upward drift over time.", "perturbed_explanation": "This claim is incorrect because Figure 5 shows the T3 (red) frequency for class 12 at around 150, not 200; stating 200 contradicts the plotted value for class 12 in the third time window.", "claim": "For class 12, the frequency rises from roughly 40 in T1 to about 90 in T2 and reaches approximately 150 in T3, demonstrating a clear upward drift over time.", "label": true }, { "paperid": "2411.05689v1", "paper_path": "./SciVer/papers/2411.05689v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.2", "3.3" ], "image_path": "./SciVer/images/2411.05689v1-Table2-1.png", "request_id": 131, "origin_statement": "The solve method requires eight input parameters, exactly four times the two input arguments required by extract_sc_pol.", "perturbed_statement": "The solve method requires six input parameters, exactly three times the two input arguments required by extract_sc_pol.", "perturbed_explanation": "The table lists eight input parameters for solve (x0, xmin, xmax, Ntrials, ngrid, iter_max, 
eps, psi), not six, so the stated count of six inputs is incorrect.", "claim": "The solve method requires eight input parameters, exactly four times the two input arguments required by extract_sc_pol.", "label": true }, { "paperid": "2411.17640v1", "paper_path": "./SciVer/papers/2411.17640v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2411.17640v1_figure_3.png", "request_id": 142, "origin_statement": "In Fig. 3, the seven longest-period TESS candidates (periods ~275–740 days) have apparent magnitudes between about 7.5 and 10.7, on average roughly 4 magnitudes brighter than Kepler’s long-period KOIs, which cluster between magnitudes 12 and 16.", "perturbed_statement": "In Fig. 3, the seven longest-period TESS candidates have apparent magnitudes between about 11.5 and 14.7, which is roughly 2 magnitudes fainter than Kepler’s KOIs that cluster between magnitudes 8 and 11.", "perturbed_explanation": "This is incorrect because the TESS candidates in Fig. 3 actually span magnitudes from about 7.5 to 10.7 (not 11.5–14.7), and the long-period KOIs lie between magnitudes 12 and 16 (not 8–11). These specific ranges contradict the plotted data.", "claim": "In Fig. 3, the seven longest-period TESS candidates (periods ~275–740 days) have apparent magnitudes between about 7.5 and 10.7, on average roughly 4 magnitudes brighter than Kepler’s long-period KOIs, which cluster between magnitudes 12 and 16.", "label": true }, { "paperid": "2411.18328v1", "paper_path": "./SciVer/papers/2411.18328v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "4.5" ], "image_path": "./SciVer/images/2411.18328v1_figure_7.png", "request_id": 148, "origin_statement": "EventCrab uses 147M parameters compared to ExACT's 155M (approx. 5% reduction) and processes 669 GFLOPs versus 702 GFLOPs (approx. 4.7% reduction), while inducing only a 0.02 FPS drop (about 7%).", "perturbed_statement": "EventCrab uses 140M parameters compared to ExACT's 155M (a 15M reduction) and processes 650 GFLOPs versus 702 GFLOPs (a 52G reduction), while inference speed remains within 0.02 FPS of ExACT.", "perturbed_explanation": "The perturbed statement is incorrect because the chart shows EventCrab uses 147M parameters (not 140M) and 669 GFLOPs (not 650 GFLOPs), so both the parameter count and FLOPs values are misreported.", "claim": "EventCrab uses 147M parameters compared to ExACT's 155M (approx. 5% reduction) and processes 669 GFLOPs versus 702 GFLOPs (approx. 4.7% reduction), while inducing only a 0.02 FPS drop (about 7%).", "label": true }, { "paperid": "2411.01960v1", "paper_path": "./SciVer/papers/2411.01960v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4" ], "image_path": "./SciVer/images/2411.01960v1-Table1-1.png", "request_id": 149, "origin_statement": "The Ricean-mean method yields power-law indices (α) of 0.59 for the filament, 0.30 for HH 211 MMS, and 0.34 for IC 348 MMS, so the filament’s α is nearly twice that of HH 211 MMS and about 1.7 times higher than IC 348 MMS.", "perturbed_statement": "The Ricean-mean method yields power-law indices (α) of 0.30 for the filament, 0.59 for HH 211 MMS, and 0.34 for IC 348 MMS, so the filament’s α is lower than both cores, implying weaker density-dependence in the filament.", "perturbed_explanation": "This statement is incorrect because Table 1 actually shows the filament’s α under the Ricean-mean method is 0.59 (not 0.30), and HH 211 MMS’s α is 0.30 (not 0.59). 
Therefore the filament’s index is higher, not lower, than those of the cores.", "claim": "The Ricean-mean method yields power-law indices (α) of 0.59 for the filament, 0.30 for HH 211 MMS, and 0.34 for IC 348 MMS, so the filament’s α is nearly twice that of HH 211 MMS and about 1.7 times higher than IC 348 MMS.", "label": true }, { "paperid": "2411.01543v1", "paper_path": "./SciVer/papers/2411.01543v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.01543v1-Table3-1.png", "request_id": 151, "origin_statement": "For the Mo modulator, the A-SWIFT method reduces the average STD of selected ROIs from 286 HU (SCFM) to 96 HU, a reduction greater than the 102 HU decrease (from 202 HU to 100 HU) observed with the Cu modulator.", "perturbed_statement": "For the Mo modulator, the A-SWIFT method reduces the average STD of selected ROIs from 286 HU (SCFM) to 106 HU, a reduction greater than the 102 HU decrease (from 202 HU to 100 HU) observed with the Cu modulator.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 shows that for the Mo modulator, A-SWIFT actually reduces the average STD to 96 HU, not 106 HU.", "claim": "For the Mo modulator, the A-SWIFT method reduces the average STD of selected ROIs from 286 HU (SCFM) to 96 HU, a reduction greater than the 102 HU decrease (from 202 HU to 100 HU) observed with the Cu modulator.", "label": true }, { "paperid": "2409.20332v1", "paper_path": "./SciVer/papers/2409.20332v1.json", "claim_type": "direct", "type": "chart", "item": "6(a)", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.20332v1_figure_6(a).png", "request_id": 154, "origin_statement": "On the AbdomenCT-1K dataset, the model trained on real data achieves a kidney Dice score of 76%, compared to 63% with Medical Diffusion and 68% with Lad (Ours).", "perturbed_statement": "On the AbdomenCT-1K dataset, the model trained on real data achieves a kidney Dice score of 74%, compared to 65% with Medical Diffusion and 68% with Lad (Ours).", "perturbed_explanation": "The actual Dice scores in Figure 6(a) are 76% for the real data model and 63% for Medical Diffusion, not 74% and 65% as stated in the perturbed claim.", "claim": "On the AbdomenCT-1K dataset, the model trained on real data achieves a kidney Dice score of 76%, compared to 63% with Medical Diffusion and 68% with Lad (Ours).", "label": true }, { "paperid": "2410.02001v2", "paper_path": "./SciVer/papers/2410.02001v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3", "4" ], "image_path": "./SciVer/images/2410.02001v2-Table1-1.png", "request_id": 157, "origin_statement": "For cvs_th=0.92, increasing SNR_th from 0 to 20 decreases the number of filters n from 5 to 4 and lowers WCO from 289 to 174.", "perturbed_statement": "For cvs_th=0.92, increasing SNR_th from 0 to 20 decreases the number of filters n from 4 to 3 and lowers WCO from 289 to 174.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 shows that at cvs_th=0.92 and SNR_th=0, n is 5 (not 4), and at SNR_th=20, n is 4 (not 3).", "claim": "For cvs_th=0.92, increasing SNR_th from 0 to 20 decreases the number of filters n from 5 to 4 and lowers WCO from 289 to 174.", "label": true }, { "paperid": "2409.12428v1", "paper_path": "./SciVer/papers/2409.12428v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "4.2", "5.1" ], "image_path": "./SciVer/images/2409.12428v1_figure_1.png", "request_id": 166, 
"origin_statement": "In the NWF dataset, for LAST_LOGIN, the unprivileged group's JSD drift increases from 0.3 (LBC_vs_PC) to 0.6 (LBC_vs_PeC), doubling its earlier drift and roughly tripling the privileged group's 0.2 drift over the same period.", "perturbed_statement": "In the NWF dataset, for LAST_LOGIN, the unprivileged group's JSD drift increases from 0.4 (LBC_vs_PC) to 0.8 (LBC_vs_PeC), doubling its earlier drift and roughly tripling the privileged group's 0.2 drift over the same period.", "perturbed_explanation": "The figure shows the unprivileged group’s LAST_LOGIN drift is 0.3 JSD for LBC_vs_PC and 0.6 JSD for LBC_vs_PeC, not 0.4 and 0.8 as claimed, so the perturbed values contradict the actual data.", "claim": "In the NWF dataset, for LAST_LOGIN, the unprivileged group's JSD drift increases from 0.3 (LBC_vs_PC) to 0.6 (LBC_vs_PeC), doubling its earlier drift and roughly tripling the privileged group's 0.2 drift over the same period.", "label": true }, { "paperid": "2411.17977v1", "paper_path": "./SciVer/papers/2411.17977v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2411.17977v1-Table2-1.png", "request_id": 167, "origin_statement": "The power-law form has an AIC of 30.20, which is 8.80 lower than the Rephaeli form's AIC of 39.00.", "perturbed_statement": "The power-law form has an AIC of 30.20, which is 10.00 lower than the Rephaeli form's AIC of 39.00.", "perturbed_explanation": "The actual difference between the power-law AIC (30.20) and the Rephaeli AIC (39.00) is 8.80, not 10.00, so the stated 10.00 gap is incorrect according to the table.", "claim": "The power-law form has an AIC of 30.20, which is 8.80 lower than the Rephaeli form's AIC of 39.00.", "label": true }, { "paperid": "2410.07836v3", "paper_path": "./SciVer/papers/2410.07836v3.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.2", "4.3", "5" ], "image_path": "./SciVer/images/2410.07836v3-Table4-1.png", "request_id": 175, "origin_statement": "On the Quadruped Run task, GIT-STORM lowers FVD from 3560.33 to 1000.91, achieving a 71.9% reduction compared to STORM.", "perturbed_statement": "On the Quadruped Run task, GIT-STORM lowers FVD from 3560.33 to 1000.91, achieving a 56.3% reduction compared to STORM.", "perturbed_explanation": "The claim is incorrect because reducing FVD from 3560.33 to 1000.91 corresponds to a (1 – 1000.91/3560.33) ≈ 0.719, or a 71.9% reduction, not 56.3%.", "claim": "On the Quadruped Run task, GIT-STORM lowers FVD from 3560.33 to 1000.91, achieving a 56.3% reduction compared to STORM.", "label": false }, { "paperid": "2411.06665v1", "paper_path": "./SciVer/papers/2411.06665v1.json", "claim_type": "direct", "type": "chart", "item": "3(c)", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.06665v1_figure_3(c).png", "request_id": 178, "origin_statement": "In the Office-Home P→C scenario under one-shot, classification accuracy peaks at 82.4% when λ_pr=3 and λ_pr=9, which is 0.4% higher than the 82.0% observed at λ_pr=5 and λ_pr=7.", "perturbed_statement": "In the Office-Home P→C scenario under one-shot, classification accuracy peaks at 83.0% when λ_pr=3 and λ_pr=9, which is 1.0% higher than the 82.0% observed at λ_pr=5 and λ_pr=7.", "perturbed_explanation": "The perturbed statement is incorrect because the figure shows a peak accuracy of 82.4% (not 83.0%) at λ_pr=3 and λ_pr=9, resulting in a difference of 0.4% (not 1.0%) compared to the 82.0% at λ_pr=5 and λ_pr=7.", "claim": "In the Office-Home 
P→C scenario under one-shot, classification accuracy peaks at 83.0% when λ_pr=3 and λ_pr=9, which is 1.0% higher than the 82.0% observed at λ_pr=5 and λ_pr=7.", "label": false }, { "paperid": "2409.13394v1", "paper_path": "./SciVer/papers/2409.13394v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.3", "4.2" ], "image_path": "./SciVer/images/2409.13394v1-Table2-1.png", "request_id": 181, "origin_statement": "Membrane and microfluidic channel capacitances (0.3 pF and 0.2 pF) sum to 0.5 pF, which is 40 times smaller than the parasitic capacitance of 20 pF.", "perturbed_statement": "Membrane and microfluidic channel capacitances (0.3 pF and 0.2 pF) sum to 0.5 pF, which is a quarter of the parasitic capacitance of 2 pF.", "perturbed_explanation": "Table 2 lists the parasitic capacitance C_i as 20 pF, not 2 pF, so the claim that 0.5 pF is a quarter of 2 pF directly contradicts the actual 20 pF value.", "claim": "Membrane and microfluidic channel capacitances (0.3 pF and 0.2 pF) sum to 0.5 pF, which is a quarter of the parasitic capacitance of 2 pF.", "label": false }, { "paperid": "2411.11677v1", "paper_path": "./SciVer/papers/2411.11677v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5.3.2" ], "image_path": "./SciVer/images/2411.11677v1-Table4-1.png", "request_id": 189, "origin_statement": "On the ML-1M dataset, model N's N@10 score of 0.620 is 0.059 higher than model B's N@10 score of 0.561.", "perturbed_statement": "On the ML-1M dataset, model N's N@10 score of 0.620 is 0.061 higher than model B's N@10 score of 0.561.", "perturbed_explanation": "The actual N@10 scores from the table are 0.620 for model N and 0.561 for model B, making the true difference 0.059 rather than the 0.061 claimed.", "claim": "On the ML-1M dataset, model N's N@10 score of 0.620 is 0.061 higher than model B's N@10 score of 0.561.", "label": false }, { "paperid": "2410.08847v2", "paper_path": "./SciVer/papers/2410.08847v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2410.08847v2-Table2-1.png", "request_id": 191, "origin_statement": "In Table 2, the Llama-3-8B model’s preferred token 'Sure' shows the largest absolute probability decrease of 0.99, plunging from 0.99 to 0.00, exceeding the drop observed for any other model-token pair.", "perturbed_statement": "In Table 2, the Llama-3-8B model’s preferred token 'Sure' shows the largest absolute probability decrease of 0.94, plunging from 0.99 to 0.05, exceeding the drop observed for any other model-token pair.", "perturbed_explanation": "This statement is incorrect because Table 2 reports that 'Sure' under Llama-3-8B decreases by 0.99, from 0.99 to 0.00, not by 0.94 or to 0.05.", "claim": "In Table 2, the Llama-3-8B model’s preferred token 'Sure' shows the largest absolute probability decrease of 0.94, plunging from 0.99 to 0.05, exceeding the drop observed for any other model-token pair.", "label": false }, { "paperid": "2410.22785v1", "paper_path": "./SciVer/papers/2410.22785v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2410.22785v1-Table2-1.png", "request_id": 197, "origin_statement": "In the table, the separation between successive vibrational transitions in 28Si16O decreases slightly, measuring 301.763 MHz between ν=0 and ν=1, 301.520 MHz between ν=1 and ν=2, and 301.195 MHz between ν=2 and ν=3.", "perturbed_statement": "The separation between successive vibrational transitions 
in 28Si16O increases slightly, measuring 301.763 MHz between ν=0 and ν=1 and 302.195 MHz between ν=2 and ν=3.", "perturbed_explanation": "This is incorrect because the table lists rest frequencies 42820.570 MHz for ν=2 and 42519.375 MHz for ν=3, yielding a difference of 301.195 MHz, not 302.195 MHz.", "claim": "The separation between successive vibrational transitions in 28Si16O increases slightly, measuring 301.763 MHz between ν=0 and ν=1 and 302.195 MHz between ν=2 and ν=3.", "label": false }, { "paperid": "2411.17181v2", "paper_path": "./SciVer/papers/2411.17181v2.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.4.1" ], "image_path": "./SciVer/images/2411.17181v2_figure_4.png", "request_id": 204, "origin_statement": "At σ=1e-10, the Rouge-1 F1 score peaks at approximately 0.50, which is about 0.11 points higher than the Rouge-2 score of around 0.39, marking the largest gap between these metrics across all tested σ values.", "perturbed_statement": "At σ=1e-10, the Rouge-1 F1 score peaks at approximately 0.50, which is about 0.20 points higher than the Rouge-2 score of around 0.39, marking the largest gap between these metrics across all tested σ values.", "perturbed_explanation": "The perturbed statement incorrectly states the difference as 0.20 points, but the chart shows Rouge-1 at ~0.50 and Rouge-2 at ~0.39 at σ=1e-10, a gap of only ~0.11 points, not 0.20.", "claim": "At σ=1e-10, the Rouge-1 F1 score peaks at approximately 0.50, which is about 0.20 points higher than the Rouge-2 score of around 0.39, marking the largest gap between these metrics across all tested σ values.", "label": false }, { "paperid": "2409.05633v1", "paper_path": "./SciVer/papers/2409.05633v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2409.05633v1_figure_4.png", "request_id": 208, "origin_statement": "On the Instrument dataset, CoGCL achieves an NDCG@10 of 0.0435, which is approximately 0.0006 higher than the next best variant, w/o Add (0.0429).", "perturbed_statement": "On the Instrument dataset, CoGCL achieves an NDCG@10 of 0.0420, which is 0.0009 lower than the next best variant, w/o Add (0.0429).", "perturbed_explanation": "This is incorrect because the chart shows CoGCL’s NDCG@10 is actually 0.0435 (not 0.0420) on the Instrument dataset, and it exceeds — not falls below — the w/o Add variant’s score of 0.0429.", "claim": "On the Instrument dataset, CoGCL achieves an NDCG@10 of 0.0420, which is 0.0009 lower than the next best variant, w/o Add (0.0429).", "label": false }, { "paperid": "2411.07825v1", "paper_path": "./SciVer/papers/2411.07825v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5" ], "image_path": "./SciVer/images/2411.07825v1-Table2-1.png", "request_id": 211, "origin_statement": "The cumulative product term (1/b)∏_{j=0}^i c_j increases monotonically from 0.9091 at iteration 0 to 1.0096 at iteration 4, surpassing unity at iteration 4.", "perturbed_statement": "The cumulative product term (1/b)∏_{j=0}^i c_j increases monotonically from 0.9091 at iteration 0 to 1.0096 at iteration 4, surpassing unity at iteration 3.", "perturbed_explanation": "The perturbed statement is incorrect because at iteration 3 the cumulative product term is 0.9770 (below 1), and it only exceeds unity at iteration 4 when it reaches 1.0096, as shown in the table.", "claim": "The cumulative product term (1/b)∏_{j=0}^i c_j increases monotonically from 0.9091 at iteration 0 to 1.0096 at iteration 4, 
surpassing unity at iteration 3.", "label": false }, { "paperid": "2409.08522v1", "paper_path": "./SciVer/papers/2409.08522v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.08522v1_figure_2.png", "request_id": 216, "origin_statement": "At 600 words, PolitiFact achieves an F1 of about 0.95, which is approximately 0.27 higher than FakeHealth’s peak of 0.68 and 0.55 above GossipCop’s peak of 0.40 at the same word count.", "perturbed_statement": "At 600 words, PolitiFact achieves an F1 of about 0.85, which is approximately 0.15 higher than FakeHealth’s peak of 0.68 and 0.45 above GossipCop’s peak of 0.40 at the same word count.", "perturbed_explanation": "The chart shows PolitiFact’s F1 at around 0.95 at 600 words, not 0.85. Consequently, the true differences from FakeHealth (0.68) and GossipCop (0.40) are about 0.27 and 0.55, not the stated 0.15 and 0.45.", "claim": "At 600 words, PolitiFact achieves an F1 of about 0.85, which is approximately 0.15 higher than FakeHealth’s peak of 0.68 and 0.45 above GossipCop’s peak of 0.40 at the same word count.", "label": false }, { "paperid": "2411.06965v1", "paper_path": "./SciVer/papers/2411.06965v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.06965v1_figure_4.png", "request_id": 220, "origin_statement": "GAIL's archive covers nearly 100% of cells but its highest cumulative reward is under 3000, whereas Expert policies cover less than 50% of the archive but achieve rewards above 8000.", "perturbed_statement": "GAIL's archive covers nearly 100% of cells but its highest cumulative reward exceeds 5000, whereas Expert policies cover over 80% of the archive but achieve rewards above 8000.", "perturbed_explanation": "The perturbed claim is wrong because in Figure 4 GAIL's colorbar peaks at about 3000, not above 5000, and the Expert heatmap covers under half of the archive (below 50%), not over 80%.", "claim": "GAIL's archive covers nearly 100% of cells but its highest cumulative reward exceeds 5000, whereas Expert policies cover over 80% of the archive but achieve rewards above 8000.", "label": false }, { "paperid": "2411.16516v1", "paper_path": "./SciVer/papers/2411.16516v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2411.16516v1-Table3-1.png", "request_id": 221, "origin_statement": "Table 3 defines six distinct notations used in the FP analysis: M_ϑ, ϑ=θ, ϑ=θ̃, S*, Ŝ, and ε*(ϑ), ξ*(ϑ).", "perturbed_statement": "Table 3 defines five distinct notations used in the FP analysis: M_ϑ, ϑ=θ, ϑ=θ̃, S*, Ŝ, and ε*(ϑ), ξ*(ϑ).", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 actually lists six distinct notations. 
It erroneously claims there are only five, omitting one notation from the count.", "claim": "Table 3 defines five distinct notations used in the FP analysis: M_ϑ, ϑ=θ, ϑ=θ̃, S*, Ŝ, and ε*(ϑ), ξ*(ϑ).", "label": false }, { "paperid": "2410.09850v1", "paper_path": "./SciVer/papers/2410.09850v1.json", "claim_type": "direct", "type": "chart", "item": "6(a)", "section": [ "4.3.2" ], "image_path": "./SciVer/images/2410.09850v1_figure_6(a).png", "request_id": 222, "origin_statement": "In the high correlation setting, the original correlation between x1 and x2 is 0.8993 and the generated correlation is 0.8249, indicating a decrease of approximately 0.0744.", "perturbed_statement": "In the high correlation setting, the original correlation between x1 and x2 is 0.9100 and the generated correlation is 0.8249, indicating a decrease of approximately 0.0851.", "perturbed_explanation": "The perturbed statement is incorrect because the original correlation value is misreported. The actual original correlation between x1 and x2 is 0.8993, not 0.9100, and thus the decrease is 0.0744, not 0.0851.", "claim": "In the high correlation setting, the original correlation between x1 and x2 is 0.9100 and the generated correlation is 0.8249, indicating a decrease of approximately 0.0851.", "label": false }, { "paperid": "2409.11212v1", "paper_path": "./SciVer/papers/2409.11212v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.11212v1-Table4-1.png", "request_id": 241, "origin_statement": "UPO/StepUPO achieves a 13.04% win rate on AlpacaEval 2.0, exceeding DPO/StepDPO’s 9.12% by 3.92 percentage points.", "perturbed_statement": "UPO/StepUPO achieves a 12.04% win rate on AlpacaEval 2.0, exceeding DPO/StepDPO’s 8.12% by 4.92 percentage points.", "perturbed_explanation": "According to the table, UPO/StepUPO’s AlpacaEval 2.0 win rate is 13.04% (not 12.04%) and DPO/StepDPO’s rate is 9.12% (not 8.12%), so the stated values and computed difference are incorrect.", "claim": "UPO/StepUPO achieves a 12.04% win rate on AlpacaEval 2.0, exceeding DPO/StepDPO’s 8.12% by 4.92 percentage points.", "label": false }, { "paperid": "2409.14063v1", "paper_path": "./SciVer/papers/2409.14063v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.14063v1_figure_4.png", "request_id": 254, "origin_statement": "In Figure 4(b), the distance between the centroids of missing and owning class clusters spans about half the plot width, roughly double the separation seen in Figure 4(a), illustrating our method doubles inter-class cluster separation over FedAvg.", "perturbed_statement": "In Figure 4(b), the distance between the centroids of missing and owning class clusters spans about half the plot width, roughly triple the separation seen in Figure 4(a), illustrating our method triples inter-class cluster separation over FedAvg.", "perturbed_explanation": "The perturbed claim states a triple increase in centroid separation, but the plot shows the gap in Figure 4(b) is about half the width versus roughly a quarter in Figure 4(a), indicating a two-fold—not three-fold—difference.", "claim": "In Figure 4(b), the distance between the centroids of missing and owning class clusters spans about half the plot width, roughly triple the separation seen in Figure 4(a), illustrating our method triples inter-class cluster separation over FedAvg.", "label": false }, { "paperid": "2411.04281v1", "paper_path": 
"./SciVer/papers/2411.04281v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.3.1", "3.3.2", "3.3.3", "3.4" ], "image_path": "./SciVer/images/2411.04281v1-Table3-1.png", "request_id": 257, "origin_statement": "On MIMIC-III, MedGAN exhibits the smallest TSTR AUC drop (-0.063), while Plasmode has the largest drop (-0.268), indicating MedGAN better preserves discriminative utility.", "perturbed_statement": "On MIMIC-III, MedGAN exhibits the largest TSTR AUC drop (-0.063), while Plasmode has the smallest drop (-0.268), indicating MedGAN worse preserves discriminative utility.", "perturbed_explanation": "The perturbed claim reverses the actual TSTR AUC drops from Table 3: MedGAN’s drop of -0.063 is actually the smallest among methods, not the largest, and Plasmode’s drop of -0.268 is the largest, not the smallest.", "claim": "On MIMIC-III, MedGAN exhibits the largest TSTR AUC drop (-0.063), while Plasmode has the smallest drop (-0.268), indicating MedGAN worse preserves discriminative utility.", "label": false }, { "paperid": "2411.14914v1", "paper_path": "./SciVer/papers/2411.14914v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.14914v1_figure_5.png", "request_id": 262, "origin_statement": "On q3 queries, Mistral-tiny averaged about 50 search fields, approximately 17 more than Mistral-small’s 33 fields.", "perturbed_statement": "On q3 queries, Mistral-small averaged about 50 search fields, roughly 17 more than Mistral-tiny’s 33 fields.", "perturbed_explanation": "This statement is incorrect because the chart shows that Mistral-tiny uses around 50 search fields on q3 queries, while Mistral-small uses about 33 fields, not the other way around.", "claim": "On q3 queries, Mistral-small averaged about 50 search fields, roughly 17 more than Mistral-tiny’s 33 fields.", "label": false }, { "paperid": "2411.15173v1", "paper_path": "./SciVer/papers/2411.15173v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5" ], "image_path": "./SciVer/images/2411.15173v1-Table1-1.png", "request_id": 265, "origin_statement": "On CIFAR-10-C under mixed distribution, FreDA achieves an average classification error rate of 22.9%, which is 20.6 percentage points lower than the baseline WRN-28 model's 43.5% error rate.", "perturbed_statement": "On CIFAR-10-C under mixed distribution, FreDA achieves an average classification error rate of 19.9%, which is 23.6 percentage points lower than the baseline WRN-28 model's 43.5% error rate.", "perturbed_explanation": "This statement is incorrect because Table 1 shows FreDA’s average error on CIFAR-10-C is 22.9%, not 19.9%, and the difference from the baseline 43.5% is 20.6 percentage points, not 23.6.", "claim": "On CIFAR-10-C under mixed distribution, FreDA achieves an average classification error rate of 19.9%, which is 23.6 percentage points lower than the baseline WRN-28 model's 43.5% error rate.", "label": false }, { "paperid": "2410.21088v1", "paper_path": "./SciVer/papers/2410.21088v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.21088v1_figure_4.png", "request_id": 266, "origin_statement": "At TPR@1%FPR of 1.0, Shallow Diffuse yields an LPIPS of approximately 0.0, whereas RingID requires an LPIPS of about 0.4 to reach the same robustness.", "perturbed_statement": "At TPR@1%FPR of 1.0, Shallow Diffuse yields an LPIPS of approximately 0.4, whereas RingID requires an LPIPS of 
about 0.0 to reach the same robustness.", "perturbed_explanation": "The LPIPS plot shows that Shallow Diffuse reaches TPR=1 at an LPIPS near 0.0 (leftmost blue points) and RingID reaches TPR=1 at an LPIPS around 0.4 (green points). The perturbed statement incorrectly swaps these values.", "claim": "At TPR@1%FPR of 1.0, Shallow Diffuse yields an LPIPS of approximately 0.4, whereas RingID requires an LPIPS of about 0.0 to reach the same robustness.", "label": false }, { "paperid": "2409.01490v1", "paper_path": "./SciVer/papers/2409.01490v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.01490v1-Table2-1.png", "request_id": 269, "origin_statement": "Enabling the STM boosts convergence rate for L2 smoothing in Cartesian coordinates from 78% to 89%, an 11 percentage-point increase.", "perturbed_statement": "Enabling the STM boosts convergence rate for L2 smoothing in Cartesian coordinates from 78% to 91%, a 13 percentage-point increase.", "perturbed_explanation": "The table lists the convergence rate for L2 smoothing in Cartesian coordinates with STM as 89%, not 91%, and without STM as 78%. Therefore, the true increase is 11 percentage points, not 13.", "claim": "Enabling the STM boosts convergence rate for L2 smoothing in Cartesian coordinates from 78% to 91%, a 13 percentage-point increase.", "label": false }, { "paperid": "2409.08158v1", "paper_path": "./SciVer/papers/2409.08158v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.1", "3.2", "3.3" ], "image_path": "./SciVer/images/2409.08158v1-Table3-1.png", "request_id": 273, "origin_statement": "Poland’s Cited SCC coefficient (352.7***) exceeds Norway’s (147.9*) by 204.8, showing Polish authors exhibit a significantly larger citation bias in social cost of carbon estimates.", "perturbed_statement": "Sweden’s Cited SCC coefficient (352.7***) exceeds Norway’s (147.9*) by 204.8, showing Swedish authors exhibit a significantly larger citation bias in social cost of carbon estimates.", "perturbed_explanation": "This statement is incorrect because Table 3 shows that the Cited SCC coefficient of 352.7*** refers to Poland, not Sweden, and Sweden’s actual Cited SCC coefficient is 152.1*, not 352.7.", "claim": "Sweden’s Cited SCC coefficient (352.7***) exceeds Norway’s (147.9*) by 204.8, showing Swedish authors exhibit a significantly larger citation bias in social cost of carbon estimates.", "label": false }, { "paperid": "2409.03044v1", "paper_path": "./SciVer/papers/2409.03044v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.03044v1-Table3-1.png", "request_id": 275, "origin_statement": "Within general comments, negative feedback (315; 80%) is nearly three times more prevalent than positive feedback (108; 27%).", "perturbed_statement": "Within general comments, negative feedback (325; 80%) is nearly three times more prevalent than positive feedback (108; 27%).", "perturbed_explanation": "The table reports 315 negative comments in the general category—not 325. 
This discrepancy in the negative comment count contradicts the source data.", "claim": "Within general comments, negative feedback (325; 80%) is nearly three times more prevalent than positive feedback (108; 27%).", "label": false }, { "paperid": "2409.11428v1", "paper_path": "./SciVer/papers/2409.11428v1.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "5.5" ], "image_path": "./SciVer/images/2409.11428v1_figure_8.png", "request_id": 278, "origin_statement": "APFO reduces average file loss by approximately 50%, lowering it from 133.33 files under RTrap to 66.61 files.", "perturbed_statement": "APFO reduces average file loss by approximately 60%, lowering it from 133.33 files under RTrap to 50 files.", "perturbed_explanation": "The perturbed statement is wrong because the chart shows APFO’s average file loss is 66.61 files, not 50, and the drop from 133.33 to 66.61 corresponds to about a 50% reduction, not 60%.", "claim": "APFO reduces average file loss by approximately 60%, lowering it from 133.33 files under RTrap to 50 files.", "label": false }, { "paperid": "2410.11566v2", "paper_path": "./SciVer/papers/2410.11566v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.11566v2-Table3-1.png", "request_id": 281, "origin_statement": "The BE estimator's average estimation error of 13.19° is 3.0° lower than the MEKF's average error of 16.19°.", "perturbed_statement": "The BE estimator's average estimation error of 13.19° is 4.0° lower than the MEKF's average error of 16.19°.", "perturbed_explanation": "This is incorrect because the table shows MEKF’s error is 16.19° and BE’s error is 13.19°, a difference of 3.0°, not 4.0°.", "claim": "The BE estimator's average estimation error of 13.19° is 4.0° lower than the MEKF's average error of 16.19°.", "label": false }, { "paperid": "2409.12052v3", "paper_path": "./SciVer/papers/2409.12052v3.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "3.2.1", "3.2.2", "3.2.3" ], "image_path": "./SciVer/images/2409.12052v3_figure_2.png", "request_id": 284, "origin_statement": "At 1 K, Zr2NS2 shows a maximum superconducting gap of 1.74 meV, which is about two times larger than the 0.85 meV gap of 6% strained Zr2NCl2 and roughly six times greater than the 0.27 meV gap of Sc2NCl2.", "perturbed_statement": "At 1 K, Zr2NS2 shows a maximum superconducting gap of 0.74 meV, which is about equal to the 0.85 meV gap of 6% strained Zr2NCl2 and roughly three times greater than the 0.27 meV gap of Sc2NCl2.", "perturbed_explanation": "The perturbed statement is incorrect because the image shows Zr2NS2 actually has a maximum gap of 1.74 meV, not 0.74 meV, and this value is approximately double the 0.85 meV gap of 6% strained Zr2NCl2 rather than equal.", "claim": "At 1 K, Zr2NS2 shows a maximum superconducting gap of 0.74 meV, which is about equal to the 0.85 meV gap of 6% strained Zr2NCl2 and roughly three times greater than the 0.27 meV gap of Sc2NCl2.", "label": false }, { "paperid": "2411.03363v1", "paper_path": "./SciVer/papers/2411.03363v1.json", "claim_type": "direct", "type": "chart", "item": "3(b)", "section": [ "3.2", "3.3.2" ], "image_path": "./SciVer/images/2411.03363v1_figure_3(b).png", "request_id": 290, "origin_statement": "The model-based TDD method’s AUROC increases from approximately 0.59 at 16 hidden units to around 0.75 at 256 hidden units, reflecting a 16-point absolute gain as the MLP’s capacity grows.", "perturbed_statement": "The model-based 
TDD method’s AUROC increases from approximately 0.58 at 16 hidden units to around 0.78 at 256 hidden units, reflecting a 20-point absolute gain as the MLP’s capacity grows.", "perturbed_explanation": "This statement is incorrect because the actual AUROC values from the figure are about 0.59 at 16 hidden units and about 0.75 at 256 hidden units, not 0.58 and 0.78 respectively.", "claim": "The model-based TDD method’s AUROC increases from approximately 0.58 at 16 hidden units to around 0.78 at 256 hidden units, reflecting a 20-point absolute gain as the MLP’s capacity grows.", "label": false }, { "paperid": "2411.00311v1", "paper_path": "./SciVer/papers/2411.00311v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.00311v1-Table3-1.png", "request_id": 291, "origin_statement": "The full C2A model achieves 80.2% accuracy, while omitting the context embedding reduces accuracy by 2.2 percentage points to 78.0%, a larger drop than the 1.8-point reduction to 78.4% when omitting the label embedding.", "perturbed_statement": "The full C2A model achieves 80.2% accuracy, while omitting the context embedding reduces accuracy by only 1.8 percentage points to 78.4%, which is a smaller drop than the 2.2-point reduction to 78.0% when omitting the label embedding.", "perturbed_explanation": "This is incorrect because the table shows omitting the context embedding actually reduces accuracy by 2.2 points to 78.0%, and omitting the label embedding reduces accuracy by 1.8 points to 78.4%, not the reverse.", "claim": "The full C2A model achieves 80.2% accuracy, while omitting the context embedding reduces accuracy by only 1.8 percentage points to 78.4%, which is a smaller drop than the 2.2-point reduction to 78.0% when omitting the label embedding.", "label": false }, { "paperid": "2411.02264v1", "paper_path": "./SciVer/papers/2411.02264v1.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.02264v1_figure_9.png", "request_id": 296, "origin_statement": "The quadratic 440 mode has a normalized probability peak of about 140 at |A|≃0.04, roughly 20% higher than the quadratic 220 peak of about 115 at |A|≃0.40.", "perturbed_statement": "The quadratic 440 mode has a normalized probability peak of about 110 at |A|≃0.04, roughly equal to the quadratic 220 peak of about 115 at |A|≃0.40.", "perturbed_explanation": "The figure shows the quadratic 440 distribution actually peaks at around 140—not 110—at |A|≈0.04, so the perturbed peak value contradicts the plotted data.", "claim": "The quadratic 440 mode has a normalized probability peak of about 110 at |A|≃0.04, roughly equal to the quadratic 220 peak of about 115 at |A|≃0.40.", "label": false }, { "paperid": "2411.07273v2", "paper_path": "./SciVer/papers/2411.07273v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4" ], "image_path": "./SciVer/images/2411.07273v2-Table2-1.png", "request_id": 307, "origin_statement": "The deterministic finite automata representation achieves sublinear space usage of O(n^c) (empirical), compared to the linear Θ(n) space required by retrograde analysis.", "perturbed_statement": "The deterministic finite automata representation achieves linear space usage Θ(n), whereas retrograde analysis only requires sublinear space O(n^c) (empirical).", "perturbed_explanation": "Table 2 indicates that the deterministic finite automata method uses O(n^c) (empirical) space and retrograde analysis 
uses Θ(n) space. The perturbed statement wrongly swaps these details, claiming DFAs use Θ(n) and retrograde uses O(n^c), contradicting the table.", "claim": "The deterministic finite automata representation achieves linear space usage Θ(n), whereas retrograde analysis only requires sublinear space O(n^c) (empirical).", "label": false }, { "paperid": "2410.03705v2", "paper_path": "./SciVer/papers/2410.03705v2.json", "claim_type": "direct", "type": "chart", "item": "1(a)", "section": [ "5" ], "image_path": "./SciVer/images/2410.03705v2_figure_1(a).png", "request_id": 312, "origin_statement": "On the CD dataset, Random Forest achieves about 77.5% ROC AUC with roughly 10 seconds of training time, whereas TabTransformer yields only about 71.2% ROC AUC but requires approximately 1200 seconds, making Random Forest around 6.3 percentage points higher in AUC and over 100 times faster.", "perturbed_statement": "On the CD dataset, Random Forest achieves about 78.5% ROC AUC with roughly 10 seconds of training time, whereas TabTransformer yields only about 71.2% ROC AUC but requires approximately 300 seconds, making Random Forest around 7.3 percentage points higher in AUC and over 30 times faster.", "perturbed_explanation": "The perturbed statement is incorrect because Random Forest’s ROC AUC is actually 77.5%, not 78.5%, and TabTransformer’s training time is around 1200 seconds, not 300 seconds, as shown in the plot.", "claim": "On the CD dataset, Random Forest achieves about 78.5% ROC AUC with roughly 10 seconds of training time, whereas TabTransformer yields only about 71.2% ROC AUC but requires approximately 300 seconds, making Random Forest around 7.3 percentage points higher in AUC and over 30 times faster.", "label": false }, { "paperid": "2409.00511v1", "paper_path": "./SciVer/papers/2409.00511v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.00511v1-Table1-1.png", "request_id": 313, "origin_statement": "On the aPY dataset, our Diffusion model RevCD achieves a harmonic mean accuracy of 46.5%, which is 13.1 percentage points higher than GAN’s harmonic mean of 33.4%.", "perturbed_statement": "On the aPY dataset, our Diffusion model RevCD achieves a harmonic mean accuracy of 43.6%, which is 10.2 percentage points higher than GAN’s harmonic mean of 33.4%.", "perturbed_explanation": "This statement is incorrect because Table 1 shows RevCD’s harmonic mean on aPY is actually 46.5%, not 43.6%, and the true difference over GAN’s 33.4% is 13.1 percentage points, not 10.2.", "claim": "On the aPY dataset, our Diffusion model RevCD achieves a harmonic mean accuracy of 43.6%, which is 10.2 percentage points higher than GAN’s harmonic mean of 33.4%.", "label": false }, { "paperid": "2409.01466v1", "paper_path": "./SciVer/papers/2409.01466v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.01466v1_figure_6.png", "request_id": 314, "origin_statement": "In the left panel, the Judge-labeled Facebook coefficient for “Promote” ads is about 0.18, roughly 50% higher than the CoT estimate of 0.12 and slightly below the Human estimate of 0.20.", "perturbed_statement": "In the left panel, the Judge-labeled Facebook coefficient for “Promote” ads is about 0.05, roughly 50% lower than the CoT estimate of 0.12 and slightly higher than the Human estimate of 0.20.", "perturbed_explanation": "This claim is incorrect because the Judge-labeled coefficient in the left panel is actually 
around 0.18 (not 0.05), making it higher (not lower) than the CoT estimate of roughly 0.12, and it is slightly below (not above) the Human estimate of about 0.20.", "claim": "In the left panel, the Judge-labeled Facebook coefficient for “Promote” ads is about 0.05, roughly 50% lower than the CoT estimate of 0.12 and slightly higher than the Human estimate of 0.20.", "label": false }, { "paperid": "2409.16657v1", "paper_path": "./SciVer/papers/2409.16657v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.6" ], "image_path": "./SciVer/images/2409.16657v1_figure_4.png", "request_id": 318, "origin_statement": "CaSnS3 achieves a maximum SLME of 31.20%, which is 1.67 percentage points higher than SrSnS3’s maximum SLME of 29.53%.", "perturbed_statement": "CaSnS3 achieves a maximum SLME of 32.20%, which is 3.67 percentage points higher than SrSnS3’s maximum SLME of 29.53%.", "perturbed_explanation": "The perturbed statement is incorrect because the actual maximum SLME of CaSnS3 is 31.20%, not 32.20%, and the difference from SrSnS3’s 29.53% is 1.67 percentage points, not 3.67.", "claim": "CaSnS3 achieves a maximum SLME of 32.20%, which is 3.67 percentage points higher than SrSnS3’s maximum SLME of 29.53%.", "label": false }, { "paperid": "2409.07162v2", "paper_path": "./SciVer/papers/2409.07162v2.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "6.1", "6.2" ], "image_path": "./SciVer/images/2409.07162v2_figure_4.png", "request_id": 320, "origin_statement": "On the Netflix app, GPT-4's 5-shot f1 score is about 5.4, 1.2 points above ChatGPT's ~4.2 and 1.6 above LLama-2-70B's ~3.8.", "perturbed_statement": "On the Netflix app, GPT-4's 5-shot f1 score reaches about 6.0, outperforming ChatGPT's ~4.5 by 1.5 points and LLama-2-70B's ~4.0 by 2.0 points.", "perturbed_explanation": "The chart shows GPT-4's f1 for Netflix is approximately 5.4 (not 6.0), ChatGPT's is around 4.2 (not 4.5), and LLama-2-70B's is about 3.8 (not 4.0), making the stated values incorrect.", "claim": "On the Netflix app, GPT-4's 5-shot f1 score reaches about 6.0, outperforming ChatGPT's ~4.5 by 1.5 points and LLama-2-70B's ~4.0 by 2.0 points.", "label": false }, { "paperid": "2410.12855v2", "paper_path": "./SciVer/papers/2410.12855v2.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "8.3" ], "image_path": "./SciVer/images/2410.12855v2_figure_9.png", "request_id": 328, "origin_statement": "In the JAILJUDGETRAIN dataset, Q5 prompts (18,693) appear over 14 times as often as Q4 prompts (1,309).", "perturbed_statement": "In the JAILJUDGETRAIN dataset, Q5 prompts (18,693) appear over 20 times as often as Q4 prompts (1,309).", "perturbed_explanation": "The perturbed statement is wrong because dividing 18,693 by 1,309 yields about 14.3, not over 20, so Q5 prompts are only around 14 times more frequent than Q4 prompts, not over 20 times as claimed.", "claim": "In the JAILJUDGETRAIN dataset, Q5 prompts (18,693) appear over 20 times as often as Q4 prompts (1,309).", "label": false }, { "paperid": "2411.17616v1", "paper_path": "./SciVer/papers/2411.17616v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.17616v1_figure_4.png", "request_id": 330, "origin_statement": "In the model with skip branches, low cosine similarities (<0.92) occur at both very early timesteps (<50) and very late timesteps (>900), while in the model without skip branches, such low similarities are only seen at the earliest 
timesteps (<50).", "perturbed_statement": "In the model with skip branches, low cosine similarities (<0.92) occur only at the very early timesteps (<50), while in the model without skip branches, such low similarities appear at both very early (<50) and very late (>900) timesteps.", "perturbed_explanation": "This is incorrect because the w/ Skip Branches heat map clearly shows a dark band (cosine similarity <0.92) at timesteps above 900, contradicting the claim that low similarities occur only at early timesteps. Additionally, the w/o Skip Branches map does not show such late-timestep drops, so it is wrong to state that low similarities appear there at >900.", "claim": "In the model with skip branches, low cosine similarities (<0.92) occur only at the very early timesteps (<50), while in the model without skip branches, such low similarities appear at both very early (<50) and very late (>900) timesteps.", "label": false }, { "paperid": "2409.11599v1", "paper_path": "./SciVer/papers/2409.11599v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.11599v1-Table2-1.png", "request_id": 331, "origin_statement": "The electronic micrometer (Characteristic 32) exhibited the highest t-statistic (3.506) and the lowest p-value (0.003) among the gauges, surpassing the internal micrometer’s t-statistic of 2.548 and p-value of 0.020, which shows the strongest expert–novice difference occurred on the electronic micrometer.", "perturbed_statement": "The electronic caliper (Characteristic 35) exhibited the highest t-statistic (3.506) and the lowest p-value (0.003) among the gauges, surpassing the internal micrometer’s t-statistic of 2.548 and p-value of 0.020, indicating the strongest expert–novice difference occurred on the electronic caliper.", "perturbed_explanation": "This is incorrect because Table 2 shows that the 3.506 t-statistic and 0.003 p-value correspond to the electronic micrometer (Characteristic 32), while the electronic caliper (Characteristic 35) actually has a t-statistic of 2.862 and a p-value of 0.010.", "claim": "The electronic caliper (Characteristic 35) exhibited the highest t-statistic (3.506) and the lowest p-value (0.003) among the gauges, surpassing the internal micrometer’s t-statistic of 2.548 and p-value of 0.020, indicating the strongest expert–novice difference occurred on the electronic caliper.", "label": false }, { "paperid": "2411.03500v1", "paper_path": "./SciVer/papers/2411.03500v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.03500v1-Table1-1.png", "request_id": 333, "origin_statement": "Table 1 defines two binary variable categories (R_{} and L_c) but specifies three ILP constraints, meaning there is one more constraint than variable types.", "perturbed_statement": "Table 1 defines two binary variable categories (R_{} and L_c) but specifies four ILP constraints, meaning there are two more constraints than variable types.", "perturbed_explanation": "The table actually lists only three constraint inequalities, not four. 
Since there are three constraints but the claim mentions four, it contradicts the number of constraints shown in the table.", "claim": "Table 1 defines two binary variable categories (R_{} and L_c) but specifies four ILP constraints, meaning there are two more constraints than variable types.", "label": false }, { "paperid": "2409.02399v1", "paper_path": "./SciVer/papers/2409.02399v1.json", "claim_type": "direct", "type": "chart", "item": "1(c)", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.02399v1_figure_1(c).png", "request_id": 334, "origin_statement": "In the d=15 scenario, BPF’s logZ estimate errors span approximately −15 to +5, while iAPF’s errors span only about −1 to +1, indicating that iAPF has substantially lower variability than BPF.", "perturbed_statement": "In the d=15 scenario, BPF’s logZ estimate errors span approximately −10 to +10, while iAPF’s errors span only about −3 to +3, indicating that iAPF has substantially lower variability than BPF.", "perturbed_explanation": "This is incorrect because the actual boxplots show BPF’s errors range from about −15 to +5 and iAPF’s errors range from about −1 to +1, not −10 to +10 or −3 to +3 as stated.", "claim": "In the d=15 scenario, BPF’s logZ estimate errors span approximately −10 to +10, while iAPF’s errors span only about −3 to +3, indicating that iAPF has substantially lower variability than BPF.", "label": false }, { "paperid": "2411.14533v1", "paper_path": "./SciVer/papers/2411.14533v1.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "5.4" ], "image_path": "./SciVer/images/2411.14533v1_figure_9.png", "request_id": 340, "origin_statement": "The median percentage deviation for geometric instances is around -2%, while for random graphs it is about +1%, indicating BRKGA-B slightly underperforms on geometric graphs compared to the baseline but slightly overperforms on random ones.", "perturbed_statement": "The median percentage deviation for geometric instances is around +2%, while for random graphs it is about -1%, indicating BRKGA-B slightly overperforms on geometric graphs compared to the baseline but underperforms on random ones.", "perturbed_explanation": "The boxplot shows the median deviation for geometric instances below zero (approximately -2%), not +2%, and the median for random graphs above zero (around +1%), not -1%. Therefore, the perturbed values contradict the plotted medians.", "claim": "The median percentage deviation for geometric instances is around +2%, while for random graphs it is about -1%, indicating BRKGA-B slightly overperforms on geometric graphs compared to the baseline but underperforms on random ones.", "label": false }, { "paperid": "2409.12887v2", "paper_path": "./SciVer/papers/2409.12887v2.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.12887v2_figure_1.png", "request_id": 342, "origin_statement": "SynCSE has about 1000 more false positives than SimCSE, while MultiCSR reduces false positives by roughly 3000 compared to SimCSE yet increases false negatives from about 50 to around 200.", "perturbed_statement": "SynCSE has about 1000 fewer false positives than SimCSE, while MultiCSR reduces false positives by roughly 3000 compared to SimCSE yet increases false negatives from about 50 to around 200.", "perturbed_explanation": "The perturbed statement incorrectly claims SynCSE has fewer false positives than SimCSE. 
In the chart, SynCSE’s false positive bar (~7000) is higher than SimCSE’s (~6000), so SynCSE actually has about 1000 more, not fewer.", "claim": "SynCSE has about 1000 fewer false positives than SimCSE, while MultiCSR reduces false positives by roughly 3000 compared to SimCSE yet increases false negatives from about 50 to around 200.", "label": false }, { "paperid": "2409.09549v1", "paper_path": "./SciVer/papers/2409.09549v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.09549v1-Table2-1.png", "request_id": 343, "origin_statement": "After data cleaning, DiabDeep’s test set reduced from 4188 to 3717, a drop of 471, whereas MHDeep’s test set dropped from 5420 to 5123, a drop of 297.", "perturbed_statement": "After data cleaning, DiabDeep’s test set reduced from 4188 to 3717, a drop of 400, whereas MHDeep’s test set dropped from 5420 to 5123, a drop of 350.", "perturbed_explanation": "The perturbed statement incorrectly reports the drops: the DiabDeep test set actually fell by 4188−3717 = 471, not 400, and the MHDeep test set fell by 5420−5123 = 297, not 350.", "claim": "After data cleaning, DiabDeep’s test set reduced from 4188 to 3717, a drop of 400, whereas MHDeep’s test set dropped from 5420 to 5123, a drop of 350.", "label": false }, { "paperid": "2409.05314v2", "paper_path": "./SciVer/papers/2409.05314v2.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "8.3" ], "image_path": "./SciVer/images/2409.05314v2_figure_8.png", "request_id": 344, "origin_statement": "During instruction fine-tuning, Gemma-2B-Tele's training loss decreases by about 1.0 from 1.8 at epoch 0 to 0.8 at epoch 1, while Tinyllama-1.1B-Tele drops roughly 0.3 from 0.85 to 0.55 and LLama-3-8B-Tele decreases about 0.3 from 0.98 to 0.68.", "perturbed_statement": "During instruction fine-tuning, Gemma-2B-Tele's training loss decreases by about 0.6 from 1.8 at epoch 0 to 1.2 at epoch 1, while Tinyllama-1.1B-Tele drops roughly 0.15 from 0.85 to 0.70 and LLama-3-8B-Tele decreases about 0.3 from 0.98 to 0.68.", "perturbed_explanation": "The perturbed statement misreports Gemma-2B-Tele's loss drop as 0.6 to a final loss of 1.2, but the figure shows it falls by about 1.0 from 1.8 down to ~0.8. It also understates Tinyllama-1.1B-Tele's drop: the loss goes from 0.85 to ~0.55 (a ~0.3 decrease), not to 0.70 (~0.15).", "claim": "During instruction fine-tuning, Gemma-2B-Tele's training loss decreases by about 0.6 from 1.8 at epoch 0 to 1.2 at epoch 1, while Tinyllama-1.1B-Tele drops roughly 0.15 from 0.85 to 0.70 and LLama-3-8B-Tele decreases about 0.3 from 0.98 to 0.68.", "label": false }, { "paperid": "2410.09253v1", "paper_path": "./SciVer/papers/2410.09253v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.1" ], "image_path": "./SciVer/images/2410.09253v1-Table1-1.png", "request_id": 349, "origin_statement": "The F115W & F360M filter pair has a total exposure time of 773.047 s, which is approximately 44% of the 1739.357 s exposures used for each of the other three filter pairs.", "perturbed_statement": "The F212N & F470N filter pair has the shortest total exposure time at 773.047 s, which is only 44% of the 1739.357 s exposures used by the other three filter pairs.", "perturbed_explanation": "This is incorrect because the table shows the F212N & F470N pair actually has a total exposure time of 1739.357 s. 
The F115W & F360M pair is the one with the 773.047 s exposure, making it the shortest, not F212N & F470N.", "claim": "The F212N & F470N filter pair has the shortest total exposure time at 773.047 s, which is only 44% of the 1739.357 s exposures used by the other three filter pairs.", "label": false }, { "paperid": "2410.13343v1", "paper_path": "./SciVer/papers/2410.13343v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "5.2.3" ], "image_path": "./SciVer/images/2410.13343v1_figure_3.png", "request_id": 352, "origin_statement": "In the Standard dataset, Mistral-7B predicted neutral about 80%, significantly higher than its entailment (~14%) and contradiction (~6%) predictions.", "perturbed_statement": "In the Standard dataset, Mistral-7B predicted neutral about 60%, significantly higher than its entailment (~14%) and contradiction (~6%) predictions.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 3(a) shows Mistral-7B’s neutral prediction rate at approximately 80%, not 60%, contradicting the stated value.", "claim": "In the Standard dataset, Mistral-7B predicted neutral about 60%, significantly higher than its entailment (~14%) and contradiction (~6%) predictions.", "label": false }, { "paperid": "2409.05878v2", "paper_path": "./SciVer/papers/2409.05878v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "3.5" ], "image_path": "./SciVer/images/2409.05878v2-Table4-1.png", "request_id": 373, "origin_statement": "On the Anime dataset, SGL’s per-epoch training time (18711.6s) is over twice that of NGCF (8571.4s), while its GPU memory usage (3.89 GB) is 0.98 GB higher.", "perturbed_statement": "On the Anime dataset, SGL’s per-epoch training time (18711.6s) is under twice that of NGCF (8571.4s), while its GPU memory usage (3.89 GB) is 1.1 GB higher.", "perturbed_explanation": "The perturbed claim is incorrect because 18711.6s divided by 8571.4s is about 2.18, which is over twice, not under twice. Additionally, the memory gap between 3.89 GB and 2.91 GB is 0.98 GB, not 1.1 GB.", "claim": "On the Anime dataset, SGL’s per-epoch training time (18711.6s) is under twice that of NGCF (8571.4s), while its GPU memory usage (3.89 GB) is 1.1 GB higher.", "label": false }, { "paperid": "2411.07140v2", "paper_path": "./SciVer/papers/2411.07140v2.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "3.3.4" ], "image_path": "./SciVer/images/2411.07140v2_figure_6.png", "request_id": 376, "origin_statement": "Baichuan2-7B experiences the largest F-score drop, falling from 26.3 to 14.0 (a 12.3-point decrease, about 47%), whereas InternLM2.5-7B slightly improves from 25.3 to 25.7, a 0.4-point increase after alignment.", "perturbed_statement": "Baichuan2-7B experiences the largest F-score drop, falling from 26.3 to 15.0 (an 11.3-point decrease, about 43%), whereas InternLM2.5-7B slightly improves from 25.3 to 26.2, a 0.9-point increase after alignment.", "perturbed_explanation": "The chart shows Baichuan2-7B’s F-score drops from 26.3 to 14.0 (12.3-point drop, ~47%), not to 15.0 or an 11.3-point (43%) decrease. 
It also shows InternLM2.5-7B rising from 25.3 to 25.7 (0.4-point increase), not to 26.2 or a 0.9-point gain.", "claim": "Baichuan2-7B experiences the largest F-score drop, falling from 26.3 to 15.0 (an 11.3-point decrease, about 43%), whereas InternLM2.5-7B slightly improves from 25.3 to 26.2, a 0.9-point increase after alignment.", "label": false }, { "paperid": "2409.04290v1", "paper_path": "./SciVer/papers/2409.04290v1.json", "claim_type": "direct", "type": "chart", "item": "11", "section": [ "4.3.2" ], "image_path": "./SciVer/images/2409.04290v1_figure_11.png", "request_id": 386, "origin_statement": "For group_46 CNV values below 10, the predicted hazard contribution remains near zero, but it rises sharply after 15, reaching approximately 50 at a CNV of 20.", "perturbed_statement": "For group_46 CNV values below 10, the predicted hazard contribution remains near zero, but it rises sharply after 8, reaching approximately 30 at a CNV of 20.", "perturbed_explanation": "This statement is incorrect because the hazard contribution for group_46 CNV does not begin to rise sharply until after a CNV of about 12–13, not 8, and at a CNV of 20 the hazard contribution reaches roughly 50, not 30 as claimed.", "claim": "For group_46 CNV values below 10, the predicted hazard contribution remains near zero, but it rises sharply after 8, reaching approximately 30 at a CNV of 20.", "label": false }, { "paperid": "2409.07019v1", "paper_path": "./SciVer/papers/2409.07019v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4" ], "image_path": "./SciVer/images/2409.07019v1_figure_5.png", "request_id": 388, "origin_statement": "By 2030, the cumulative transit-time offset for K2-2 b using the original ephemeris grows to roughly 310 hours, while the new ephemeris keeps the predicted transit time within ±0.2 hours (3σ uncertainty).", "perturbed_statement": "By 2030, the cumulative transit-time offset for K2-2 b using the original ephemeris grows to roughly 250 hours, while the new ephemeris keeps the predicted transit time within ±0.5 hours (3σ uncertainty).", "perturbed_explanation": "This is incorrect because Figure 5 shows the original ephemeris offset reaches about 310 hours by 2030 (not 250 hours), and the updated ephemeris uncertainty remains within ±0.2 hours at the 3σ level (not ±0.5 hours).", "claim": "By 2030, the cumulative transit-time offset for K2-2 b using the original ephemeris grows to roughly 250 hours, while the new ephemeris keeps the predicted transit time within ±0.5 hours (3σ uncertainty).", "label": false }, { "paperid": "2409.00163v1", "paper_path": "./SciVer/papers/2409.00163v1.json", "claim_type": "direct", "type": "chart", "item": "3(c)", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.00163v1_figure_3(c).png", "request_id": 390, "origin_statement": "At time 90, all five DeepHit-predicted survival curves intersect at S(t)=0.5, indicating identical survival probabilities across patient IDs at the midpoint.", "perturbed_statement": "At time 100, all five DeepHit-predicted survival curves intersect at S(t)=0.5, indicating identical survival probabilities across patient IDs.", "perturbed_explanation": "This is wrong because the curves decline linearly from 1.0 at t=0 to 0 at t=180, so at t=100 the survival probability is about 0.44, not 0.5.", "claim": "At time 100, all five DeepHit-predicted survival curves intersect at S(t)=0.5, indicating identical survival probabilities across patient IDs.", "label": false }, { "paperid": "2411.01739v2", "paper_path": 
"./SciVer/papers/2411.01739v2.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "5.5" ], "image_path": "./SciVer/images/2411.01739v2-Table5-1.png", "request_id": 391, "origin_statement": "Compared to max pooling, GeM prompt fusion increases Avg Acc on Split-Clothing by 4.51 points, reduces FTT by 4.98, and raises HM by 2.64.", "perturbed_statement": "Compared to max pooling, GeM prompt fusion increases Avg Acc on Split-Clothing by 3.51 points, reduces FTT by 4.98, and raises HM by 3.64.", "perturbed_explanation": "The perturbed statement incorrectly states a 3.51-point increase in Avg Acc (actual increase is 4.51) and a 3.64-point increase in HM (actual increase is 2.64) when comparing GeM to max pooling.", "claim": "Compared to max pooling, GeM prompt fusion increases Avg Acc on Split-Clothing by 3.51 points, reduces FTT by 4.98, and raises HM by 3.64.", "label": false }, { "paperid": "2411.01537v1", "paper_path": "./SciVer/papers/2411.01537v1.json", "claim_type": "direct", "type": "chart", "item": "5(a)", "section": [ "4.6" ], "image_path": "./SciVer/images/2411.01537v1_figure_5(a).png", "request_id": 392, "origin_statement": "In SASRec’s layer 1 heatmap, the attention score at the 15th historical item (~0.052) is over twice the score at the 10th item (~0.022).", "perturbed_statement": "In SASRec’s layer 1 heatmap, the attention score at the 20th historical item (~0.052) is over twice the score at the 10th item (~0.022).", "perturbed_explanation": "This is incorrect because the peak attention score (approximately 0.052) actually occurs at the 15th item, not the 20th item, as shown by the darkest cell around the 15th position.", "claim": "In SASRec’s layer 1 heatmap, the attention score at the 20th historical item (~0.052) is over twice the score at the 10th item (~0.022).", "label": false }, { "paperid": "2410.10672v1", "paper_path": "./SciVer/papers/2410.10672v1.json", "claim_type": "direct", "type": "table", "item": "11", "section": [ "5.2.2" ], "image_path": "./SciVer/images/2410.10672v1-Table11-1.png", "request_id": 393, "origin_statement": "On the OpenWebText2 dataset, the matrix nuclear-norm decreases from 0.7147 at 111M to 0.4991 at 13B, a reduction of approximately 30.2%.", "perturbed_statement": "On the OpenWebText2 dataset, the matrix nuclear-norm decreases from 0.7147 at 256M to 0.4991 at 13B, a reduction of approximately 30.2%.", "perturbed_explanation": "This statement is incorrect because the matrix nuclear-norm value of 0.7147 corresponds to the 111M model, not the 256M model (which actually has a nuclear-norm of 0.7066).", "claim": "On the OpenWebText2 dataset, the matrix nuclear-norm decreases from 0.7147 at 256M to 0.4991 at 13B, a reduction of approximately 30.2%.", "label": false }, { "paperid": "2410.11716v1", "paper_path": "./SciVer/papers/2410.11716v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.11716v1_figure_2.png", "request_id": 396, "origin_statement": "At a highest dose success rate of 0.8, the population-based test achieves approximately 54% power, whereas the residual-based randomization tests achieve around 85% power, a difference of about 31 percentage points.", "perturbed_statement": "At a highest dose success rate of 0.8, the population-based test achieves approximately 65% power, whereas the residual-based randomization tests achieve around 75% power, a difference of about 10 percentage points.", "perturbed_explanation": "This statement is wrong because in 
the figure at success rate 0.8 the population-based test’s power is about 54%, not 65%, and the residual-based tests’ power is about 85%, not 75%, so the true difference is closer to 31%, not 10%.", "claim": "At a highest dose success rate of 0.8, the population-based test achieves approximately 65% power, whereas the residual-based randomization tests achieve around 75% power, a difference of about 10 percentage points.", "label": false }, { "paperid": "2410.05522v1", "paper_path": "./SciVer/papers/2410.05522v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.05522v1_figure_6.png", "request_id": 398, "origin_statement": "The worst-case prediction in Figure 6 has an R^2 of 0.551, which is 0.427 lower than the best-case R^2 of 0.978, indicating a significantly poorer fit.", "perturbed_statement": "The worst-case prediction in Figure 6 has an R^2 of 0.751, which is 0.227 lower than the best-case R^2 of 0.978, indicating a significantly poorer fit.", "perturbed_explanation": "Figure 6 shows the worst-case R^2 as 0.551, not 0.751, and the gap between 0.551 and 0.978 is 0.427. The perturbed statement’s values (0.751 and 0.227) conflict with the actual R^2 and difference shown in the figure.", "claim": "The worst-case prediction in Figure 6 has an R^2 of 0.751, which is 0.227 lower than the best-case R^2 of 0.978, indicating a significantly poorer fit.", "label": false }, { "paperid": "2409.17201v2", "paper_path": "./SciVer/papers/2409.17201v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "6.1" ], "image_path": "./SciVer/images/2409.17201v2-Table3-1.png", "request_id": 399, "origin_statement": "The immersed parameter dimension n~ in CNN2 increases by 513 parameters from its original n, which is greater than the MLP's increase (201) and CNN's increase (129).", "perturbed_statement": "The immersed parameter dimension n~ in CNN2 increases by 129 parameters from its original n, which is smaller than the MLP's increase (201) and CNN's increase (513).", "perturbed_explanation": "This is incorrect because Table 3 shows CNN2’s n~ (582,539) minus n (582,026) equals 513, not 129. 
In contrast, CNN’s increase is 1,200,011 − 1,199,882 = 129, not 513.", "claim": "The immersed parameter dimension n~ in CNN2 increases by 129 parameters from its original n, which is smaller than the MLP's increase (201) and CNN's increase (513).", "label": false }, { "paperid": "2410.14508v1", "paper_path": "./SciVer/papers/2410.14508v1.json", "claim_type": "direct", "type": "chart", "item": "3(b)", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.14508v1_figure_3(b).png", "request_id": 400, "origin_statement": "Participants aged 18–30 make up 73.8% of the sample, which is approximately 3.9 times the 19.0% share of participants aged 30–45.", "perturbed_statement": "Participants aged 18–30 make up 73.8% of the sample, which is approximately 5 times the 19.0% share of participants aged 30–45.", "perturbed_explanation": "The ratio between 73.8% and 19.0% is about 3.9, not 5, so stating it is 5 times greater contradicts the actual percentages shown.", "claim": "Participants aged 18–30 make up 73.8% of the sample, which is approximately 5 times the 19.0% share of participants aged 30–45.", "label": false }, { "paperid": "2409.17608v1", "paper_path": "./SciVer/papers/2409.17608v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.17608v1-Table2-1.png", "request_id": 409, "origin_statement": "On the ShanghaiTech (SHT) dataset, our flow-based model achieves an AUC of 75.6%, which is 0.8 percentage points higher than the second-best prediction-based model VEC [45], which scores 74.8%.", "perturbed_statement": "On the ShanghaiTech (SHT) dataset, our flow-based model achieves an AUC of 76.6%, which is 1.8 percentage points higher than the second-best prediction-based model VEC [45], which scores 74.8%.", "perturbed_explanation": "The perturbed statement incorrectly claims an AUC of 76.6% for the flow-based model, but the table shows it as 75.6%. Consequently, the stated 1.8-point advantage is wrong, since the actual margin is only 0.8 points.", "claim": "On the ShanghaiTech (SHT) dataset, our flow-based model achieves an AUC of 76.6%, which is 1.8 percentage points higher than the second-best prediction-based model VEC [45], which scores 74.8%.", "label": false }, { "paperid": "2410.12261v1", "paper_path": "./SciVer/papers/2410.12261v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.12261v1-Table3-1.png", "request_id": 411, "origin_statement": "CATCH achieves an AUC-ROC of 0.838 on the CalIt2 dataset, which is 0.020 higher than the CD channel correlation strategy's 0.818 on the same dataset.", "perturbed_statement": "CATCH achieves an AUC-ROC of 0.848 on the CalIt2 dataset, which is 0.030 higher than the CD channel correlation strategy's 0.828 on the same dataset.", "perturbed_explanation": "This is incorrect because Table 3 reports CATCH’s AUC-ROC on CalIt2 as 0.838 (not 0.848) and the CD channel correlation strategy’s score as 0.818 (not 0.828). These incorrect values contradict the table.", "claim": "CATCH achieves an AUC-ROC of 0.848 on the CalIt2 dataset, which is 0.030 higher than the CD channel correlation strategy's 0.828 on the same dataset.", "label": false }, { "paperid": "2411.11706v1", "paper_path": "./SciVer/papers/2411.11706v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "5.5" ], "image_path": "./SciVer/images/2411.11706v1_figure_6.png", "request_id": 412, "origin_statement": "In the W/ k-means vs. 
W/o k-means results, the weighted score for Choice-V rises from about 0.82 to 0.89 with k-means initialization—an improvement of roughly 0.07, which is the largest gain across all five experiments.", "perturbed_statement": "In the W/ k-means vs. W/o k-means results, the weighted score for Choice-V rises from about 0.82 to 0.85 with k-means initialization—an improvement of only 0.03, making it the smallest gain among all five experiments.", "perturbed_explanation": "The actual weighted Choice-V score with k-means initialization is approximately 0.89 (not 0.85), resulting in an improvement of about 0.07, and this improvement is the largest, not the smallest, among the experiments.", "claim": "In the W/ k-means vs. W/o k-means results, the weighted score for Choice-V rises from about 0.82 to 0.85 with k-means initialization—an improvement of only 0.03, making it the smallest gain among all five experiments.", "label": false }, { "paperid": "2409.19028v1", "paper_path": "./SciVer/papers/2409.19028v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.19028v1-Table5-1.png", "request_id": 413, "origin_statement": "Gpt3.5’s median (4.50) is twice Llama2’s (2.25) and three times Tinyllama’s (1.50), indicating substantially stronger performance.", "perturbed_statement": "Gpt3.5’s median (4.50) is only 1.5 times Llama2’s (3.00) and 2.5 times Tinyllama’s (1.80), showing moderate performance gains.", "perturbed_explanation": "The statement is wrong because the table shows Llama2’s median score is actually 2.25 (not 3.00) and Tinyllama’s median is 1.50 (not 1.80), invalidating the quoted ratios.", "claim": "Gpt3.5’s median (4.50) is only 1.5 times Llama2’s (3.00) and 2.5 times Tinyllama’s (1.80), showing moderate performance gains.", "label": false }, { "paperid": "2411.01076v2", "paper_path": "./SciVer/papers/2411.01076v2.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "5.2.3" ], "image_path": "./SciVer/images/2411.01076v2_figure_8.png", "request_id": 418, "origin_statement": "For G=3 and P=4, approximately one in three iterations yields only one token, while for P=2 and P=3, after iteration 3 every subsequent iteration produces 3 tokens.", "perturbed_statement": "For G=3 and P=4, approximately one in four iterations yields only one token, while for P=2 and P=3, after iteration 3 every subsequent iteration produces 3 tokens.", "perturbed_explanation": "The red curve (G=3, P=4) shows 10 out of 30 iterations (one in three) producing only one token, not 7.5 out of 30 (one in four), so the stated one-in-four rate contradicts the actual one-in-three mis-speculation frequency.", "claim": "For G=3 and P=4, approximately one in four iterations yields only one token, while for P=2 and P=3, after iteration 3 every subsequent iteration produces 3 tokens.", "label": false }, { "paperid": "2410.23893v3", "paper_path": "./SciVer/papers/2410.23893v3.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.23893v3-Table1-1.png", "request_id": 419, "origin_statement": "On the SNL dataset, DiffBatt achieves an RMSE of 125, which is 75 cycles lower than the PCR model’s RMSE of 200.", "perturbed_statement": "On the SNL dataset, DiffBatt achieves an RMSE of 125, which is 60 cycles lower than the PCR model’s RMSE of 200.", "perturbed_explanation": "This statement is incorrect because the actual difference between DiffBatt’s RMSE of 125 and PCR’s RMSE of 200 on the SNL 
dataset is 75 cycles, not 60.", "claim": "On the SNL dataset, DiffBatt achieves an RMSE of 125, which is 60 cycles lower than the PCR model’s RMSE of 200.", "label": false }, { "paperid": "2410.22551v2", "paper_path": "./SciVer/papers/2410.22551v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.22551v2-Table1-1.png", "request_id": 431, "origin_statement": "FairSkin reduces DP by 5.33 (from 15.28 to 9.95) and boosts ESSP by 1.93 (from 5.85 to 7.78) compared to CBRS.", "perturbed_statement": "FairSkin reduces DP by 4.00 (from 15.28 to 9.95) and boosts ESSP by 2.50 (from 5.85 to 7.78) compared to CBRS.", "perturbed_explanation": "The statement is incorrect because Table 1 shows the actual DP reduction is 15.28 − 9.95 = 5.33, not 4.00, and the actual ESSP increase is 7.78 − 5.85 = 1.93, not 2.50.", "claim": "FairSkin reduces DP by 4.00 (from 15.28 to 9.95) and boosts ESSP by 2.50 (from 5.85 to 7.78) compared to CBRS.", "label": false }, { "paperid": "2410.06905v2", "paper_path": "./SciVer/papers/2410.06905v2.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2410.06905v2_figure_2.png", "request_id": 432, "origin_statement": "Approximately 75% of the 200 random samples (about 150 green plusses vs. 50 blue crosses) lie within the 1−α(p) contour, yielding a confidence level of roughly 0.75 at point p.", "perturbed_statement": "Approximately 85% of the 200 random samples (about 170 green plusses vs. 30 blue crosses) lie within the 1−α(p) contour, yielding a confidence level of roughly 0.85 at point p.", "perturbed_explanation": "The image actually shows around 150 green plusses inside the contour out of roughly 200 samples (≈75%), not 170 plusses (85%).", "claim": "Approximately 85% of the 200 random samples (about 170 green plusses vs. 30 blue crosses) lie within the 1−α(p) contour, yielding a confidence level of roughly 0.85 at point p.", "label": false }, { "paperid": "2409.11088v1", "paper_path": "./SciVer/papers/2409.11088v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.2.3" ], "image_path": "./SciVer/images/2409.11088v1-Table1-1.png", "request_id": 433, "origin_statement": "From redshift 2 to redshift 5, the mean transmission rate of Ly photons decreases by approximately 46%, dropping from 0.93 to 0.50.", "perturbed_statement": "From redshift 2 to redshift 5, the mean transmission rate of Ly photons decreases by approximately 58%, dropping from 0.93 to 0.50.", "perturbed_explanation": "This statement is incorrect because Table 1 shows the transmission rate falls from 0.93 at z=2 to 0.50 at z=5, a drop of 0.43. 
That corresponds to about a 46% decrease relative to 0.93, not 58%.", "claim": "From redshift 2 to redshift 5, the mean transmission rate of Ly photons decreases by approximately 58%, dropping from 0.93 to 0.50.", "label": false }, { "paperid": "2410.22938v2", "paper_path": "./SciVer/papers/2410.22938v2.json", "claim_type": "direct", "type": "chart", "item": "3(c)", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.22938v2_figure_3(c).png", "request_id": 442, "origin_statement": "When DiffLight is trained at a 10% missing rate, its relative generalization performance increases by 6% on both 30% and 50% test missing rates (relative scores of 1.06) compared to using a 10% test missing rate.", "perturbed_statement": "When DiffLight is trained at a 10% missing rate, its relative generalization performance increases by 16% on both 30% and 50% test missing rates (relative scores of 1.16) compared to using a 10% test missing rate.", "perturbed_explanation": "The perturbed statement claims a 16% improvement (1.16), but the figure shows the actual relative scores are 1.06 (6% improvement) when trained at 10% and tested at 30% or 50%. Thus the 1.16 value contradicts the data.", "claim": "When DiffLight is trained at a 10% missing rate, its relative generalization performance increases by 16% on both 30% and 50% test missing rates (relative scores of 1.16) compared to using a 10% test missing rate.", "label": false }, { "paperid": "2410.06765v1", "paper_path": "./SciVer/papers/2410.06765v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "5.2", "5.3", "5.4" ], "image_path": "./SciVer/images/2410.06765v1_figure_6.png", "request_id": 444, "origin_statement": "At step 5000 in the finetuning stage of the Two-Layer MLP-based model, increasing resolution from 224 to 448 reduces the loss from approximately 0.82 to about 0.75, a decrease of around 0.07.", "perturbed_statement": "At step 5000 in the finetuning stage of the Two-Layer MLP-based model, increasing resolution from 224 to 448 increases the loss from approximately 0.82 to about 0.90, an increase of around 0.08.", "perturbed_explanation": "Figure 6’s finetuning curve for Two-Layer MLP shows the 448-resolution loss is about 0.75 at step 5000, not 0.90, and it is lower than the 224-resolution loss (~0.82). 
Thus the claim that the loss increases to 0.90 contradicts the actual plotted values.", "claim": "At step 5000 in the finetuning stage of the Two-Layer MLP-based model, increasing resolution from 224 to 448 increases the loss from approximately 0.82 to about 0.90, an increase of around 0.08.", "label": false }, { "paperid": "2409.10132v1", "paper_path": "./SciVer/papers/2409.10132v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.5" ], "image_path": "./SciVer/images/2409.10132v1-Table4-1.png", "request_id": 447, "origin_statement": "StruEdit's 3-hop average latency of 2.25 seconds is about 16% lower than IKE's 2.67 seconds.", "perturbed_statement": "StruEdit's 3-hop average latency of 2.25 seconds is about 8% lower than IKE's 2.45 seconds.", "perturbed_explanation": "This is incorrect because IKE’s actual 3-hop latency from the table is 2.67 s (not 2.45 s), and the true reduction is roughly (2.67−2.25)/2.67≈15.7%, not 8%.", "claim": "StruEdit's 3-hop average latency of 2.25 seconds is about 8% lower than IKE's 2.45 seconds.", "label": false }, { "paperid": "2410.17831v1", "paper_path": "./SciVer/papers/2410.17831v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.3.2" ], "image_path": "./SciVer/images/2410.17831v1-Table3-1.png", "request_id": 451, "origin_statement": "On the Cave circuit dataset for humans, our method’s feasibility success rate is 97%, which is 90 percentage points higher than CHOMP’s 7%, highlighting a more than thirteenfold improvement.", "perturbed_statement": "On the Cave circuit dataset for humans, our method’s feasibility success rate is 95%, which is 78 percentage points higher than CHOMP’s 17%, highlighting a fivefold improvement.", "perturbed_explanation": "The table shows that for the Cave circuit dataset (Human), CHOMP with one GPDF has a 7% success rate (not 17%), and Ours records 97% (not 95%), so both the percentages and the calculated improvement in the perturbed statement contradict the source.", "claim": "On the Cave circuit dataset for humans, our method’s feasibility success rate is 95%, which is 78 percentage points higher than CHOMP’s 17%, highlighting a fivefold improvement.", "label": false }, { "paperid": "2410.23701v1", "paper_path": "./SciVer/papers/2410.23701v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.23701v1-Table4-1.png", "request_id": 453, "origin_statement": "The Diffusion BPS evaluator method achieved a 65% success rate on hard objects, which is 45 percentage points higher than the 20% achieved by FRoGGeR without an evaluator.", "perturbed_statement": "The Diffusion BPS evaluator method achieved a 75% success rate on hard objects, which is 45 percentage points higher than the 30% achieved by FRoGGeR without an evaluator.", "perturbed_explanation": "Table 4 shows Diffusion BPS succeeded on hard objects 26/40 trials (65%), not 75%, and FRoGGeR succeeded 8/40 trials (20%), not 30%, so the perturbed percentages contradict the actual values in the table.", "claim": "The Diffusion BPS evaluator method achieved a 75% success rate on hard objects, which is 45 percentage points higher than the 30% achieved by FRoGGeR without an evaluator.", "label": false }, { "paperid": "2409.00140v1", "paper_path": "./SciVer/papers/2409.00140v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.00140v1-Table3-1.png", "request_id": 455, "origin_statement": "Among the 
one-fourth sized models, FQReLU-QFC-1-4 has 5,296 parameters, 224 fewer than FQReLU-QIP-1-4's 5,520 parameters.", "perturbed_statement": "Among the one-fourth sized models, FQReLU-QFC-1-4 has 5,296 parameters, 100 fewer than FQReLU-QIP-1-4's 5,520 parameters.", "perturbed_explanation": "The perturbed statement incorrectly states the difference as 100 parameters. In the table, FQReLU-QFC-1-4 actually has 5,296 parameters and FQReLU-QIP-1-4 has 5,520, so the true difference is 224 parameters, not 100.", "claim": "Among the one-fourth sized models, FQReLU-QFC-1-4 has 5,296 parameters, 100 fewer than FQReLU-QIP-1-4's 5,520 parameters.", "label": false }, { "paperid": "2410.05817v3", "paper_path": "./SciVer/papers/2410.05817v3.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.05817v3_figure_7.png", "request_id": 456, "origin_statement": "The mother-tongue relation has approximately 2.3 times as many CK-based outputs (~5200) as the-official-language relation (~2300).", "perturbed_statement": "The mother-tongue relation has approximately 3 times as many CK-based outputs (~5200) as the-official-language relation (~2300).", "perturbed_explanation": "The actual chart shows about 5200 CK outputs for mother-tongue and about 2300 for the-official-language, a ratio of roughly 2.3:1. Claiming a 3:1 ratio is incorrect based on these values.", "claim": "The mother-tongue relation has approximately 3 times as many CK-based outputs (~5200) as the-official-language relation (~2300).", "label": false }, { "paperid": "2409.10233v1", "paper_path": "./SciVer/papers/2409.10233v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.10233v1_figure_5.png", "request_id": 462, "origin_statement": "At Σ = 138 meV in Fig. 5b for the PLX1 center, the ISC transition rates are Γ_z ≈ 0.01 MHz and Γ_⊥ ≈ 0.002 MHz, resulting in a Γ_z/Γ_⊥ ratio of about 5.", "perturbed_statement": "At Σ = 138 meV in Fig. 5b for the PLX1 center, the ISC transition rates are Γ_z ≈ 0.01 MHz and Γ_⊥ ≈ 0.002 MHz, resulting in a Γ_z/Γ_⊥ ratio of about 3.", "perturbed_explanation": "The perturbed ratio is incorrect. From Fig. 5b the black dotted line at Σ = 138 meV corresponds to Γ_z/Γ_⊥ ≈ 5, and numerically dividing 0.01 MHz by 0.002 MHz yields 5, not 3.", "claim": "At Σ = 138 meV in Fig. 
5b for the PLX1 center, the ISC transition rates are Γ_z ≈ 0.01 MHz and Γ_⊥ ≈ 0.002 MHz, resulting in a Γ_z/Γ_⊥ ratio of about 3.", "label": false }, { "paperid": "2409.00119v2", "paper_path": "./SciVer/papers/2409.00119v2.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.00119v2_figure_1.png", "request_id": 464, "origin_statement": "RoAd achieves around 88.8% accuracy on the GLUE benchmark using only about 0.05% of model parameters for training, surpassing full finetuning’s 88.6% accuracy.", "perturbed_statement": "RoAd achieves around 88.8% accuracy on the GLUE benchmark using only about 0.5% of model parameters for training, surpassing full finetuning’s 88.6% accuracy.", "perturbed_explanation": "The percentage of trainable parameters is misstated: the figure shows RoAd uses approximately 0.05% trainable parameters, not 0.5%, making the perturbed claim incorrect.", "claim": "RoAd achieves around 88.8% accuracy on the GLUE benchmark using only about 0.5% of model parameters for training, surpassing full finetuning’s 88.6% accuracy.", "label": false }, { "paperid": "2410.05729v1", "paper_path": "./SciVer/papers/2410.05729v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.05729v1-Table2-1.png", "request_id": 465, "origin_statement": "On the KITTI dataset, our method achieves the lowest rotation error of 0.92°, outperforming SpinNet’s 1.08° by 0.16°, and a translation error of 8.74 cm, 5.86 cm lower than DGR’s 14.6 cm.", "perturbed_statement": "On the KITTI dataset, our method achieves the lowest rotation error of 0.78°, outperforming SpinNet’s 1.08° by 0.30°, and a translation error of 8.74 cm, 5.86 cm lower than DGR’s 14.6 cm.", "perturbed_explanation": "The table lists our rotation error as 0.92°, not 0.78°, so the claimed 0.78° value and the 0.30° improvement over SpinNet are incorrect.", "claim": "On the KITTI dataset, our method achieves the lowest rotation error of 0.78°, outperforming SpinNet’s 1.08° by 0.30°, and a translation error of 8.74 cm, 5.86 cm lower than DGR’s 14.6 cm.", "label": false }, { "paperid": "2410.22187v1", "paper_path": "./SciVer/papers/2410.22187v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "6.1" ], "image_path": "./SciVer/images/2410.22187v1-Table1-1.png", "request_id": 467, "origin_statement": "At B=5 on Caltech101, our method achieves 92.8% accuracy, which is 1.7 percentage points higher than Random’s 91.1% and 3.4 points higher than Entropy’s 89.4%.", "perturbed_statement": "At B=5 on Caltech101, our method achieves 94.8% accuracy, which is 3.7 percentage points higher than Random’s 91.1% and 5.4 points higher than Entropy’s 89.4%.", "perturbed_explanation": "The perturbed statement incorrectly reports our method’s accuracy as 94.8%. 
According to the table, our method actually achieves 92.8% at B=5 on Caltech101, not 94.8%.", "claim": "At B=5 on Caltech101, our method achieves 94.8% accuracy, which is 3.7 percentage points higher than Random’s 91.1% and 5.4 points higher than Entropy’s 89.4%.", "label": false }, { "paperid": "2411.05009v1", "paper_path": "./SciVer/papers/2411.05009v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.05009v1_figure_2(b).png", "request_id": 472, "origin_statement": "For 512,000 particles, the Kokkos variant on the H100 GPU completes in about 90 s, which is roughly 1.7 times faster than the same variant on the V100 GPU (~150 s).", "perturbed_statement": "For 512,000 particles, the Kokkos variant on the H100 GPU completes in about 120 s, which is roughly 1.25 times faster than the same variant on the V100 GPU (~150 s).", "perturbed_explanation": "This is incorrect because the solid dark green line (H100 Kokkos) at 512,000 particles reads about 90 s in Figure 2(b), not 120 s. Consequently, the actual speedup over the V100 (~150 s) is about 1.7×, not 1.25×.", "claim": "For 512,000 particles, the Kokkos variant on the H100 GPU completes in about 120 s, which is roughly 1.25 times faster than the same variant on the V100 GPU (~150 s).", "label": false }, { "paperid": "2410.05317v2", "paper_path": "./SciVer/papers/2410.05317v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.05317v2-Table1-1.png", "request_id": 473, "origin_statement": "ToCa (N=3, R=80%) achieves a 2.21× speedup over PixArt-α while increasing MS-COCO2017 FID by just 0.73 (from 28.09 to 28.82).", "perturbed_statement": "ToCa (N=3, R=80%) achieves a 3.21× speedup over PixArt-α while increasing MS-COCO2017 FID by only 0.5 (from 28.09 to 28.59).", "perturbed_explanation": "The table shows ToCa (N=3, R=80%) has a 2.21× speedup, not 3.21×, and its FID rises from 28.09 to 28.82 (a 0.73 increase), not to 28.59 (0.5 increase).", "claim": "ToCa (N=3, R=80%) achieves a 3.21× speedup over PixArt-α while increasing MS-COCO2017 FID by only 0.5 (from 28.09 to 28.59).", "label": false }, { "paperid": "2411.17502v1", "paper_path": "./SciVer/papers/2411.17502v1.json", "claim_type": "direct", "type": "chart", "item": "11", "section": [ "6.3.x" ], "image_path": "./SciVer/images/2411.17502v1_figure_11.png", "request_id": 482, "origin_statement": "For Sort Prediction - Day of Operations, conditional efficiency rises from 1.435 under No Shift to 1.703 under External Shift, a gain of 0.268.", "perturbed_statement": "For Sort Prediction - Day of Operations, conditional efficiency rises from 1.435 under No Shift to 1.803 under External Shift, a gain of 0.368.", "perturbed_explanation": "This is incorrect because the chart shows the External Shift efficiency for Sort Prediction on the Day of Operations is 1.703, not 1.803. 
Therefore, the actual gain is 0.268, not 0.368.", "claim": "For Sort Prediction - Day of Operations, conditional efficiency rises from 1.435 under No Shift to 1.803 under External Shift, a gain of 0.368.", "label": false }, { "paperid": "2409.16745v1", "paper_path": "./SciVer/papers/2409.16745v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.2" ], "image_path": "./SciVer/images/2409.16745v1-Table1-1.png", "request_id": 487, "origin_statement": "On 07/12/2023, the R-filter observations peaked at 42, fourteen times the three images taken in each B and V filter that day.", "perturbed_statement": "On 07/12/2023, the R-filter observations peaked at 24, eight times the three images taken in each B and V filter that day.", "perturbed_explanation": "The table shows 42 R-filter observations on 07/12/2023, not 24; therefore the claim of an eightfold ratio versus three B or V images is incorrect (the actual ratio is fourteen).", "claim": "On 07/12/2023, the R-filter observations peaked at 24, eight times the three images taken in each B and V filter that day.", "label": false }, { "paperid": "2411.08343v1", "paper_path": "./SciVer/papers/2411.08343v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3" ], "image_path": "./SciVer/images/2411.08343v1-Table3-1.png", "request_id": 489, "origin_statement": "Spiderman: Homecoming has 266 more sentences and 106 more words than Spiderman: Far From Home.", "perturbed_statement": "Spiderman: Homecoming has 300 more sentences and 200 more words than Spiderman: Far From Home.", "perturbed_explanation": "The perturbed counts are incorrect. According to the table, Homecoming has 2,196 sentences versus Far From Home’s 1,930 (a difference of 266), and 12,295 words versus 12,189 (a difference of 106), not 300 sentences and 200 words.", "claim": "Spiderman: Homecoming has 300 more sentences and 200 more words than Spiderman: Far From Home.", "label": false }, { "paperid": "2411.09443v1", "paper_path": "./SciVer/papers/2411.09443v1.json", "claim_type": "direct", "type": "chart", "item": "14", "section": [ "5.6" ], "image_path": "./SciVer/images/2411.09443v1_figure_14.png", "request_id": 490, "origin_statement": "In the cross-matched sample, the 0.6 …", "perturbed_explanation": "… elements (vanilla, chocolate, strawberry, cookie_dough, mint_chocolate), so stating there are 4 flavor options contradicts the actual number shown in the image-to-code demo.", "claim": "In Fig10, Pixtral generates an HTML page with 5 distinct flavor options and a “Next” button; in Fig11, Pixtral-12B achieves an 8 rating—167% higher than QwenVL-7B’s score of 3 and 300% above Gemini-Flash-8B’s 2.", "label": true }, { "paperid": "2410.13376v1", "paper_path": "./SciVer/papers/2410.13376v1.json", "claim_type": "parallel", "item1": "4", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13376v1_figure_4.png", "item2_path": "./SciVer/images/2410.13376v1_figure_6.png", "section": [ "4.1" ], "request_id": 49, "origin_statement": "Using a 4-dimensional latent space, the CAE-FFNN model reduces the error indicator for v from about 0.052 at ε*≈0.014 to approximately 0.030 at ε*≈0.030.", "perturbed_statement": "Using a 4-dimensional latent space, the CAE-FFNN model reduces the error indicator for v from about 0.052 at ε*≈0.014 to approximately 0.025 at ε*≈0.030.", "perturbed_explanation": "Figure 6 shows that the error indicator for v at ε*≈0.030 is around 0.030, not 0.025, so the perturbed value 0.025 contradicts the actual plot.",
"claim": "Using a 4-dimensional latent space, the CAE-FFNN model reduces the error indicator for v from about 0.052 at ε*≈0.014 to approximately 0.030 at ε*≈0.030.", "label": true }, { "paperid": "2410.12049v1", "paper_path": "./SciVer/papers/2410.12049v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.12049v1_figure_1.png", "item2_path": "./SciVer/images/2410.12049v1_figure_2.png", "section": [ "2.1" ], "request_id": 62, "origin_statement": "At approximately 2 USD per million tokens, Sabiá-3 attains ~79% average accuracy, exceeding Llama 3.1 405B’s ~75.8% at ~3.7 USD. In the ENADE 2022/23 exams, Sabiá-3 outperforms Sabiá-2 Medium in 76% of the 54 tests, especially in Control and Automation Engineering (78 vs 33 points).", "perturbed_statement": "At approximately 1 USD per million tokens, Sabiá-3 attains ~79% average accuracy, exceeding Llama 3.1 405B’s ~75.8% at ~3.7 USD. In the ENADE 2022/23 exams, Sabiá-3 outperforms Sabiá-2 Medium in 80% of the 54 tests, especially in Control and Automation Engineering (78 vs 33 points).", "perturbed_explanation": "The perturbed price of \"1 USD per million tokens\" contradicts Figure 1, which shows Sabiá-3 at about 2 USD per million. The claim of \"80% of the 54 tests\" is wrong because Figure 2’s caption and bars indicate Sabiá-3 outperforms Sabiá-2 Medium on 76% of the ENADE exams, not 80%.", "claim": "At approximately 2 USD per million tokens, Sabiá-3 attains ~79% average accuracy, exceeding Llama 3.1 405B’s ~75.8% at ~3.7 USD. In the ENADE 2022/23 exams, Sabiá-3 outperforms Sabiá-2 Medium in 76% of the 54 tests, especially in Control and Automation Engineering (78 vs 33 points).", "label": true }, { "paperid": "2411.06018v1", "paper_path": "./SciVer/papers/2411.06018v1.json", "claim_type": "parallel", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06018v1_figure_7(a).png", "item2_path": "./SciVer/images/2411.06018v1_figure_7(b).png", "section": [ "6.3" ], "request_id": 74, "origin_statement": "On EMG, GPT-4o accuracy jumps from 33% under ZST to 92% with ICL, a 59-point gain, whereas GPT-4o-mini only rises from 33% to 35%, a mere 2-point increase.", "perturbed_statement": "On EMG, GPT-4o accuracy jumps from 33% under ZST to 92% with ICL, a 59-point gain, whereas GPT-4o-mini only rises from 33% to 50%, a 17-point increase.", "perturbed_explanation": "The perturbed statement misreports GPT-4o-mini’s ICL accuracy on EMG as 50% (and a 17-point gain). 
The figure actually shows GPT-4o-mini improving from 33% to 35% on EMG under ICL, a 2-point increase.", "claim": "On EMG, GPT-4o accuracy jumps from 33% under ZST to 92% with ICL, a 59-point gain, whereas GPT-4o-mini only rises from 33% to 35%, a mere 2-point increase.", "label": true }, { "paperid": "2409.19025v1", "paper_path": "./SciVer/papers/2409.19025v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19025v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.19025v1-Table4-1.png", "section": [ "4.1" ], "request_id": 85, "origin_statement": "For the attack coping strategy, frustration is rated at about 4.5 whereas fear remains below 2; Table 4 highlights frustration in bold for attack but lists fear only under distance, confirming participants’ emotion judgments match theoretical associations.", "perturbed_statement": "For the attack coping strategy, frustration is rated at about 4.5 whereas fear reaches roughly 3.5; Table 4 highlights frustration in bold for attack but lists fear only under distance.", "perturbed_explanation": "The radial chart in Figure 2 shows that fear for the attack strategy is rated below 2, not around 3.5 as stated, so the perturbed fear value contradicts the actual plotted data.", "claim": "For the attack coping strategy, frustration is rated at about 4.5 whereas fear remains below 2; Table 4 highlights frustration in bold for attack but lists fear only under distance, confirming participants’ emotion judgments match theoretical associations.", "label": true }, { "paperid": "2411.01006v2", "paper_path": "./SciVer/papers/2411.01006v2.json", "claim_type": "parallel", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01006v2_figure_6.png", "item2_path": "./SciVer/images/2411.01006v2_figure_7.png", "section": [ "5.2" ], "request_id": 104, "origin_statement": "In s0–s255, roughly 125 codes (e.g., s1, s2, s3) show a monotonic upward trend, and in s256–s511 about 130 codes (e.g., s257, s259, s261) do, so nearly half of the 512 codebook entries are simple increasing line shapes.", "perturbed_statement": "In s0–s255, roughly 200 codes (e.g., s1, s2, s3) show a monotonic upward trend, and in s256–s511 about 200 codes (e.g., s257, s259, s261) do, so almost 80% of the 512 codebook entries are simple increasing line shapes.", "perturbed_explanation": "The perturbed statement overestimates the count of upward-trending shapes. 
Visually, only about 125 of the first 256 and about 130 of the second 256 codes display a clear monotonic increase, not 200 in each half or 80% of the codebook.", "claim": "In s0–s255, roughly 125 codes (e.g., s1, s2, s3) show a monotonic upward trend, and in s256–s511 about 130 codes (e.g., s257, s259, s261) do, so nearly half of the 512 codebook entries are simple increasing line shapes.", "label": true }, { "paperid": "2411.18433v1", "paper_path": "./SciVer/papers/2411.18433v1.json", "claim_type": "parallel", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.18433v1_figure_7.png", "item2_path": "./SciVer/images/2411.18433v1-Table3-1.png", "section": [ "5.3" ], "request_id": 140, "origin_statement": "The 95% credible interval for the distance coefficient φ in the Information-Sharing network is (0.37, 0.71), spanning 0.34, and the distance-dependent model achieves the lowest AIC of 2158, improving by 18 units over the homogeneous model’s 2176.", "perturbed_statement": "The 95% credible interval for the distance coefficient φ in the Information-Sharing network is (0.47, 0.71), spanning 0.24, and the distance-dependent model achieves the lowest AIC of 2158, improving by 18 units over the homogeneous model’s 2176.", "perturbed_explanation": "The perturbed statement misreports the credible interval lower bound as 0.47 (and its span as 0.24), whereas Figure 7 shows the 95% credible interval for φ is actually (0.37, 0.71), spanning 0.34.", "claim": "The 95% credible interval for the distance coefficient φ in the Information-Sharing network is (0.37, 0.71), spanning 0.34, and the distance-dependent model achieves the lowest AIC of 2158, improving by 18 units over the homogeneous model’s 2176.", "label": true }, { "paperid": "2410.13114v1", "paper_path": "./SciVer/papers/2410.13114v1.json", "claim_type": "parallel", "item1": "7", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13114v1_figure_7.png", "item2_path": "./SciVer/images/2410.13114v1-Table2-1.png", "section": [ "3.1" ], "request_id": 156, "origin_statement": "Mozilla Common Voice contains roughly 2,500 hours of Spanish audio, compared to about 80 hours in AudioSet; neither dataset documents contributors’ race/ethnicity.", "perturbed_statement": "Mozilla Common Voice contains roughly 2,500 hours of Spanish audio, compared to about 80 hours in AudioSet; both datasets document contributors’ race/ethnicity.", "perturbed_explanation": "The perturbation wrongly claims that both datasets document race/ethnicity. 
Table 2 shows 'Race/Ethnicity' is 'no' for Mozilla Common Voice and 'no' for AudioSet, so neither records this demographic.", "claim": "Mozilla Common Voice contains roughly 2,500 hours of Spanish audio, compared to about 80 hours in AudioSet; neither dataset documents contributors’ race/ethnicity.", "label": true }, { "paperid": "2411.17640v1", "paper_path": "./SciVer/papers/2411.17640v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17640v1_figure_1.png", "item2_path": "./SciVer/images/2411.17640v1-Table2-1.png", "section": [ "3.2" ], "request_id": 158, "origin_statement": "In sector 44, a likely transit showed a TTV of −0.0237 d (−34.1 min) and depth ≈0.45%, while TOI-588.01’s +40 s (0.00046 d) period correction would accumulate ≈0.0081 d TTV over 17.5 orbits—3× smaller.", "perturbed_statement": "In sector 44, a likely transit showed a TTV of −0.0237 d (−34.1 min) and depth ≈0.45%, while TOI-588.01’s +400 s (0.0046 d) period correction would accumulate ≈0.081 d TTV over 17.5 orbits—3× smaller.", "perturbed_explanation": "Table 2 lists TOI-588.01’s period correction as +40 s, not +400 s. The perturbed statement incorrectly multiplies the actual ΔP by 10, contradicting the ΔP value in the table.", "claim": "In sector 44, a likely transit showed a TTV of −0.0237 d (−34.1 min) and depth ≈0.45%, while TOI-588.01’s +40 s (0.00046 d) period correction would accumulate ≈0.0081 d TTV over 17.5 orbits—3× smaller.", "label": true }, { "paperid": "2411.17459v1", "paper_path": "./SciVer/papers/2411.17459v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17459v1_figure_4.png", "item2_path": "./SciVer/images/2411.17459v1-Table3-1.png", "section": [ "4.3" ], "request_id": 186, "origin_statement": "WF-VAE-L, with 192 base channels, achieves 28.66 PSNR on Kinetics-400 while needing about 55 GB decode memory at 768² resolution, nearly 20 GB less than CogVideoX’s 75 GB under the same setting.", "perturbed_statement": "WF-VAE-L, with 192 base channels, achieves 29.66 PSNR on Kinetics-400 while needing about 65 GB decode memory at 768² resolution, nearly 10 GB less than CogVideoX’s 75 GB under the same setting.", "perturbed_explanation": "The PSNR for WF-VAE-L is actually 28.66 (not 29.66) as shown in Table 3, and its decode memory at 768² is about 55 GB rather than 65 GB according to Figure 4.", "claim": "WF-VAE-L, with 192 base channels, achieves 28.66 PSNR on Kinetics-400 while needing about 55 GB decode memory at 768² resolution, nearly 20 GB less than CogVideoX’s 75 GB under the same setting.", "label": true }, { "paperid": "2411.17593v1", "paper_path": "./SciVer/papers/2411.17593v1.json", "claim_type": "parallel", "item1": "5", "item2": "10", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17593v1_figure_5.png", "item2_path": "./SciVer/images/2411.17593v1_figure_10.png", "section": [ "3.3" ], "request_id": 233, "origin_statement": "Although 85.2% of text chunks were predicted for KS3 and only roughly 5% for KS5, the highlighted most complex excerpt is classified KS5 at full confidence (1.00), whereas the least complex excerpt is KS4 with 0.73 confidence.", "perturbed_statement": "Although 85.2% of text chunks were predicted for KS3 and only roughly 15% for KS5, the highlighted most complex excerpt is classified KS5 at full confidence (1.00), whereas the least complex excerpt is KS4 
with 0.73 confidence.", "perturbed_explanation": "The perturbed statement incorrectly claims that about 15% of text chunks are classified as KS5. In the bar chart, KS5 accounts for only around 5% of chunks (with KS3 at 85.185% and KS4 around 10%), so the 15% figure contradicts the visualised distribution.", "claim": "Although 85.2% of text chunks were predicted for KS3 and only roughly 5% for KS5, the highlighted most complex excerpt is classified KS5 at full confidence (1.00), whereas the least complex excerpt is KS4 with 0.73 confidence.", "label": true }, { "paperid": "2411.14751v1", "paper_path": "./SciVer/papers/2411.14751v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.14751v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.14751v1-Table4-1.png", "section": [ "4.3" ], "request_id": 263, "origin_statement": "At the highest SDMap noise level (level-8), the baseline model’s mAP falls to about 22% (Figure 3), while the rot5_std5_prob0.5‐trained model retains about 29%. Table 4 shows this noisy SD model still achieves 35.1% mAP on clean maps versus 40.2% for the noise-free baseline.", "perturbed_statement": "At the highest SDMap noise level (level-8), the baseline model’s mAP falls to about 32% (Figure 3), while the rot5_std5_prob0.5-trained model retains about 29%. Table 4 shows this noisy SD model still achieves 35.1% mAP on clean maps versus 40.2% for the noise-free baseline.", "perturbed_explanation": "The perturbed statement incorrectly claims the baseline model’s mAP is about 32% at noise level-8, but Figure 3 shows it actually drops to around 22% at that noise level.", "claim": "At the highest SDMap noise level (level-8), the baseline model’s mAP falls to about 22% (Figure 3), while the rot5_std5_prob0.5‐trained model retains about 29%. Table 4 shows this noisy SD model still achieves 35.1% mAP on clean maps versus 40.2% for the noise-free baseline.", "label": true }, { "paperid": "2409.10604v4", "paper_path": "./SciVer/papers/2409.10604v4.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.10604v4_figure_2.png", "item2_path": "./SciVer/images/2409.10604v4-Table3-1.png", "section": [ "5", "3.3" ], "request_id": 298, "origin_statement": "Spectra in Figure 2 show that the central Lyα peak in image 1.4 (blue) reaches ∼0.16 normalized flux, ≈30% higher than image 200.8 (green) at ∼0.12. Table 3 indicates image 200.8 uniquely has Mask #2 at [385–455] km s⁻¹, 15 km s⁻¹ lower than 1.4's [400–460] km s⁻¹.", "perturbed_statement": "Spectra in Figure 2 show that the central Lyα peak in image 1.4 (blue) reaches ∼0.16 normalized flux, ≈50% higher than image 200.8 (green) at ∼0.12. Table 3 indicates image 200.8 uniquely has Mask #2 at [360–410] km s⁻¹, 40 km s⁻¹ lower than 1.4's [400–460] km s⁻¹.", "perturbed_explanation": "The perturbation is incorrect because the actual flux ratio between the central peaks of images 1.4 and 200.8 is ≈30%, not 50%. Moreover, Table 3 lists Mask #2 for image 200.8 as [385–455] km s⁻¹, not [360–410] km s⁻¹, and the offset is 15 km s⁻¹ rather than 40 km s⁻¹.", "claim": "Spectra in Figure 2 show that the central Lyα peak in image 1.4 (blue) reaches ∼0.16 normalized flux, ≈30% higher than image 200.8 (green) at ∼0.12. 
Table 3 indicates image 200.8 uniquely has Mask #2 at [385–455] km s⁻¹, 15 km s⁻¹ lower than 1.4's [400–460] km s⁻¹.", "label": true }, { "paperid": "2411.14533v1", "paper_path": "./SciVer/papers/2411.14533v1.json", "claim_type": "parallel", "item1": "11(b)", "item2": "14", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.14533v1_figure_11(b).png", "item2_path": "./SciVer/images/2411.14533v1-Table14-1.png", "section": [ "6.1" ], "request_id": 387, "origin_statement": "In the boxplot, the bipartite class shows a median deviation of about 6% in favor of BRKGA+R+LS, whereas Table 14 reports just a 0.17% mean improvement in average colors for complement of bipartite graphs.", "perturbed_statement": "In the boxplot, the bipartite class shows a median deviation of about 4% in favor of BRKGA+R+LS, whereas Table 14 reports just a 1.7% mean improvement in average colors for complement of bipartite graphs.", "perturbed_explanation": "The perturbation is wrong because the boxplot’s median deviation for the bipartite class is approximately 6%, not 4%, and Table 14 shows a mean improvement of 0.17% in average colors for complement of bipartite graphs, not 1.7%.", "claim": "In the boxplot, the bipartite class shows a median deviation of about 6% in favor of BRKGA+R+LS, whereas Table 14 reports just a 0.17% mean improvement in average colors for complement of bipartite graphs.", "label": true }, { "paperid": "2409.05314v2", "paper_path": "./SciVer/papers/2409.05314v2.json", "claim_type": "parallel", "item1": "8", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05314v2_figure_8.png", "item2_path": "./SciVer/images/2409.05314v2-Table6-1.png", "section": [ "8.3" ], "request_id": 393, "origin_statement": "LLama-3-8B-Tele’s training loss decreases from about 0.98 at the start to around 0.68 by epoch 1, and its Tele-Instruct score (34.51) outperforms its Tele score (29.60) by 4.91 points.", "perturbed_statement": "LLama-3-8B-Tele’s training loss decreases from about 0.88 at the start to around 0.68 by epoch 1, and its Tele-Instruct score (34.51) outperforms its Tele score (29.60) by 4.91 points.", "perturbed_explanation": "The perturbed statement incorrectly states the initial training loss as 0.88, whereas Figure 8 shows that LLama-3-8B-Tele’s training loss begins at approximately 0.98.", "claim": "LLama-3-8B-Tele’s training loss decreases from about 0.98 at the start to around 0.68 by epoch 1, and its Tele-Instruct score (34.51) outperforms its Tele score (29.60) by 4.91 points.", "label": true }, { "paperid": "2411.09586v1", "paper_path": "./SciVer/papers/2411.09586v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09586v1_figure_4(b).png", "item2_path": "./SciVer/images/2411.09586v1_figure_4(c).png", "section": [ "5.1", "6.3" ], "request_id": 456, "origin_statement": "Over 95% of districts with zero USD p.c. windfalls (white on the windfall map) coincide with untreated controls (white on the treatment map).", "perturbed_statement": "All districts with zero USD p.c. 
windfalls coincide with untreated controls on the treatment map.", "perturbed_explanation": "The perturbed statement is wrong because the treatment map shows several districts that are white in the windfall map (zero windfalls) but colored brown in the treatment map, indicating they are treated rather than untreated.", "claim": "Over 95% of districts with zero USD p.c. windfalls (white on the windfall map) coincide with untreated controls (white on the treatment map).", "label": true }, { "paperid": "2409.17201v2", "paper_path": "./SciVer/papers/2409.17201v2.json", "claim_type": "parallel", "item1": "2(d)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17201v2_figure_2(d).png", "item2_path": "./SciVer/images/2409.17201v2_figure_3(b).png", "section": [ "6.2" ], "request_id": 465, "origin_statement": "Between iteration 0 and 20, FL in the CNN-Momentum model improves accuracy from about 0.855 to 0.96, while in CNN2-Momentum the loss decreases from roughly 2.02 to 1.70 over the same global iterations.", "perturbed_statement": "Between iteration 0 and 20, FL in the CNN-Momentum model improves accuracy from about 0.855 to 0.98, while in CNN2-Momentum the loss decreases from roughly 2.02 to 1.70 over the same global iterations.", "perturbed_explanation": "The perturbed statement claims the CNN-Momentum accuracy reaches 0.98 by iteration 20, but Figure 2 shows it is only about 0.96 at iteration 20, contradicting the actual accuracy curve.", "claim": "Between iteration 0 and 20, FL in the CNN-Momentum model improves accuracy from about 0.855 to 0.96, while in CNN2-Momentum the loss decreases from roughly 2.02 to 1.70 over the same global iterations.", "label": true }, { "paperid": "2410.23300v2", "paper_path": "./SciVer/papers/2410.23300v2.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.23300v2-Table2-1.png", "item2_path": "./SciVer/images/2410.23300v2-Table3-1.png", "section": [ "6.2" ], "request_id": 612, "origin_statement": "For AmazonBook, MF's stable rank warm-start cuts training time by 66.03% (from 426.6 to 145.0 minutes) without NDCG@20 change, whereas LightGCN only reduces runtime by 10.27% but increases NDCG@20 by 4.69%.", "perturbed_statement": "For AmazonBook, MF's stable rank warm-start cuts training time by 56.03% (from 426.6 to 145.0 minutes) without NDCG@20 change, whereas LightGCN only reduces runtime by 10.27% but increases NDCG@20 by 4.69%.", "perturbed_explanation": "The perturbed statement misreports the MF runtime reduction on AmazonBook. 
Table 2 shows a 66.03% decrease, not 56.03%, in runtime when using the stable rank warm-start.", "claim": "For AmazonBook, MF's stable rank warm-start cuts training time by 66.03% (from 426.6 to 145.0 minutes) without NDCG@20 change, whereas LightGCN only reduces runtime by 10.27% but increases NDCG@20 by 4.69%.", "label": true }, { "paperid": "2409.09586v1", "paper_path": "./SciVer/papers/2409.09586v1.json", "claim_type": "parallel", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.09586v1_figure_6.png", "item2_path": "./SciVer/images/2409.09586v1_figure_7.png", "section": [ "5.3", "4.3.1" ], "request_id": 625, "origin_statement": "In the collaborative writing vignette, LMs assigned around 90% of responses to 'Agree' or 'Strongly Agree' for Desired Values for AI Tools, while humans gave roughly 50%, and on the fundamental values scatter, LMs rated Interpretability about 0.7 points higher (y=1.5 vs x=0.8).", "perturbed_statement": "In the collaborative writing vignette, LMs assigned around 60% of responses to 'Agree' or 'Strongly Agree' for Desired Values for AI Tools, while humans gave roughly 50%, and on the fundamental values scatter, LMs rated Interpretability about 0.7 points higher (y=1.5 vs x=0.8).", "perturbed_explanation": "The perturbed statement incorrectly reports that LMs gave around 60% 'Agree' or 'Strongly Agree' for Desired Values for AI Tools in the collaborative writing vignette. Figure 7(B) shows LMs actually assigned about 90% of responses in these top two categories.", "claim": "In the collaborative writing vignette, LMs assigned around 90% of responses to 'Agree' or 'Strongly Agree' for Desired Values for AI Tools, while humans gave roughly 50%, and on the fundamental values scatter, LMs rated Interpretability about 0.7 points higher (y=1.5 vs x=0.8).", "label": true }, { "paperid": "2410.12831v1", "paper_path": "./SciVer/papers/2410.12831v1.json", "claim_type": "parallel", "item1": "3", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.12831v1_figure_3.png", "item2_path": "./SciVer/images/2410.12831v1_figure_9.png", "section": [ "3.1" ], "request_id": 654, "origin_statement": "In Fig. 3, mask ID:4 appears in two of the six generated anatomy‐agnostic queries, while in Fig. 9 the Liver has roughly 1,600 labeled images, making it the most represented organ.", "perturbed_statement": "In Fig. 3, mask ID:4 appears in three of the six generated anatomy‐agnostic queries, while in Fig. 9 the Liver has roughly 1,600 labeled images, making it the most represented organ.", "perturbed_explanation": "This statement is incorrect because in Fig. 3 mask ID:4 is referenced only twice (for the “largest” and “right‐most” queries), not three times.", "claim": "In Fig. 3, mask ID:4 appears in two of the six generated anatomy‐agnostic queries, while in Fig. 
9 the Liver has roughly 1,600 labeled images, making it the most represented organ.", "label": true }, { "paperid": "2410.16255v1", "paper_path": "./SciVer/papers/2410.16255v1.json", "claim_type": "parallel", "item1": "8(a)", "item2": "8(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.16255v1_figure_8(a).png", "item2_path": "./SciVer/images/2410.16255v1_figure_8(c).png", "section": [ "4.3" ], "request_id": 666, "origin_statement": "At α=0.9, β=0.995 the heatmaps show a detection AUROC of 83.85% and pixel AUPRO of 73.06%, representing a 10.79 point gap between detection and localization metrics.", "perturbed_statement": "At α=0.9, β=0.995 the heatmaps show a detection AUROC of 83.85% and pixel AUPRO of 74.06%, representing a 9.79 point gap between detection and localization metrics.", "perturbed_explanation": "The perturbation incorrectly reports the pixel AUPRO as 74.06% (and thus a 9.79 point gap). In the second heatmap at α=0.9, β=0.995, the true pixel AUPRO is 73.06%, yielding a 10.79 point difference from the AUROC, not 9.79.", "claim": "At α=0.9, β=0.995 the heatmaps show a detection AUROC of 83.85% and pixel AUPRO of 73.06%, representing a 10.79 point gap between detection and localization metrics.", "label": true }, { "paperid": "2409.11040v1", "paper_path": "./SciVer/papers/2409.11040v1.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.11040v1-Table2-1.png", "item2_path": "./SciVer/images/2409.11040v1-Table4-1.png", "section": [ "5.1" ], "request_id": 695, "origin_statement": "With the full-data Poisson estimate for β2 at 2.326, the zero-inflation algorithm’s success rate falls from 83.82% at 20% missing to 72.38% at 50%, a decline of 11.44 percentage points.", "perturbed_statement": "With the full-data Poisson estimate for β2 at 2.500, the zero-inflation algorithm’s success rate falls from 83.82% at 20% missing to 72.38% at 50%, a decline of 11.44 percentage points.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 shows the Poisson estimate for β2 is 2.326, not 2.500.", "claim": "With the full-data Poisson estimate for β2 at 2.326, the zero-inflation algorithm’s success rate falls from 83.82% at 20% missing to 72.38% at 50%, a decline of 11.44 percentage points.", "label": true }, { "paperid": "2410.10054v1", "paper_path": "./SciVer/papers/2410.10054v1.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.10054v1_figure_3.png", "item2_path": "./SciVer/images/2410.10054v1-Table3-1.png", "section": [ "4.2" ], "request_id": 698, "origin_statement": "For LLaMA-2, AlphaLoRA assigns up to 9 experts in layers 17–20 compared to 6 by MoLA(2468), correlating with a 3.67% higher MultiArith zero-shot accuracy (91.33% vs 87.66%).", "perturbed_statement": "For LLaMA-2, AlphaLoRA assigns up to 9 experts in layers 17–20 compared to 8 by MoLA(2468), correlating with a 3.67% higher MultiArith zero-shot accuracy (91.33% vs 87.66%).", "perturbed_explanation": "The perturbation incorrectly states that MoLA(2468) allocates 8 experts in layers 17–20; Figure 3 shows it allocates only 6 experts per layer in that range.", "claim": "For LLaMA-2, AlphaLoRA assigns up to 9 experts in layers 17–20 compared to 6 by MoLA(2468), correlating with a 3.67% higher MultiArith zero-shot accuracy (91.33% vs 87.66%).", "label": true }, { "paperid": "2410.01727v1", 
"paper_path": "./SciVer/papers/2410.01727v1.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.01727v1_figure_4.png", "item2_path": "./SciVer/images/2410.01727v1-Table2-1.png", "section": [ "5.1" ], "request_id": 734, "origin_statement": "On XES3G5M, DKVMN gains a +6.11% relative AUC boost from KCQRL (Table 2), and in Fig. 4 its non-accumulative AUC at 20% observed history increases from ~77.2% to ~79.0% with our framework.", "perturbed_statement": "On XES3G5M, DKVMN gains a +5.11% relative AUC boost from KCQRL (Table 2), and in Fig. 4 its non-accumulative AUC at 20% observed history increases from ~77.2% to ~79.0% with our framework.", "perturbed_explanation": "Table 2 reports a +6.11% relative improvement for DKVMN on XES3G5M, not +5.11%. The perturbed +5.11% contradicts the actual value in the table.", "claim": "On XES3G5M, DKVMN gains a +6.11% relative AUC boost from KCQRL (Table 2), and in Fig. 4 its non-accumulative AUC at 20% observed history increases from ~77.2% to ~79.0% with our framework.", "label": true }, { "paperid": "2411.11641v2", "paper_path": "./SciVer/papers/2411.11641v2.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.11641v2_figure_4.png", "item2_path": "./SciVer/images/2411.11641v2-Table3-1.png", "section": [ "4.4.2" ], "request_id": 807, "origin_statement": "Table 3 shows that the F1 score on SMD increases from 80.29% to 81.76% when using the pre-trained LLM encoder, and in Figure 4, the LLM feature trace reaches peaks of about 7 compared to less than 1 in the original data during the anomaly interval.", "perturbed_statement": "Table 3 shows that the F1 score on SMD decreases from 80.29% to 79.50% when using the pre-trained LLM encoder, and in Figure 4, the LLM feature trace reaches peaks of about 7 compared to less than 1 in the original data during the anomaly interval.", "perturbed_explanation": "The perturbed statement incorrectly claims a drop to 79.50% for SMD; Table 3 actually reports an increase from 80.29% to 81.76% when the pre-trained LLM encoder is applied, so the stated decrease contradicts the table.", "claim": "Table 3 shows that the F1 score on SMD increases from 80.29% to 81.76% when using the pre-trained LLM encoder, and in Figure 4, the LLM feature trace reaches peaks of about 7 compared to less than 1 in the original data during the anomaly interval.", "label": true }, { "paperid": "2409.19257v1", "paper_path": "./SciVer/papers/2409.19257v1.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19257v1_figure_3.png", "item2_path": "./SciVer/images/2409.19257v1-Table3-1.png", "section": [ "6.3" ], "request_id": 860, "origin_statement": "The Spearman correlation between Pick-up Artists and The Red Pill is 0.65, and Table 3 shows PuA's top words include 'daygame' and 'seddit', whereas TRP lists 'asktrp' and 'hamstering'.", "perturbed_statement": "The Spearman correlation between Pick-up Artists and The Red Pill is 0.85, and Table 3 shows PuA's top words include 'blackpill' and 'seddit', whereas TRP lists 'asktrp' and 'hamstering'.", "perturbed_explanation": "The stated correlation of 0.85 contradicts Figure 3, which shows a correlation around 0.65 between PuA and TRP. 
Additionally, 'blackpill' is not listed under PuA in Table 3—it is a top term for Incels.", "claim": "The Spearman correlation between Pick-up Artists and The Red Pill is 0.65, and Table 3 shows PuA's top words include 'daygame' and 'seddit', whereas TRP lists 'asktrp' and 'hamstering'.", "label": true }, { "paperid": "2409.16914v1", "paper_path": "./SciVer/papers/2409.16914v1.json", "claim_type": "parallel", "item1": "5", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16914v1_figure_5.png", "item2_path": "./SciVer/images/2409.16914v1_figure_6(b).png", "section": [ "4.4" ], "request_id": 882, "origin_statement": "On WritingPrompts at a target length of 135 tokens, Fast-DetectGPT+TOCSIN boosts GPT-4 detection AUROC to about 0.94, while on GPT-2 passages with n=10 copies and ρ=1.5%, it achieves nearly 0.999 AUROC.", "perturbed_statement": "On WritingPrompts at a target length of 45 tokens, Fast-DetectGPT+TOCSIN boosts GPT-4 detection AUROC to about 0.94, while on GPT-2 passages with n=10 copies and ρ=1.5%, it achieves nearly 0.999 AUROC.", "perturbed_explanation": "The perturbed statement wrongly claims an AUROC of ~0.94 for GPT-4 at 45 tokens. Figure 5 shows that at 45 tokens the AUROC is only around 0.65 (Fast-DetectGPT) and ~0.66 (+TOCSIN), not 0.94.", "claim": "On WritingPrompts at a target length of 135 tokens, Fast-DetectGPT+TOCSIN boosts GPT-4 detection AUROC to about 0.94, while on GPT-2 passages with n=10 copies and ρ=1.5%, it achieves nearly 0.999 AUROC.", "label": true }, { "paperid": "2409.10944v1", "paper_path": "./SciVer/papers/2409.10944v1.json", "claim_type": "parallel", "item1": "1(c)", "item2": "1(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10944v1_figure_1(c).png", "item2_path": "./SciVer/images/2409.10944v1_figure_1(d).png", "section": [ "1" ], "request_id": 901, "origin_statement": "In Contrasformer t-SNE, CALTECH (blue) subjects in (c) are evenly split between the lower-left and upper-right quadrants (~50% each), whereas scanning duration 150 s (green) subjects in (d) predominantly cluster in the upper half (~60%).", "perturbed_statement": "In Contrasformer t-SNE, CALTECH (blue) subjects in (c) are evenly split between the lower-left and upper-right quadrants (~50% each), whereas scanning duration 150 s (green) subjects in (d) predominantly cluster in the lower half (~60%).", "perturbed_explanation": "The perturbation incorrectly states that 150 s (green) subjects cluster in the lower half of the t-SNE plot. 
In the actual figure (d), the majority of green points occupy the upper region, not the lower half, contradicting the stated distribution.", "claim": "In Contrasformer t-SNE, CALTECH (blue) subjects in (c) are evenly split between the lower-left and upper-right quadrants (~50% each), whereas scanning duration 150 s (green) subjects in (d) predominantly cluster in the upper half (~60%).", "label": true }, { "paperid": "2409.06178v1", "paper_path": "./SciVer/papers/2409.06178v1.json", "claim_type": "parallel", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06178v1_figure_7.png", "item2_path": "./SciVer/images/2409.06178v1-Table3-1.png", "section": [ "6.5" ], "request_id": 904, "origin_statement": "SQLuc users had a 70% correct completion rate overall and averaged 1.39 correctly completed hard tasks, nearly 2.6 times DIY’s 0.54 hard tasks.", "perturbed_statement": "SQLuc users had a 65% correct completion rate overall and averaged 1.39 correctly completed hard tasks, nearly 2.6 times DIY’s 0.54 hard tasks.", "perturbed_explanation": "The perturbed statement misstates the overall correct completion rate: Figure 7 shows SQLuc’s correct rate is 70%, not 65%.", "claim": "SQLuc users had a 70% correct completion rate overall and averaged 1.39 correctly completed hard tasks, nearly 2.6 times DIY’s 0.54 hard tasks.", "label": true }, { "paperid": "2410.22082v1", "paper_path": "./SciVer/papers/2410.22082v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.22082v1-Table3-1.png", "item2_path": "./SciVer/images/2410.22082v1-Table4-1.png", "section": [ "4.4" ], "request_id": 910, "origin_statement": "Gemma-7B achieves a 35.0 percentage point absolute improvement in execution accuracy on Spider-dev with Actor-Critic (from 30.3% to 65.3%), exactly matching its 35.0 pp gain on Spider-DK (from 23.2% to 58.2%), but only a 31.1 pp gain on Spider-SYN.", "perturbed_statement": "Gemma-7B achieves a 35.0 percentage point absolute improvement in execution accuracy on Spider-dev with Actor-Critic (from 30.3% to 65.3%), exactly matching its 34.0 pp gain on Spider-DK (from 23.2% to 58.2%), but only a 31.1 pp gain on Spider-SYN.", "perturbed_explanation": "The perturbed statement incorrectly reports the Spider-DK absolute improvement as 34.0 pp. According to Table 4, Gemma-7B improves from 23.2% to 58.2% on Spider-DK, which is a 35.0 pp increase, not 34.0 pp.", "claim": "Gemma-7B achieves a 35.0 percentage point absolute improvement in execution accuracy on Spider-dev with Actor-Critic (from 30.3% to 65.3%), exactly matching its 35.0 pp gain on Spider-DK (from 23.2% to 58.2%), but only a 31.1 pp gain on Spider-SYN.", "label": true }, { "paperid": "2410.04223v1", "paper_path": "./SciVer/papers/2410.04223v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04223v1-Table2-1.png", "item2_path": "./SciVer/images/2410.04223v1-Table3-1.png", "section": [ "5.3.2" ], "request_id": 915, "origin_statement": "Llamole's Qwen2-7B achieves 17.9% material retrosynthetic success (Table 2). In contrast, a base Qwen2 model with unlimited planning time (Table 3) reaches 27.3%, outperforming Llamole by 9.4 percentage points.", "perturbed_statement": "Llamole's Qwen2-7B achieves 27.9% material retrosynthetic success (Table 2). 
In contrast, a base Qwen2 model with unlimited planning time (Table 3) reaches 27.3%, underperforming Llamole by 0.6 percentage points.", "perturbed_explanation": "Table 2 actually reports Llamole’s Qwen2-7B material success as 17.9%, not 27.9%, so the perturbed statement misstates the value from Table 2.", "claim": "Llamole's Qwen2-7B achieves 17.9% material retrosynthetic success (Table 2). In contrast, a base Qwen2 model with unlimited planning time (Table 3) reaches 27.3%, outperforming Llamole by 9.4 percentage points.", "label": true }, { "paperid": "2411.11699v1", "paper_path": "./SciVer/papers/2411.11699v1.json", "claim_type": "parallel", "item1": "9", "item2": "10", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11699v1_figure_9.png", "item2_path": "./SciVer/images/2411.11699v1_figure_10.png", "section": [ "7.2" ], "request_id": 917, "origin_statement": "On TS2, the LiTformer’s intrinsic output peaks at about 0.9 V around 200 ps while crosstalk peaks near 17 mV. In the 2-link TX waveform, this crosstalk causes an early baseline offset of roughly 0.15 V from 50 to 350 ps, about nine times larger.", "perturbed_statement": "On TS2, the LiTformer’s intrinsic output peaks at about 0.9 V around 200 ps while crosstalk peaks near 25 mV. In the 2-link TX waveform, this crosstalk causes an early baseline offset of roughly 0.15 V from 50 to 350 ps, about nine times larger.", "perturbed_explanation": "The perturbation incorrectly states the TS2 crosstalk peak as 25 mV, whereas Figure 9(a) shows it peaks at approximately 17 mV.", "claim": "On TS2, the LiTformer’s intrinsic output peaks at about 0.9 V around 200 ps while crosstalk peaks near 17 mV. In the 2-link TX waveform, this crosstalk causes an early baseline offset of roughly 0.15 V from 50 to 350 ps, about nine times larger.", "label": true }, { "paperid": "2409.16561v1", "paper_path": "./SciVer/papers/2409.16561v1.json", "claim_type": "parallel", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16561v1_figure_5.png", "item2_path": "./SciVer/images/2409.16561v1_figure_7.png", "section": [ "4.3", "4.3.2" ], "request_id": 935, "origin_statement": "In MOCHA (C3), 14 participants rated knowledge gained about the data at 4 or 5, compared to 9 in C1; yet only 3 participants in C3 rated mental demand as 6 or 7 on the NASA-TLX, indicating improved insight without increased cognitive load.", "perturbed_statement": "In MOCHA (C3), 9 participants rated knowledge gained about the data at 4 or 5, compared to 14 in C1; yet 6 participants in C3 rated mental demand as 6 or 7 on the NASA-TLX, indicating improved insight with higher cognitive load.", "perturbed_explanation": "The perturbed statement misreports the questionnaire and TLX data. Figure 5 shows 14 participants in C3 actually rated knowledge gained at levels 4–5, not 9. 
Figure 7 shows only 3 participants rated mental demand at 6 or 7 in C3, not 6, so the claim contradicts the visualized counts.", "claim": "In MOCHA (C3), 14 participants rated knowledge gained about the data at 4 or 5, compared to 9 in C1; yet only 3 participants in C3 rated mental demand as 6 or 7 on the NASA-TLX, indicating improved insight without increased cognitive load.", "label": true }, { "paperid": "2410.16853v1", "paper_path": "./SciVer/papers/2410.16853v1.json", "claim_type": "parallel", "item1": "7(b)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.16853v1_figure_7(b).png", "item2_path": "./SciVer/images/2410.16853v1-Table5-1.png", "section": [ "4.4" ], "request_id": 1003, "origin_statement": "On Flickr30K, reducing w_inter from 1 to 0.05 boosts DIAS’s rSum from approximately 520 to 525 (≈5 points), which is less than half the 12-point gain observed when switching DIAS’s backbone from BiGRU (476.3) to BERT (488.3).", "perturbed_statement": "On Flickr30K, reducing w_inter from 1 to 0.05 boosts DIAS’s rSum from approximately 518 to 525 (≈7 points), which is less than half the 15-point gain observed when switching DIAS’s backbone from BiGRU (476.3) to BERT (488.3).", "perturbed_explanation": "The statement is incorrect because the figure shows rSum at w_inter=1 is about 520 (not 518), so the increase is ≈5 points, not 7. Also, the DIAS gain from BiGRU to BERT is 12 points (476.3→488.3), not 15 points.", "claim": "On Flickr30K, reducing w_inter from 1 to 0.05 boosts DIAS’s rSum from approximately 520 to 525 (≈5 points), which is less than half the 12-point gain observed when switching DIAS’s backbone from BiGRU (476.3) to BERT (488.3).", "label": true }, { "paperid": "2410.10021v1", "paper_path": "./SciVer/papers/2410.10021v1.json", "claim_type": "parallel", "item1": "2(c)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.10021v1_figure_2(c).png", "item2_path": "./SciVer/images/2410.10021v1_figure_2(d).png", "section": [ "4.2" ], "request_id": 1019, "origin_statement": "In both Product Review parameter sweeps, OCEAN achieves perfect MRR (1.0) at intermediate parameter settings (x = –2 and 0), while performance falls to approximately 0.87 at the extreme values x = –5 or x = 5.", "perturbed_statement": "In both Product Review parameter sweeps, OCEAN achieves perfect MRR (1.0) at intermediate parameter settings (x = –2 and 0), while performance falls to approximately 0.75 at the extreme values x = –5 or x = 5.", "perturbed_explanation": "The perturbed claim misstates the drop in MRR: both plots show MRR falling to about 0.87 at x = –5 and x = 5, not 0.75. 
The visual markers at the extremes are clearly around 0.87.", "claim": "In both Product Review parameter sweeps, OCEAN achieves perfect MRR (1.0) at intermediate parameter settings (x = –2 and 0), while performance falls to approximately 0.87 at the extreme values x = –5 or x = 5.", "label": true }, { "paperid": "2411.02824v1", "paper_path": "./SciVer/papers/2411.02824v1.json", "claim_type": "parallel", "item1": "8(a)", "item2": "10", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02824v1_figure_8(a).png", "item2_path": "./SciVer/images/2411.02824v1-Table10-1.png", "section": [ "5.2" ], "request_id": 1026, "origin_statement": "At 4K state dimension, LAST achieves ~55% accuracy, outperforming Uniform H∞ (~30%) and Global H∞ (~25%); Table 10 shows LAST’s 10%-pruned S4D model yields 94.61% vs 94.36% for Uniform H∞.", "perturbed_statement": "At 4K state dimension, LAST achieves ~75% accuracy, outperforming Uniform H∞ (~30%) and Global H∞ (~25%); Table 10 shows LAST’s 10%-pruned S4D model yields 94.86% vs 94.36% for Uniform H∞.", "perturbed_explanation": "Figure 8 indicates that at 4K state dimension LAST attains only about 55% accuracy, not 75%. Moreover, Table 10 reports LAST’s pruned S4D accuracy as 94.61%, not 94.86%.", "claim": "At 4K state dimension, LAST achieves ~55% accuracy, outperforming Uniform H∞ (~30%) and Global H∞ (~25%); Table 10 shows LAST’s 10%-pruned S4D model yields 94.61% vs 94.36% for Uniform H∞.", "label": true }, { "paperid": "2410.21211v1", "paper_path": "./SciVer/papers/2410.21211v1.json", "claim_type": "parallel", "item1": "2", "item2": "3(e)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21211v1_figure_2.png", "item2_path": "./SciVer/images/2410.21211v1_figure_3(e).png", "section": [ "5.3" ], "request_id": 1049, "origin_statement": "Figure 3’s Mamba segmentation shows green areas covering roughly 60% versus pink furniture at about 15%, while Figure 2’s Pure Mamba block uses two LayerNorm operations around the Mamba layer.", "perturbed_statement": "Figure 3’s Mamba segmentation shows green areas covering roughly 70% versus pink furniture at about 15%, while Figure 2’s Pure Mamba block uses a single LayerNorm operation around the Mamba layer.", "perturbed_explanation": "The perturbed statement misreports two facts: in Figure 3 the green region occupies about 60%, not 70%, and in Figure 2 the Pure Mamba block contains two LayerNorm operations, not one.", "claim": "Figure 3’s Mamba segmentation shows green areas covering roughly 60% versus pink furniture at about 15%, while Figure 2’s Pure Mamba block uses two LayerNorm operations around the Mamba layer.", "label": true }, { "paperid": "2411.11240v2", "paper_path": "./SciVer/papers/2411.11240v2.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.11240v2_figure_4.png", "item2_path": "./SciVer/images/2411.11240v2-Table5-1.png", "section": [ "5.5" ], "request_id": 1054, "origin_statement": "On Steam Game, D3Rec’s inference latency is 9.06 s, 3.62 s higher than MultVAE’s 5.44 s. However, when trained with 30% noise, D3Rec’s Recall@20 only falls by 1.23%, compared to MultVAE’s 3.04% drop.", "perturbed_statement": "On Steam Game, D3Rec’s inference latency is 6.58 s, 1.14 s higher than MultVAE’s 5.44 s. 
However, when trained with 30% noise, D3Rec’s Recall@20 only falls by 1.23%, compared to MultVAE’s 3.04% drop.", "perturbed_explanation": "The perturbed statement misstates D3Rec’s inference time: Figure 4 shows its latency on Steam Game is 9.06 s (not 6.58 s), making the claimed 6.58 s and the 1.14 s difference with MultVAE incorrect.", "claim": "On Steam Game, D3Rec’s inference latency is 9.06 s, 3.62 s higher than MultVAE’s 5.44 s. However, when trained with 30% noise, D3Rec’s Recall@20 only falls by 1.23%, compared to MultVAE’s 3.04% drop.", "label": true }, { "paperid": "2410.11851v1", "paper_path": "./SciVer/papers/2410.11851v1.json", "claim_type": "parallel", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.11851v1_figure_4.png", "item2_path": "./SciVer/images/2410.11851v1-Table1-1.png", "section": [ "4.1" ], "request_id": 1063, "origin_statement": "Using the Boltzmann constant k = 1.38×10⁻²³ kg·m²·K⁻¹ from Table 1 and the 6.17×10⁻⁸ K normalization in Fig. 4, a black hole with MBH/M⊙ = 5 has TH ≈ 0.2×6.17×10⁻⁸ K ≈ 1.23×10⁻⁸ K.", "perturbed_statement": "Using the Boltzmann constant k = 1.38×10⁻²² kg·m²·K⁻¹ from Table 1 and the 6.17×10⁻⁸ K normalization in Fig. 4, a black hole with MBH/M⊙ = 5 has TH ≈ 0.2×6.17×10⁻⁸ K ≈ 1.23×10⁻⁸ K.", "perturbed_explanation": "The perturbed statement gives k = 1.38×10⁻²² kg·m²·K⁻¹, but Table 1 lists the Boltzmann constant as 1.38×10⁻²³ kg·m²·K⁻¹, so the altered value contradicts the provided table.", "claim": "Using the Boltzmann constant k = 1.38×10⁻²³ kg·m²·K⁻¹ from Table 1 and the 6.17×10⁻⁸ K normalization in Fig. 4, a black hole with MBH/M⊙ = 5 has TH ≈ 0.2×6.17×10⁻⁸ K ≈ 1.23×10⁻⁸ K.", "label": true }, { "paperid": "2409.01685v1", "paper_path": "./SciVer/papers/2409.01685v1.json", "claim_type": "parallel", "item1": "7", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01685v1_figure_7.png", "item2_path": "./SciVer/images/2409.01685v1-Table4-1.png", "section": [ "3.3" ], "request_id": 1071, "origin_statement": "On the test set, XGBoost achieved an AUC of 0.9228 with 95% CI [0.8748–0.9613], exceeding Random Forest’s AUC of 0.8727 by 0.0501, while Random Forest recorded a higher accuracy (0.9212) than XGBoost (0.8966).", "perturbed_statement": "On the test set, XGBoost achieved an AUC of 0.9128 with 95% CI [0.8748–0.9613], exceeding Random Forest’s AUC of 0.8727 by 0.0401, while Random Forest recorded a higher accuracy (0.9212) than XGBoost (0.8966).", "perturbed_explanation": "The perturbed statement misreports XGBoost’s AUC as 0.9128 and the AUC difference as 0.0401. 
According to Table 4 and Figure 7, XGBoost’s true test-set AUC is 0.9228, making the actual difference versus Random Forest 0.0501, not 0.0401.", "claim": "On the test set, XGBoost achieved an AUC of 0.9228 with 95% CI [0.8748–0.9613], exceeding Random Forest’s AUC of 0.8727 by 0.0501, while Random Forest recorded a higher accuracy (0.9212) than XGBoost (0.8966).", "label": true }, { "paperid": "2411.01705v1", "paper_path": "./SciVer/papers/2411.01705v1.json", "claim_type": "parallel", "item1": "2", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01705v1_figure_2.png", "item2_path": "./SciVer/images/2411.01705v1_figure_3(b).png", "section": [ "6.1.2" ], "request_id": 1083, "origin_statement": "On MedmcQA, the paraphrased extraction attack attains a 72.5% ASR with a 62.2 ROUGE, whereas the verbatim extraction with a two-word trigger boosts ROUGE by 29.45 points to 91.65.", "perturbed_statement": "On MedmcQA, the paraphrased extraction attack attains an 82.5% ASR with a 62.2 ROUGE, whereas the verbatim extraction with a two-word trigger boosts ROUGE by 29.45 points to 91.65.", "perturbed_explanation": "The paraphrased extraction ASR on MedmcQA is incorrectly stated as 82.5%; Figure 2 shows it is actually 72.5%, so the perturbed statement contradicts the true ASR value.", "claim": "On MedmcQA, the paraphrased extraction attack attains a 72.5% ASR with a 62.2 ROUGE, whereas the verbatim extraction with a two-word trigger boosts ROUGE by 29.45 points to 91.65.", "label": true }, { "paperid": "2410.18071v1", "paper_path": "./SciVer/papers/2410.18071v1.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.18071v1_figure_3.png", "item2_path": "./SciVer/images/2410.18071v1-Table2-1.png", "section": [ "5.2.1" ], "request_id": 1086, "origin_statement": "Figure 3 shows InternVL achieved roughly 240% accuracy improvement on the multiple_image_analysis task, yet Table 2 indicates it improved only 16 out of 30 tasks, corresponding to a 40.4% improvement ratio.", "perturbed_statement": "Figure 3 shows InternVL achieved roughly 240% accuracy improvement on the multiple_image_analysis task, yet Table 2 indicates it improved only 16 out of 30 tasks, corresponding to a 36.7% improvement ratio.", "perturbed_explanation": "The perturbed ratio \"36.7%\" contradicts Table 2, which explicitly reports a 40.4% improvement ratio for InternVL, not 36.7%.", "claim": "Figure 3 shows InternVL achieved roughly 240% accuracy improvement on the multiple_image_analysis task, yet Table 2 indicates it improved only 16 out of 30 tasks, corresponding to a 40.4% improvement ratio.", "label": true }, { "paperid": "2410.20441v1", "paper_path": "./SciVer/papers/2410.20441v1.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20441v1_figure_3.png", "item2_path": "./SciVer/images/2410.20441v1-Table3-1.png", "section": [ "4.2" ], "request_id": 1095, "origin_statement": "Because the inclusion’s shear modulus (2×10⁻³ GPa) is 22 000 times smaller than the matrix’s (44 GPa), the effective shear stiffness components C₄₄–C₆₆ drop to about 0.46 of the maximum Cₘₙ, while the normal components C₁₁–C₃₃ remain near 1.00.", "perturbed_statement": "Because the inclusion’s shear modulus (2×10⁻³ GPa) is 22 000 times smaller than the matrix’s (44 GPa), the effective shear stiffness components C₄₄–C₆₆ drop to about 0.75 of 
the maximum Cₘₙ, while the normal components C₁₁–C₃₃ remain near 1.00.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 3b shows the normalized shear stiffness components C₄₄, C₅₅, and C₆₆ at approximately 0.46, not 0.75, of the maximum Cₘₙ.", "claim": "Because the inclusion’s shear modulus (2×10⁻³ GPa) is 22 000 times smaller than the matrix’s (44 GPa), the effective shear stiffness components C₄₄–C₆₆ drop to about 0.46 of the maximum Cₘₙ, while the normal components C₁₁–C₃₃ remain near 1.00.", "label": true }, { "paperid": "2411.02848v1", "paper_path": "./SciVer/papers/2411.02848v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02848v1_figure_2.png", "item2_path": "./SciVer/images/2411.02848v1_figure_3.png", "section": [ "3.2" ], "request_id": 1113, "origin_statement": "The mel spectrogram dimension is 1200×400 frames, generated using a 50 ms frame length and 25 ms shift, and in the ShipsEar dataset, ro-ro ship recordings under < 50 m source range number 18, exceeding pilot ships (3) by a sixfold margin.", "perturbed_statement": "The mel spectrogram dimension is 1200×256 frames, generated using a 50 ms frame length and 25 ms shift, and in the ShipsEar dataset, ro-ro ship recordings under < 50 m source range number 18, exceeding pilot ships (3) by a sixfold margin.", "perturbed_explanation": "The perturbed statement incorrectly lists the mel spectrogram as 1200×256, whereas Figure 2 specifies a Mel filter bank size of 400, yielding a dimension of 1200×400.", "claim": "The mel spectrogram dimension is 1200×400 frames, generated using a 50 ms frame length and 25 ms shift, and in the ShipsEar dataset, ro-ro ship recordings under < 50 m source range number 18, exceeding pilot ships (3) by a sixfold margin.", "label": true }, { "paperid": "2409.15044v3", "paper_path": "./SciVer/papers/2409.15044v3.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15044v3_figure_3(a).png", "item2_path": "./SciVer/images/2409.15044v3_figure_3(c).png", "section": [ "5" ], "request_id": 1114, "origin_statement": "The D0→K−ηe+νe sample peaks at about 6 events per 20 MeV around Umiss=0, roughly triple the ~2-event maximum seen in D+→ηe+νe.", "perturbed_statement": "The D0→K−ηe+νe sample peaks at about 6 events per 20 MeV around Umiss=0, roughly triple the ~3-event maximum seen in D+→ηe+νe.", "perturbed_explanation": "The D+→ηe+νe distribution never reaches 3 events in any 20 MeV bin; its highest bin around Umiss=0 is approximately 2 events. Thus claiming a ~3-event maximum contradicts the plotted data.", "claim": "The D0→K−ηe+νe sample peaks at about 6 events per 20 MeV around Umiss=0, roughly triple the ~2-event maximum seen in D+→ηe+νe.", "label": true }, { "paperid": "2411.02646v1", "paper_path": "./SciVer/papers/2411.02646v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "8(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02646v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.02646v1_figure_8(b).png", "section": [ "7" ], "request_id": 1135, "origin_statement": "The two-cell EMI mesh (Fig. 4) shows exactly two brown intersection nodes on the green gap-junction interface, which is one-sixth of the roughly twelve preconditioned conjugate gradient iterations reached at the stimulus onset (green curve) in the 15-cell simulation (Fig. 
8).", "perturbed_statement": "The two-cell EMI mesh (Fig. 4) shows exactly two brown intersection nodes on the green gap-junction interface, which is one-sixth of the roughly fifteen preconditioned conjugate gradient iterations reached at the stimulus onset (green curve) in the 15-cell simulation (Fig. 8).", "perturbed_explanation": "Figure 8’s iteration history peaks at about twelve PCG iterations at stimulus onset, not fifteen. Hence stating fifteen iterations contradicts the graph’s data.", "claim": "The two-cell EMI mesh (Fig. 4) shows exactly two brown intersection nodes on the green gap-junction interface, which is one-sixth of the roughly twelve preconditioned conjugate gradient iterations reached at the stimulus onset (green curve) in the 15-cell simulation (Fig. 8).", "label": true }, { "paperid": "2409.12320v1", "paper_path": "./SciVer/papers/2409.12320v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.12320v1_figure_5.png", "item2_path": "./SciVer/images/2409.12320v1_figure_6.png", "section": [ "6.2" ], "request_id": 4, "origin_statement": "For Article Psy, the mean overall score improved from roughly -2.25 to -1.0 (an increase of ~1.25) after training, coinciding with a decrease in the average factual errors per post from ~1.25 to ~0.92.", "perturbed_statement": "For Article Psy, the mean overall score improved from roughly -2.25 to -0.5 (an increase of ~1.75) after training, coinciding with a decrease in the average factual errors per post from ~1.25 to ~0.60.", "perturbed_explanation": "The perturbed overall score of -0.5 contradicts Figure 5, which shows the post-training score is about -1.0, not -0.5. Likewise, Figure 6 indicates the factual errors fell to approximately 0.92 per post, not 0.60.", "claim": "For Article Psy, the mean overall score improved from roughly -2.25 to -1.0 (an increase of ~1.25) after training, coinciding with a decrease in the average factual errors per post from ~1.25 to ~0.92.", "label": true }, { "paperid": "2410.08207v1", "paper_path": "./SciVer/papers/2410.08207v1.json", "claim_type": "parallel", "item1": "2", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.08207v1_figure_2.png", "item2_path": "./SciVer/images/2410.08207v1_figure_7(b).png", "section": [ "3.2" ], "request_id": 7, "origin_statement": "In the uniform λ schedule (λ=0.3), the highest CLIP similarity reaches approximately 22.5 with a structure distance of about 0.065, while non-ODE editing samples two independent noise-injected trajectories (red and green arrows in Figure 2d) to reconstruct x0 without requiring an ODE path.", "perturbed_statement": "In the linear λ schedule (λ=0.3), the highest CLIP similarity reaches approximately 22.5 with a structure distance of about 0.065, while non-ODE editing samples two independent noise-injected trajectories (red and green arrows in Figure 2d) to reconstruct x0 without requiring an ODE path.", "perturbed_explanation": "This is incorrect because Figure 7 shows that the uniform schedule—not the linear schedule—achieves the peak CLIP similarity of ~22.5 and structure distance of ~0.065. 
The linear schedule actually peaks near CLIP ~22.2 with structure distance ~0.055.", "claim": "In the uniform λ schedule (λ=0.3), the highest CLIP similarity reaches approximately 22.5 with a structure distance of about 0.065, while non-ODE editing samples two independent noise-injected trajectories (red and green arrows in Figure 2d) to reconstruct x0 without requiring an ODE path.", "label": true }, { "paperid": "2410.16617v1", "paper_path": "./SciVer/papers/2410.16617v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.16617v1_figure_3.png", "item2_path": "./SciVer/images/2410.16617v1_figure_4.png", "section": [ "5", "4.2" ], "request_id": 10, "origin_statement": "In neighborhoods where average Zika favorability λ̄2i exceeds 1.5 (Figure 3), the MS-ZIARMN model estimates chikungunya presence probability jumps from near 0 to above 0.8 by bi-week 32 in Catumbi (Figure 4a).", "perturbed_statement": "In neighborhoods where average Zika favorability λ̄2i exceeds 1.5 (Figure 3), the MS-ZIARMN model estimates chikungunya presence probability jumps from near 0 to above 0.4 by bi-week 28 in Catumbi (Figure 4a).", "perturbed_explanation": "Figure 4a shows the probability of chikungunya presence stays below 0.1 until about bi-week 30 and only reaches around 0.9 by week 32, so it does not exceed 0.4 by bi-week 28 as stated.", "claim": "In neighborhoods where average Zika favorability λ̄2i exceeds 1.5 (Figure 3), the MS-ZIARMN model estimates chikungunya presence probability jumps from near 0 to above 0.8 by bi-week 32 in Catumbi (Figure 4a).", "label": true }, { "paperid": "2410.15939v1", "paper_path": "./SciVer/papers/2410.15939v1.json", "claim_type": "parallel", "item1": "9", "item2": "10", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.15939v1_figure_9.png", "item2_path": "./SciVer/images/2410.15939v1_figure_10.png", "section": [ "4.1" ], "request_id": 13, "origin_statement": "For the Source task, raising node count from 10 to 30 lowers F1 by 0.37 (0.69→0.32), compared to a 0.21 drop (0.58→0.37) when edge count doubles from 1× to 2×, indicating node complexity impacts Source queries more severely than edge complexity.", "perturbed_statement": "For the Source task, raising node count from 10 to 30 lowers F1 by 0.27 (0.69→0.42), compared to a 0.21 drop (0.58→0.37) when edge count doubles from 1× to 2×, indicating node complexity impacts Source queries more severely than edge complexity.", "perturbed_explanation": "The perturbed statement wrongly cites Source F1 at 30 nodes as 0.42, but the chart shows it is actually 0.32, making the drop 0.37 rather than 0.27.", "claim": "For the Source task, raising node count from 10 to 30 lowers F1 by 0.37 (0.69→0.32), compared to a 0.21 drop (0.58→0.37) when edge count doubles from 1× to 2×, indicating node complexity impacts Source queries more severely than edge complexity.", "label": true }, { "paperid": "2411.00119v2", "paper_path": "./SciVer/papers/2411.00119v2.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00119v2_figure_1.png", "item2_path": "./SciVer/images/2411.00119v2-Table1-1.png", "section": [ "3.1" ], "request_id": 20, "origin_statement": "With ratings θ_C=30, θ_A=20, θ_B=10, the induced ranking C≻A≻B aligns with the two votes of type C≻A≻B in the profile of five votes, yielding a total Kendall-tau distance of 5 across all five 
votes.", "perturbed_statement": "With ratings θ_C=30, θ_A=20, θ_B=10, the induced ranking C≻A≻B aligns with the three votes of type C≻A≻B in the profile of five votes, yielding a total Kendall-tau distance of 5 across all five votes.", "perturbed_explanation": "Table 1 shows that there are only two votes ranking C≻A≻B, not three, so claiming three such votes contradicts the preference profile.", "claim": "With ratings θ_C=30, θ_A=20, θ_B=10, the induced ranking C≻A≻B aligns with the two votes of type C≻A≻B in the profile of five votes, yielding a total Kendall-tau distance of 5 across all five votes.", "label": true }, { "paperid": "2409.01901v1", "paper_path": "./SciVer/papers/2409.01901v1.json", "claim_type": "parallel", "item1": "7(c)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01901v1_figure_7(c).png", "item2_path": "./SciVer/images/2409.01901v1-Table1-1.png", "section": [ "3.3" ], "request_id": 21, "origin_statement": "Figure 7 shows the StretchSense gloves confuse the 'five' handshape with 'open b', and signer 03 recorded 590 NGT signs—59 times signer 01’s 10—using YES pedal control, suggesting the extensive NGT dataset likely contains numerous ambiguous 'five' recordings.", "perturbed_statement": "Figure 7 shows the StretchSense gloves confuse the 'five' handshape with 'closed b', and signer 03 recorded 150 NGT signs—15 times signer 01’s 10—using YES pedal control, suggesting the extensive NGT dataset likely contains numerous ambiguous 'five' recordings.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 7 specifies confusion between 'five' and 'open b', not 'closed b', and Table 1 indicates signer 03 contributed 590 NGT signs, not 150.", "claim": "Figure 7 shows the StretchSense gloves confuse the 'five' handshape with 'open b', and signer 03 recorded 590 NGT signs—59 times signer 01’s 10—using YES pedal control, suggesting the extensive NGT dataset likely contains numerous ambiguous 'five' recordings.", "label": true }, { "paperid": "2409.05371v1", "paper_path": "./SciVer/papers/2409.05371v1.json", "claim_type": "parallel", "item1": "6(f)", "item2": "6(g)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05371v1_figure_6(f).png", "item2_path": "./SciVer/images/2409.05371v1_figure_6(g).png", "section": [ "5" ], "request_id": 41, "origin_statement": "Both LQG black hole images show a photon ring at ≈4.8 units from the center, but the first profile peaks at ≈1.8 intensity—over three times the ≈0.5 maximum in the second.", "perturbed_statement": "Both LQG black hole images show a photon ring at ≈6.0 units from the center, but the first profile peaks at ≈1.8 intensity—over three times the ≈0.5 maximum in the second.", "perturbed_explanation": "The photon rings in both images actually lie at roughly 4.8 units (as indicated by the axes), not at 6.0. 
Claiming a radius of ≈6.0 contradicts the scale marks on the plotted images.", "claim": "Both LQG black hole images show a photon ring at ≈4.8 units from the center, but the first profile peaks at ≈1.8 intensity—over three times the ≈0.5 maximum in the second.", "label": true }, { "paperid": "2409.08851v1", "paper_path": "./SciVer/papers/2409.08851v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08851v1_figure_2(b).png", "item2_path": "./SciVer/images/2409.08851v1_figure_2(c).png", "section": [ "3.2" ], "request_id": 42, "origin_statement": "In the CFL=0.7, Reτ=395 simulation the mean streamwise velocity reaches U+ ≈20 at y+ ≈200, while in the CFL=0.5, Reτ=590 case it attains U+ ≈22 at the same y+, indicating a ≈10% higher peak velocity in the higher-Reτ flow.", "perturbed_statement": "In the CFL=0.7, Reτ=395 simulation the mean streamwise velocity reaches U+ ≈20 at y+ ≈200, while in the CFL=0.5, Reτ=590 case it attains U+ ≈25 at the same y+, indicating a ≈25% higher peak velocity in the higher-Reτ flow.", "perturbed_explanation": "The perturbed statement claims the CFL=0.5, Reτ=590 case reaches U+ ≈25 at y+ ≈200, but the plotted profile in the second image actually peaks around U+ ≈22, not 25; this contradicts the visual data.", "claim": "In the CFL=0.7, Reτ=395 simulation the mean streamwise velocity reaches U+ ≈20 at y+ ≈200, while in the CFL=0.5, Reτ=590 case it attains U+ ≈22 at the same y+, indicating a ≈10% higher peak velocity in the higher-Reτ flow.", "label": true }, { "paperid": "2410.03091v1", "paper_path": "./SciVer/papers/2410.03091v1.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.03091v1_figure_1.png", "item2_path": "./SciVer/images/2410.03091v1-Table1-1.png", "section": [ "4.2" ], "request_id": 47, "origin_statement": "In the informative follow-up scenario (Fig 1F), Group 3’s proportion of available CGM readings declines to around 0.3 by day 2, and in Table 1 at n=50 the proposed estimator yields a relative bias of 5.7% for Group 3 in the 70–180 mg/dL range, compared to 0.3% in Group 1.", "perturbed_statement": "In the informative follow-up scenario (Fig 1F), Group 3’s proportion of available CGM readings declines to around 0.5 by day 2, and in Table 1 at n=50 the proposed estimator yields a relative bias of 5.7% for Group 3 in the 70–180 mg/dL range, compared to 0.3% in Group 1.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 1F shows that Group 3’s availability drops to about 0.3 (not 0.5) by day 2 under informative follow-up, so the claim of 0.5 contradicts the plotted curve.", "claim": "In the informative follow-up scenario (Fig 1F), Group 3’s proportion of available CGM readings declines to around 0.3 by day 2, and in Table 1 at n=50 the proposed estimator yields a relative bias of 5.7% for Group 3 in the 70–180 mg/dL range, compared to 0.3% in Group 1.", "label": true }, { "paperid": "2409.10343v1", "paper_path": "./SciVer/papers/2409.10343v1.json", "claim_type": "parallel", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10343v1_figure_7.png", "item2_path": "./SciVer/images/2409.10343v1_figure_8.png", "section": [ "7.4.3" ], "request_id": 50, "origin_statement": "Figure 7’s false-positive update template explicitly caps the adjusted user profile at 100 words, whereas Figure 8’s 
false-negative template includes no such word-count constraint, contrasting their output restrictions.", "perturbed_statement": "Figure 7’s false-positive update template explicitly caps the adjusted user profile at 200 words, whereas Figure 8’s false-negative template includes no such word-count constraint.", "perturbed_explanation": "Figure 7’s prompt clearly states “Please ensure that the ‘profile’ is no longer than 100 words,” not 200, so the claim of a 200-word limit contradicts the false-positive template.", "claim": "Figure 7’s false-positive update template explicitly caps the adjusted user profile at 100 words, whereas Figure 8’s false-negative template includes no such word-count constraint, contrasting their output restrictions.", "label": true }, { "paperid": "2410.21813v1", "paper_path": "./SciVer/papers/2410.21813v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.21813v1-Table3-1.png", "item2_path": "./SciVer/images/2410.21813v1-Table4-1.png", "section": [ "4.1.3" ], "request_id": 58, "origin_statement": "During adaptation, SAM2 fine-tuning uses 96 samples per batch at 1024×1024 resolution, whereas in SAM2-Swin Stage 1 training batches quadruple to 256 but images downsample to 256×256.", "perturbed_statement": "During adaptation, SAM2 fine-tuning uses 96 samples per batch at 1024×1024 resolution, whereas in SAM2-Swin Stage 1 training batches double to 128 but images downsample to 256×256.", "perturbed_explanation": "The perturbed statement incorrectly claims that Stage 1 uses a batch size of 128. Table 4 actually specifies a batch size of 256 for SAM2-Swin Stage 1, not 128.", "claim": "During adaptation, SAM2 fine-tuning uses 96 samples per batch at 1024×1024 resolution, whereas in SAM2-Swin Stage 1 training batches quadruple to 256 but images downsample to 256×256.", "label": true }, { "paperid": "2411.10399v1", "paper_path": "./SciVer/papers/2411.10399v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10399v1_figure_3.png", "item2_path": "./SciVer/images/2411.10399v1_figure_4.png", "section": [ "2.4" ], "request_id": 61, "origin_statement": "With N=2 LPs over M=2 atomic ranges, a non-crossing trade (Case 1) distributes a single fee reward f1 among LPs using only one interval’s active liquidity, whereas a crossing trade (Case 2) splits total rewards f1 and f2 across both intervals proportionally to K_{n,1} and K_{n,2} sums.", "perturbed_statement": "With N=2 LPs over M=2 atomic ranges, a non-crossing trade (Case 1) distributes both fee rewards f1 and f2 among LPs using only one interval’s active liquidity, whereas a crossing trade (Case 2) splits total rewards f1 and f2 across both intervals proportionally to K_{n,1} and K_{n,2} sums.", "perturbed_explanation": "The perturbed statement incorrectly asserts that a non-crossing trade distributes both f1 and f2. 
In fact, by definition (Figure 3) a non-crossing trade only spans one atomic interval and thus only allocates its single fee reward f_m (e.g., f1), not both f1 and f2.", "claim": "With N=2 LPs over M=2 atomic ranges, a non-crossing trade (Case 1) distributes a single fee reward f1 among LPs using only one interval’s active liquidity, whereas a crossing trade (Case 2) splits total rewards f1 and f2 across both intervals proportionally to K_{n,1} and K_{n,2} sums.", "label": true }, { "paperid": "2409.10031v1", "paper_path": "./SciVer/papers/2409.10031v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10031v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.10031v1_figure_4.png", "section": [ "4.1" ], "request_id": 63, "origin_statement": "Among the 48 BTC-related entities in the SDN list (29 individuals, 19 companies), 25 (≈52%) had sent no funds by 90 days post-sanction, as shown by the “no send money” count in Figure 4.", "perturbed_statement": "Among the 48 BTC-related entities in the SDN list (29 individuals, 19 companies), 21 (≈44%) had sent no funds by 90 days post-sanction, as shown by the “no send money” count in Figure 4.", "perturbed_explanation": "The bar for “no send money” at the 90-day interval in Figure 4 indicates 25 entities, not 21, so stating 21 contradicts the actual count.", "claim": "Among the 48 BTC-related entities in the SDN list (29 individuals, 19 companies), 25 (≈52%) had sent no funds by 90 days post-sanction, as shown by the “no send money” count in Figure 4.", "label": true }, { "paperid": "2411.07533v1", "paper_path": "./SciVer/papers/2411.07533v1.json", "claim_type": "parallel", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07533v1_figure_8.png", "item2_path": "./SciVer/images/2411.07533v1_figure_9.png", "section": [ "5.3" ], "request_id": 64, "origin_statement": "Chinese F1 for form in llama2 peaks at around 0.9 while its meaning F1 peaks at about 0.6 (figure 8), and overall Chinese form competence (~0.95) exceeds meaning competence (~0.60) by ~0.35, consistent with the positive correlation (R2=0.48) between form and meaning in figure 9.", "perturbed_statement": "Chinese F1 for form in llama2 peaks at around 0.9 while its meaning F1 peaks at about 0.6 (figure 8), and overall Chinese form competence (~0.85) exceeds meaning competence (~0.60) by ~0.25, consistent with the positive correlation (R2=0.48) between form and meaning in figure 9.", "perturbed_explanation": "The perturbed statement incorrectly reports Chinese form competence as ~0.85 and a 0.25 gap. Figure 9 shows Chinese form competence values around 0.95–0.98, giving an actual difference of ~0.35, not ~0.25.", "claim": "Chinese F1 for form in llama2 peaks at around 0.9 while its meaning F1 peaks at about 0.6 (figure 8), and overall Chinese form competence (~0.95) exceeds meaning competence (~0.60) by ~0.35, consistent with the positive correlation (R2=0.48) between form and meaning in figure 9.", "label": true }, { "paperid": "2411.10924v1", "paper_path": "./SciVer/papers/2411.10924v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10924v1_figure_5.png", "item2_path": "./SciVer/images/2411.10924v1_figure_6.png", "section": [ "6.1" ], "request_id": 78, "origin_statement": "In Fig. 
6, the Oland CCP star sits near (9.5, 2.9), only about 1.5 units in x and 1.6 in y (Euclidean ≈2.2) from the Halland CCP at (8.0, 1.3). In Fig. 5, this proximity corresponds to a 3.33% Oland→Halland misclassification.", "perturbed_statement": "In Fig. 6, the Oland CCP star sits near (9.5, 2.9), only about 0.5 units in x and 1.6 in y (Euclidean ≈1.6) from the Halland CCP at (8.0, 1.3). In Fig. 5, this proximity corresponds to a 1.11% Oland→Halland misclassification.", "perturbed_explanation": "The perturbation is incorrect because in Fig. 5 the confusion matrix shows a 3.33% misclassification rate of Oland samples as Halland (not 1.11%). Additionally, the horizontal separation between the Oland and Halland CCP stars is about 1.5 units in Fig. 6, not 0.5 units.", "claim": "In Fig. 6, the Oland CCP star sits near (9.5, 2.9), only about 1.5 units in x and 1.6 in y (Euclidean ≈2.2) from the Halland CCP at (8.0, 1.3). In Fig. 5, this proximity corresponds to a 3.33% Oland→Halland misclassification.", "label": true }, { "paperid": "2409.18903v2", "paper_path": "./SciVer/papers/2409.18903v2.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.18903v2_figure_2.png", "item2_path": "./SciVer/images/2409.18903v2_figure_3.png", "section": [ "4.2" ], "request_id": 87, "origin_statement": "By constructing f so that V̄_ξ(ξ)=V̄_{Δx,ξ}(f(ξ)) a.e. on each [ξ₃ⱼ,ξ₃ⱼ₊₃), the L¹ norm ‖V̄_ξ−V̄_{Δx,ξ}(f)‖_{L¹(S_c)} collapses to zero contributions on those intervals, yielding the O(Δx^{β/2}) convergence rate in Figure 2.", "perturbed_statement": "By constructing f so that V̄_ξ(ξ)=V̄_{Δx,ξ}(f(ξ)) a.e. on each [ξ₃ⱼ,ξ₃ⱼ₊₃), the L¹ norm ‖V̄_ξ−V̄_{Δx,ξ}(f)‖_{L¹(S_c)} collapses to zero contributions on those intervals, yielding the O(Δx^{β/4}) convergence rate in Figure 2.", "perturbed_explanation": "Figure 2 specifically bounds ‖V̄_ξ−V̄_{Δx,ξ}(f)‖_{L¹(S_c)} by O(Δx^{β/2}); replacing β/2 with β/4 contradicts the shown convergence order.", "claim": "By constructing f so that V̄_ξ(ξ)=V̄_{Δx,ξ}(f(ξ)) a.e. on each [ξ₃ⱼ,ξ₃ⱼ₊₃), the L¹ norm ‖V̄_ξ−V̄_{Δx,ξ}(f)‖_{L¹(S_c)} collapses to zero contributions on those intervals, yielding the O(Δx^{β/2}) convergence rate in Figure 2.", "label": true }, { "paperid": "2409.15951v1", "paper_path": "./SciVer/papers/2409.15951v1.json", "claim_type": "parallel", "item1": "4", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15951v1_figure_4.png", "item2_path": "./SciVer/images/2409.15951v1-Table4-1.png", "section": [ "4" ], "request_id": 90, "origin_statement": "In Fig. 6, the purple best-fit spectrum tracks the orange noisy data within five red-shaded continuum windows between 500.4 and 502.0 nm, while Table 4 indicates [Mg/H]=0.65 dex, which is 0.33 dex higher than [Ti/H]=0.32 dex, signifying substantially stronger magnesium lines.", "perturbed_statement": "In Fig. 6, the purple best-fit spectrum tracks the orange noisy data within eight red-shaded continuum windows between 500.4 and 502.0 nm, while Table 4 indicates [Mg/H]=0.65 dex, which is 0.33 dex higher than [Ti/H]=0.32 dex, signifying substantially stronger magnesium lines.", "perturbed_explanation": "The perturbed statement wrongly claims there are eight red-shaded continuum windows, but Fig. 6 clearly shows only five such windows between 500.4 and 502.0 nm, so the count of continuum regions is incorrect.", "claim": "In Fig. 
6, the purple best-fit spectrum tracks the orange noisy data within five red-shaded continuum windows between 500.4 and 502.0 nm, while Table 4 indicates [Mg/H]=0.65 dex, which is 0.33 dex higher than [Ti/H]=0.32 dex, signifying substantially stronger magnesium lines.", "label": true }, { "paperid": "2410.23507v1", "paper_path": "./SciVer/papers/2410.23507v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23507v1_figure_4.png", "item2_path": "./SciVer/images/2410.23507v1-Table5-1.png", "section": [ "6.2" ], "request_id": 91, "origin_statement": "In Figure 4, with the error type loss, the average routing score for PUNCT tokens is about 0.55, compared to roughly 0.45 without the loss; in Table 5, the GShard router with both load balancing and error type losses achieves an average F₀.₅ score of 51.25.", "perturbed_statement": "In Figure 4, with the error type loss, the average routing score for PUNCT tokens is about 0.65, compared to roughly 0.45 without the loss; in Table 5, the GShard router with both load balancing and error type losses achieves an average F₀.₅ score of 52.25.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 4 shows the PUNCT token routing score rises from around 0.45 to about 0.55 (not 0.65) when using the error type loss. Additionally, Table 5 reports an average F₀.₅ of 51.25 (not 52.25) for the GShard LB+ET configuration.", "claim": "In Figure 4, with the error type loss, the average routing score for PUNCT tokens is about 0.55, compared to roughly 0.45 without the loss; in Table 5, the GShard router with both load balancing and error type losses achieves an average F₀.₅ score of 51.25.", "label": true }, { "paperid": "2409.00614v1", "paper_path": "./SciVer/papers/2409.00614v1.json", "claim_type": "parallel", "item1": "4", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.00614v1_figure_4.png", "item2_path": "./SciVer/images/2409.00614v1-Table4-1.png", "section": [ "6.4" ], "request_id": 92, "origin_statement": "Per-FedAvg incurs 2945 min total training, 1.55× DAMe’s 1896 min (Table 4), yet in English Twitter (Figure 4), DAMe obtains Local NMI ≈0.76 by round 50, outperforming Per-FedAvg’s ≈0.74.", "perturbed_statement": "Per-FedAvg incurs 2945 min total training, 1.55× DAMe’s 1688 min (Table 4), yet in English Twitter (Figure 4), DAMe obtains Local NMI ≈0.74 by round 50, outperforming Per-FedAvg’s ≈0.78.", "perturbed_explanation": "Table 4 shows DAMe’s total training time is 1896 min, not 1688 min. Figure 4 shows DAMe’s Local NMI reaches ≈0.76 and Per-FedAvg’s is ≈0.74 at round 50, not ≈0.74 vs. 
≈0.78.", "claim": "Per-FedAvg incurs 2945 min total training, 1.55× DAMe’s 1896 min (Table 4), yet in English Twitter (Figure 4), DAMe obtains Local NMI ≈0.76 by round 50, outperforming Per-FedAvg’s ≈0.74.", "label": true }, { "paperid": "2410.20797v1", "paper_path": "./SciVer/papers/2410.20797v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20797v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.20797v1_figure_1(b).png", "section": [ "4.4" ], "request_id": 95, "origin_statement": "On CIFAR-10, Rplg achieves peak test accuracy of about 87.5% at α=0.3, and its pseudo-label accuracy converges to around 90% by epoch 100.", "perturbed_statement": "On CIFAR-10, Rplg achieves peak test accuracy of about 88.5% at α=0.5, and its pseudo-label accuracy converges to around 92% by epoch 50.", "perturbed_explanation": "The peak test accuracy is wrongly stated as 88.5% at α=0.5, but Figure 1(a) shows about 87.5% at α=0.3. Also, pseudo-label accuracy does not converge to 92% by epoch 50; Figure 1(b) indicates it reaches about 90% only by around epoch 100.", "claim": "On CIFAR-10, Rplg achieves peak test accuracy of about 87.5% at α=0.3, and its pseudo-label accuracy converges to around 90% by epoch 100.", "label": true }, { "paperid": "2410.21705v1", "paper_path": "./SciVer/papers/2410.21705v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21705v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.21705v1_figure_4(b).png", "section": [ "4.4" ], "request_id": 97, "origin_statement": "On CUB-200, increasing the bottleneck dimension from 4 to 32 raises new-class accuracy from 58% to 65%, and the confusion matrices show new-class misclassification counts drop from 278 (no constraint) to 120 (with balanced loss).", "perturbed_statement": "On CUB-200, increasing the bottleneck dimension from 4 to 32 raises new-class accuracy from 58% to 65%, and the confusion matrices show new-class misclassification counts drop from 278 (no constraint) to 150 (with balanced loss).", "perturbed_explanation": "The confusion matrix in Figure 4 for the balanced loss condition indicates 120 new-class samples misclassified as old classes (bottom-left entry), not 150 as stated.", "claim": "On CUB-200, increasing the bottleneck dimension from 4 to 32 raises new-class accuracy from 58% to 65%, and the confusion matrices show new-class misclassification counts drop from 278 (no constraint) to 120 (with balanced loss).", "label": true }, { "paperid": "2411.18385v1", "paper_path": "./SciVer/papers/2411.18385v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.18385v1_figure_2(b).png", "item2_path": "./SciVer/images/2411.18385v1_figure_2(c).png", "section": [ "4.1" ], "request_id": 101, "origin_statement": "On SVHN, FedIvon’s test loss drops below 1.0 in under 50 communication rounds, whereas on CIFAR-10 it remains above 1.1 even after 1000 rounds, highlighting FedIvon’s much faster convergence on SVHN compared to CIFAR-10.", "perturbed_statement": "On SVHN, FedIvon’s test loss drops below 0.8 in under 50 communication rounds, whereas on CIFAR-10 it remains above 1.3 even after 1000 rounds, highlighting FedIvon’s much faster convergence on SVHN compared to CIFAR-10.", "perturbed_explanation": "The perturbed claim is incorrect 
because in the SVHN plot FedIvon’s loss only falls below 1.0 by around 50 rounds, not below 0.8. Likewise, in the CIFAR-10 plot the loss at 1000 rounds is about 1.15–1.2, not above 1.3.", "claim": "On SVHN, FedIvon’s test loss drops below 1.0 in under 50 communication rounds, whereas on CIFAR-10 it remains above 1.1 even after 1000 rounds, highlighting FedIvon’s much faster convergence on SVHN compared to CIFAR-10.", "label": true }, { "paperid": "2411.07253v1", "paper_path": "./SciVer/papers/2411.07253v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07253v1_figure_1(b).png", "item2_path": "./SciVer/images/2411.07253v1-Table1-1.png", "section": [ "7.2" ], "request_id": 110, "origin_statement": "On the Imbalance1 problem (n=2, m=2, x_L=(-2,-2), x_U=(2,2)), with k_max=500, SPGMO generates F1 values up to ~4.0×10^6, whereas ASPGMO-sc keeps F1 below ~5×10^3, achieving over an 800-fold reduction in worst-case F1.", "perturbed_statement": "On the Imbalance1 problem (n=2, m=2, x_L=(-2,-2), x_U=(2,2)), with k_max=500, SPGMO generates F1 values up to ~2.0×10^6, whereas ASPGMO-sc keeps F1 below ~1.0×10^4, achieving over a 200-fold reduction in worst-case F1.", "perturbed_explanation": "The perturbed statement misreports the SPGMO maximum and ASPGMO-sc threshold. Figure 1(b) shows SPGMO’s F1 reaching about 4×10^6 (not 2×10^6) and ASPGMO-sc’s F1 staying under roughly 5×10^3 (not 1×10^4). This contradicts the visual data.", "claim": "On the Imbalance1 problem (n=2, m=2, x_L=(-2,-2), x_U=(2,2)), with k_max=500, SPGMO generates F1 values up to ~4.0×10^6, whereas ASPGMO-sc keeps F1 below ~5×10^3, achieving over an 800-fold reduction in worst-case F1.", "label": true }, { "paperid": "2410.04078v1", "paper_path": "./SciVer/papers/2410.04078v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.04078v1_figure_5.png", "item2_path": "./SciVer/images/2410.04078v1_figure_6.png", "section": [ "5.3" ], "request_id": 115, "origin_statement": "In Fig6’s Reflect step, the student’s acquired knowledge increases from 1/2 to 2/2 components. Fig5’s profile overview rates Academic Stress as High and Self-Efficacy as Low, demonstrating the system updates knowledge state for a student with high anxiety and low confidence.", "perturbed_statement": "In Fig6’s Reflect step, the student’s acquired knowledge increases from 0/2 to 2/2 components. Fig5’s profile overview rates Academic Stress as Low and Self-Efficacy as High, demonstrating the system updates knowledge state for a student with low anxiety and high confidence.", "perturbed_explanation": "The perturbed statement is incorrect because in Figure 6 the pre-Reflect knowledge state shows 1 of 2 components acquired (not 0/2), and in Figure 5 the generated profile labels Academic Stress as High (not Low) and Self-Efficacy as Low (not High).", "claim": "In Fig6’s Reflect step, the student’s acquired knowledge increases from 1/2 to 2/2 components. 
Fig5’s profile overview rates Academic Stress as High and Self-Efficacy as Low, demonstrating the system updates knowledge state for a student with high anxiety and low confidence.", "label": true }, { "paperid": "2411.00049v1", "paper_path": "./SciVer/papers/2411.00049v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.00049v1-Table1-1.png", "item2_path": "./SciVer/images/2411.00049v1-Table2-1.png", "section": [ "5.2" ], "request_id": 117, "origin_statement": "On the Reuters dataset, the iterative FOIL reduces memory consumption from 4.27 GiB to 2.92 GiB (about 31.6% decrease), while on the Spambase dataset it boosts accuracy from 87.69% to 89.71% (2.02 percentage points).", "perturbed_statement": "On the Reuters dataset, the iterative FOIL reduces memory consumption from 4.27 GiB to 3.92 GiB (about 31.6% decrease), while on the Spambase dataset it boosts accuracy from 87.69% to 89.71% (2.02 percentage points).", "perturbed_explanation": "The perturbed statement wrongly claims that FOIL-iter uses 3.92 GiB on Reuters, but Table 1 reports it uses 2.92 GiB, so the memory figure is incorrect.", "claim": "On the Reuters dataset, the iterative FOIL reduces memory consumption from 4.27 GiB to 2.92 GiB (about 31.6% decrease), while on the Spambase dataset it boosts accuracy from 87.69% to 89.71% (2.02 percentage points).", "label": true }, { "paperid": "2409.06166v1", "paper_path": "./SciVer/papers/2409.06166v1.json", "claim_type": "parallel", "item1": "1", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.06166v1-Table1-1.png", "item2_path": "./SciVer/images/2409.06166v1-Table4-1.png", "section": [ "4.1" ], "request_id": 124, "origin_statement": "RPP attains 20.2% with ResNet50 and 24.9% with ViT-B/16 on ImageNet-21K (gains of +0.8 and +0.9 over POMP), and on CoOp base-to-new tasks it raises the harmonic mean from 74.09% to 79.39%, a 5.3-point boost.", "perturbed_statement": "RPP attains 20.2% with ResNet50 and 26.1% with ViT-B/16 on ImageNet-21K (gains of +0.8 and +0.9 over POMP), and on CoOp base-to-new tasks it raises the harmonic mean from 74.09% to 82.19%, an 8.1-point boost.", "perturbed_explanation": "The perturbation is incorrect because Table 1 reports ViT-B/16 zero-shot accuracy of 24.9%, not 26.1%, and Table 4 shows the harmonic mean rising to 79.39%, not 82.19%.", "claim": "RPP attains 20.2% with ResNet50 and 24.9% with ViT-B/16 on ImageNet-21K (gains of +0.8 and +0.9 over POMP), and on CoOp base-to-new tasks it raises the harmonic mean from 74.09% to 79.39%, a 5.3-point boost.", "label": true }, { "paperid": "2411.06184v1", "paper_path": "./SciVer/papers/2411.06184v1.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.06184v1_figure_3.png", "item2_path": "./SciVer/images/2411.06184v1-Table2-1.png", "section": [ "4.4" ], "request_id": 128, "origin_statement": "For N=64 under mean±3SD discretization (Fig3(i)), MTBO achieves its optimal loss of 0.1824 by the sixth evaluation, while STBO only converges to 0.1944 around the 17th evaluation, yielding nearly a threefold speed-up (Table2).", "perturbed_statement": "For N=64 under mean±3SD discretization (Fig3(i)), MTBO reaches its optimal loss of 0.1904 by the ninth evaluation, while STBO converges to 0.1944 after about 12 evaluations, resulting in a 2× speed-up (Table2).", "perturbed_explanation": "The perturbed 
statement wrongly reports MTBO’s optimal loss as 0.1904, but Table 2 lists it as 0.1824. It also misstates convergence speed: Fig 3(i) shows MTBO drops below 0.19 by the sixth evaluation (not the ninth), and STBO doesn’t reach its final loss until around the 17th evaluation (not the 12th).", "claim": "For N=64 under mean±3SD discretization (Fig3(i)), MTBO achieves its optimal loss of 0.1824 by the sixth evaluation, while STBO only converges to 0.1944 around the 17th evaluation, yielding nearly a threefold speed-up (Table2).", "label": true }, { "paperid": "2410.15592v2", "paper_path": "./SciVer/papers/2410.15592v2.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.15592v2_figure_2.png", "item2_path": "./SciVer/images/2410.15592v2_figure_3.png", "section": [ "5" ], "request_id": 130, "origin_statement": "By epoch 50, the 25%/9:0:1 masking strategy yields the lowest perplexity (~1.55), about 0.2 below the 15%/8:1:1 distribution’s perplexity (~1.75); concurrently, AlphaFold predictions maintain the highest pLDDT on the test set at 91.8%, exceeding OMEGAFold’s 81.2%.", "perturbed_statement": "By epoch 50, the 25%/9:0:1 masking strategy yields the lowest perplexity (~1.55), about 0.3 below the 15%/8:1:1 distribution’s perplexity (~1.85); concurrently, AlphaFold predictions maintain the highest pLDDT on the test set at 93.8%, exceeding OMEGAFold’s 81.2%.", "perturbed_explanation": "The 15%/8:1:1 distribution’s perplexity at epoch 50 is actually around 1.75 (not 1.85), and the test-set pLDDT for AlphaFold is 91.8% (not 93.8%), as shown in Figures 2 and 3.", "claim": "By epoch 50, the 25%/9:0:1 masking strategy yields the lowest perplexity (~1.55), about 0.2 below the 15%/8:1:1 distribution’s perplexity (~1.75); concurrently, AlphaFold predictions maintain the highest pLDDT on the test set at 91.8%, exceeding OMEGAFold’s 81.2%.", "label": true }, { "paperid": "2411.18243v1", "paper_path": "./SciVer/papers/2411.18243v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.18243v1_figure_1(b).png", "item2_path": "./SciVer/images/2411.18243v1-Table1-1.png", "section": [ "2.1" ], "request_id": 131, "origin_statement": "After 5.8 s in the GRB 211211A PDS, the Fermi data exhibits QPO peaks at 2.8 ± 0.3 Hz and 5.2 ± 0.5 Hz (separated by ≈2.4 Hz), with ASIM observing matching peaks of 2.9 ± 0.4 Hz and 5.2 ± 1.0 Hz.", "perturbed_statement": "After 5.8 s in the GRB 211211A PDS, the Fermi data exhibits QPO peaks at 3.8 ± 0.3 Hz and 5.2 ± 0.5 Hz (separated by ≈1.4 Hz), with ASIM observing matching peaks of 2.9 ± 0.4 Hz and 5.2 ± 1.0 Hz.", "perturbed_explanation": "The perturbed statement misstates the Fermi QPO frequency as 3.8 Hz; Table 1 reports it as 2.8 ± 0.3 Hz, so both the absolute frequency and the calculated separation (1.4 Hz vs. 
the actual ≈2.4 Hz) are incorrect.", "claim": "After 5.8 s in the GRB 211211A PDS, the Fermi data exhibits QPO peaks at 2.8 ± 0.3 Hz and 5.2 ± 0.5 Hz (separated by ≈2.4 Hz), with ASIM observing matching peaks of 2.9 ± 0.4 Hz and 5.2 ± 1.0 Hz.", "label": true }, { "paperid": "2410.13995v2", "paper_path": "./SciVer/papers/2410.13995v2.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13995v2_figure_3.png", "item2_path": "./SciVer/images/2410.13995v2-Table2-1.png", "section": [ "3.2" ], "request_id": 138, "origin_statement": "In this simple MDP (Figure 3), bounded reward poisoning caps adversarial rewards to 1 at the poisoned Start state, giving Q′(Start,a⁺)=1 and Q′(Start,a)=γ/(1–γ) per Table 2; since γ/(1–γ)>1 for γ>0.5, bounded poisoning cannot make a⁺ optimal.", "perturbed_statement": "In this simple MDP (Figure 3), bounded reward poisoning caps adversarial rewards to 1 at the poisoned Start state, giving Q′(Start,a⁺)=1 and Q′(Start,a)=γ/(1–γ) per Table 2; since 1>γ/(1–γ) for γ>0.5, bounded poisoning makes a⁺ optimal.", "perturbed_explanation": "The perturbation incorrectly asserts that 1>γ/(1–γ) when γ>0.5. Table 2 shows Q′(Start,a⁺)=1 and Q′(Start,a)=γ/(1–γ), and for any γ>0.5, γ/(1–γ) actually exceeds 1, so the inequality is reversed.", "claim": "In this simple MDP (Figure 3), bounded reward poisoning caps adversarial rewards to 1 at the poisoned Start state, giving Q′(Start,a⁺)=1 and Q′(Start,a)=γ/(1–γ) per Table 2; since γ/(1–γ)>1 for γ>0.5, bounded poisoning cannot make a⁺ optimal.", "label": true }, { "paperid": "2410.13883v1", "paper_path": "./SciVer/papers/2410.13883v1.json", "claim_type": "parallel", "item1": "1(c)", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13883v1_figure_1(c).png", "item2_path": "./SciVer/images/2410.13883v1_figure_2.png", "section": [ "1" ], "request_id": 141, "origin_statement": "In 2022, transformer-based CU publications surged to about 1000, exceeding the combined CNN (≈590) and GNN (≈60) output by around 350, coinciding with the adoption of multi-modal cross-attention blocks that jointly process visual and tabular inputs.", "perturbed_statement": "In 2021, transformer-based CU publications surged to about 1000, exceeding the combined CNN (≈590) and GNN (≈60) output by around 350, coinciding with the adoption of multi-modal cross-attention blocks that jointly process visual and tabular inputs.", "perturbed_explanation": "Figure 1 shows that in 2021 transformer publications were only about 800 (not 1000), while CNN publications were about 840 (not 590) and GNN around 50 (not 60). The year and figures in the perturbed statement thus contradict the chart.", "claim": "In 2022, transformer-based CU publications surged to about 1000, exceeding the combined CNN (≈590) and GNN (≈60) output by around 350, coinciding with the adoption of multi-modal cross-attention blocks that jointly process visual and tabular inputs.", "label": true }, { "paperid": "2409.07796v1", "paper_path": "./SciVer/papers/2409.07796v1.json", "claim_type": "parallel", "item1": "1", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07796v1_figure_1.png", "item2_path": "./SciVer/images/2409.07796v1_figure_2(b).png", "section": [ "1" ], "request_id": 144, "origin_statement": "At Location 3, images from Dec 30, 2011 to May 5, 2012 show seasonal shift. 
EfficientNet-B2 (1.0 GFLOP) accuracy improves from 72% before FT to 78% after FT, a 6-point increase.", "perturbed_statement": "At Location 3, images from Dec 30, 2011 to May 5, 2012 show seasonal shift. EfficientNet-B2 (1.0 GFLOP) accuracy improves from 72% before FT to 82% after FT, a 10-point increase.", "perturbed_explanation": "The perturbed statement claims EfficientNet-B2’s post-fine-tuning accuracy reaches 82% (a 10-point gain), but the plot shows B2 improves only to about 78% after FT (≈6-point gain), so the 82% and 10-point increase are incorrect.", "claim": "At Location 3, images from Dec 30, 2011 to May 5, 2012 show seasonal shift. EfficientNet-B2 (1.0 GFLOP) accuracy improves from 72% before FT to 78% after FT, a 6-point increase.", "label": true }, { "paperid": "2411.05689v1", "paper_path": "./SciVer/papers/2411.05689v1.json", "claim_type": "parallel", "item1": "2", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05689v1_figure_2.png", "item2_path": "./SciVer/images/2411.05689v1_figure_3(a).png", "section": [ "4.3" ], "request_id": 145, "origin_statement": "Figure 2 shows dual_annealing never outperforms the proposed solver and reaches 1000 instances by f–f_proposed≈5, while Figure 3 demonstrates that proposed_5 achieves negative differences down to about –30 000 in some cases, indicating large improvements over Ntrials=1.", "perturbed_statement": "Figure 2 shows dual_annealing never outperforms the proposed solver and reaches 1000 instances by f–f_proposed≈2, while Figure 3 demonstrates that proposed_5 achieves negative differences down to about –20 000 in some cases, indicating large improvements over Ntrials=1.", "perturbed_explanation": "The perturbation is incorrect because in Figure 2 the blue dual_annealing curve does not reach a cumulative count of 1000 until around f–f_proposed≈5, not ≈2, and in Figure 3 the proposed_5 line extends negative differences to about –30 000, not –20 000.", "claim": "Figure 2 shows dual_annealing never outperforms the proposed solver and reaches 1000 instances by f–f_proposed≈5, while Figure 3 demonstrates that proposed_5 achieves negative differences down to about –30 000 in some cases, indicating large improvements over Ntrials=1.", "label": true }, { "paperid": "2411.07664v1", "paper_path": "./SciVer/papers/2411.07664v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07664v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.07664v1_figure_3(c).png", "section": [ "3.4" ], "request_id": 150, "origin_statement": "The Mixtral-generated image contains two spheres, double the number in the GPT-4-generated image, while the GPT-4 image places its single sphere directly below a cube with a red cylinder beside it, compared to Mixtral’s cylinder below the cube.", "perturbed_statement": "The Mixtral-generated image contains three spheres, triple the number in the GPT-4-generated image, while the GPT-4 image places its single sphere directly below a cube with a red cylinder beside it, compared to Mixtral’s cylinder below the cube.", "perturbed_explanation": "The perturbed statement is wrong because the Mixtral-generated image actually shows only two spheres (one above and one below the cube), not three as claimed.", "claim": "The Mixtral-generated image contains two spheres, double the number in the GPT-4-generated image, while the GPT-4 image places its single sphere directly below a cube with a 
red cylinder beside it, compared to Mixtral’s cylinder below the cube.", "label": true }, { "paperid": "2410.04503v1", "paper_path": "./SciVer/papers/2410.04503v1.json", "claim_type": "parallel", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.04503v1_figure_7.png", "item2_path": "./SciVer/images/2410.04503v1_figure_8.png", "section": [ "5.3" ], "request_id": 154, "origin_statement": "At layer 32, T-SNE plots show cleanly separated helpfulness and harmlessness clusters, matching a jump in PDS accuracy: helpfulness from 15% at layer 4 to 48%, and harmlessness from 22% to 55%.", "perturbed_statement": "At layer 32, T-SNE plots show cleanly separated helpfulness and harmlessness clusters, matching a jump in PDS accuracy: helpfulness from 20% at layer 4 to 48%, and harmlessness from 22% to 50%.", "perturbed_explanation": "The perturbed statement misreports the initial PDS accuracy for helpfulness (it is about 15% at layer 4, not 20%) and understates the final PDS accuracy for harmlessness (it reaches about 55% at layer 32, not 50%).", "claim": "At layer 32, T-SNE plots show cleanly separated helpfulness and harmlessness clusters, matching a jump in PDS accuracy: helpfulness from 15% at layer 4 to 48%, and harmlessness from 22% to 55%.", "label": true }, { "paperid": "2411.18328v1", "paper_path": "./SciVer/papers/2411.18328v1.json", "claim_type": "parallel", "item1": "4", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.18328v1_figure_4.png", "item2_path": "./SciVer/images/2411.18328v1_figure_7.png", "section": [ "4.5" ], "request_id": 164, "origin_statement": "After SCL, the “hurdle start” event sequence shows about a 66% reduction in event points (red to blue), and EventCrab reduces FLOPs by 33 (from 702 to 669) compared to ExACT, demonstrating both data-level sparsification and computational savings.", "perturbed_statement": "After SCL, the “hurdle start” event sequence shows about an 80% reduction in event points (red to blue), and EventCrab reduces FLOPs by 50 (from 702 to 652) compared to ExACT, demonstrating both data-level sparsification and computational savings.", "perturbed_explanation": "The perturbation is incorrect because Fig. 4’s visualization indicates roughly a 66% drop in event points after SCL, not an 80% drop, and Fig. 
7 reports a 33-FLOP reduction from 702 to 669 for EventCrab versus ExACT, not a 50-FLOP reduction to 652.", "claim": "After SCL, the “hurdle start” event sequence shows about a 66% reduction in event points (red to blue), and EventCrab reduces FLOPs by 33 (from 702 to 669) compared to ExACT, demonstrating both data-level sparsification and computational savings.", "label": true }, { "paperid": "2411.01960v1", "paper_path": "./SciVer/papers/2411.01960v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01960v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.01960v1_figure_4.png", "section": [ "3" ], "request_id": 167, "origin_statement": "In the 850 μm continuum map, polarization fractions reach ~10% near core peaks (Fig.2), while the Fig.4 histogram counts ~38 measurements in 60°–80° bins (θ>50°) versus ~28 in <40° bins, indicating perpendicular-like orientations are ~1.4 times more common than parallel-like.", "perturbed_statement": "In the 850 μm continuum map, polarization fractions reach ~15% near core peaks (Fig.2), while the Fig.4 histogram counts ~38 measurements in 60°–80° bins (θ>50°) versus ~28 in <40° bins, indicating perpendicular-like orientations are ~1.4 times more common than parallel-like.", "perturbed_explanation": "Figure 2’s reference vector length corresponds to a 10% polarization fraction, not 15%, so the stated maximum of ~15% contradicts the image.", "claim": "In the 850 μm continuum map, polarization fractions reach ~10% near core peaks (Fig.2), while the Fig.4 histogram counts ~38 measurements in 60°–80° bins (θ>50°) versus ~28 in <40° bins, indicating perpendicular-like orientations are ~1.4 times more common than parallel-like.", "label": true }, { "paperid": "2409.19148v1", "paper_path": "./SciVer/papers/2409.19148v1.json", "claim_type": "parallel", "item1": "1", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19148v1_figure_1.png", "item2_path": "./SciVer/images/2409.19148v1_figure_4.png", "section": [ "5.3" ], "request_id": 171, "origin_statement": "Figure1 shows 324 candidate HLQs, and Figure4 shows that both Logistic Regression and SVC classifiers sustain class 1 F1-scores at roughly 0.80 when features are reduced from 30 down to 8.", "perturbed_statement": "Figure1 shows 324 candidate HLQs, and Figure4 shows that both Logistic Regression and SVC classifiers sustain class 1 F1-scores at roughly 0.75 when features are reduced from 30 down to 8.", "perturbed_explanation": "In Figure4, the plotted class 1 F1-scores for both Logistic Regression and SVC remain close to 0.80 across 30 to 8 features; claiming they are about 0.75 contradicts those actual values.", "claim": "Figure1 shows 324 candidate HLQs, and Figure4 shows that both Logistic Regression and SVC classifiers sustain class 1 F1-scores at roughly 0.80 when features are reduced from 30 down to 8.", "label": true }, { "paperid": "2409.20332v1", "paper_path": "./SciVer/papers/2409.20332v1.json", "claim_type": "parallel", "item1": "6(a)", "item2": "6(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.20332v1_figure_6(a).png", "item2_path": "./SciVer/images/2409.20332v1_figure_6(d).png", "section": [ "4.3" ], "request_id": 172, "origin_statement": "Models trained only on Lad synthetic volumes achieve a spleen dice of 65%, and augmenting real data with Lad yields a 2.8-point spleen improvement, surpassing the 1.8-point gain 
from Medical Diffusion augmentation.", "perturbed_statement": "Models trained only on Lad synthetic volumes achieve a spleen dice of 68%, and augmenting real data with Lad yields a 2.8-point spleen improvement, surpassing the 1.8-point gain from Medical Diffusion augmentation.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 6(a) shows the spleen dice score for the Lad-only model is 65%, not 68%.", "claim": "Models trained only on Lad synthetic volumes achieve a spleen dice of 65%, and augmenting real data with Lad yields a 2.8-point spleen improvement, surpassing the 1.8-point gain from Medical Diffusion augmentation.", "label": true }, { "paperid": "2409.02285v1", "paper_path": "./SciVer/papers/2409.02285v1.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.02285v1-Table3-1.png", "item2_path": "./SciVer/images/2409.02285v1-Table5-1.png", "section": [ "4.2" ], "request_id": 173, "origin_statement": "In Malawi, the lagged FS×lagged FI interaction coefficient for moderate food insecurity in Table 3 is 0.032 (SE = 0.082, not significant), whereas Table 5 reports a significant positive baseline FS×baseline HHI coefficient of 0.270 (SE = 0.113, p<0.05).", "perturbed_statement": "In Malawi, the lagged FS×lagged FI interaction coefficient for moderate food insecurity in Table 3 is 0.132 (SE = 0.082, not significant), whereas Table 5 reports a significant positive baseline FS×baseline HHI coefficient of 0.270 (SE = 0.113, p<0.05).", "perturbed_explanation": "The perturbed statement misstates the dynamic panel coefficient: Table 3 actually shows a lagged FS×lagged FI coefficient of 0.032 for moderate food insecurity in Malawi, not 0.132.", "claim": "In Malawi, the lagged FS×lagged FI interaction coefficient for moderate food insecurity in Table 3 is 0.032 (SE = 0.082, not significant), whereas Table 5 reports a significant positive baseline FS×baseline HHI coefficient of 0.270 (SE = 0.113, p<0.05).", "label": true }, { "paperid": "2411.12846v1", "paper_path": "./SciVer/papers/2411.12846v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.12846v1_figure_2.png", "item2_path": "./SciVer/images/2411.12846v1_figure_3.png", "section": [ "3.1" ], "request_id": 174, "origin_statement": "Publications in 2022 (112) exceed those in 2019 (105) by about 7%, while citations in 2022 (1,400) are roughly 83% lower than the 2019 peak of 8,200, highlighting increased output but reduced citation impact.", "perturbed_statement": "Publications in 2022 (102) are about 3% lower than those in 2019 (105), while citations in 2022 (1,400) are roughly 83% lower than the 2019 peak of 8,200, suggesting both output and impact declined.", "perturbed_explanation": "The perturbation incorrectly states 102 publications in 2022; Figure 2 shows 112 publications that year. 
Therefore, output did not decline compared to 2019’s 105 publications.", "claim": "Publications in 2022 (112) exceed those in 2019 (105) by about 7%, while citations in 2022 (1,400) are roughly 83% lower than the 2019 peak of 8,200, highlighting increased output but reduced citation impact.", "label": true }, { "paperid": "2410.02674v1", "paper_path": "./SciVer/papers/2410.02674v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02674v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.02674v1_figure_3(b).png", "section": [ "3.3" ], "request_id": 176, "origin_statement": "At K=20, bert-base-uncased_1 on the absolute token embeddings peaks at roughly 0.508 purity, about 0.031 higher than google/canine-s-1’s 0.477 purity on the relative set.", "perturbed_statement": "At K=20, bert-base-uncased_1 on the absolute token embeddings peaks at roughly 0.52 purity, about 0.04 higher than google/canine-s-1’s 0.46 purity on the relative set.", "perturbed_explanation": "The perturbed statement is incorrect because the absolute-set purity of bert-base-uncased_1 at K=20 is about 0.508 (not 0.52), and the relative-set purity of google/canine-s-1 at K=20 is about 0.477 (not 0.46).", "claim": "At K=20, bert-base-uncased_1 on the absolute token embeddings peaks at roughly 0.508 purity, about 0.031 higher than google/canine-s-1’s 0.477 purity on the relative set.", "label": true }, { "paperid": "2410.02001v2", "paper_path": "./SciVer/papers/2410.02001v2.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02001v2_figure_5.png", "item2_path": "./SciVer/images/2410.02001v2-Table3-1.png", "section": [ "4" ], "request_id": 178, "origin_statement": "At a 5 dB SNR threshold, raising the cvs threshold from 0.92 to 0.95 lowers the minimal spectral angle from about 0.41 to 0.38, and CFBS_0.95 misclassifies 174 objects versus 459 for MOCR, a 62% reduction.", "perturbed_statement": "At a 5 dB SNR threshold, raising the cvs threshold from 0.92 to 0.95 lowers the minimal spectral angle from about 0.41 to 0.35, and CFBS_0.95 misclassifies 200 objects versus 459 for MOCR, a 56% reduction.", "perturbed_explanation": "The perturbed statement conflicts with the plot, which shows the minimal spectral angle for cvs=0.95 at 5 dB is around 0.38, not 0.35. 
It also contradicts Table 3, where CFBS_0.95 is reported to misclassify 174 objects, not 200.", "claim": "At a 5 dB SNR threshold, raising the cvs threshold from 0.92 to 0.95 lowers the minimal spectral angle from about 0.41 to 0.38, and CFBS_0.95 misclassifies 174 objects versus 459 for MOCR, a 62% reduction.", "label": true }, { "paperid": "2410.20579v1", "paper_path": "./SciVer/papers/2410.20579v1.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20579v1_figure_3.png", "item2_path": "./SciVer/images/2410.20579v1-Table2-1.png", "section": [ "5.2" ], "request_id": 182, "origin_statement": "Table 2 reports CSD-iPOT achieves superior marginal calibration over baselines in 95 of 104 comparisons, and Figure 3’s FLCHAIN panel shows its calibration violin lies entirely below the KM red dashed mean, whereas Baseline and CSD exceed that empirical lower bound.", "perturbed_statement": "Table 2 reports CSD-iPOT achieves superior marginal calibration over baselines in 85 of 104 comparisons, and Figure 3’s FLCHAIN panel shows its calibration violin lies above the KM red dashed mean, whereas Baseline and CSD lie below that empirical lower bound.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 actually shows 95 wins for Cal_margin, not 85. Moreover, in the FLCHAIN panel of Figure 3, the green CSD-iPOT calibration violin is below the red dashed KM line, not above it.", "claim": "Table 2 reports CSD-iPOT achieves superior marginal calibration over baselines in 95 of 104 comparisons, and Figure 3’s FLCHAIN panel shows its calibration violin lies entirely below the KM red dashed mean, whereas Baseline and CSD exceed that empirical lower bound.", "label": true }, { "paperid": "2411.00249v1", "paper_path": "./SciVer/papers/2411.00249v1.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00249v1_figure_1.png", "item2_path": "./SciVer/images/2411.00249v1-Table1-1.png", "section": [ "3.3" ], "request_id": 188, "origin_statement": "In Figure 1, G1 (balanced by switching v1) is one of the three balanced variants of G, and in Table 1 G1’s spectrum shows the eigenvalue 4 appears twice (multiplicity 2), whereas G5 (unbalanced) has four distinct eigenvalues 2±√2 and 3±√3.", "perturbed_statement": "In Figure 1, G1 (balanced by switching v1) is one of the three balanced variants of G, and in Table 1 G1’s spectrum shows the eigenvalue 4 appears three times (multiplicity 3), whereas G5 (unbalanced) has only two distinct eigenvalues 2±√2 and 3±√3.", "perturbed_explanation": "The perturbation is incorrect because Table 1 lists G1’s eigenvalues as {0, 2, 4, 4}, showing eigenvalue 4 with multiplicity 2, not 3. 
Moreover, G5’s eigenvalues in Table 1 are 2–√2, 3–√3, 2+√2, and 3+√3 (four distinct values), not just two.", "claim": "In Figure 1, G1 (balanced by switching v1) is one of the three balanced variants of G, and in Table 1 G1’s spectrum shows the eigenvalue 4 appears twice (multiplicity 2), whereas G5 (unbalanced) has four distinct eigenvalues 2±√2 and 3±√3.", "label": true }, { "paperid": "2409.12428v1", "paper_path": "./SciVer/papers/2409.12428v1.json", "claim_type": "parallel", "item1": "1", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.12428v1_figure_1.png", "item2_path": "./SciVer/images/2409.12428v1_figure_5.png", "section": [ "5.1" ], "request_id": 189, "origin_statement": "In the NWF dataset, the unprivileged group’s LAST_LOGIN drift rises from 0.32 (LBCvsPC) to 0.60 (LBCvsPeC), while in the BAF dataset under DIR+XGB, the overall DCD for WGEI is +0.023 linked to 2.32% unfairness.", "perturbed_statement": "In the NWF dataset, the unprivileged group’s LAST_LOGIN drift rises from 0.28 (LBCvsPC) to 0.60 (LBCvsPeC), while in the BAF dataset under DIR+XGB, the overall DCD for WGEI is +0.023 linked to 3.32% unfairness.", "perturbed_explanation": "The statement is incorrect because in Figure 1 LAST_LOGIN drift for the unprivileged group from LBCvsPC is 0.32, not 0.28. Moreover, in Figure 5 the WGEI unfairness is 2.32%, not 3.32%.", "claim": "In the NWF dataset, the unprivileged group’s LAST_LOGIN drift rises from 0.32 (LBCvsPC) to 0.60 (LBCvsPeC), while in the BAF dataset under DIR+XGB, the overall DCD for WGEI is +0.023 linked to 2.32% unfairness.", "label": true }, { "paperid": "2411.17977v1", "paper_path": "./SciVer/papers/2411.17977v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17977v1_figure_4.png", "item2_path": "./SciVer/images/2411.17977v1-Table3-1.png", "section": [ "3" ], "request_id": 190, "origin_statement": "At 0.15 GHz, the in-situ form’s ~350 mJy is ~75% higher than the power-law’s ~200 mJy, and in the DM-only fits the NFW b-channel yields the minimum BIC=23.74, just 0.02 lower than τ-channel’s 23.76.", "perturbed_statement": "At 0.15 GHz, the in-situ form’s ~300 mJy is ~50% higher than the power-law’s ~200 mJy, and in the DM-only fits the NFW b-channel yields the minimum BIC=23.84, 0.08 lower than τ-channel’s 23.76.", "perturbed_explanation": "The image shows the in-situ form predicts ~350 mJy at 0.15 GHz, not ~300 mJy, and Table 3 lists the NFW b-channel BIC as 23.74, not 23.84.", "claim": "At 0.15 GHz, the in-situ form’s ~350 mJy is ~75% higher than the power-law’s ~200 mJy, and in the DM-only fits the NFW b-channel yields the minimum BIC=23.74, just 0.02 lower than τ-channel’s 23.76.", "label": true }, { "paperid": "2411.03177v1", "paper_path": "./SciVer/papers/2411.03177v1.json", "claim_type": "parallel", "item1": "2", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03177v1_figure_2.png", "item2_path": "./SciVer/images/2411.03177v1_figure_7.png", "section": [ "3.3" ], "request_id": 196, "origin_statement": "At 128px size, the cosine-weighted model in Fig.2 produces a visually sharp golden retriever compared to the blurred uniform-weighted version, and Fig.7 shows setting the classifier guidance scale to ~1.5 reduces FID@512 from about 15.5 down to roughly 3.8.", "perturbed_statement": "At 128px size, the cosine-weighted model in Fig.2 produces a visually sharp golden retriever 
compared to the blurred uniform-weighted version, and Fig.7 shows setting the classifier guidance scale to ~1.5 reduces FID@512 from about 15.5 down to roughly 2.1.", "perturbed_explanation": "The perturbed statement misreports FID@512: Fig.7’s middle plot shows the minimum FID@512 at guidance scale ~1.5 is about 3.8, not 2.1 (2.1 is the lowest FID@256 in the rightmost plot).", "claim": "At 128px size, the cosine-weighted model in Fig.2 produces a visually sharp golden retriever compared to the blurred uniform-weighted version, and Fig.7 shows setting the classifier guidance scale to ~1.5 reduces FID@512 from about 15.5 down to roughly 2.1.", "label": false }, { "paperid": "2410.07836v3", "paper_path": "./SciVer/papers/2410.07836v3.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.07836v3_figure_5.png", "item2_path": "./SciVer/images/2410.07836v3_figure_6.png", "section": [ "5" ], "request_id": 200, "origin_statement": "By t=48 in the Atari Freeway task, GIT-STORM’s imagined cars remain sharply aligned with the horizontal lane markings, whereas STORM’s predictions exhibit noticeable blur and an offset of about two pixels; similarly, in DMC’s Hopper Run at t=48, GIT-STORM maintains accurate joint positions, while STORM shows a vertical shift.", "perturbed_statement": "By t=48 in the Atari Freeway task, STORM’s imagined cars remain sharply aligned with the horizontal lane markings, whereas GIT-STORM’s predictions exhibit noticeable blur and an offset of about two pixels; similarly, in DMC’s Hopper Run at t=48, STORM maintains accurate joint positions, while GIT-STORM shows a vertical shift.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows that GIT-STORM’s Freeway frames stay sharp and aligned, while STORM’s frames are blurred and offset. Likewise, Figure 6 demonstrates that GIT-STORM’s Hopper Run predictions match the ground truth joint positions, whereas STORM’s are shifted vertically.", "claim": "By t=48 in the Atari Freeway task, STORM’s imagined cars remain sharply aligned with the horizontal lane markings, whereas GIT-STORM’s predictions exhibit noticeable blur and an offset of about two pixels; similarly, in DMC’s Hopper Run at t=48, STORM maintains accurate joint positions, while GIT-STORM shows a vertical shift.", "label": false }, { "paperid": "2409.13394v1", "paper_path": "./SciVer/papers/2409.13394v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.13394v1_figure_5.png", "item2_path": "./SciVer/images/2409.13394v1_figure_6.png", "section": [ "3.3" ], "request_id": 208, "origin_statement": "The circuit model in Figure 5 shows the inlet tube resistance Rt,1→∞ isolating the nanopore, with microfluidic resistances Rmf,1 and Rmf,2 roughly equal. In Figure 6, 13 blue (“good”) pores exhibit linear I–V slopes of ~25 nS and phase angles near 0° below 10 Hz, confirming resistive behavior.", "perturbed_statement": "The circuit model in Figure 5 shows the inlet tube resistance Rt,1→∞ isolating the nanopore, with microfluidic resistances Rmf,1 and Rmf,2 roughly equal. 
In Figure 6, 13 blue (“good”) pores exhibit linear I–V slopes of ~30 nS and phase angles near 0° below 10 Hz, confirming resistive behavior.", "perturbed_explanation": "Figure 6’s I–V plots and conductance histogram clearly indicate the average conductance of the 13 good pores is ~25 nS (not 30 nS), so stating ~30 nS contradicts the measured data.", "claim": "The circuit model in Figure 5 shows the inlet tube resistance Rt,1→∞ isolating the nanopore, with microfluidic resistances Rmf,1 and Rmf,2 roughly equal. In Figure 6, 13 blue (“good”) pores exhibit linear I–V slopes of ~30 nS and phase angles near 0° below 10 Hz, confirming resistive behavior.", "label": false }, { "paperid": "2411.14318v1", "paper_path": "./SciVer/papers/2411.14318v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.14318v1-Table3-1.png", "item2_path": "./SciVer/images/2411.14318v1-Table4-1.png", "section": [ "3.4" ], "request_id": 212, "origin_statement": "CodeLlama-Reweighted achieves the highest SAT performance at 62.5, but CodeLlama-Velocitune surpasses it in coding avg with 39.3. Meanwhile, Llama-3-Velocitune boosts CmdGen-NVIDIA accuracy to 61.95, 5.85 points above Llama-3-Reweighted’s 56.10.", "perturbed_statement": "CodeLlama-Reweighted achieves the highest SAT performance at 56.2, but CodeLlama-Velocitune surpasses it in coding avg with 39.3. Meanwhile, Llama-3-Velocitune boosts CmdGen-NVIDIA accuracy to 61.95, 5.85 points above Llama-3-Reweighted’s 56.10.", "perturbed_explanation": "The SAT score for CodeLlama-Reweighted is incorrectly stated as 56.2; Table 3 shows it is actually 62.5, so the perturbed SAT detail contradicts the source data.", "claim": "CodeLlama-Reweighted achieves the highest SAT performance at 56.2, but CodeLlama-Velocitune surpasses it in coding avg with 39.3. 
Meanwhile, Llama-3-Velocitune boosts CmdGen-NVIDIA accuracy to 61.95, 5.85 points above Llama-3-Reweighted’s 56.10.", "label": false }, { "paperid": "2411.11677v1", "paper_path": "./SciVer/papers/2411.11677v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11677v1_figure_4.png", "item2_path": "./SciVer/images/2411.11677v1_figure_5.png", "section": [ "5.5" ], "request_id": 216, "origin_statement": "Increasing query budgets and recommendation list lengths yield mutual gains: NARM’s N@10 climbs from ~0.58 at 1k queries to ~0.625 at 5k, while SASRec’s Agr@10 rises from ~0.72 at list length 50 to ~0.74 at length 200.", "perturbed_statement": "Increasing query budgets and recommendation list lengths yield mutual gains: NARM’s N@10 climbs from ~0.58 at 1k queries to ~0.625 at 5k, while SASRec’s Agr@10 rises from ~0.72 at list length 50 to ~0.76 at length 200.", "perturbed_explanation": "Figure 5 (right) shows that SASRec’s Agr@10 at a recommendation list length of 200 is approximately 0.74, not 0.76 as stated, so the perturbed final Agr@10 value contradicts the data.", "claim": "Increasing query budgets and recommendation list lengths yield mutual gains: NARM’s N@10 climbs from ~0.58 at 1k queries to ~0.625 at 5k, while SASRec’s Agr@10 rises from ~0.72 at list length 50 to ~0.76 at length 200.", "label": false }, { "paperid": "2411.09585v1", "paper_path": "./SciVer/papers/2411.09585v1.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09585v1_figure_3.png", "item2_path": "./SciVer/images/2411.09585v1-Table3-1.png", "section": [ "4.2" ], "request_id": 224, "origin_statement": "D3’s CIFAR-10 fine-tuning only increases training time by ≈20% over vanilla, while with a 5% reserved dataset it reduces BadNets ASR to 0.74% at 90.77% ACC.", "perturbed_statement": "D3’s CIFAR-10 fine-tuning only increases training time by ≈40% over vanilla, while with a 5% reserved dataset it reduces BadNets ASR to 1.20% at 90.77% ACC.", "perturbed_explanation": "The perturbation is incorrect because Figure 3 shows D3’s scaled training time on CIFAR-10 is about 1.2× (≈20% overhead), not 1.4× (≈40%), and Table 3 reports a BadNets ASR of 0.74% at 5% reserved size, not 1.20%.", "claim": "D3’s CIFAR-10 fine-tuning only increases training time by ≈40% over vanilla, while with a 5% reserved dataset it reduces BadNets ASR to 1.20% at 90.77% ACC.", "label": false }, { "paperid": "2411.06786v1", "paper_path": "./SciVer/papers/2411.06786v1.json", "claim_type": "parallel", "item1": "1", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06786v1_figure_1.png", "item2_path": "./SciVer/images/2411.06786v1_figure_5(c).png", "section": [ "1" ], "request_id": 225, "origin_statement": "In ScaleKD’s dual-view feature mimicking (Figure 1b), the direct DCT component is filtered, critical as BEiT-L/14 (Figure 5) shows a zero-frequency response of ≈2300, roughly 4× larger than the ≈550 non-zero frequencies, thus exposing subtle lower-frequency features.", "perturbed_statement": "In ScaleKD’s dual-view feature mimicking (Figure 1b), the direct DCT component is filtered, critical as BEiT-L/14 (Figure 5) shows a zero-frequency response of ≈2300, roughly 15× larger than the ≈150 non-zero frequencies, thus exposing subtle lower-frequency features.", "perturbed_explanation": "The perturbation misstates the non-zero 
frequency magnitude and its ratio to the zero-frequency component. Figure 5 shows non-zero frequencies averaging around ≈550 (not ≈150), making the actual ratio closer to 4× rather than 15×.", "claim": "In ScaleKD’s dual-view feature mimicking (Figure 1b), the direct DCT component is filtered, critical as BEiT-L/14 (Figure 5) shows a zero-frequency response of ≈2300, roughly 15× larger than the ≈150 non-zero frequencies, thus exposing subtle lower-frequency features.", "label": false }, { "paperid": "2411.12355v1", "paper_path": "./SciVer/papers/2411.12355v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.12355v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.12355v1_figure_6.png", "section": [ "4.5" ], "request_id": 230, "origin_statement": "On MSVD-QA, increasing the filtered prototype ratio L from 0.4 to 0.8 raises MSVD accuracy from about 63% to 68%, while DynFocus processes MSVD-QA with roughly 650 tokens—about 4.8× fewer than VideoLLAVA’s ~3100 tokens.", "perturbed_statement": "On MSVD-QA, increasing the filtered prototype ratio L from 0.4 to 0.8 raises MSVD accuracy from about 63% to 75%, while DynFocus processes MSVD-QA with roughly 650 tokens—about 4.8× fewer than VideoLLAVA’s ~3100 tokens.", "perturbed_explanation": "The perturbed statement overestimates the MSVD accuracy at L=0.8: Figure 5(a) shows it peaks around 68%, not 75%, so the claimed jump to 75% contradicts the actual data.", "claim": "On MSVD-QA, increasing the filtered prototype ratio L from 0.4 to 0.8 raises MSVD accuracy from about 63% to 75%, while DynFocus processes MSVD-QA with roughly 650 tokens—about 4.8× fewer than VideoLLAVA’s ~3100 tokens.", "label": false }, { "paperid": "2411.17181v2", "paper_path": "./SciVer/papers/2411.17181v2.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17181v2_figure_5.png", "item2_path": "./SciVer/images/2411.17181v2-Table3-1.png", "section": [ "5", "1", "4.5" ], "request_id": 232, "origin_statement": "WGSS’s Rouge-LCS on Kaggle (0.48) exceeds BenSumm’s by 0.22 (0.26), as seen in Table 3 and depicted by the blue line extending further than the red dashed line on the Kaggle axis in Fig. 5(c).", "perturbed_statement": "WGSS’s Rouge-LCS on Kaggle (0.52) exceeds BenSumm’s by 0.26 (0.26), as seen in Table 3 and depicted by the blue line extending further than the red dashed line on the Kaggle axis in Fig. 5(c).", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 reports WGSS’s Rouge-LCS on Kaggle as 0.48 (not 0.52), and the radar chart in Fig. 5(c) shows the blue WGSS value at 0.48 on the Kaggle axis, not 0.52.", "claim": "WGSS’s Rouge-LCS on Kaggle (0.52) exceeds BenSumm’s by 0.26 (0.26), as seen in Table 3 and depicted by the blue line extending further than the red dashed line on the Kaggle axis in Fig. 
5(c).", "label": false }, { "paperid": "2411.07825v1", "paper_path": "./SciVer/papers/2411.07825v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07825v1_figure_1(a).png", "item2_path": "./SciVer/images/2411.07825v1_figure_1(b).png", "section": [ "5" ], "request_id": 240, "origin_statement": "At iteration 4, ||P_i−P_{i−1}|| peaks at about 85 while ||K_i−K_{i−1}|| peaks near 3.1, before both decline steadily to near zero by iteration 10.", "perturbed_statement": "At iteration 5, ||P_i−P_{i−1}|| peaks at about 75 while ||K_i−K_{i−1}|| peaks near 3.1, before both decline steadily to near zero by iteration 10.", "perturbed_explanation": "This statement is incorrect because the maximal ||P_i−P_{i−1}|| and ||K_i−K_{i−1}|| actually occur at iteration 4 (indicated by the dashed vertical line), with P’s peak value ≈85, not iteration 5 and value 75.", "claim": "At iteration 5, ||P_i−P_{i−1}|| peaks at about 75 while ||K_i−K_{i−1}|| peaks near 3.1, before both decline steadily to near zero by iteration 10.", "label": false }, { "paperid": "2409.08522v1", "paper_path": "./SciVer/papers/2409.08522v1.json", "claim_type": "parallel", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.08522v1_figure_5.png", "item2_path": "./SciVer/images/2409.08522v1-Table2-1.png", "section": [ "5" ], "request_id": 245, "origin_statement": "The HTX output in Figure 5b attributes a high word_count reliability score (0.8) to the GossipCop example, consistent with GossipCop’s large document count of 20,645 in Table 2 compared to 642 and 1,626 in PolitiFact and FakeHealth.", "perturbed_statement": "The HTX output in Figure 5b attributes a high word_count reliability score (0.8) to the GossipCop example, consistent with GossipCop’s small document count of 1,626 in Table 2 compared to 642 and 20,645 in PolitiFact and FakeHealth.", "perturbed_explanation": "Table 2 shows that GossipCop actually contains 20,645 documents, not 1,626; the document count of 1,626 belongs to FakeHealth, so the perturbed statement's figure for GossipCop contradicts the table.", "claim": "The HTX output in Figure 5b attributes a high word_count reliability score (0.8) to the GossipCop example, consistent with GossipCop’s small document count of 1,626 in Table 2 compared to 642 and 20,645 in PolitiFact and FakeHealth.", "label": false }, { "paperid": "2411.06965v1", "paper_path": "./SciVer/papers/2411.06965v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06965v1_figure_1.png", "item2_path": "./SciVer/images/2411.06965v1_figure_2.png", "section": [ "1" ], "request_id": 249, "origin_statement": "The synergy of WQDIL’s Wasserstein adversarial training and Single-Step Archive Exploration yields Humanoid behaviors with left-leg contact varying from 56% to 81% and Walker2d behaviors with right-leg contact from 6% to 62%, showcasing broad behavioral diversity under limited demonstrations.", "perturbed_statement": "The synergy of WQDIL’s Wasserstein adversarial training and Single-Step Archive Exploration yields Humanoid behaviors with left-leg contact varying from 56% to 81% and Walker2d behaviors with right-leg contact from 8% to 68%, showcasing broad behavioral diversity under limited demonstrations.", "perturbed_explanation": "The perturbed statement incorrectly reports Walker2d’s right-leg contact range 
as 8% to 68%, whereas the actual values from Figure 2 are 6% for one behavior and 62% for the other.", "claim": "The synergy of WQDIL’s Wasserstein adversarial training and Single-Step Archive Exploration yields Humanoid behaviors with left-leg contact varying from 56% to 81% and Walker2d behaviors with right-leg contact from 8% to 68%, showcasing broad behavioral diversity under limited demonstrations.", "label": false }, { "paperid": "2410.22534v1", "paper_path": "./SciVer/papers/2410.22534v1.json", "claim_type": "parallel", "item1": "1(c)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22534v1_figure_1(c).png", "item2_path": "./SciVer/images/2410.22534v1-Table4-1.png", "section": [ "5" ], "request_id": 253, "origin_statement": "Increasing the number of latent classes from G=2 to G=3 reduces LOOIC by 55 (from 4118 to 4063), and the G=3 trajectories reveal a tiny subgroup 3 (only three individuals) with standardized normMMSE plunging below −3 by age65≈3.", "perturbed_statement": "Increasing the number of latent classes from G=2 to G=3 reduces LOOIC by 75 (from 4118 to 4063), and the G=3 trajectories reveal a tiny subgroup 3 (only three individuals) with standardized normMMSE plunging below −3 by age65≈3.", "perturbed_explanation": "The perturbed statement wrongly claims a 75-point drop in LOOIC, but Table 4 shows LOOIC decreases from 4118 to 4063, a 55-point drop, not 75.", "claim": "Increasing the number of latent classes from G=2 to G=3 reduces LOOIC by 75 (from 4118 to 4063), and the G=3 trajectories reveal a tiny subgroup 3 (only three individuals) with standardized normMMSE plunging below −3 by age65≈3.", "label": false }, { "paperid": "2410.09850v1", "paper_path": "./SciVer/papers/2410.09850v1.json", "claim_type": "parallel", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.09850v1_figure_4.png", "item2_path": "./SciVer/images/2410.09850v1-Table1-1.png", "section": [ "4.3.1" ], "request_id": 254, "origin_statement": "In Geometric Brownian motion type 2, the MLP-generated median log return at length 25 (~0.07) slightly exceeds the original median (~0.06), and for Ornstein–Uhlenbeck process type 2 at length 25, the MLP generator’s reverting speed (3.409) nearly matches the original speed (3.481).", "perturbed_statement": "In Geometric Brownian motion type 2, the MLP-generated median log return at length 25 (~0.08) slightly exceeds the original median (~0.06), and for Ornstein–Uhlenbeck process type 2 at length 25, the MLP generator’s reverting speed (2.409) nearly matches the original speed (3.481).", "perturbed_explanation": "The perturbation is incorrect because Figure 4 shows the MLP-generated median log return at length 25 for GBM type 2 is about 0.07, not 0.08. 
Additionally, Table 1 lists the MLP reverting speed for OU type 2 at length 25 as 3.409, not 2.409.", "claim": "In Geometric Brownian motion type 2, the MLP-generated median log return at length 25 (~0.08) slightly exceeds the original median (~0.06), and for Ornstein–Uhlenbeck process type 2 at length 25, the MLP generator’s reverting speed (2.409) nearly matches the original speed (3.481).", "label": false }, { "paperid": "2410.14148v3", "paper_path": "./SciVer/papers/2410.14148v3.json", "claim_type": "parallel", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.14148v3_figure_3.png", "item2_path": "./SciVer/images/2410.14148v3-Table1-1.png", "section": [ "3" ], "request_id": 261, "origin_statement": "FiSAO achieves a token-level reward of 6.3—ten times above its 0.61 baseline—while requiring no reward model, additional data, or GPT-assisted support, unlike VLfeedback, Human-Preference, and POVID.", "perturbed_statement": "FiSAO achieves a token-level reward of 6.3—ten times above its 0.45 baseline—while requiring GPT-assisted support despite using no reward model or additional data.", "perturbed_explanation": "The perturbed statement is incorrect because the baseline average reward shown in Figure 3 is 0.61, not 0.45. Additionally, Table 1 indicates that FiSAO does not use GPT-assisted support (✗ under GPT-Assisted), so claiming it requires GPT assistance contradicts the table.", "claim": "FiSAO achieves a token-level reward of 6.3—ten times above its 0.45 baseline—while requiring GPT-assisted support despite using no reward model or additional data.", "label": false }, { "paperid": "2409.11212v1", "paper_path": "./SciVer/papers/2409.11212v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.11212v1-Table1-1.png", "item2_path": "./SciVer/images/2409.11212v1-Table2-1.png", "section": [ "4.1" ], "request_id": 277, "origin_statement": "Zephyr-7B-UPO achieves the highest AlpacaEval 2.0 score of 13.04% and MT-bench score of 7.02, while Zephyr-7B-UPO-Iter2 yields the peak head-to-head win rate of 71.53% with an average response length of 1148 tokens.", "perturbed_statement": "Zephyr-7B-UPO achieves the highest AlpacaEval 2.0 score of 13.04% and MT-bench score of 7.12, while Zephyr-7B-UPO-Iter2 yields the peak head-to-head win rate of 71.53% with an average response length of 1200 tokens.", "perturbed_explanation": "The perturbation is incorrect because the MT-bench score for Zephyr-7B-UPO is actually 7.02 (not 7.12) according to Table 1, and the average response length for Zephyr-7B-UPO-Iter2 is 1148 tokens (not 1200) as shown in Table 2.", "claim": "Zephyr-7B-UPO achieves the highest AlpacaEval 2.0 score of 13.04% and MT-bench score of 7.12, while Zephyr-7B-UPO-Iter2 yields the peak head-to-head win rate of 71.53% with an average response length of 1200 tokens.", "label": false }, { "paperid": "2411.09702v1", "paper_path": "./SciVer/papers/2411.09702v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09702v1_figure_5.png", "item2_path": "./SciVer/images/2411.09702v1_figure_6.png", "section": [ "1", "4.2" ], "request_id": 281, "origin_statement": "Attention Distillation’s layer-30 CKA similarity to the fine-tuned model is about 0.9 (Figure 5), yet ensembling it with the fine-tuned MAE yields only a +0.7% boost to 86.3% accuracy, compared to scratch’s 
+2.7% uplift to 85.7% (Figure 6).", "perturbed_statement": "Attention Distillation’s layer-30 CKA similarity to the fine-tuned model is about 0.6 (Figure 5), yet ensembling it with the fine-tuned MAE yields only a +0.7% boost to 86.3% accuracy, compared to scratch’s +2.7% uplift to 85.7% (Figure 6).", "perturbed_explanation": "Figure 5 shows that the layer-30 CKA value for Attention Distillation is approximately 0.9, not 0.6, so stating 0.6 contradicts the plotted similarity at the final layer.", "claim": "Attention Distillation’s layer-30 CKA similarity to the fine-tuned model is about 0.6 (Figure 5), yet ensembling it with the fine-tuned MAE yields only a +0.7% boost to 86.3% accuracy, compared to scratch’s +2.7% uplift to 85.7% (Figure 6).", "label": false }, { "paperid": "2410.09349v1", "paper_path": "./SciVer/papers/2410.09349v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09349v1_figure_3.png", "item2_path": "./SciVer/images/2410.09349v1_figure_4.png", "section": [ "3.1" ], "request_id": 296, "origin_statement": "In Gemma-7b, interventions at layer 20 on MultiNLI with the yes/no label space achieve peak flip rates of approximately 1.0, while on IMDb the same layer yields about 0.85 flip rate for the good/bad label space.", "perturbed_statement": "In Gemma-7b, interventions at layer 20 on MultiNLI with the yes/no label space achieve peak flip rates of approximately 1.0, while on IMDb the same layer yields about 0.95 flip rate for the good/bad label space.", "perturbed_explanation": "This is incorrect because Figure 4 shows the flip rate for the good/bad label space at layer 20 on IMDb is around 0.85, not 0.95.", "claim": "In Gemma-7b, interventions at layer 20 on MultiNLI with the yes/no label space achieve peak flip rates of approximately 1.0, while on IMDb the same layer yields about 0.95 flip rate for the good/bad label space.", "label": false }, { "paperid": "2410.09635v1", "paper_path": "./SciVer/papers/2410.09635v1.json", "claim_type": "parallel", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.09635v1_figure_6.png", "item2_path": "./SciVer/images/2410.09635v1-Table2-1.png", "section": [ "4.4" ], "request_id": 299, "origin_statement": "With an AUROC of 0.759 (Table 2), AIMEN with MLP_v5 achieves approximately 0.62 sensitivity at a 20% false positive rate, compared to a 20% sensitivity of a random classifier at the same FPR (Fig 6a).", "perturbed_statement": "With an AUROC of 0.859 (Table 2), AIMEN with MLP_v5 achieves approximately 0.62 sensitivity at a 20% false positive rate, compared to a 20% sensitivity of a random classifier at the same FPR (Fig 6a).", "perturbed_explanation": "The perturbed statement incorrectly increases the AUROC from 0.759 to 0.859, but Table 2 shows the true AUROC for MLP_v5 is 0.759, not 0.859.", "claim": "With an AUROC of 0.859 (Table 2), AIMEN with MLP_v5 achieves approximately 0.62 sensitivity at a 20% false positive rate, compared to a 20% sensitivity of a random classifier at the same FPR (Fig 6a).", "label": false }, { "paperid": "2409.01887v1", "paper_path": "./SciVer/papers/2409.01887v1.json", "claim_type": "parallel", "item1": "8", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01887v1_figure_8.png", "item2_path": "./SciVer/images/2409.01887v1-Table5-1.png", "section": [ "4.2" ], "request_id": 303, "origin_statement": "The number of 
dangling domains in the 400k–600k Tranco ranking bucket (330) exceeds the number of DVA-uncovered domain-borrowing CDN providers (24) by more than 13-fold.", "perturbed_statement": "The number of dangling domains in the 400k–600k Tranco ranking bucket (330) exceeds the number of DVA-uncovered domain-borrowing CDN providers (24) by more than 20-fold.", "perturbed_explanation": "The perturbed claim is incorrect because 330 dangling domains divided by 24 vulnerable CDNs yields about a 13.75-fold difference, not over 20-fold as stated.", "claim": "The number of dangling domains in the 400k–600k Tranco ranking bucket (330) exceeds the number of DVA-uncovered domain-borrowing CDN providers (24) by more than 20-fold.", "label": false }, { "paperid": "2411.14914v1", "paper_path": "./SciVer/papers/2411.14914v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14914v1_figure_4.png", "item2_path": "./SciVer/images/2411.14914v1_figure_5.png", "section": [ "4.3" ], "request_id": 306, "origin_statement": "In Q4 tasks, Zephyr generates roughly 360 terms per query—double the expert-crafted average of about 180—and uses around 95 PubMed search fields, exceeding the expert average of 78 fields per query.", "perturbed_statement": "In Q4 tasks, Zephyr generates roughly 400 terms per query—double the expert-crafted average of about 180—and uses around 70 PubMed search fields, exceeding the expert average of 78 fields per query.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 4 shows Zephyr produces about 360 terms per Q4 query, not 400. Additionally, Figure 5 indicates Zephyr uses roughly 95 search fields, not 70, and 70 would fall below the expert-crafted average of 78 fields rather than exceed it.", "claim": "In Q4 tasks, Zephyr generates roughly 400 terms per query—double the expert-crafted average of about 180—and uses around 70 PubMed search fields, exceeding the expert average of 78 fields per query.", "label": false }, { "paperid": "2411.15173v1", "paper_path": "./SciVer/papers/2411.15173v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15173v1_figure_2(b).png", "item2_path": "./SciVer/images/2411.15173v1_figure_2(d).png", "section": [ "4.1" ], "request_id": 310, "origin_statement": "The t-SNE plot of adapted model features (b) shows two classes with considerable overlap, while high-frequency representations in (d) produce four distinctly separated clusters corresponding to C2–C5, demonstrating that high-frequency features improve domain separability across four centers compared to model features for two classes.", "perturbed_statement": "The t-SNE plot of adapted model features (b) shows two classes with considerable overlap, while high-frequency representations in (d) produce five distinctly separated clusters corresponding to C2–C5, demonstrating that high-frequency features improve domain separability across five centers compared to model features for two classes.", "perturbed_explanation": "The perturbed statement incorrectly claims there are five clusters and centers. 
Figure 2(d) depicts only four distinct clusters (for centers C2, C3, C4, and C5), not five, so both the number of clusters and centers are misstated.", "claim": "The t-SNE plot of adapted model features (b) shows two classes with considerable overlap, while high-frequency representations in (d) produce five distinctly separated clusters corresponding to C2–C5, demonstrating that high-frequency features improve domain separability across five centers compared to model features for two classes.", "label": false }, { "paperid": "2410.21088v1", "paper_path": "./SciVer/papers/2410.21088v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21088v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.21088v1_figure_5(b).png", "section": [ "5.4", "1" ], "request_id": 312, "origin_statement": "At t=0.2T, Shallow Diffuse achieves its highest consistency (PSNR ≈29.2 dB and SSIM ≈0.83) and near-peak robustness (AUC ≈0.977, accuracy ≈95.5%), outperforming the metrics at t=1.0T where PSNR falls to ≈28.6 dB and AUC to ≈0.968.", "perturbed_statement": "At t=0.2T, Shallow Diffuse achieves its highest consistency (PSNR ≈30.2 dB and SSIM ≈0.83) and near-peak robustness (AUC ≈0.977, accuracy ≈95.5%), outperforming the metrics at t=1.0T where PSNR falls to ≈28.0 dB and AUC to ≈0.968.", "perturbed_explanation": "The perturbation is incorrect because Figure 5(a) reports PSNR ≈29.2 dB at t=0.2T (not 30.2 dB) and ≈28.6 dB at t=1.0T (not 28.0 dB), contradicting the shown consistency curves.", "claim": "At t=0.2T, Shallow Diffuse achieves its highest consistency (PSNR ≈30.2 dB and SSIM ≈0.83) and near-peak robustness (AUC ≈0.977, accuracy ≈95.5%), outperforming the metrics at t=1.0T where PSNR falls to ≈28.0 dB and AUC to ≈0.968.", "label": false }, { "paperid": "2409.01490v1", "paper_path": "./SciVer/papers/2409.01490v1.json", "claim_type": "parallel", "item1": "6", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01490v1_figure_6.png", "item2_path": "./SciVer/images/2409.01490v1-Table4-1.png", "section": [ "4.2" ], "request_id": 313, "origin_statement": "Figure 6’s five-revolution trajectory reaches Dionysus in 3534 days, and Table 4 shows that with STM enabled, L2 smoothing in modified equinoctial elements converges 72% of trials—32 points more than Cartesian L2 with STM (40%).", "perturbed_statement": "Figure 6’s five-revolution trajectory reaches Dionysus in 3534 days, and Table 4 shows that with STM enabled, L2 smoothing in modified equinoctial elements converges 72% of trials—32 points more than Cartesian L2 with STM (45%).", "perturbed_explanation": "The perturbed statement wrongly lists the convergence rate for Cartesian L2 with STM as 45%, whereas Table 4 actually reports it as 40%, making the stated 32-point difference incorrect.", "claim": "Figure 6’s five-revolution trajectory reaches Dionysus in 3534 days, and Table 4 shows that with STM enabled, L2 smoothing in modified equinoctial elements converges 72% of trials—32 points more than Cartesian L2 with STM (45%).", "label": false }, { "paperid": "2410.21350v1", "paper_path": "./SciVer/papers/2410.21350v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.21350v1-Table4-1.png", "item2_path": "./SciVer/images/2410.21350v1-Table5-1.png", "section": [ "4.3" ], "request_id": 314, "origin_statement": "The coefficient of 
variation of the secondary damping ratio ζ_s is 0.5, which is five times that of the secondary mass m_s (0.1), while the enhanced SDIS method achieves a relative efficiency of 27.92, about 34% higher than SuS’s 20.89.", "perturbed_statement": "The coefficient of variation of the secondary damping ratio ζ_s is 0.3, which is three times that of the secondary mass m_s (0.1), while the enhanced SDIS method achieves a relative efficiency of 22.5, about 7.6% higher than SuS’s 20.89.", "perturbed_explanation": "This statement is incorrect because Table 4 lists the COV of ζ_s as 0.5 (not 0.3), and Table 5 shows the relEff of enhanced SDIS is 27.92 (not 22.5).", "claim": "The coefficient of variation of the secondary damping ratio ζ_s is 0.3, which is three times that of the secondary mass m_s (0.1), while the enhanced SDIS method achieves a relative efficiency of 22.5, about 7.6% higher than SuS’s 20.89.", "label": false }, { "paperid": "2409.08158v1", "paper_path": "./SciVer/papers/2409.08158v1.json", "claim_type": "parallel", "item1": "2", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08158v1_figure_2.png", "item2_path": "./SciVer/images/2409.08158v1_figure_6.png", "section": [ "2" ], "request_id": 316, "origin_statement": "The highest density in the social cost histogram occurs at the $25–50/tC bin (~0.19), while the USA leads with about 150 SCC papers—more than double the UK's ~60 contributions.", "perturbed_statement": "The highest density in the social cost histogram occurs at the $50–75/tC bin (~0.19), while the USA leads with about 200 SCC papers—nearly triple the UK's ~60.", "perturbed_explanation": "This statement is incorrect because the histogram’s peak density is in the $25–50/tC bin, not $50–75/tC, and the USA authored about 150 papers (not 200), which is roughly 2.5 times, not three times, the UK's count.", "claim": "The highest density in the social cost histogram occurs at the $50–75/tC bin (~0.19), while the USA leads with about 200 SCC papers—nearly triple the UK's ~60.", "label": false }, { "paperid": "2410.15712v1", "paper_path": "./SciVer/papers/2410.15712v1.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.15712v1_figure_4.png", "item2_path": "./SciVer/images/2410.15712v1-Table2-1.png", "section": [ "4", "4.2" ], "request_id": 320, "origin_statement": "Figure 4 shows dense [S II] 5σ detections covering the bow-shock’s extent, corresponding to Ser OB2’s estimated bow-shock area of 719 pc² in Table 2—over 17× larger than Ser OB1B’s 42 pc² photoionized region.", "perturbed_statement": "Figure 4 shows dense [S II] 5σ detections covering the bow-shock’s extent, corresponding to Sct OB3’s estimated bow-shock area of 1179 pc² in Table 2—over 28× larger than Ser OB1B’s 42 pc² photoionized region.", "perturbed_explanation": "Table 2 lists Sct OB3’s bow-shock area A as 550 pc², not 1179 pc². 
The value 1179 pc corresponds to its photoionization radius r₍d₌2.5 kpc₎, so quoting 1179 pc² for area (and deriving 28×) contradicts the tabulated A=550 pc² and the correct 13× ratio to Ser OB1B.", "claim": "Figure 4 shows dense [S II] 5σ detections covering the bow-shock’s extent, corresponding to Sct OB3’s estimated bow-shock area of 1179 pc² in Table 2—over 28× larger than Ser OB1B’s 42 pc² photoionized region.", "label": false }, { "paperid": "2409.11428v1", "paper_path": "./SciVer/papers/2409.11428v1.json", "claim_type": "parallel", "item1": "6(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11428v1_figure_6(a).png", "item2_path": "./SciVer/images/2409.11428v1_figure_6(b).png", "section": [ "5.4" ], "request_id": 323, "origin_statement": "Affinity Propagation selects 3 trap exemplars from a 10-file directory (30%) but 10 traps from a 100-file directory (10%), reducing the trap-to-file ratio by two-thirds when directory size increases tenfold.", "perturbed_statement": "Affinity Propagation selects 3 trap exemplars from a 10-file directory (30%) but 12 traps from a 100-file directory (12%), reducing the trap-to-file ratio by three-quarters when directory size increases tenfold.", "perturbed_explanation": "The perturbed statement wrongly claims 12 traps in the 100-file scenario. The image clearly shows only 10 cluster centers (traps) selected from 100 files, not 12.", "claim": "Affinity Propagation selects 3 trap exemplars from a 10-file directory (30%) but 12 traps from a 100-file directory (12%), reducing the trap-to-file ratio by three-quarters when directory size increases tenfold.", "label": false }, { "paperid": "2410.11566v2", "paper_path": "./SciVer/papers/2410.11566v2.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.11566v2_figure_3.png", "item2_path": "./SciVer/images/2410.11566v2-Table3-1.png", "section": [ "4.3" ], "request_id": 327, "origin_statement": "In Case III, BE achieves an averaged estimation error of 13.19° compared to MEKF’s 16.19° (Table 3), and Fig. 3(c) shows BE’s uncertainty drop from about 45° to under 10° within the first second.", "perturbed_statement": "In Case III, BE achieves an averaged estimation error of 13.19° compared to MEKF’s 16.19° (Table 3), and Fig. 3(c) shows BE’s uncertainty drop from about 45° to under 5° within the first second.", "perturbed_explanation": "The perturbation claims BE’s uncertainty falls below 5°, but Fig. 3(c) indicates BE’s uncertainty only decreases to around 8°–10° by 1 s, not below 5°.", "claim": "In Case III, BE achieves an averaged estimation error of 13.19° compared to MEKF’s 16.19° (Table 3), and Fig. 3(c) shows BE’s uncertainty drop from about 45° to under 5° within the first second.", "label": false }, { "paperid": "2409.12052v3", "paper_path": "./SciVer/papers/2409.12052v3.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.12052v3_figure_2.png", "item2_path": "./SciVer/images/2409.12052v3_figure_3.png", "section": [ "3.2.2" ], "request_id": 330, "origin_statement": "Zr2NS2 has a maximum superconducting gap Δ of 1.74 meV and an electron–phonon coupling λ of 0.71 at the Fermi surface (Fig. 2), whereas its isotropic Eliashberg λ obtained from α²F(ω) up to 400 cm⁻¹ is only ~0.50 (Fig. 
3), indicating ~70% coupling arises from low-frequency modes.", "perturbed_statement": "Zr2NS2 has a maximum superconducting gap Δ of 1.74 meV and an electron–phonon coupling λ of 0.71 at the Fermi surface (Fig. 2), whereas its isotropic Eliashberg λ obtained from α²F(ω) up to 400 cm⁻¹ is only ~0.30 (Fig. 3), indicating ~70% coupling arises from low-frequency modes.", "perturbed_explanation": "The perturbation underestimates the isotropic λ from the Eliashberg spectral function: Fig. 3 shows λ reaches about 0.50 by 400 cm⁻¹, not 0.30, so the stated ~0.30 value contradicts the plotted cumulative λ curve.", "claim": "Zr2NS2 has a maximum superconducting gap Δ of 1.74 meV and an electron–phonon coupling λ of 0.71 at the Fermi surface (Fig. 2), whereas its isotropic Eliashberg λ obtained from α²F(ω) up to 400 cm⁻¹ is only ~0.30 (Fig. 3), indicating ~70% coupling arises from low-frequency modes.", "label": false }, { "paperid": "2411.00311v1", "paper_path": "./SciVer/papers/2411.00311v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.00311v1-Table1-1.png", "item2_path": "./SciVer/images/2411.00311v1-Table2-1.png", "section": [ "4.4" ], "request_id": 336, "origin_statement": "C2A employs 0.097% of parameters in 20Newsgroup yielding 71.6% accuracy at β=5.0, while with just 0.049% parameters in XGLUE-NC it reaches 82.8% accuracy at β=5.0, marking an 11.2-point higher performance despite halving parameter usage.", "perturbed_statement": "C2A employs 0.049% of parameters in 20Newsgroup yielding 71.6% accuracy at β=5.0, while with just 0.097% parameters in XGLUE-NC it reaches 82.8% accuracy at β=5.0, marking an 11.2-point higher performance despite doubling parameter usage.", "perturbed_explanation": "The perturbed statement swaps the parameter percentages: Table 1 reports C2A uses 0.097% parameters on 20Newsgroup (not 0.049%), and Table 2 reports 0.049% on XGLUE-NC (not 0.097%), so the percent values are incorrect.", "claim": "C2A employs 0.049% of parameters in 20Newsgroup yielding 71.6% accuracy at β=5.0, while with just 0.097% parameters in XGLUE-NC it reaches 82.8% accuracy at β=5.0, marking an 11.2-point higher performance despite doubling parameter usage.", "label": false }, { "paperid": "2411.01228v1", "paper_path": "./SciVer/papers/2411.01228v1.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.01228v1-Table2-1.png", "item2_path": "./SciVer/images/2411.01228v1-Table4-1.png", "section": [ "8.3.2" ], "request_id": 343, "origin_statement": "In Table 2, v2’s GEE β for ‘Control over actions’ (0.4521, p<0.001) is roughly 6.8 times higher than v3’s β (0.0666, p=0.573). This magnitude underscores Table 4’s call for dynamic in-situ controls in user AI interactions.", "perturbed_statement": "In Table 2, v2’s GEE β for ‘Control over actions’ (0.4521, p<0.001) is only twice as high as v3’s β (0.2266, p=0.573). This magnitude underscores Table 4’s call for dynamic in-situ controls in user AI interactions.", "perturbed_explanation": "The perturbation is wrong because Table 2 actually reports v3’s β for ‘Control over actions’ as 0.0666, not 0.2266, and the ratio of v2 to v3 is about 6.8, not 2.", "claim": "In Table 2, v2’s GEE β for ‘Control over actions’ (0.4521, p<0.001) is only twice as high as v3’s β (0.2266, p=0.573). 
This magnitude underscores Table 4’s call for dynamic in-situ controls in user AI interactions.", "label": false }, { "paperid": "2411.05322v1", "paper_path": "./SciVer/papers/2411.05322v1.json", "claim_type": "parallel", "item1": "7(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.05322v1_figure_7(a).png", "item2_path": "./SciVer/images/2411.05322v1-Table2-1.png", "section": [ "5.2" ], "request_id": 345, "origin_statement": "On the HumanRF training set, our Ours(High) configuration attains 37.40 dB PSNR at 80.63 KB per frame—about 2.6 dB more than TeTriRF’s 34.84 dB, while using ~14 KB less data.", "perturbed_statement": "On the HumanRF training set, our Ours(High) configuration attains 37.40 dB PSNR at 80.63 KB per frame—about 3.6 dB more than TeTriRF’s 34.84 dB, while using ~34 KB less data.", "perturbed_explanation": "The actual PSNR gap is 37.40 dB−34.84 dB≈2.56 dB (not 3.6 dB), and the bit‐size difference is 94.14 KB−80.63 KB≈13.5 KB (not 34 KB), contradicting the table values.", "claim": "On the HumanRF training set, our Ours(High) configuration attains 37.40 dB PSNR at 80.63 KB per frame—about 3.6 dB more than TeTriRF’s 34.84 dB, while using ~34 KB less data.", "label": false }, { "paperid": "2411.15633v1", "paper_path": "./SciVer/papers/2411.15633v1.json", "claim_type": "parallel", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15633v1_figure_1.png", "item2_path": "./SciVer/images/2411.15633v1_figure_3.png", "section": [ "4.3", "1", "3.1" ], "request_id": 361, "origin_statement": "By 200 iterations, the Xception detector’s fake loss falls to near 0 while real loss remains above 1.0 (~100× fake loss), and t-SNE plots reveal that seen fake samples (orange) form a dense cluster distinct from unseen real images (green) in both Xception and CLIP full-finetune.", "perturbed_statement": "By 200 iterations, the Xception detector’s fake loss remains around 0.70 while real loss drops below 0.05, and t-SNE plots show seen fake samples (orange) intermingling with unseen real images (green) in CLIP full-finetune.", "perturbed_explanation": "The perturbed statement misstates the losses: Figure 1 shows fake loss reaching near zero by iteration 200 and real loss staying above 1.0 (≈100× higher), not fake loss ~0.70 and real loss <0.05. 
It also contradicts Figure 3, where seen fake points form a separate cluster, not intermingle with unseen real samples.", "claim": "By 200 iterations, the Xception detector’s fake loss remains around 0.70 while real loss drops below 0.05, and t-SNE plots show seen fake samples (orange) intermingling with unseen real images (green) in CLIP full-finetune.", "label": false }, { "paperid": "2409.01466v1", "paper_path": "./SciVer/papers/2409.01466v1.json", "claim_type": "parallel", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01466v1_figure_6.png", "item2_path": "./SciVer/images/2409.01466v1-Table2-1.png", "section": [ "4.3" ], "request_id": 363, "origin_statement": "For the attack category, the judge model in few-shot mode (k=6) achieved an F1 score of 0.5856 (Table 2), yet it estimated a Facebook coefficient of approximately –0.10 for attack ads in the pre-filter regression (Figure 6 left panel).", "perturbed_statement": "For the attack category, the judge model in few-shot mode (k=6) achieved an F1 score of 0.5856 (Table 2), yet it estimated a Facebook coefficient of approximately +0.10 for attack ads in the pre-filter regression (Figure 6 left panel).", "perturbed_explanation": "The perturbation claims a positive Facebook coefficient (+0.10) for attack ads, but Figure 6’s left panel shows the judge model’s estimate is negative (around –0.10), directly contradicting the published regression results.", "claim": "For the attack category, the judge model in few-shot mode (k=6) achieved an F1 score of 0.5856 (Table 2), yet it estimated a Facebook coefficient of approximately +0.10 for attack ads in the pre-filter regression (Figure 6 left panel).", "label": false }, { "paperid": "2410.05935v1", "paper_path": "./SciVer/papers/2410.05935v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.05935v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.05935v1_figure_5(b).png", "section": [ "4.2.2" ], "request_id": 364, "origin_statement": "At threshold = 100, Table 2 shows the proposed method outperforms default augmentation by 5 AP points on unseen classes, and in Fig. 5 it correctly detects the right face with a different expression that all baseline methods miss.", "perturbed_statement": "At threshold = 100, Table 2 shows the proposed method outperforms default augmentation by 15 AP points on unseen classes, and in Fig. 5 it correctly detects the right face with a different expression that all baseline methods miss.", "perturbed_explanation": "The perturbation claims a 15-point AP improvement at threshold 100, but Table 2 actually reports only a 5-point gain for unseen classes, so the 15-point figure contradicts the table.", "claim": "At threshold = 100, Table 2 shows the proposed method outperforms default augmentation by 15 AP points on unseen classes, and in Fig. 
5 it correctly detects the right face with a different expression that all baseline methods miss.", "label": false }, { "paperid": "2410.20483v2", "paper_path": "./SciVer/papers/2410.20483v2.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20483v2_figure_4.png", "item2_path": "./SciVer/images/2410.20483v2-Table3-1.png", "section": [ "4.2" ], "request_id": 365, "origin_statement": "The minimum SEVT for query at node 7 is 1 by flipping hyperlipidemia at node 3 to follow the LL path to node 10, compared to SEVT of 2 via LR or LLR paths from node 1.", "perturbed_statement": "The minimum SEVT for query at node 7 is 2 by flipping hyperlipidemia at node 3 to follow the LL path to node 10, compared to SEVT of 1 via LR or LLR paths from node 1.", "perturbed_explanation": "This is incorrect because Table 3 and Figure 4 show that flipping hyperlipidemia at node 3 along the LL path to node 10 yields SEVT=1 (not 2), and both the LR and LLR paths from node 1 yield SEVT=2 (not 1).", "claim": "The minimum SEVT for query at node 7 is 2 by flipping hyperlipidemia at node 3 to follow the LL path to node 10, compared to SEVT of 1 via LR or LLR paths from node 1.", "label": false }, { "paperid": "2410.12855v2", "paper_path": "./SciVer/papers/2410.12855v2.json", "claim_type": "parallel", "item1": "9", "item2": "17", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.12855v2_figure_9.png", "item2_path": "./SciVer/images/2410.12855v2_figure_17.png", "section": [ "8.3" ], "request_id": 373, "origin_statement": "The JAILJUDGETRAIN dataset contains 18,693 Q5 prompts, which is roughly 26 times the number of jailbroken samples in JAILJUDGE OOD, given its 11.4% jailbroken rate among 6,300 instances (≈718 samples).", "perturbed_statement": "The JAILJUDGETRAIN dataset contains 18,693 Q5 prompts, which is roughly 24 times the number of jailbroken samples in JAILJUDGE OOD, given its 12.4% jailbroken rate among 6,300 instances (≈780 samples).", "perturbed_explanation": "The perturbed statement is incorrect because Figure 17 shows an 11.4% jailbroken rate (≈718 samples), not 12.4% (≈780 samples), so both the percentage and the implied multiplication factor are wrong.", "claim": "The JAILJUDGETRAIN dataset contains 18,693 Q5 prompts, which is roughly 24 times the number of jailbroken samples in JAILJUDGE OOD, given its 12.4% jailbroken rate among 6,300 instances (≈780 samples).", "label": false }, { "paperid": "2411.17616v1", "paper_path": "./SciVer/papers/2411.17616v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17616v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.17616v1_figure_2(b).png", "section": [ "3.3", "1" ], "request_id": 375, "origin_statement": "At DDPM step 10 on Taichi, skip branches boost the minimum normalized feature similarity from about 0.80 to 0.98 (a 0.18 rise), whereas at step 250 they raise it from roughly 0.94 to 0.97 (a 0.03 uplift), demonstrating stronger smoothing benefits early in denoising.", "perturbed_statement": "At DDPM step 10 on Taichi, skip branches boost the minimum normalized feature similarity from about 0.90 to 0.98 (a 0.08 rise), whereas at step 250 they raise it from roughly 0.94 to 0.97 (a 0.03 uplift), demonstrating stronger smoothing benefits early in denoising.", "perturbed_explanation": "The perturbed statement wrongly claims the minimum 
similarity without skip branches at step 10 is 0.90. In the figure for step 10, the minimum similarity without skip branches is about 0.80, not 0.90, so the 0.08 rise is inaccurate.", "claim": "At DDPM step 10 on Taichi, skip branches boost the minimum normalized feature similarity from about 0.90 to 0.98 (a 0.08 rise), whereas at step 250 they raise it from roughly 0.94 to 0.97 (a 0.03 uplift), demonstrating stronger smoothing benefits early in denoising.", "label": false }, { "paperid": "2409.11599v1", "paper_path": "./SciVer/papers/2409.11599v1.json", "claim_type": "parallel", "item1": "1", "item2": "2(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11599v1_figure_1.png", "item2_path": "./SciVer/images/2409.11599v1_figure_2(a).png", "section": [ "4.2" ], "request_id": 376, "origin_statement": "Session 1’s AR overlay shows using the electronic caliper’s outside contacts to measure Characteristic 5’s 0.492–0.502 in (0.010 in tolerance) via 11 guided steps, while Session 2 requires selecting between that same caliper and the height gauge to measure Characteristic 35’s 3.740–3.760 in range (0.020 in tolerance), twice as wide.", "perturbed_statement": "Session 1’s AR overlay shows using the electronic caliper’s outside contacts to measure Characteristic 5’s 0.492–0.502 in (0.010 in tolerance) via 11 guided steps, while Session 2 requires selecting between that same caliper and the height gauge to measure Characteristic 35’s 3.740–3.760 in range (0.100 in tolerance), ten times as wide.", "perturbed_explanation": "Characteristic 35’s tolerance as shown is 3.740–3.760 in, which equals a 0.020 in range—not 0.100 in—so the stated 0.100 in tolerance and tenfold comparison are incorrect.", "claim": "Session 1’s AR overlay shows using the electronic caliper’s outside contacts to measure Characteristic 5’s 0.492–0.502 in (0.010 in tolerance) via 11 guided steps, while Session 2 requires selecting between that same caliper and the height gauge to measure Characteristic 35’s 3.740–3.760 in range (0.100 in tolerance), ten times as wide.", "label": false }, { "paperid": "2411.03500v1", "paper_path": "./SciVer/papers/2411.03500v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.03500v1-Table4-1.png", "item2_path": "./SciVer/images/2411.03500v1-Table5-1.png", "section": [ "6.3" ], "request_id": 378, "origin_statement": "On TPC-H 1GB with initial indexes, -Tune ran only five configurations—over 100 times fewer than UDO’s 617—yet it recommends an effective_cache_size of 45GB and sets effective_io_concurrency to 200.", "perturbed_statement": "On TPC-H 1GB with initial indexes, -Tune ran only five configurations—over 100 times fewer than UDO’s 707—yet it recommends an effective_cache_size of 45GB and sets effective_io_concurrency to 200.", "perturbed_explanation": "The perturbed statement misstates UDO’s trial count. 
Table 4 shows that UDO evaluated 617 configurations for TPC-H 1GB with initial indexes, not 707, so the comparison is incorrect.", "claim": "On TPC-H 1GB with initial indexes, λ-Tune ran only five configurations—over 100 times fewer than UDO’s 707—yet it recommends an effective_cache_size of 45GB and sets effective_io_concurrency to 200.", "label": false }, { "paperid": "2409.05306v2", "paper_path": "./SciVer/papers/2409.05306v2.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05306v2_figure_4.png", "item2_path": "./SciVer/images/2409.05306v2-Table2-1.png", "section": [ "3.3" ], "request_id": 389, "origin_statement": "At epoch 20, the training accuracy reaches about 92% while the validation accuracy is around 88% in Fig.4b, and Table 2’s confusion matrices confirm that the sum of true positives and true negatives surpasses the combined false positives and false negatives.", "perturbed_statement": "At epoch 20, the validation accuracy reaches about 92% while the training accuracy is around 88% in Fig.4b, and Table 2’s confusion matrices confirm that the sum of true positives and true negatives surpasses the combined false positives and false negatives.", "perturbed_explanation": "This is incorrect because Fig.4b clearly shows that at epoch 20 the training accuracy (~92%) is higher than the validation accuracy (~88%), not the other way around.", "claim": "At epoch 20, the validation accuracy reaches about 92% while the training accuracy is around 88% in Fig.4b, and Table 2’s confusion matrices confirm that the sum of true positives and true negatives surpasses the combined false positives and false negatives.", "label": false }, { "paperid": "2409.12887v2", "paper_path": "./SciVer/papers/2409.12887v2.json", "claim_type": "parallel", "item1": "7", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.12887v2_figure_7.png", "item2_path": "./SciVer/images/2409.12887v2-Table4-1.png", "section": [ "4.3" ], "request_id": 391, "origin_statement": "Default GCSE yields an STS-B Spearman correlation of 82.54%, while Fig.7(b) shows its predicted similarity scores for labels ≥4 tightly cluster between approximately 0.8 and 1.0, whereas the gold labels (Fig.7(c)) span from −1 to 6.", "perturbed_statement": "Default GCSE yields an STS-B Spearman correlation of 85.16%, while Fig.7(b) shows its predicted similarity scores for labels ≥4 tightly cluster between approximately 0.8 and 1.0, whereas the gold labels (Fig.7(c)) span from −1 to 6.", "perturbed_explanation": "The perturbed STS-B Spearman correlation of 85.16% contradicts Table 4, which reports a correlation of 82.54% for the default GCSE model on STS-B.", "claim": "Default GCSE yields an STS-B Spearman correlation of 85.16%, while Fig.7(b) shows its predicted similarity scores for labels ≥4 tightly cluster between approximately 0.8 and 1.0, whereas the gold labels (Fig.7(c)) span from −1 to 6.", "label": false }, { "paperid": "2409.09549v1", "paper_path": "./SciVer/papers/2409.09549v1.json", "claim_type": "parallel", "item1": "9(a)", "item2": "9(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.09549v1_figure_9(a).png", "item2_path": "./SciVer/images/2409.09549v1_figure_9(b).png", "section": [ "5.2" ], "request_id": 392, "origin_statement": "While COMFORT with full fine-tuning peaks at 98.2% test accuracy on DiabDeep with 40% data, for MHDeep it requires 70% data to reach its 
highest 94.0%, demonstrating that MHDeep demands 30 percentage points more data to attain optimal accuracy under FFT.", "perturbed_statement": "While COMFORT with full fine-tuning peaks at 98.2% test accuracy on DiabDeep with 40% data, for MHDeep it requires 70% data to reach its highest 94.0%, demonstrating that MHDeep demands 20 percentage points more data to attain optimal accuracy under FFT.", "perturbed_explanation": "The perturbed statement incorrectly claims that MHDeep demands 20 percentage points more data than DiabDeep. According to the plots, COMFORT with FFT peaks at 40% data on DiabDeep and at 70% data on MHDeep, which is a 30 percentage-point difference, not 20.", "claim": "While COMFORT with full fine-tuning peaks at 98.2% test accuracy on DiabDeep with 40% data, for MHDeep it requires 70% data to reach its highest 94.0%, demonstrating that MHDeep demands 20 percentage points more data to attain optimal accuracy under FFT.", "label": false }, { "paperid": "2410.09253v1", "paper_path": "./SciVer/papers/2410.09253v1.json", "claim_type": "parallel", "item1": "2(d)", "item2": "11", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09253v1_figure_2(d).png", "item2_path": "./SciVer/images/2410.09253v1_figure_11.png", "section": [ "4.1", "4.1.1" ], "request_id": 397, "origin_statement": "The two massive protostars G359.44a and G359.44b in Fig. 2 are separated by ≈5″ (∼0.20 pc), while Fig. 7(b.2) shows at least 8 labeled outflow knots densely clustered within a ≈2.6″ (0.10 pc) filamentary region, about half their separation.", "perturbed_statement": "The two massive protostars G359.44a and G359.44b in Fig. 2 are separated by ≈8″ (∼0.32 pc), while Fig. 7(b.2) shows at least 8 labeled outflow knots densely clustered within a ≈2.6″ (0.10 pc) filamentary region, about half their separation.", "perturbed_explanation": "The scale bar in Fig. 2 shows 5″ corresponds to ∼0.198 pc, matching the ≈5″ (∼0.20 pc) separation of G359.44a and G359.44b. Claiming an 8″ (∼0.32 pc) separation contradicts the measured 5″ distance indicated by the image.", "claim": "The two massive protostars G359.44a and G359.44b in Fig. 2 are separated by ≈8″ (∼0.32 pc), while Fig. 
7(b.2) shows at least 8 labeled outflow knots densely clustered within a ≈2.6″ (0.10 pc) filamentary region, about half their separation.", "label": false }, { "paperid": "2410.13343v1", "paper_path": "./SciVer/papers/2410.13343v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13343v1_figure_4.png", "item2_path": "./SciVer/images/2410.13343v1-Table3-1.png", "section": [ "5.2.4" ], "request_id": 401, "origin_statement": "GPT-3.5-Turbo’s accuracy drops by 13.3 percentage points, from 61.3% on premise-start tautologies to 48.0% when tautologies are at the start of the hypothesis, illustrating the distraction effect shown in Figure 4.", "perturbed_statement": "GPT-3.5-Turbo’s accuracy drops by 20.0 percentage points, from 61.3% on premise-start tautologies to 48.0% when tautologies are at the start of the hypothesis, illustrating the distraction effect shown in Figure 4.", "perturbed_explanation": "The perturbed statement misstates the magnitude of the drop: the actual difference between 61.3% and 48.0% is 13.3 percentage points, not 20.0.", "claim": "GPT-3.5-Turbo’s accuracy drops by 20.0 percentage points, from 61.3% on premise-start tautologies to 48.0% when tautologies are at the start of the hypothesis, illustrating the distraction effect shown in Figure 4.", "label": false }, { "paperid": "2409.14032v1", "paper_path": "./SciVer/papers/2409.14032v1.json", "claim_type": "parallel", "item1": "2(c)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14032v1_figure_2(c).png", "item2_path": "./SciVer/images/2409.14032v1_figure_2(d).png", "section": [ "4" ], "request_id": 413, "origin_statement": "At subsample size r=400, the OSP estimator's ASE is approximately 0.045 for the logistic model in case1 and about 0.053 in case2, compared to UNIF ASEs of roughly 0.060 and 0.075, indicating relative ASE reductions of ~25% and ~29% respectively.", "perturbed_statement": "At subsample size r=400, the OSP estimator's ASE is approximately 0.050 for the logistic model in case1 and about 0.053 in case2, compared to UNIF ASEs of roughly 0.060 and 0.070, indicating relative ASE reductions of ~17% and ~23% respectively.", "perturbed_explanation": "The perturbed statement misreports two values: in case1 the OSP ASE at r=400 is actually around 0.045—not 0.050—and in case2 the UNIF ASE at r=400 is about 0.075—not 0.070—according to the plotted ASE curves in Figure 2(c) and (d).", "claim": "At subsample size r=400, the OSP estimator's ASE is approximately 0.050 for the logistic model in case1 and about 0.053 in case2, compared to UNIF ASEs of roughly 0.060 and 0.070, indicating relative ASE reductions of ~17% and ~23% respectively.", "label": false }, { "paperid": "2409.06924v1", "paper_path": "./SciVer/papers/2409.06924v1.json", "claim_type": "parallel", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06924v1_figure_3.png", "item2_path": "./SciVer/images/2409.06924v1-Table1-1.png", "section": [ "4" ], "request_id": 430, "origin_statement": "TOI-4562 c’s semi‐major axis of 5.225 AU places its orbit just beyond Jupiter’s 5.2 AU, while TOI‐4562 b shows transit‐timing variations ranging from −30.7 min at cycle 0 up to +44.2 min at cycle 5.", "perturbed_statement": "TOI-4562 c’s semi‐major axis of 5.225 AU places its orbit just beyond Jupiter’s 5.2 AU, while TOI‐4562 b shows transit‐timing variations ranging from 
−30.7 min at cycle 0 up to +54.2 min at cycle 5.", "perturbed_explanation": "The perturbed TTV value (+54.2 min) contradicts Table 1, which lists the maximum transit‐timing variation at cycle 5 as +44.2080 min, not +54.2 min.", "claim": "TOI-4562 c’s semi‐major axis of 5.225 AU places its orbit just beyond Jupiter’s 5.2 AU, while TOI‐4562 b shows transit‐timing variations ranging from −30.7 min at cycle 0 up to +54.2 min at cycle 5.", "label": false }, { "paperid": "2410.22046v2", "paper_path": "./SciVer/papers/2410.22046v2.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.22046v2-Table4-1.png", "item2_path": "./SciVer/images/2410.22046v2-Table5-1.png", "section": [ "4.1.2" ], "request_id": 436, "origin_statement": "Across all sequence-length intervals, note prediction accuracy consistently outperforms chord prediction accuracy by about 13–17 percentage points; for instance, in the (50.8,76.2) bin, note accuracy is 74.80% versus chord accuracy of 58.44%, and both accuracies decrease as sequence length increases.", "perturbed_statement": "Across all sequence-length intervals, note prediction accuracy consistently outperforms chord prediction accuracy by about 8–10 percentage points; for instance, in the (50.8,76.2) bin, note accuracy is 74.80% versus chord accuracy of 58.44%, and both accuracies decrease as sequence length increases.", "perturbed_explanation": "The perturbation incorrectly states that the accuracy gap is 8–10 percentage points. In reality, the tables show differences ranging from about 14.62% (75.85−61.23) up to 18.82% (72.28−53.46), e.g. 74.80%−58.44%=16.36% in the (50.8,76.2) bin.", "claim": "Across all sequence-length intervals, note prediction accuracy consistently outperforms chord prediction accuracy by about 8–10 percentage points; for instance, in the (50.8,76.2) bin, note accuracy is 74.80% versus chord accuracy of 58.44%, and both accuracies decrease as sequence length increases.", "label": false }, { "paperid": "2410.23296v1", "paper_path": "./SciVer/papers/2410.23296v1.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23296v1_figure_2.png", "item2_path": "./SciVer/images/2410.23296v1_figure_4.png", "section": [ "4.2" ], "request_id": 438, "origin_statement": "On BTC-USD (Figure 2), the qDense PDF reaches a maximum density of about 26 around a 0% log return compared to qLSTM’s ∼9.5, while on synthetic normal data (Figure 4), the qLSTM PDF peaks at ∼17.3 versus LQR’s ∼13.", "perturbed_statement": "On BTC-USD (Figure 2), the qLSTM PDF peaks at around 26 density at a 0% log return compared to qDense’s ∼9.5, while on synthetic normal data (Figure 4), the qLSTM PDF peaks at ∼13 versus LQR’s ∼17.3 density.", "perturbed_explanation": "This is incorrect because Figure 2 shows the qDense PDF (not qLSTM) peaks at about 26 while qLSTM peaks around 9.5. 
And in Figure 4 the qLSTM PDF actually peaks at about 17.3, not 13, with LQR peaking around 13.", "claim": "On BTC-USD (Figure 2), the qLSTM PDF peaks at around 26 density at a 0% log return compared to qDense’s ∼9.5, while on synthetic normal data (Figure 4), the qLSTM PDF peaks at ∼13 versus LQR’s ∼17.3 density.", "label": false }, { "paperid": "2409.04290v1", "paper_path": "./SciVer/papers/2409.04290v1.json", "claim_type": "parallel", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04290v1_figure_6.png", "item2_path": "./SciVer/images/2409.04290v1_figure_7.png", "section": [ "4.2.1" ], "request_id": 445, "origin_statement": "While the mean blood pressure term in Fig.6 drops from a hazard contribution of ~0.8 at 0 mmHg to ~0 at 75 mmHg, the PySR fit in Fig.7 correctly spans the activation’s full range from ~–1.0 at neuron –1.5 to ~0.8 at neuron +1.5, unlike the flat auto-symbolic fit.", "perturbed_statement": "While the mean blood pressure term in Fig.6 rises from a hazard contribution of ~0.8 at 0 mmHg to ~0 at 75 mmHg, the PySR fit in Fig.7 correctly spans the activation’s full range from ~–1.0 at neuron –1.5 to ~0.8 at neuron +1.5.", "perturbed_explanation": "This is incorrect because Fig.6 shows the mean blood pressure hazard contribution decreases (drops) from about 0.8 at 0 mmHg to near zero at 75 mmHg; it does not rise.", "claim": "While the mean blood pressure term in Fig.6 rises from a hazard contribution of ~0.8 at 0 mmHg to ~0 at 75 mmHg, the PySR fit in Fig.7 correctly spans the activation’s full range from ~–1.0 at neuron –1.5 to ~0.8 at neuron +1.5.", "label": false }, { "paperid": "2409.07019v1", "paper_path": "./SciVer/papers/2409.07019v1.json", "claim_type": "parallel", "item1": "6", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.07019v1_figure_6.png", "item2_path": "./SciVer/images/2409.07019v1-Table6-1.png", "section": [ "4" ], "request_id": 447, "origin_statement": "Table 6 reports an RV slope γ˙=0.00239±0.00039 m/s/day (∼0.87 m/s/yr), smaller than the HARPS-N jitter σ_J=2.30+0.24 m/s; yet Fig 6 places K2-2 b at ∼10 M⊕ and 2.45 R⊕ on the 100%-H2O/50%-H2O tracks, confirming its water-rich composition.", "perturbed_statement": "Table 6 reports an RV slope γ˙=0.0239±0.00039 m/s/day (∼8.7 m/s/yr), smaller than the HARPS-N jitter σ_J=2.30+0.24 m/s; yet Fig 6 places K2-2 b at ∼10 M⊕ and 2.45 R⊕ on the 100%-H2O/50%-H2O tracks, confirming its water-rich composition.", "perturbed_explanation": "The perturbed slope γ˙=0.0239 m/s/day contradicts Table 6, which lists γ˙=0.00239±0.00039 m/s/day (an order of magnitude lower), so the ∼8.7 m/s annual drift is incorrect.", "claim": "Table 6 reports an RV slope γ˙=0.0239±0.00039 m/s/day (∼8.7 m/s/yr), smaller than the HARPS-N jitter σ_J=2.30+0.24 m/s; yet Fig 6 places K2-2 b at ∼10 M⊕ and 2.45 R⊕ on the 100%-H2O/50%-H2O tracks, confirming its water-rich composition.", "label": false }, { "paperid": "2409.00163v1", "paper_path": "./SciVer/papers/2409.00163v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.00163v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.00163v1_figure_3(b).png", "section": [ "4.2" ], "request_id": 450, "origin_statement": "At time 150, DeepSurv predicts survival probabilities approximately 0.25 for patient 373 versus 0.15 under CoxPH, and about 0.40 for patient 1479 compared to 0.30 under CoxPH, 
indicating around 10% higher survival at that horizon.", "perturbed_statement": "At time 150, DeepSurv predicts survival probabilities approximately 0.35 for patient 373 versus 0.15 under CoxPH, and about 0.50 for patient 1479 compared to 0.30 under CoxPH, indicating around 20% higher survival at that horizon.", "perturbed_explanation": "The perturbed statement overstates DeepSurv’s survival estimates: the actual DeepSurv curves show ~0.25 for patient 373 (not 0.35) and ~0.40 for patient 1479 (not 0.50) at time 150, contradicting the plotted values.", "claim": "At time 150, DeepSurv predicts survival probabilities approximately 0.35 for patient 373 versus 0.15 under CoxPH, and about 0.50 for patient 1479 compared to 0.30 under CoxPH, indicating around 20% higher survival at that horizon.", "label": false }, { "paperid": "2411.01739v2", "paper_path": "./SciVer/papers/2411.01739v2.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.01739v2-Table2-1.png", "item2_path": "./SciVer/images/2411.01739v2-Table3-1.png", "section": [ "5.4" ], "request_id": 451, "origin_statement": "CompILer’s object accuracy in Split-Clothing (5 tasks) at 96.67% surpasses its object accuracy in Split-UT-Zappos (5 tasks) at 79.56% by 17.11 points, while its HM also drops from 94.18% to 66.31%.", "perturbed_statement": "CompILer’s object accuracy in Split-Clothing (5 tasks) at 96.67% surpasses its object accuracy in Split-UT-Zappos (5 tasks) at 86.55% by 10.12 points, while its HM also drops from 94.18% to 66.31%.", "perturbed_explanation": "The perturbed statement wrongly claims a 86.55% object accuracy for Split-UT-Zappos (5 tasks), but Table 3 shows it is actually 79.56%.", "claim": "CompILer’s object accuracy in Split-Clothing (5 tasks) at 96.67% surpasses its object accuracy in Split-UT-Zappos (5 tasks) at 86.55% by 10.12 points, while its HM also drops from 94.18% to 66.31%.", "label": false }, { "paperid": "2411.01537v1", "paper_path": "./SciVer/papers/2411.01537v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01537v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.01537v1_figure_5(b).png", "section": [ "4.6" ], "request_id": 452, "origin_statement": "SASRec layer1's heatmap shows a maximum attention score of about 0.05 at sequence position 15, whereas layer2's heatmap peaks at roughly 0.035 around position 22, indicating layer1 allocates about 43% more weight to a single item than layer2.", "perturbed_statement": "SASRec layer1's heatmap shows a maximum attention score of about 0.05 at sequence position 15, whereas layer2's heatmap peaks at roughly 0.045 around position 22, indicating layer1 allocates about 11% more weight to a single item than layer2.", "perturbed_explanation": "The perturbation incorrectly raises layer2’s peak from ~0.035 to 0.045. 
The second heatmap’s colorbar clearly maxes out around 0.035, so stating a peak of 0.045 contradicts the visual data.", "claim": "SASRec layer1's heatmap shows a maximum attention score of about 0.05 at sequence position 15, whereas layer2's heatmap peaks at roughly 0.045 around position 22, indicating layer1 allocates about 11% more weight to a single item than layer2.", "label": false }, { "paperid": "2410.10672v1", "paper_path": "./SciVer/papers/2410.10672v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.10672v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.10672v1_figure_2(c).png", "section": [ "5.2.2" ], "request_id": 453, "origin_statement": "From 256M to 13B parameters, Wikipedia’s cross-entropy loss decreases from about 2.93 to 2.03 (a drop of ~0.90), while its matrix entropy falls from approximately 0.608 to 0.513 (a drop of ~0.095), indicating consistent gains in both metrics with scale.", "perturbed_statement": "From 256M to 13B parameters, Wikipedia’s cross-entropy loss decreases from about 2.93 to 2.03 (a drop of ~0.90), while its matrix entropy falls from approximately 0.608 to 0.563 (a drop of ~0.045), indicating consistent gains in both metrics with scale.", "perturbed_explanation": "The perturbed statement claims Wikipedia’s matrix entropy at 13B is 0.563, but the chart shows it is actually around 0.513, so the stated final entropy value (0.563) contradicts the image.", "claim": "From 256M to 13B parameters, Wikipedia’s cross-entropy loss decreases from about 2.93 to 2.03 (a drop of ~0.90), while its matrix entropy falls from approximately 0.608 to 0.563 (a drop of ~0.045), indicating consistent gains in both metrics with scale.", "label": false }, { "paperid": "2410.11716v1", "paper_path": "./SciVer/papers/2410.11716v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.11716v1-Table1-1.png", "item2_path": "./SciVer/images/2410.11716v1-Table2-1.png", "section": [ "4.1" ], "request_id": 458, "origin_statement": "For the largest sample size of 490 without time trend, PBD randomization yields a type-I error of 9.78% and power of 85.65% under Test 3, compared to CR randomization’s 9.93% type-I error and 81.16% power, a 4.49% higher power.", "perturbed_statement": "For the largest sample size of 490 without time trend, PBD randomization yields a type-I error of 9.78% and power of 90.65% under Test 3, compared to CR randomization’s 9.93% type-I error and 81.16% power, a 9.49% higher power.", "perturbed_explanation": "The perturbed statement misstates the PBD power for Test 3: Table 2 shows 85.65% power under PBD at n=490 without time trend, not 90.65%. 
Consequently, the claimed 90.65% power and the 9.49% power difference contradict the actual results.", "claim": "For the largest sample size of 490 without time trend, PBD randomization yields a type-I error of 9.78% and power of 90.65% under Test 3, compared to CR randomization’s 9.93% type-I error and 81.16% power, a 9.49% higher power.", "label": false }, { "paperid": "2410.05522v1", "paper_path": "./SciVer/papers/2410.05522v1.json", "claim_type": "parallel", "item1": "9", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.05522v1_figure_9.png", "item2_path": "./SciVer/images/2410.05522v1-Table4-1.png", "section": [ "3.5" ], "request_id": 463, "origin_statement": "The combined set model achieves a median testing R^2 of 0.988, and in the second ground truth mesh the maximum temperature reaches about 0.455°C.", "perturbed_statement": "The combined set model achieves a median testing R^2 of 0.992, and in the second ground truth mesh the maximum temperature reaches about 0.455°C.", "perturbed_explanation": "This statement is incorrect because Table 4 reports a median testing R² of 0.988 for the Combined Set model (0.992 is the testing R² for the Voronoi Set), so the value 0.992 contradicts the table.", "claim": "The combined set model achieves a median testing R^2 of 0.992, and in the second ground truth mesh the maximum temperature reaches about 0.455°C.", "label": false }, { "paperid": "2411.15469v1", "paper_path": "./SciVer/papers/2411.15469v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15469v1_figure_4.png", "item2_path": "./SciVer/images/2411.15469v1-Table3-1.png", "section": [ "5.3" ], "request_id": 464, "origin_statement": "Figure 4(c) shows that with η=0.95 on 10-split CIFAR-100, the model achieves 89.60% accuracy and 2.57% forgetting, identical to the full-projection configuration’s 89.60% accuracy and 2.57% forgetting in Table 3.", "perturbed_statement": "Figure 4(c) shows that with η=0.95 on 10-split CIFAR-100, the model achieves 90.17% accuracy and 2.57% forgetting, identical to the full-projection configuration’s 89.60% accuracy and 2.57% forgetting in Table 3.", "perturbed_explanation": "The perturbed statement incorrectly raises the accuracy to 90.17% at η=0.95 on 10-split CIFAR-100, but Figure 4(c) reports an accuracy of 89.60% for η=0.95. 
This contradiction makes the perturbed claim untrue.", "claim": "Figure 4(c) shows that with η=0.95 on 10-split CIFAR-100, the model achieves 90.17% accuracy and 2.57% forgetting, identical to the full-projection configuration’s 89.60% accuracy and 2.57% forgetting in Table 3.", "label": false }, { "paperid": "2409.02137v1", "paper_path": "./SciVer/papers/2409.02137v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.02137v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.02137v1_figure_2(b).png", "section": [ "2.4" ], "request_id": 471, "origin_statement": "Under random exploration, cubes 2–5 are entirely unvisited, while WaypointRL covers 100% of cells in depths 0–2 of cube 3 and leaves only one unvisited cell in each of depths 3–5.", "perturbed_statement": "Under random exploration, cubes 1–5 are entirely unvisited, while WaypointRL covers 100% of cells in depths 0–2 of cube 3 and leaves only one unvisited cell in each of depths 3–5.", "perturbed_explanation": "The perturbed claim wrongly asserts that cube 1 is completely unvisited under random exploration; however, the first image shows a colored heatmap over cube 1 indicating it was partially explored.", "claim": "Under random exploration, cubes 1–5 are entirely unvisited, while WaypointRL covers 100% of cells in depths 0–2 of cube 3 and leaves only one unvisited cell in each of depths 3–5.", "label": false }, { "paperid": "2410.12261v1", "paper_path": "./SciVer/papers/2410.12261v1.json", "claim_type": "parallel", "item1": "4(c)", "item2": "4(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.12261v1_figure_4(c).png", "item2_path": "./SciVer/images/2410.12261v1_figure_4(d).png", "section": [ "4.2" ], "request_id": 480, "origin_statement": "GECCO’s AUC-ROC stays above 0.94 across all training patch sizes and rises above 0.96 for testing patch sizes, outperforming CICIDS by roughly 0.18 AUC-ROC at testing patch size 32.", "perturbed_statement": "GECCO’s AUC-ROC stays above 0.98 across all training patch sizes and rises above 0.97 for testing patch sizes, outperforming CICIDS by roughly 0.10 AUC-ROC at testing patch size 32.", "perturbed_explanation": "The perturbed statement misreports GECCO’s performance: Figure 4(c) shows GECCO’s AUC-ROC ranges from about 0.94 to 0.96 for training patch sizes, not above 0.98. 
It also understates the gap at testing size 32, which is ~0.97 − 0.79 ≈ 0.18, not 0.10.", "claim": "GECCO’s AUC-ROC stays above 0.98 across all training patch sizes and rises above 0.97 for testing patch sizes, outperforming CICIDS by roughly 0.10 AUC-ROC at testing patch size 32.", "label": false }, { "paperid": "2411.14034v1", "paper_path": "./SciVer/papers/2411.14034v1.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.14034v1-Table2-1.png", "item2_path": "./SciVer/images/2411.14034v1-Table4-1.png", "section": [ "6" ], "request_id": 481, "origin_statement": "CrabNet's LOCO-CV conductivity prediction MAE is 1.81 ± 0.87 dex, while the leading candidate Na0.025Zn0.975O0.988 exhibits log10 σ_pred 3.57, surpassing the MAE by ~1.76 dex.", "perturbed_statement": "CrabNet's LOCO-CV conductivity prediction MAE is 2.11 ± 0.87 dex, while the leading candidate Na0.025Zn0.975O0.988 exhibits log10 σ_pred 3.22, surpassing the MAE by ~1.01 dex.", "perturbed_explanation": "The LOCO-CV MAE for CrabNet is actually 1.81 ± 0.87 dex (not 2.11 dex), and Na0.025Zn0.975O0.988 has a predicted log10 σ of 3.57 (not 3.22) as shown in Tables 2 and 4.", "claim": "CrabNet's LOCO-CV conductivity prediction MAE is 2.11 ± 0.87 dex, while the leading candidate Na0.025Zn0.975O0.988 exhibits log10 σ_pred 3.22, surpassing the MAE by ~1.01 dex.", "label": false }, { "paperid": "2411.11706v1", "paper_path": "./SciVer/papers/2411.11706v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11706v1_figure_1.png", "item2_path": "./SciVer/images/2411.11706v1_figure_2.png", "section": [ "3.1" ], "request_id": 483, "origin_statement": "Figure2 shows MC-LLaVA uses k-means normalization over m*(k+1) concept tokens from k images per concept, and Figure1 demonstrates that with multi-concept training, MC-LLaVA correctly recognizes all three personalized characters (, , ) and identifies ’s blue-and-black striped shirt.", "perturbed_statement": "Figure2 shows MC-LLaVA uses k-means normalization over m*(k+2) concept tokens from k images per concept, and Figure1 demonstrates that with multi-concept training, MC-LLaVA correctly recognizes all four personalized characters (, , , ) and identifies ’s pink-and-black striped shirt.", "perturbed_explanation": "The perturbation is wrong because Figure2 specifies m*(k+1) concept tokens, not m*(k+2); Figure1 only includes three concepts (,,), not four; and is shown wearing a blue-and-black striped shirt, not pink-and-black.", "claim": "Figure2 shows MC-LLaVA uses k-means normalization over m*(k+2) concept tokens from k images per concept, and Figure1 demonstrates that with multi-concept training, MC-LLaVA correctly recognizes all four personalized characters (, , , ) and identifies ’s pink-and-black striped shirt.", "label": false }, { "paperid": "2409.19028v1", "paper_path": "./SciVer/papers/2409.19028v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19028v1_figure_3.png", "item2_path": "./SciVer/images/2409.19028v1-Table4-1.png", "section": [ "5.3" ], "request_id": 485, "origin_statement": "In Table 4, Llama2’s median context-aware score reached 4.00 in round 2, exceeding Tinyllama’s highest context-aware median of 2.75 by 1.25 points, while Figure 3 shows Llama2’s median cosine similarity under context-aware prompts (~0.75) 
is about 0.20 higher than Tinyllama’s (~0.55).", "perturbed_statement": "In Table 4, Llama2’s median context-aware score reached 3.00 in round 2, exceeding Tinyllama’s highest context-aware median of 2.75 by 0.25 points, while Figure 3 shows Llama2’s median cosine similarity under context-aware prompts (~0.75) is about 0.30 higher than Tinyllama’s (~0.55).", "perturbed_explanation": "The perturbed statement is incorrect because Table 4 reports Llama2’s median context-aware score in round 2 as 4.00, not 3.00, and Figure 3 shows the difference in median cosine similarity as ~0.20 (0.75–0.55), not 0.30.", "claim": "In Table 4, Llama2’s median context-aware score reached 3.00 in round 2, exceeding Tinyllama’s highest context-aware median of 2.75 by 0.25 points, while Figure 3 shows Llama2’s median cosine similarity under context-aware prompts (~0.75) is about 0.30 higher than Tinyllama’s (~0.55).", "label": false }, { "paperid": "2410.23893v3", "paper_path": "./SciVer/papers/2410.23893v3.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23893v3_figure_1.png", "item2_path": "./SciVer/images/2410.23893v3_figure_2.png", "section": [ "2.2" ], "request_id": 492, "origin_statement": "Conditioned on the first 100 cycles of the capacity matrix via a transformer encoder (Fig.1b), DiffBatt's U-Net architecture (Fig.1a) denoises one-dimensional Gaussian noise over 11 equally spaced timesteps (Fig.2, from t=0 to t=1000) to reconstruct the SOH curve.", "perturbed_statement": "Conditioned on the first 200 cycles of the capacity matrix via a transformer encoder (Fig.1b), DiffBatt's U-Net architecture (Fig.1a) denoises one-dimensional Gaussian noise over 11 equally spaced timesteps (Fig.2, from t=0 to t=1000) to reconstruct the SOH curve.", "perturbed_explanation": "The perturbed statement incorrectly asserts use of the first 200 cycles for the capacity matrix. According to the context and Fig.1, DiffBatt conditions on only the first 100 cycles, not 200.", "claim": "Conditioned on the first 200 cycles of the capacity matrix via a transformer encoder (Fig.1b), DiffBatt's U-Net architecture (Fig.1a) denoises one-dimensional Gaussian noise over 11 equally spaced timesteps (Fig.2, from t=0 to t=1000) to reconstruct the SOH curve.", "label": false }, { "paperid": "2409.05224v1", "paper_path": "./SciVer/papers/2409.05224v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.05224v1-Table1-1.png", "item2_path": "./SciVer/images/2409.05224v1-Table2-1.png", "section": [ "6.3" ], "request_id": 497, "origin_statement": "In Table1, 2;2;8+WL+GPS(0.9) yields 33.13 H2H spBLEU; in Table2, the 30-language 2;2;8;8+WL+GPS(0.9) yields 29.92 H2H, a 3.21-point drop.", "perturbed_statement": "In Table1, 2;2;8+WL+GPS(0.9) yields 33.13 H2H spBLEU; in Table2, the 30-language 2;2;8;8+WL+GPS(0.9) yields 29.92 H2H, a 5.21-point drop.", "perturbed_explanation": "The perturbed statement wrongly claims a 5.21-point drop. 
According to Table1 (H2H = 33.13) and Table2 (H2H = 29.92), the actual difference is 33.13 – 29.92 = 3.21 spBLEU, not 5.21.", "claim": "In Table1, 2;2;8+WL+GPS(0.9) yields 33.13 H2H spBLEU; in Table2, the 30-language 2;2;8;8+WL+GPS(0.9) yields 29.92 H2H, a 5.21-point drop.", "label": false }, { "paperid": "2410.22551v2", "paper_path": "./SciVer/papers/2410.22551v2.json", "claim_type": "parallel", "item1": "4", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.22551v2_figure_4.png", "item2_path": "./SciVer/images/2410.22551v2_figure_6.png", "section": [ "4.4" ], "request_id": 508, "origin_statement": "While FairSkin-CS has lower overall ACC of 74.1% in Figure 4 compared to Vanilla’s 77.3%, at 900 augmentation samples in Figure 6 it raises demographic parity to about 17.8%, exceeding FairSkin-SS’s 14.6% parity at the same sample count.", "perturbed_statement": "While FairSkin-CS has higher overall ACC of 76.7% in Figure 4 compared to Vanilla’s 77.3%, at 900 augmentation samples in Figure 6 it raises demographic parity to about 18.5%, exceeding FairSkin-SS’s 16.2% parity at the same sample count.", "perturbed_explanation": "The perturbed statement misreports FairSkin-CS’s overall ACC: Figure 4 shows about 74.1%, not 76.7%. It also overstates FairSkin-CS’s demographic parity at 900 samples (actual ~17.8%, not 18.5%) and FairSkin-SS’s parity (actual ~14.6%, not 16.2%).", "claim": "While FairSkin-CS has higher overall ACC of 76.7% in Figure 4 compared to Vanilla’s 77.3%, at 900 augmentation samples in Figure 6 it raises demographic parity to about 18.5%, exceeding FairSkin-SS’s 16.2% parity at the same sample count.", "label": false }, { "paperid": "2410.22938v2", "paper_path": "./SciVer/papers/2410.22938v2.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.22938v2-Table1-1.png", "item2_path": "./SciVer/images/2410.22938v2-Table2-1.png", "section": [ "4.2" ], "request_id": 520, "origin_statement": "In D^2_HZ, DiffLight’s ATT increases from 333.90±2.67 at 50% random missing to 351.21±9.86 at 25% kriging missing, a gain of 17.31.", "perturbed_statement": "In D^2_HZ, DiffLight’s ATT increases from 343.90±2.67 at 50% random missing to 351.21±9.86 at 25% kriging missing, a gain of 7.31.", "perturbed_explanation": "The random-missing table reports DiffLight’s ATT on D^2_HZ at 50% missing as 333.90±2.67, not 343.90±2.67. Moreover, the increase from 333.90 to 351.21 is 17.31, not 7.31, so both the starting value and the stated gain are incorrect.", "claim": "In D^2_HZ, DiffLight’s ATT increases from 343.90±2.67 at 50% random missing to 351.21±9.86 at 25% kriging missing, a gain of 7.31.", "label": false }, { "paperid": "2409.10132v1", "paper_path": "./SciVer/papers/2409.10132v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10132v1_figure_1.png", "item2_path": "./SciVer/images/2409.10132v1_figure_2.png", "section": [ "1" ], "request_id": 526, "origin_statement": "Figure1 shows StruEdit achieves ~95% editing accuracy at ≈0.32 s per QA, versus IKE’s ~60% at ≈0.63 s. Figure2 reveals structural editing removes outdated parametric knowledge and refills reasoning via updated triple structures, avoiding locate-then-edit errors inherent to ME and ICE.", "perturbed_statement": "Figure1 shows StruEdit achieves ~75% editing accuracy at ≈0.32 s per QA, versus IKE’s ~60% at ≈0.63 s. 
Figure2 reveals structural editing removes outdated parametric knowledge and refills reasoning via updated triple structures, avoiding locate-then-edit errors inherent to ME and ICE.", "perturbed_explanation": "The perturbed statement misreports StruEdit’s editing accuracy as ~75%, but Figure 1 clearly shows StruEdit at the top with around ~95% accuracy, so the ~75% figure contradicts the plotted data.", "claim": "Figure1 shows StruEdit achieves ~75% editing accuracy at ≈0.32 s per QA, versus IKE’s ~60% at ≈0.63 s. Figure2 reveals structural editing removes outdated parametric knowledge and refills reasoning via updated triple structures, avoiding locate-then-edit errors inherent to ME and ICE.", "label": false }, { "paperid": "2410.17831v1", "paper_path": "./SciVer/papers/2410.17831v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17831v1_figure_2(b).png", "item2_path": "./SciVer/images/2410.17831v1_figure_2(c).png", "section": [ "4.2.2", "4.2.1" ], "request_id": 532, "origin_statement": "The maximum obstacle distance in the horizontal Obstacle GPDF slice reaches around 3.4 m, which exceeds the peak ground distance of approximately 2.2 m in the vertical Ground GPDF slice by about 1.2 m.", "perturbed_statement": "The maximum obstacle distance in the horizontal Obstacle GPDF slice reaches around 2.0 m, which exceeds the peak ground distance of approximately 2.2 m in the vertical Ground GPDF slice by about 0.2 m.", "perturbed_explanation": "This is incorrect because the Obstacle GPDF colorbar clearly extends above 3.0 m (up to about 3.4 m), so stating a maximum of 2.0 m contradicts the actual obstacle slice data.", "claim": "The maximum obstacle distance in the horizontal Obstacle GPDF slice reaches around 2.0 m, which exceeds the peak ground distance of approximately 2.2 m in the vertical Ground GPDF slice by about 0.2 m.", "label": false }, { "paperid": "2410.23701v1", "paper_path": "./SciVer/papers/2410.23701v1.json", "claim_type": "parallel", "item1": "18", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23701v1_figure_18.png", "item2_path": "./SciVer/images/2410.23701v1-Table2-1.png", "section": [ "1" ], "request_id": 533, "origin_statement": "Even our 50% ablation dataset in Fig.18 has 1.63M grasps, which already exceeds DexGraspNet’s 1.32M grasps listed in Table2, demonstrating our full 3.26M set more than doubles that prior scale.", "perturbed_statement": "Even our 50% ablation dataset in Fig.18 has 820k grasps, which already exceeds DexGraspNet’s 1.32M grasps listed in Table2, demonstrating our full 3.26M set more than doubles that prior scale.", "perturbed_explanation": "The perturbed statement wrongly asserts the 50% ablation contains 820k grasps, but Fig.18 shows the 50% ablation level actually corresponds to about 1.63M grasps.", "claim": "Even our 50% ablation dataset in Fig.18 has 820k grasps, which already exceeds DexGraspNet’s 1.32M grasps listed in Table2, demonstrating our full 3.26M set more than doubles that prior scale.", "label": false }, { "paperid": "2409.00140v1", "paper_path": "./SciVer/papers/2409.00140v1.json", "claim_type": "parallel", "item1": "6(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.00140v1_figure_6(a).png", "item2_path": "./SciVer/images/2409.00140v1_figure_6(b).png", "section": [ "3.2", "4.2" ], "request_id": 534, "origin_statement": "QHe 
initialization yields a mean accuracy of ~79.7% (CI [78.9,80.5]), about 3.2% higher than He (~76.5%). The smallest model (quarter parameters) achieves a mean accuracy within 0.2% of the largest model (~78.8% vs ~79.0%).", "perturbed_statement": "QHe initialization yields a mean accuracy of ~79.7% (CI [78.9,80.5]), about 3.2% higher than He (~76.5%). However, the smallest model (quarter parameters) achieves a mean accuracy that is 3% lower than the largest model (~75.8% vs ~79.0%).", "perturbed_explanation": "The perturbation is wrong because Figure 6 (right) shows the smallest and largest parameter groups have almost identical means (≈78.8% vs ≈79.0%) with overlapping confidence intervals, not a 3% gap.", "claim": "QHe initialization yields a mean accuracy of ~79.7% (CI [78.9,80.5]), about 3.2% higher than He (~76.5%). However, the smallest model (quarter parameters) achieves a mean accuracy that is 3% lower than the largest model (~75.8% vs ~79.0%).", "label": false }, { "paperid": "2409.00119v2", "paper_path": "./SciVer/papers/2409.00119v2.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.00119v2_figure_2.png", "item2_path": "./SciVer/images/2409.00119v2_figure_3.png", "section": [ "3.2" ], "request_id": 541, "origin_statement": "In LoRA finetuning (Figure 2 middle), angular changes ΔD span roughly 0.6–0.95 while magnitude changes ΔM stay below 0.06, and in RoAd1 (Figure 3) each adjacent-dimension pair (h_d, h_{d+1}) is rotated by θ_d with a learnable scale α_d that modulates the original norm.", "perturbed_statement": "In LoRA finetuning (Figure 2 middle), angular changes ΔD span roughly 0.1–0.3 while magnitude changes ΔM stay below 0.06, and in RoAd1 (Figure 3) each adjacent-dimension pair (h_d, h_{d+1}) is rotated by θ_d with a learnable scale α_d that modulates the original norm.", "perturbed_explanation": "The perturbed statement understates angular changes: Figure 2 (middle) shows ΔD values clustered between about 0.6 and 0.95, not 0.1–0.3, so the claimed 0.1–0.3 range contradicts the actual data.", "claim": "In LoRA finetuning (Figure 2 middle), angular changes ΔD span roughly 0.1–0.3 while magnitude changes ΔM stay below 0.06, and in RoAd1 (Figure 3) each adjacent-dimension pair (h_d, h_{d+1}) is rotated by θ_d with a learnable scale α_d that modulates the original norm.", "label": false }, { "paperid": "2410.05729v1", "paper_path": "./SciVer/papers/2410.05729v1.json", "claim_type": "parallel", "item1": "6(a)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.05729v1_figure_6(a).png", "item2_path": "./SciVer/images/2410.05729v1-Table4-1.png", "section": [ "4.3" ], "request_id": 542, "origin_statement": "At rank dimension 128 and output feature size 128, the LRFT module achieves an RMSE of approximately 2.0 cm (Fig. 6), a 65% decrease compared to the 5.68 cm translation error of our full model (Table 4, row 14).", "perturbed_statement": "At rank dimension 128 and output feature size 128, the LRFT module achieves an RMSE of approximately 1.2 cm (Fig. 6), a 79% decrease compared to the 5.68 cm translation error of our full model (Table 4, row 14).", "perturbed_explanation": "The figure shows that at rank 128 with output size 128 the RMSE is around 2.0 cm, not 1.2 cm; thus the perturbed RMSE value contradicts the data in Fig. 
6.", "claim": "At rank dimension 128 and output feature size 128, the LRFT module achieves an RMSE of approximately 1.2 cm (Fig. 6), a 79% decrease compared to the 5.68 cm translation error of our full model (Table 4, row 14).", "label": false }, { "paperid": "2410.22187v1", "paper_path": "./SciVer/papers/2410.22187v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.22187v1-Table1-1.png", "item2_path": "./SciVer/images/2410.22187v1-Table2-1.png", "section": [ "6.1" ], "request_id": 543, "origin_statement": "Under a 5% annotation budget, the proposed method achieves a 71.8% average accuracy with CoOp prompt tuning (Table 1) and 75.0% with VPT tuning (Table 2), representing a 3.2-point gain when using VPT.", "perturbed_statement": "Under a 5% annotation budget, the proposed method achieves a 71.8% average accuracy with CoOp prompt tuning (Table 1) and 76.5% with VPT tuning (Table 2), representing a 4.7-point gain when using VPT.", "perturbed_explanation": "The perturbed statement misreports the VPT average: Table 2 shows a 75.0% average at 5%, not 76.5%. Consequently, the actual difference over the 71.8% CoOp result is 3.2 points, not 4.7.", "claim": "Under a 5% annotation budget, the proposed method achieves a 71.8% average accuracy with CoOp prompt tuning (Table 1) and 76.5% with VPT tuning (Table 2), representing a 4.7-point gain when using VPT.", "label": false }, { "paperid": "2411.05009v1", "paper_path": "./SciVer/papers/2411.05009v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.05009v1_figure_4(b).png", "item2_path": "./SciVer/images/2411.05009v1-Table6-1.png", "section": [ "4.3" ], "request_id": 548, "origin_statement": "On the sinusoidal initialization, the A100 OpenMP achieved roughly 8400 M particles/s, about 2.3× higher than the V100 Kokkos’s ~3600 M/s, and Table 6 shows OpenMP attains 98% performance portability for PIC across {V100, A100}.", "perturbed_statement": "On the sinusoidal initialization, the A100 OpenMP achieved roughly 8400 M particles/s, about 2.3× higher than the V100 Kokkos’s ~3600 M/s, and Table 6 shows OpenMP attains 90% performance portability for PIC across {V100, A100}.", "perturbed_explanation": "The perturbed portability figure of 90% contradicts Table 6, which actually reports a 98% performance portability for OpenMP on the {V100, A100} GPU set.", "claim": "On the sinusoidal initialization, the A100 OpenMP achieved roughly 8400 M particles/s, about 2.3× higher than the V100 Kokkos’s ~3600 M/s, and Table 6 shows OpenMP attains 90% performance portability for PIC across {V100, A100}.", "label": false }, { "paperid": "2410.05317v2", "paper_path": "./SciVer/papers/2410.05317v2.json", "claim_type": "parallel", "item1": "7", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.05317v2_figure_7.png", "item2_path": "./SciVer/images/2410.05317v2-Table2-1.png", "section": [ "4.3" ], "request_id": 549, "origin_statement": "ToCa accelerates inference to 2.36× speed (Table 2) while its Aesthetic Quality metric decreases by only ~3 percentage points from ~55% (Original) to ~52% (Figure 8).", "perturbed_statement": "ToCa accelerates inference to 1.87× speed (Table 2) while its Aesthetic Quality metric decreases by only ~3 percentage points from ~55% (Original) to ~52% (Figure 8).", "perturbed_explanation": "The perturbed statement is 
incorrect because Table 2 reports ToCa’s actual speed-up as 2.36×, not 1.87×. The 1.87× figure corresponds to FORA, not ToCa.", "claim": "ToCa accelerates inference to 1.87× speed (Table 2) while its Aesthetic Quality metric decreases by only ~3 percentage points from ~55% (Original) to ~52% (Figure 8).", "label": false }, { "paperid": "2409.15313v1", "paper_path": "./SciVer/papers/2409.15313v1.json", "claim_type": "parallel", "item1": "9", "item2": "12", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15313v1_figure_9.png", "item2_path": "./SciVer/images/2409.15313v1_figure_12.png", "section": [ "4.5" ], "request_id": 563, "origin_statement": "ResNet-34’s training loss declines from about 1.27 to around 0.4 by iteration 200 and remains near 0.4, while the Vision Transformer’s validation loss dips to approximately 0.34 by epoch 25, indicating the transformer achieves a slightly lower error mid-training.", "perturbed_statement": "ResNet-34’s training loss declines from about 1.27 to around 0.4 by iteration 200 and remains near 0.4, while the Vision Transformer’s validation loss dips to approximately 0.28 by epoch 25, indicating the transformer achieves a markedly lower error mid-training.", "perturbed_explanation": "The perturbed statement wrongly asserts that the Vision Transformer’s validation loss falls to about 0.28 by epoch 25. In the actual plot (Figure 12), the validation loss only dips to roughly 0.34 around epoch 25, not 0.28.", "claim": "ResNet-34’s training loss declines from about 1.27 to around 0.4 by iteration 200 and remains near 0.4, while the Vision Transformer’s validation loss dips to approximately 0.28 by epoch 25, indicating the transformer achieves a markedly lower error mid-training.", "label": false }, { "paperid": "2409.16745v1", "paper_path": "./SciVer/papers/2409.16745v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16745v1_figure_1(b).png", "item2_path": "./SciVer/images/2409.16745v1_figure_2(b).png", "section": [ "3.2" ], "request_id": 564, "origin_statement": "On 06 December 2023, X-ray polarization increases from ∼3.7% in the 2–8 keV band to ∼6.2% in the 5–8 keV band, exceeding the simultaneous optical B-band polarization of 4.27%.", "perturbed_statement": "On 06 December 2023, X-ray polarization increases from ∼3.7% in the 2–8 keV band to ∼6.2% in the 5–8 keV band, exceeding the simultaneous optical B-band polarization of 5.27%.", "perturbed_explanation": "The perturbed statement incorrectly cites the B-band optical polarization as 5.27%, whereas the measured value on 06 December 2023 is 4.27% (±0.32%) as shown in the bottom right panel of Figure 1 and described in the text.", "claim": "On 06 December 2023, X-ray polarization increases from ∼3.7% in the 2–8 keV band to ∼6.2% in the 5–8 keV band, exceeding the simultaneous optical B-band polarization of 5.27%.", "label": false }, { "paperid": "2411.08343v1", "paper_path": "./SciVer/papers/2411.08343v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.08343v1_figure_1.png", "item2_path": "./SciVer/images/2411.08343v1_figure_2.png", "section": [ "4" ], "request_id": 566, "origin_statement": "In Figure 1d, five yellow boxes indicate detected faces, yet in Figure 2b the GLM beta for number of faces (~0.6 μV) is roughly 20-fold smaller than that for index in sentence (~12 μV), showing 
sentence position drives this electrode’s neural response far more than face count.", "perturbed_statement": "In Figure 1d, ten yellow boxes indicate detected faces, yet in Figure 2b the GLM beta for number of faces (~0.6 μV) is roughly 5-fold smaller than that for index in sentence (~12 μV), showing sentence position drives this electrode’s neural response far more than face count.", "perturbed_explanation": "The perturbed statement wrongly asserts there are ten faces marked by yellow boxes in Figure 1d, whereas the image clearly shows only five. It also understates the beta ratio: with coefficients of ~12 μV versus ~0.6 μV, the index-in-sentence effect is about 20 times larger, not 5 times.", "claim": "In Figure 1d, ten yellow boxes indicate detected faces, yet in Figure 2b the GLM beta for number of faces (~0.6 μV) is roughly 5-fold smaller than that for index in sentence (~12 μV), showing sentence position drives this electrode’s neural response far more than face count.", "label": false }, { "paperid": "2411.09443v1", "paper_path": "./SciVer/papers/2411.09443v1.json", "claim_type": "parallel", "item1": "9", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09443v1_figure_9.png", "item2_path": "./SciVer/images/2411.09443v1-Table4-1.png", "section": [ "4.3.2" ], "request_id": 567, "origin_statement": "Among the 107 offset FLASH–WiggleZ systems in Fig. 9, 76 (71%) lie below the Kauffmann curve as star-forming galaxies, about 5.4 percentage points lower than the 76.4% SF fraction in the full WiggleZ-type sample (2591/3391).", "perturbed_statement": "Among the 107 offset FLASH–WiggleZ systems in Fig. 9, 80 (75%) lie below the Kauffmann curve as star-forming galaxies, about 1.4 percentage points lower than the 76.4% SF fraction in the full WiggleZ-type sample (2591/3391).", "perturbed_explanation": "Table 4 reports 76 offset objects classified as star-forming (71.0%), not 80 (75%). The perturbed counts and percentage therefore contradict the tabulated values.", "claim": "Among the 107 offset FLASH–WiggleZ systems in Fig. 
9, 80 (75%) lie below the Kauffmann curve as star-forming galaxies, about 1.4 percentage points lower than the 76.4% SF fraction in the full WiggleZ-type sample (2591/3391).", "label": false }, { "paperid": "2409.20002v2", "paper_path": "./SciVer/papers/2409.20002v2.json", "claim_type": "parallel", "item1": "1", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.20002v2-Table1-1.png", "item2_path": "./SciVer/images/2409.20002v2-Table4-1.png", "section": [ "4.4" ], "request_id": 572, "origin_statement": "Table 4 indicates that 3 of 4 major cloud AI services (Azure OpenAI Service, Amazon Bedrock, Alibaba EAS) support semantic caching, while Google Vertex AI does not; Table 1 enumerates 7 use cases embedding private attributes like [Name], [medical condition], and [email address].", "perturbed_statement": "Table 4 indicates that 2 of 4 major cloud AI services (Azure OpenAI Service, Amazon Bedrock, Alibaba EAS) support semantic caching, while Google Vertex AI does not; Table 1 enumerates 7 use cases embedding private attributes like [Name], [medical condition], and [email address].", "perturbed_explanation": "The perturbed statement wrongly claims that only 2 providers support semantic caching, but Table 4 actually shows three providers (Azure OpenAI Service, Amazon Bedrock, and Alibaba EAS) with check marks indicating support.", "claim": "Table 4 indicates that 2 of 4 major cloud AI services (Azure OpenAI Service, Amazon Bedrock, Alibaba EAS) support semantic caching, while Google Vertex AI does not; Table 1 enumerates 7 use cases embedding private attributes like [Name], [medical condition], and [email address].", "label": false }, { "paperid": "2410.20742v1", "paper_path": "./SciVer/papers/2410.20742v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20742v1_figure_4(b).png", "item2_path": "./SciVer/images/2410.20742v1_figure_4(c).png", "section": [ "6" ], "request_id": 574, "origin_statement": "Under the VITS model, POP achieves a PESQ of approximately 3.55—surpassing EM’s ~2.0—and its protected mel-spectrogram displays almost no harmonic content compared to the dense patterns in the original.", "perturbed_statement": "Under the VITS model, POP achieves a PESQ of approximately 2.5—surpassing EM’s ~2.0—and its protected mel-spectrogram displays almost no harmonic content compared to the dense patterns in the original.", "perturbed_explanation": "The perturbation is incorrect because Figure 4(b) shows that the POP method yields a PESQ of about 3.55 on VITS, not 2.5 as stated in the perturbed claim.", "claim": "Under the VITS model, POP achieves a PESQ of approximately 2.5—surpassing EM’s ~2.0—and its protected mel-spectrogram displays almost no harmonic content compared to the dense patterns in the original.", "label": false }, { "paperid": "2411.06175v1", "paper_path": "./SciVer/papers/2411.06175v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06175v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.06175v1_figure_3(a).png", "section": [ "6" ], "request_id": 586, "origin_statement": "At the 60th smallest category, training size log10(size+1) rises from ~1.0 (~9 docs) to ~1.4 (~25 docs) after RAG augmentation, while the smoothed RAG curve shows that 100-word originals are extended to roughly 150 words.", "perturbed_statement": "At the 60th 
smallest category, training size log10(size+1) falls from ~1.0 (~9 docs) to ~0.6 (~3 docs) after RAG augmentation, while the smoothed RAG curve shows that 100-word originals shrink to roughly 80 words.", "perturbed_explanation": "The perturbed statement incorrectly claims the augmented category size decreases from log10 ≈1.0 to ≈0.6; the chart actually shows an increase to about 1.4. It also wrongly asserts that 100-word originals shrink to 80 words, whereas the smoothed RAG curve indicates they grow to around 150 words.", "claim": "At the 60th smallest category, training size log10(size+1) falls from ~1.0 (~9 docs) to ~0.6 (~3 docs) after RAG augmentation, while the smoothed RAG curve shows that 100-word originals shrink to roughly 80 words.", "label": false }, { "paperid": "2409.05653v3", "paper_path": "./SciVer/papers/2409.05653v3.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.05653v3-Table5-1.png", "item2_path": "./SciVer/images/2409.05653v3-Table6-1.png", "section": [ "5.2" ], "request_id": 591, "origin_statement": "Within Table 5, the nominative pronoun “xe” shows the highest single-case Jaccard index at 0.32, while in Table 6, SpanBERT-large’s maximum cross-case overlap is 0.29 for “she” between nominative and possessive cases.", "perturbed_statement": "Within Table 5, the nominative pronoun “xe” shows the highest single-case Jaccard index at 0.22, while in Table 6, SpanBERT-large’s maximum cross-case overlap is 0.29 for “she” between nominative and possessive cases.", "perturbed_explanation": "The perturbed statement misreports the Jaccard index for “xe” in the nominative case. Table 5 shows that this value is actually 0.32, not 0.22, so the stated 0.22 contradicts the original data.", "claim": "Within Table 5, the nominative pronoun “xe” shows the highest single-case Jaccard index at 0.22, while in Table 6, SpanBERT-large’s maximum cross-case overlap is 0.29 for “she” between nominative and possessive cases.", "label": false }, { "paperid": "2409.00768v1", "paper_path": "./SciVer/papers/2409.00768v1.json", "claim_type": "parallel", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.00768v1_figure_5.png", "item2_path": "./SciVer/images/2409.00768v1-Table7-1.png", "section": [ "4.4" ], "request_id": 598, "origin_statement": "On DF2K, JPEG quality 95% yields blockiness of 10.33 (Table 8) and the HAT model’s PSNR climbs to about 33.0 dB by 800k iterations (Figure 5). At 50% quality, blockiness spikes to 165.34 and PSNR drops below 32.0 dB by 800k.", "perturbed_statement": "On DF2K, JPEG quality 95% yields blockiness of 83.20 (Table 8) and the HAT model’s PSNR climbs to about 33.0 dB by 800k iterations (Figure 5). At 50% quality, blockiness spikes to 165.34 and PSNR drops below 32.0 dB by 800k.", "perturbed_explanation": "The perturbed statement wrongly lists the blockiness at 95% quality as 83.20, but Table 8 shows the blockiness for 95% quality on DF2K is actually 10.33, not 83.20.", "claim": "On DF2K, JPEG quality 95% yields blockiness of 83.20 (Table 8) and the HAT model’s PSNR climbs to about 33.0 dB by 800k iterations (Figure 5). 
At 50% quality, blockiness spikes to 165.34 and PSNR drops below 32.0 dB by 800k.", "label": false }, { "paperid": "2409.10695v2", "paper_path": "./SciVer/papers/2409.10695v2.json", "claim_type": "parallel", "item1": "15(b)", "item2": "16", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10695v2_figure_15(b).png", "item2_path": "./SciVer/images/2409.10695v2_figure_16.png", "section": [ "6.2.2" ], "request_id": 602, "origin_statement": "In the camping ad tests from Fig.15, PGv3 renders the “WWW.CAMPINGSTORE.COM” URL at the bottom correctly while Ideogram-2 omits it; Fig.16 shows PGv3’s overall accuracy of about 0.89 exceeds Ideogram-2’s ~0.80 by roughly 0.09.", "perturbed_statement": "In the camping ad tests from Fig.15, Ideogram-2 renders the “WWW.CAMPINGSTORE.COM” URL at the bottom correctly while PGv3 omits it; Fig.16 shows PGv3’s overall accuracy of about 0.89 exceeds Ideogram-2’s ~0.80 by roughly 0.09.", "perturbed_explanation": "Fig.15 clearly shows that PGv3 (the right two images) correctly displays the URL “WWW.CAMPINGSTORE.COM,” whereas Ideogram-2 (the left images) omits or misformats it. The perturbed statement wrongly swaps which model renders the URL correctly.", "claim": "In the camping ad tests from Fig.15, Ideogram-2 renders the “WWW.CAMPINGSTORE.COM” URL at the bottom correctly while PGv3 omits it; Fig.16 shows PGv3’s overall accuracy of about 0.89 exceeds Ideogram-2’s ~0.80 by roughly 0.09.", "label": false }, { "paperid": "2409.08980v1", "paper_path": "./SciVer/papers/2409.08980v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08980v1_figure_2.png", "item2_path": "./SciVer/images/2409.08980v1_figure_3.png", "section": [ "4.2.2" ], "request_id": 608, "origin_statement": "Overall Risk-Benefit is the most impactful feature with a mean absolute SHAP of about 0.17 (Fig. 2), and its highest values (red points) yield positive SHAP effects up to roughly 0.12, strongly predicting high trust (Fig. 3).", "perturbed_statement": "Overall Risk-Benefit is the most impactful feature with a mean absolute SHAP of about 0.10 (Fig. 2), and its highest values (red points) yield positive SHAP effects up to roughly 0.12, strongly predicting high trust (Fig. 3).", "perturbed_explanation": "The perturbed statement misreports the mean absolute SHAP for Overall Risk-Benefit: Fig. 2 shows it is approximately 0.17, not 0.10, so the claimed 0.10 contradicts the bar plot’s actual value.", "claim": "Overall Risk-Benefit is the most impactful feature with a mean absolute SHAP of about 0.10 (Fig. 2), and its highest values (red points) yield positive SHAP effects up to roughly 0.12, strongly predicting high trust (Fig. 3).", "label": false }, { "paperid": "2409.19589v1", "paper_path": "./SciVer/papers/2409.19589v1.json", "claim_type": "parallel", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19589v1_figure_7.png", "item2_path": "./SciVer/images/2409.19589v1-Table3-1.png", "section": [ "5.2.2" ], "request_id": 613, "origin_statement": "In the synthetic bear patch from Fig. 7, Ours-15 recovers individual fur strands more faithfully than LDM-100, and Table 3 shows Ours-15 achieves a lower LPIPS (0.244 vs 0.255) than LDM-100.", "perturbed_statement": "In the synthetic bear patch from Fig. 
7, Ours-15 recovers individual fur strands more faithfully than LDM-100, and Table 3 shows Ours-15 achieves a higher LPIPS (0.264 vs 0.255) than LDM-100.", "perturbed_explanation": "The perturbation is incorrect because Table 3 actually reports Ours-15’s LPIPS as 0.244, not 0.264, and this value is lower than LDM-100’s 0.255, contrary to the altered claim.", "claim": "In the synthetic bear patch from Fig. 7, Ours-15 recovers individual fur strands more faithfully than LDM-100, and Table 3 shows Ours-15 achieves a higher LPIPS (0.264 vs 0.255) than LDM-100.", "label": false }, { "paperid": "2409.13873v1", "paper_path": "./SciVer/papers/2409.13873v1.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.13873v1_figure_3.png", "item2_path": "./SciVer/images/2409.13873v1_figure_5.png", "section": [ "8" ], "request_id": 622, "origin_statement": "Figure 3 shows the posterior sd of the change-point (chgpt_sd) is about 0.15, but in Figure 5 the 95% prediction interval width increases from around 0.5 units at 0.2 years to about 1.5 units at 0.8 years, indicating threefold larger uncertainty after the change point.", "perturbed_statement": "Figure 3 shows the posterior sd of the change-point (chgpt_sd) is about 0.30, but in Figure 5 the 95% prediction interval width increases from around 0.5 units at 0.2 years to about 1.5 units at 0.8 years, indicating threefold larger uncertainty after the change point.", "perturbed_explanation": "The perturbed value of the posterior standard deviation is incorrect: the traceplot in Figure 3 centers around approximately 0.15 for chgpt_sd, not 0.30, so the statement contradicts the actual posterior sd observed in the trace.", "claim": "Figure 3 shows the posterior sd of the change-point (chgpt_sd) is about 0.30, but in Figure 5 the 95% prediction interval width increases from around 0.5 units at 0.2 years to about 1.5 units at 0.8 years, indicating threefold larger uncertainty after the change point.", "label": false }, { "paperid": "2411.15839v1", "paper_path": "./SciVer/papers/2411.15839v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15839v1_figure_4(b).png", "item2_path": "./SciVer/images/2411.15839v1-Table3-1.png", "section": [ "4.2" ], "request_id": 629, "origin_statement": "In the MME benchmark, VaLiD improves the Posters task accuracy from roughly 50% to about 90%, while on the AMBER benchmark for LLaVA-v1.5 it boosts action attribute F1 from 78.00% to 85.57%.", "perturbed_statement": "In the MME benchmark, VaLiD improves the Posters task accuracy from roughly 50% to about 75%, while on the AMBER benchmark for LLaVA-v1.5 it boosts action attribute F1 from 78.00% to 90.00%.", "perturbed_explanation": "The perturbed statement is incorrect because the radar chart shows VaLiD achieves about 90% accuracy on the Posters task (not 75%), and Table 3 reports an action attribute F1 of 85.57% for VaLiD on LLaVA-v1.5 (not 90.00%).", "claim": "In the MME benchmark, VaLiD improves the Posters task accuracy from roughly 50% to about 75%, while on the AMBER benchmark for LLaVA-v1.5 it boosts action attribute F1 from 78.00% to 90.00%.", "label": false }, { "paperid": "2411.03359v1", "paper_path": "./SciVer/papers/2411.03359v1.json", "claim_type": "parallel", "item1": "3", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.03359v1-Table3-1.png", 
"item2_path": "./SciVer/images/2411.03359v1-Table6-1.png", "section": [ "4.2" ], "request_id": 631, "origin_statement": "On the ImageNet-20 (ID) vs ImageNet-10 (OOD) task, SCT cuts FPR95 by 9.4 points (34.40→25.00) and raises AUROC from 92.34% to 94.95%, while its 1-shot ID accuracy (68.80%) remains within 0.23% of LoCoOp's 69.03%.", "perturbed_statement": "On the ImageNet-20 (ID) vs ImageNet-10 (OOD) task, SCT cuts FPR95 by 10.4 points (34.40→25.00) and raises AUROC from 92.34% to 94.95%, while its 1-shot ID accuracy (69.80%) remains within 0.23% of LoCoOp's 69.03%.", "perturbed_explanation": "The perturbed statement is incorrect because SCT actually reduces FPR95 by 9.4 points (from 34.40 to 25.00), not by 10.4. Additionally, SCT's 1-shot ID accuracy is 68.80%, not 69.80%, as shown in Table 5.", "claim": "On the ImageNet-20 (ID) vs ImageNet-10 (OOD) task, SCT cuts FPR95 by 10.4 points (34.40→25.00) and raises AUROC from 92.34% to 94.95%, while its 1-shot ID accuracy (69.80%) remains within 0.23% of LoCoOp's 69.03%.", "label": false }, { "paperid": "2410.22451v1", "paper_path": "./SciVer/papers/2410.22451v1.json", "claim_type": "parallel", "item1": "8", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22451v1_figure_8.png", "item2_path": "./SciVer/images/2410.22451v1-Table3-1.png", "section": [ "6" ], "request_id": 638, "origin_statement": "At a 128-frame interjection, Cutie’s suffix J&F plunges to about 84.9 (solid red in Fig.8 and Table3), whereas Cutie+ recovers to 89.0, yielding a 4.1-point gain, slightly surpassing SAM 2+’s 3.9-point improvement over SAM 2 (90.0 vs 86.1).", "perturbed_statement": "At a 128-frame interjection, Cutie’s suffix J&F plunges to about 88.9 (solid red in Fig.8 and Table3), whereas Cutie+ recovers to 89.0, yielding a 4.1-point gain, slightly surpassing SAM 2+’s 3.9-point improvement over SAM 2 (90.0 vs 86.1).", "perturbed_explanation": "The perturbed statement incorrectly lists Cutie’s 128-frame suffix J&F as 88.9, but Table 3 and the solid red line in Figure 8 both show it is actually 84.9.", "claim": "At a 128-frame interjection, Cutie’s suffix J&F plunges to about 88.9 (solid red in Fig.8 and Table3), whereas Cutie+ recovers to 89.0, yielding a 4.1-point gain, slightly surpassing SAM 2+’s 3.9-point improvement over SAM 2 (90.0 vs 86.1).", "label": false }, { "paperid": "2411.16129v1", "paper_path": "./SciVer/papers/2411.16129v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.16129v1-Table5-1.png", "item2_path": "./SciVer/images/2411.16129v1-Table6-1.png", "section": [ "4.2" ], "request_id": 641, "origin_statement": "Flipping the depth scan alone reduces IoU from 46.21% (forward, Table 5) to 45.73% (reversed, Table 6—a drop of 0.48), while mIoU rises from 16.50% to 16.73%—a 0.23-point increase.", "perturbed_statement": "Flipping the depth scan alone reduces IoU from 46.21% (forward, Table 5) to 44.73% (reversed, Table 6—a drop of 1.48), while mIoU rises from 16.50% to 16.73%—a 0.23-point increase.", "perturbed_explanation": "The perturbed statement incorrectly reports the reversed IoU as 44.73% and a drop of 1.48. 
Table 6 actually shows the reversed depth-only IoU is 45.73% (method (a)), implying a 0.48-point drop from 46.21%, not 1.48.", "claim": "Flipping the depth scan alone reduces IoU from 46.21% (forward, Table 5) to 44.73% (reversed, Table 6—a drop of 1.48), while mIoU rises from 16.50% to 16.73%—a 0.23-point increase.", "label": false }, { "paperid": "2409.09506v1", "paper_path": "./SciVer/papers/2409.09506v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.09506v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.09506v1_figure_1(b).png", "section": [ "4.2" ], "request_id": 643, "origin_statement": "ESPnet requires 454 new lines including ~270 Python and ~80 Bash, plus about 1,130 Python-dependent files, while ESPnet-EZ needs only 168 new lines (~130 Python, 0 Bash) and about 180 Python-dependent files, reflecting ~2.7× and ~6.3× cuts.", "perturbed_statement": "ESPnet requires 454 new lines including ~270 Python and ~80 Bash, plus about 1,130 Python-dependent files, while ESPnet-EZ needs only 168 new lines (~150 Python, 0 Bash) and about 180 Python-dependent files.", "perturbed_explanation": "The perturbed statement incorrectly states that ESPnet-EZ uses ~150 Python lines, whereas the chart shows it uses approximately 130 Python lines out of the 168 total, so the ~150 figure contradicts the actual data.", "claim": "ESPnet requires 454 new lines including ~270 Python and ~80 Bash, plus about 1,130 Python-dependent files, while ESPnet-EZ needs only 168 new lines (~150 Python, 0 Bash) and about 180 Python-dependent files.", "label": false }, { "paperid": "2411.15224v1", "paper_path": "./SciVer/papers/2411.15224v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15224v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.15224v1_figure_2(b).png", "section": [ "4.2.2", "4.2.1" ], "request_id": 644, "origin_statement": "T_det displays diagonal values ranging from 0.825 (3rd row) to 0.954 (1st row) with all off-diagonal entries below 0.054, whereas after training the gradient norms on the diagonal reach ~0.20 compared to under 0.03 for non-diagonals.", "perturbed_statement": "T_det displays diagonal values ranging from 0.825 (3rd row) to 0.954 (1st row) with all off-diagonal entries below 0.054, whereas after training the gradient norms on the diagonal reach only ~0.10 compared to under 0.03 for non-diagonals.", "perturbed_explanation": "The perturbed statement wrongly downplays the peak diagonal gradient norm. 
Figure 2(b)’s color bar and bright diagonal line show gradient norms reaching about 0.20, not ~0.10, making the altered detail inconsistent with the visualization.", "claim": "T_det displays diagonal values ranging from 0.825 (3rd row) to 0.954 (1st row) with all off-diagonal entries below 0.054, whereas after training the gradient norms on the diagonal reach only ~0.10 compared to under 0.03 for non-diagonals.", "label": false }, { "paperid": "2411.16012v1", "paper_path": "./SciVer/papers/2411.16012v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.16012v1-Table4-1.png", "item2_path": "./SciVer/images/2411.16012v1-Table5-1.png", "section": [ "4.2" ], "request_id": 647, "origin_statement": "Using Gaia DR3, the inferred rotation gradient about the z-axis is ωz = 29.02 ± 11.79 m s⁻¹ pc⁻¹ (a 2.5σ detection), while in the Galactic frame this work measures W = −1.29 ± 0.03 km s⁻¹, tightening Lodieu et al.’s uncertainty from 0.05 to 0.03 km s⁻¹.", "perturbed_statement": "Using Gaia DR3, the inferred rotation gradient about the z-axis is ωz = 12.34 ± 11.79 m s⁻¹ pc⁻¹ (a 2.5σ detection), while in the Galactic frame this work measures W = −1.29 ± 0.03 km s⁻¹, tightening Lodieu et al.’s uncertainty from 0.05 to 0.03 km s⁻¹.", "perturbed_explanation": "The perturbed ωz value of 12.34 m s⁻¹ pc⁻¹ contradicts Table 4, which reports ωz = 29.02 ± 11.79 m s⁻¹ pc⁻¹ for Gaia DR3. All other figures remain unchanged and correct.", "claim": "Using Gaia DR3, the inferred rotation gradient about the z-axis is ωz = 12.34 ± 11.79 m s⁻¹ pc⁻¹ (a 2.5σ detection), while in the Galactic frame this work measures W = −1.29 ± 0.03 km s⁻¹, tightening Lodieu et al.’s uncertainty from 0.05 to 0.03 km s⁻¹.", "label": false }, { "paperid": "2411.18065v1", "paper_path": "./SciVer/papers/2411.18065v1.json", "claim_type": "parallel", "item1": "3", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.18065v1_figure_3.png", "item2_path": "./SciVer/images/2411.18065v1_figure_6.png", "section": [ "3.4" ], "request_id": 649, "origin_statement": "In Figure 3’s floating-point example, a 3-bit mantissa multiplied by a 2-bit mantissa produces 6 primitive ANDs before truncation, while FBRT in Figure 6a handles a 2-bit activation × 4-bit weight multiplication yielding 8 primitives, all without using additional links.", "perturbed_statement": "In Figure 3’s floating-point example, a 4-bit mantissa multiplied by a 2-bit mantissa produces 8 primitive ANDs before truncation, while FBRT in Figure 6a handles a 2-bit activation × 4-bit weight multiplication yielding 8 primitives, all without using additional links.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 3 uses a 3-bit mantissa (A2, A1, A0), not 4 bits, so it generates 3×2=6 primitives rather than 8.", "claim": "In Figure 3’s floating-point example, a 4-bit mantissa multiplied by a 2-bit mantissa produces 8 primitive ANDs before truncation, while FBRT in Figure 6a handles a 2-bit activation × 4-bit weight multiplication yielding 8 primitives, all without using additional links.", "label": false }, { "paperid": "2411.13005v1", "paper_path": "./SciVer/papers/2411.13005v1.json", "claim_type": "parallel", "item1": "6(b)", "item2": "6(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13005v1_figure_6(b).png", "item2_path": "./SciVer/images/2411.13005v1_figure_6(c).png", "section": [ "4.3" ], 
"request_id": 655, "origin_statement": "DT-LSD’s line overlay on the YorkUrban bedroom scene shows clean continuous edges around the windows with no noisy segments, correlating with its 1.0% higher sAP and 1.6% higher AP over MLNET.", "perturbed_statement": "DT-LSD’s line overlay on the YorkUrban bedroom scene shows numerous noisy line segments around the windows, correlating with its 3.0% lower sAP and 2.0% lower AP than MLNET.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 reports DT-LSD actually outperforms MLNET by about 1.0% in sAP and 1.6% in AP, not underperforms by 3.0% and 2.0%. Additionally, the qualitative image shows DT-LSD avoids noisy window edges rather than producing numerous noisy segments.", "claim": "DT-LSD’s line overlay on the YorkUrban bedroom scene shows numerous noisy line segments around the windows, correlating with its 3.0% lower sAP and 2.0% lower AP than MLNET.", "label": false }, { "paperid": "2410.06842v1", "paper_path": "./SciVer/papers/2410.06842v1.json", "claim_type": "parallel", "item1": "13", "item2": "15", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.06842v1_figure_13.png", "item2_path": "./SciVer/images/2410.06842v1_figure_15.png", "section": [ "4.4" ], "request_id": 658, "origin_statement": "The 'Other' subcategory in Fig.13 attains an S-Measure of about 0.96, while in Fig.15, SurANet-C correctly segments concealed military objects in all three test images, whereas competing methods like SINetV2 miss at least one instance.", "perturbed_statement": "The 'Other' subcategory in Fig.13 attains an S-Measure of about 0.75, while in Fig.15, SurANet-C correctly segments concealed military objects in only one test image, whereas competing methods like SINetV2 miss at least one instance.", "perturbed_explanation": "In Fig.13, the 'Other' subcategory actually reaches an S-Measure near 0.96, not 0.75. 
Additionally, Fig.15 shows SurANet-C successfully segments objects in all three military camouflage examples, not just one.", "claim": "The 'Other' subcategory in Fig.13 attains an S-Measure of about 0.75, while in Fig.15, SurANet-C correctly segments concealed military objects in only one test image, whereas competing methods like SINetV2 miss at least one instance.", "label": false }, { "paperid": "2409.12210v1", "paper_path": "./SciVer/papers/2409.12210v1.json", "claim_type": "parallel", "item1": "3", "item2": "8", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.12210v1_figure_3.png", "item2_path": "./SciVer/images/2409.12210v1-Table8-1.png", "section": [ "4.3", "4.2" ], "request_id": 665, "origin_statement": "In the baseline’s last epoch, EP5 is routed approximately 3.99×10^8 tokens compared to EP0’s 3.93×10^8 (ratio ≈1.02), while Table 7 shows a per-layer max/min routing ratio of 3.69 for layer0 top1 in epoch 7.", "perturbed_statement": "In the baseline’s last epoch, EP5 is routed approximately 4.05×10^8 tokens compared to EP0’s 3.93×10^8 (ratio ≈1.03), while Table 7 shows a per-layer max/min routing ratio of 3.0 for layer0 top1 in epoch 7.", "perturbed_explanation": "This statement is incorrect because Figure 3(b) shows EP5 receives about 3.99×10^8 tokens, not 4.05×10^8, and Table 7 reports a max/min routing ratio of 3.69 for layer0 top1 in epoch 7, not 3.0.", "claim": "In the baseline’s last epoch, EP5 is routed approximately 4.05×10^8 tokens compared to EP0’s 3.93×10^8 (ratio ≈1.03), while Table 7 shows a per-layer max/min routing ratio of 3.0 for layer0 top1 in epoch 7.", "label": false }, { "paperid": "2409.14857v2", "paper_path": "./SciVer/papers/2409.14857v2.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14857v2_figure_2(a).png", "item2_path": "./SciVer/images/2409.14857v2_figure_2(b).png", "section": [ "6.1" ], "request_id": 670, "origin_statement": "DistMult embeddings place qatar at roughly (2.9, 2.28), the highest Y-value among the labeled countries, whereas in the FMult-sigmoid plot belize’s activation falls below 0.5 at around X = –4.", "perturbed_statement": "DistMult embeddings place morocco at roughly (−2.7, 2.28), the highest Y-value among the labeled countries, whereas in the FMult-sigmoid plot belize’s activation falls below 0.5 at around X = –4.", "perturbed_explanation": "This is incorrect because in the DistMult plot morocco’s Y-coordinate is about 2.03, not 2.28, and it does not have the highest Y-value (qatar does at approximately 2.28).", "claim": "DistMult embeddings place morocco at roughly (−2.7, 2.28), the highest Y-value among the labeled countries, whereas in the FMult-sigmoid plot belize’s activation falls below 0.5 at around X = –4.", "label": false }, { "paperid": "2409.06538v1", "paper_path": "./SciVer/papers/2409.06538v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06538v1_figure_2.png", "item2_path": "./SciVer/images/2409.06538v1_figure_3.png", "section": [ "2.2" ], "request_id": 678, "origin_statement": "Fig. 2 shows that contracting the network from T9 yields five red‐circled process tensors (T6′, T5′, T4′, T3′, T2′′) before obtaining Z; Fig. 3 demonstrates that after fixing s1, s2, s3 and absorbing W14, W25, W36, three new process tensors T4″, T5″, T6″ are generated.", "perturbed_statement": "Fig.
2 shows that contracting the network from T9 yields six red‐circled process tensors (T6′, T5′, T4′, T3′, T2′′, T1′) before obtaining Z; Fig. 3 demonstrates that after fixing s1, s2, s3 and absorbing W14, W25, W36, four new process tensors T4″, T5″, T6″, T7″ are generated.", "perturbed_explanation": "The perturbed statement incorrectly claims six intermediate tensors in Fig. 2, but the caption identifies only five red‐circled process tensors (T6′, T5′, T4′, T3′, T2′′). It also asserts four new process tensors in Fig. 3, whereas only three (T4″, T5″, T6″) are produced.", "claim": "Fig. 2 shows that contracting the network from T9 yields six red‐circled process tensors (T6′, T5′, T4′, T3′, T2′′, T1′) before obtaining Z; Fig. 3 demonstrates that after fixing s1, s2, s3 and absorbing W14, W25, W36, four new process tensors T4″, T5″, T6″, T7″ are generated.", "label": false }, { "paperid": "2411.05867v1", "paper_path": "./SciVer/papers/2411.05867v1.json", "claim_type": "parallel", "item1": "7", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05867v1_figure_7.png", "item2_path": "./SciVer/images/2411.05867v1_figure_9.png", "section": [ "3.2.2" ], "request_id": 680, "origin_statement": "At grid-search setting C (low input scaling, low regularization, high spectral radius), the hybrid reservoir computer achieves a mean valid time exceeding 200 s in the synchronous regime, whereas the standard RC’s mean valid time drops below 100 s.", "perturbed_statement": "At grid-search setting C (low input scaling, low regularization, high spectral radius), the hybrid reservoir computer achieves a mean valid time of only 80 s in the synchronous regime, while the standard RC surpasses 200 s.", "perturbed_explanation": "This statement contradicts Figure 9a, which shows the hybrid RC maintaining valid times above 200 s at high spectral radius, and the standard RC dropping below 100 s under the same conditions.", "claim": "At grid-search setting C (low input scaling, low regularization, high spectral radius), the hybrid reservoir computer achieves a mean valid time of only 80 s in the synchronous regime, while the standard RC surpasses 200 s.", "label": false }, { "paperid": "2410.04927v2", "paper_path": "./SciVer/papers/2410.04927v2.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04927v2-Table2-1.png", "item2_path": "./SciVer/images/2410.04927v2-Table3-1.png", "section": [ "5.5" ], "request_id": 681, "origin_statement": "Despite Office’s low interaction density (0.47%) and modest average sequence length (9.1), FELLAS boosts GRU4Rec’s H@20 from 0.03875 under centralized training to 0.05477—over a 41% improvement.", "perturbed_statement": "Despite Office’s low interaction density (0.07%) and modest average sequence length (9.1), FELLAS boosts GRU4Rec’s H@20 from 0.03875 under centralized training to 0.05477—over a 41% improvement.", "perturbed_explanation": "The perturbed statement incorrectly reports Office’s density as 0.07%. 
According to Table 2, Office’s interaction density is 0.47%, not 0.07%.", "claim": "Despite Office’s low interaction density (0.07%) and modest average sequence length (9.1), FELLAS boosts GRU4Rec’s H@20 from 0.03875 under centralized training to 0.05477—over a 41% improvement.", "label": false }, { "paperid": "2409.13990v2", "paper_path": "./SciVer/papers/2409.13990v2.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13990v2_figure_3.png", "item2_path": "./SciVer/images/2409.13990v2-Table2-1.png", "section": [ "4.2" ], "request_id": 699, "origin_statement": "For α=0.1 at η=4, Table 2 reports mean false claims 2.356 and mean true claims 4.722, while Figure 3 shows median false discoveries about 2 versus median true discoveries about 5, indicating true selections roughly double false ones at this threshold.", "perturbed_statement": "For α=0.1 at η=4, Table 2 reports mean false claims 1.356 and mean true claims 4.722, while Figure 3 shows median false discoveries about 2 versus median true discoveries about 5, indicating true selections roughly double false ones at this threshold.", "perturbed_explanation": "The perturbed statement incorrectly reports the mean number of false claims as 1.356. Table 2 actually gives the mean false claims at η=4 as 2.356, so the stated value contradicts the table.", "claim": "For α=0.1 at η=4, Table 2 reports mean false claims 1.356 and mean true claims 4.722, while Figure 3 shows median false discoveries about 2 versus median true discoveries about 5, indicating true selections roughly double false ones at this threshold.", "label": false }, { "paperid": "2409.19521v1", "paper_path": "./SciVer/papers/2409.19521v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.19521v1-Table1-1.png", "item2_path": "./SciVer/images/2409.19521v1-Table2-1.png", "section": [ "5.2.1" ], "request_id": 714, "origin_statement": "GenTel-Shield achieves an F1 score of 97.69% on jailbreak attacks (Table 1) versus 96.74% on goal hijacking attacks (Table 2), indicating a 0.95-point higher performance in detecting jailbreak scenarios.", "perturbed_statement": "GenTel-Shield achieves an F1 score of 97.69% on jailbreak attacks versus 98.74% on goal hijacking attacks, indicating a 1.05-point higher performance against goal hijacking.", "perturbed_explanation": "The perturbed statement wrongly lists the goal hijacking F1 as 98.74%, but Table 2 reports the actual F1 for goal hijacking as 96.74%, so the stated F1 value and the 1.05-point difference are incorrect.", "claim": "GenTel-Shield achieves an F1 score of 97.69% on jailbreak attacks versus 98.74% on goal hijacking attacks, indicating a 1.05-point higher performance against goal hijacking.", "label": false }, { "paperid": "2409.13980v1", "paper_path": "./SciVer/papers/2409.13980v1.json", "claim_type": "parallel", "item1": "7", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13980v1_figure_7.png", "item2_path": "./SciVer/images/2409.13980v1-Table4-1.png", "section": [ "4.5" ], "request_id": 718, "origin_statement": "On WinoGAViL, GPT-4 judged our context-aware descriptions superior 67.3% of the time, and in ICL performance, CVR-ICL reaches 69.8% accuracy on the 5/6 category, outperforming KATE’s 68.6%.", "perturbed_statement": "On WinoGAViL, GPT-4 judged our context-aware descriptions superior 67.3% of the 
time, and in ICL performance, CVR-ICL reaches 75.2% accuracy on the 5/6 category, outperforming KATE’s 68.6%.", "perturbed_explanation": "The perturbed statement wrongly inflates the CVR-ICL accuracy on WinoGAViL 5/6 to 75.2%, but Table 4 shows that CVR-ICL actually achieves 69.8% on that category.", "claim": "On WinoGAViL, GPT-4 judged our context-aware descriptions superior 67.3% of the time, and in ICL performance, CVR-ICL reaches 75.2% accuracy on the 5/6 category, outperforming KATE’s 68.6%.", "label": false }, { "paperid": "2410.19599v2", "paper_path": "./SciVer/papers/2410.19599v2.json", "claim_type": "parallel", "item1": "9", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19599v2_figure_9.png", "item2_path": "./SciVer/images/2410.19599v2-Table5-1.png", "section": [ "2.2" ], "request_id": 719, "origin_statement": "GPT-4’s subject density peaks at 19% in all three 11–20 game variants (basic, costless, cycle) in Figure 9, and Table 5 reports its Jensen–Shannon divergence as roughly 0.5025 (p < 0.0001), reflecting a significant distributional mismatch with human players.", "perturbed_statement": "GPT-4’s subject density peaks at 17% in all three 11–20 game variants (basic, costless, cycle) in Figure 9, and Table 5 reports its Jensen–Shannon divergence as roughly 0.5025 (p < 0.0001), reflecting a significant distributional mismatch with human players.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 9 shows GPT-4’s density mode at 19%, not 17%, across the basic, costless, and cycle variants. There is no peak at 17% for GPT-4 in the plotted response distributions.", "claim": "GPT-4’s subject density peaks at 17% in all three 11–20 game variants (basic, costless, cycle) in Figure 9, and Table 5 reports its Jensen–Shannon divergence as roughly 0.5025 (p < 0.0001), reflecting a significant distributional mismatch with human players.", "label": false }, { "paperid": "2409.04459v1", "paper_path": "./SciVer/papers/2409.04459v1.json", "claim_type": "parallel", "item1": "4", "item2": "13", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.04459v1_figure_4.png", "item2_path": "./SciVer/images/2409.04459v1-Table13-1.png", "section": [ "4.5" ], "request_id": 720, "origin_statement": "At k=25, MIND achieves a Δ_cos of 91.12% (Table 13) while its classification accuracy is about 77.2% (Figure 4), implying the verifiability margin remains roughly 14 percentage points above its accuracy.", "perturbed_statement": "At k=25, MIND achieves a Δ_cos of 85.33% (Table 13) while its classification accuracy is about 80.5% (Figure 4), implying the verifiability margin is only about 5 percentage points above its accuracy.", "perturbed_explanation": "This statement is incorrect because Table 13 shows MIND’s Δ_cos at k=25 is actually 91.12%, not 85.33%, and Figure 4 indicates MIND’s accuracy at k=25 is approximately 77.2%, not 80.5%.", "claim": "At k=25, MIND achieves a Δ_cos of 85.33% (Table 13) while its classification accuracy is about 80.5% (Figure 4), implying the verifiability margin is only about 5 percentage points above its accuracy.", "label": false }, { "paperid": "2410.10652v1", "paper_path": "./SciVer/papers/2410.10652v1.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.10652v1_figure_4.png", "item2_path": "./SciVer/images/2410.10652v1-Table2-1.png", "section": [ "4.2" ], "request_id": 728, 
"origin_statement": "In the Layer5_Layer6_100 query for sample 151675, QueST reproduced both Layer5 and Layer6 cell types as a concentrated region (Figure 4), despite having a lower match accuracy of 0.751 compared to STAGATE’s 0.969 in Table 2.", "perturbed_statement": "In the Layer5_Layer6_100 query for sample 151675, QueST reproduced both Layer5 and Layer6 cell types as a concentrated region (Figure 4), achieving the highest match accuracy of 0.969, surpassing STAGATE’s 0.751 in Table 2.", "perturbed_explanation": "The perturbation is incorrect because Table 2 shows QueST’s match accuracy is 0.751, not 0.969, and STAGATE’s accuracy is 0.969, not 0.751.", "claim": "In the Layer5_Layer6_100 query for sample 151675, QueST reproduced both Layer5 and Layer6 cell types as a concentrated region (Figure 4), achieving the highest match accuracy of 0.969, surpassing STAGATE’s 0.751 in Table 2.", "label": false }, { "paperid": "2411.10959v1", "paper_path": "./SciVer/papers/2411.10959v1.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.10959v1_figure_3.png", "item2_path": "./SciVer/images/2411.10959v1-Table3-1.png", "section": [ "5.1" ], "request_id": 744, "origin_statement": "The aerial view depicts a sprawling rural shrid with population concentrated centrally, matching its low urban area share; according to Table 3, treatment shrids (2,276 villages) average 2,604 residents and 0.004 urban area, about 14% higher average population than buffer control shrids (2,931 villages).", "perturbed_statement": "The aerial view depicts a sprawling rural shrid with population concentrated centrally, matching its low urban area share; according to Table 3, treatment shrids (2,931 villages) average 2,604 residents and 0.004 urban area, about 14% higher average population than buffer control shrids (2,276 villages).", "perturbed_explanation": "The perturbed statement swaps the shrid counts for treatment and buffer control groups. 
Table 3 actually lists 2,276 treatment shrids and 2,931 buffer control shrids, so claiming 2,931 treatment and 2,276 buffer control villages contradicts the table.", "claim": "The aerial view depicts a sprawling rural shrid with population concentrated centrally, matching its low urban area share; according to Table 3, treatment shrids (2,931 villages) average 2,604 residents and 0.004 urban area, about 14% higher average population than buffer control shrids (2,276 villages).", "label": false }, { "paperid": "2411.13545v2", "paper_path": "./SciVer/papers/2411.13545v2.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.13545v2_figure_4.png", "item2_path": "./SciVer/images/2411.13545v2-Table3-1.png", "section": [ "4.3.1" ], "request_id": 760, "origin_statement": "In ResNet50 at 99.99% sparsity, DyReLU phasing yields an initial gradient norm of approximately 3.2 (top-right of Figure 4) and 53.32% accuracy, while weight sharing gives a slightly lower initial gradient norm (~2.5, bottom-right) but higher accuracy at 62.71% (Table 3).", "perturbed_statement": "In ResNet50 at 99.99% sparsity, DyReLU phasing yields an initial gradient norm of approximately 2.0 (top-right of Figure 4) and 53.32% accuracy, while weight sharing gives a slightly lower initial gradient norm (~2.5, bottom-right) but higher accuracy at 72.71% (Table 3).", "perturbed_explanation": "The perturbed statement is incorrect because Figure 4 shows the initial gradient norm for DyReLU phasing is about 3.2, not 2.0. Additionally, Table 3 reports weight sharing accuracy at 99.99% sparsity as 62.71%, not 72.71%.", "claim": "In ResNet50 at 99.99% sparsity, DyReLU phasing yields an initial gradient norm of approximately 2.0 (top-right of Figure 4) and 53.32% accuracy, while weight sharing gives a slightly lower initial gradient norm (~2.5, bottom-right) but higher accuracy at 72.71% (Table 3).", "label": false }, { "paperid": "2411.07042v1", "paper_path": "./SciVer/papers/2411.07042v1.json", "claim_type": "parallel", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07042v1_figure_1.png", "item2_path": "./SciVer/images/2411.07042v1-Table3-1.png", "section": [ "4.1.2" ], "request_id": 766, "origin_statement": "Fig.1 shows Minion offering four responses—2 expert-driven (Proposal, Power) and 2 user-driven (Anger Expression, Out of Character), a 50:50 split. Table 3 also defines 8 strategies evenly: 4 expert-driven and 4 user-driven, reflecting an equal distribution of conflict-resolution approaches.", "perturbed_statement": "Fig.1 shows Minion offering four responses—3 expert-driven (Proposal, Power, Interests) and 1 user-driven (Anger Expression), a 75:25 split. Table 3 also defines 8 strategies evenly: 4 expert-driven and 4 user-driven, reflecting an equal distribution of conflict-resolution approaches.", "perturbed_explanation": "The perturbed statement is incorrect because Fig.1 only displays two expert-driven options (Proposal and Power) and two user-driven options (Anger Expression and Out of Character). It does not include Interests among the shown Minion responses, so the split is actually 50:50, not 75:25.", "claim": "Fig.1 shows Minion offering four responses—3 expert-driven (Proposal, Power, Interests) and 1 user-driven (Anger Expression), a 75:25 split. 
Table 3 also defines 8 strategies evenly: 4 expert-driven and 4 user-driven, reflecting an equal distribution of conflict-resolution approaches.", "label": false }, { "paperid": "2411.15653v1", "paper_path": "./SciVer/papers/2411.15653v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15653v1_figure_4.png", "item2_path": "./SciVer/images/2411.15653v1-Table5-1.png", "section": [ "4.3" ], "request_id": 768, "origin_statement": "In Fig. 4, the GC map is isotropic at (η = 0.5, φ = 0.5) with a central value of 1.0, while in Table 5 a probability threshold of 0.6 achieves α = 0.964 and the highest CAS of 0.506.", "perturbed_statement": "In Fig. 4, the GC map is isotropic at (η = 0.5, φ = 0.5) with a central value of 1.0, while in Table 5 a probability threshold of 0.5 achieves α = 0.964 and the highest CAS of 0.506.", "perturbed_explanation": "This is incorrect because Table 5 reports that at threshold 0.5 α = 0.938 and CAS = 0.499; the values α = 0.964 and CAS = 0.506 correspond to threshold 0.6, not 0.5.", "claim": "In Fig. 4, the GC map is isotropic at (η = 0.5, φ = 0.5) with a central value of 1.0, while in Table 5 a probability threshold of 0.5 achieves α = 0.964 and the highest CAS of 0.506.", "label": false }, { "paperid": "2411.07492v1", "paper_path": "./SciVer/papers/2411.07492v1.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07492v1_figure_4.png", "item2_path": "./SciVer/images/2411.07492v1-Table2-1.png", "section": [ "4.1" ], "request_id": 772, "origin_statement": "At an FPR of about 0.1, model_stack’s ROC (green) reaches roughly 75% TPR—nearly 10% higher than model_sub’s ~65%—consistent with Table 2 showing AUROC rising from 0.765 for MODEL_SUB to 0.849 for MODEL_STACK under optimized color scale.", "perturbed_statement": "At an FPR of about 0.1, model_stack’s ROC (green) reaches roughly 85% TPR—about 20% higher than model_sub’s ~65%—consistent with Table 2 showing AUROC rising from 0.765 for MODEL_SUB to 0.965 for MODEL_STACK under optimized color scale.", "perturbed_explanation": "The perturbed statement is incorrect because in Figure 4 model_stack’s TPR at FPR=0.1 is closer to 75%, not 85%, and Table 2 reports MODEL_STACK’s AUROC as 0.849, not 0.965.", "claim": "At an FPR of about 0.1, model_stack’s ROC (green) reaches roughly 85% TPR—about 20% higher than model_sub’s ~65%—consistent with Table 2 showing AUROC rising from 0.765 for MODEL_SUB to 0.965 for MODEL_STACK under optimized color scale.", "label": false }, { "paperid": "2411.06609v1", "paper_path": "./SciVer/papers/2411.06609v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06609v1_figure_1(a).png", "item2_path": "./SciVer/images/2411.06609v1_figure_3(c).png", "section": [ "6.1" ], "request_id": 783, "origin_statement": "Under α=0.3 and I₀=10², the Ornstein–Uhlenbeck prior reconstruction exhibits a peak intensity of about 3.5, roughly 0.5 units (∼17%) higher than the bi-Laplacian prior’s peak of about 3.0.", "perturbed_statement": "Under α=0.3 and I₀=10², the Ornstein–Uhlenbeck prior reconstruction exhibits a peak intensity of about 2.5, roughly 0.5 units (∼17%) lower than the bi-Laplacian prior’s peak of about 3.0.", "perturbed_explanation": "The perturbation is wrong because the colorbar in the Ornstein–Uhlenbeck reconstruction 
(Figure 3 right) clearly shows a maximum around 3.5, not 2.5. The bi-Laplacian prior’s peak remains near 3.0, so claiming a lower peak contradicts the figure.", "claim": "Under α=0.3 and I₀=10², the Ornstein–Uhlenbeck prior reconstruction exhibits a peak intensity of about 2.5, roughly 0.5 units (∼17%) lower than the bi-Laplacian prior’s peak of about 3.0.", "label": false }, { "paperid": "2409.02719v1", "paper_path": "./SciVer/papers/2409.02719v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.02719v1_figure_2.png", "item2_path": "./SciVer/images/2409.02719v1-Table3-1.png", "section": [ "3.3" ], "request_id": 786, "origin_statement": "After deriving enriched features from basic inputs, the study employs LightGBM with 8-fold cross-validation to build eight models, each using 50,000 trees, a max_depth of 10, and a learning_rate of 8.75×10⁻³; their averaged predictions form the final output.", "perturbed_statement": "After deriving enriched features from basic inputs, the study employs LightGBM with 8-fold cross-validation to build eight models, each using 5,000 trees, a max_depth of 10, and a learning_rate of 8.75×10⁻³; their averaged predictions form the final output.", "perturbed_explanation": "The perturbed statement wrongly claims each model uses 5,000 trees. Table 3 clearly lists n_estimators as 50,000, so the 5,000 value contradicts the provided hyperparameter setting.", "claim": "After deriving enriched features from basic inputs, the study employs LightGBM with 8-fold cross-validation to build eight models, each using 5,000 trees, a max_depth of 10, and a learning_rate of 8.75×10⁻³; their averaged predictions form the final output.", "label": false }, { "paperid": "2409.06439v1", "paper_path": "./SciVer/papers/2409.06439v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.06439v1-Table2-1.png", "item2_path": "./SciVer/images/2409.06439v1-Table3-1.png", "section": [ "4.1" ], "request_id": 787, "origin_statement": "Using 500 trees, the RF regression model explained 97.49% of variance, and Petal Width contributed the largest increase in MSE, with %IncMSE of 2.25%, about 0.07 percentage points higher than Species (2.18%).", "perturbed_statement": "Using 100 trees, the RF regression model explained 97.49% of variance, and Petal Width contributed the largest increase in MSE, with %IncMSE of 2.25%, about 0.07 percentage points higher than Species (2.18%).", "perturbed_explanation": "This is incorrect because Table 2 specifies the model used 500 trees, not 100.", "claim": "Using 100 trees, the RF regression model explained 97.49% of variance, and Petal Width contributed the largest increase in MSE, with %IncMSE of 2.25%, about 0.07 percentage points higher than Species (2.18%).", "label": false }, { "paperid": "2409.12946v1", "paper_path": "./SciVer/papers/2409.12946v1.json", "claim_type": "parallel", "item1": "5", "item2": "7", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.12946v1-Table5-1.png", "item2_path": "./SciVer/images/2409.12946v1-Table7-1.png", "section": [ "4.3" ], "request_id": 797, "origin_statement": "In Table 5, the combined NAR+ORD modules (row e) achieve 49.47% AA on CIFAR-10 with 1% labels, while Table 7 shows that DynACL++ & SNORD reach 50.37% AA on the same setting, marking a 0.9% increase.", "perturbed_statement": "In Table 5, the combined NAR+ORD
modules (row e) achieve 49.47% AA on CIFAR-10 with 1% labels, while Table 7 shows that DynACL++ & SNORD reach 50.37% AA on the same setting, marking a 1.9% increase.", "perturbed_explanation": "The perturbed statement overstates the improvement. Table 5 reports 49.47% AA (row e) and Table 7 reports 50.37% AA for DynACL++ & SNORD, yielding a 0.9% increase, not 1.9%.", "claim": "In Table 5, the combined NAR+ORD modules (row e) achieve 49.47% AA on CIFAR-10 with 1% labels, while Table 7 shows that DynACL++ & SNORD reach 50.37% AA on the same setting, marking a 1.9% increase.", "label": false }, { "paperid": "2410.13077v1", "paper_path": "./SciVer/papers/2410.13077v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13077v1_figure_4.png", "item2_path": "./SciVer/images/2410.13077v1-Table3-1.png", "section": [ "4.1" ], "request_id": 798, "origin_statement": "By step 2000, LoRA+MoD raises layer 29 sparsity to over 0.15, and this model achieves an average MT-Bench score of 6.8—0.5 points above the 6.3 scored by the LoRA-only baseline.", "perturbed_statement": "By step 2000, LoRA+MoD raises layer 29 sparsity to over 0.25, and this model achieves an average MT-Bench score of 6.8—0.5 points above the 6.3 scored by the LoRA-only baseline.", "perturbed_explanation": "The figure shows layer 29 sparsity peaks around 0.15 by step 2000, not over 0.25. Stating it exceeds 0.25 contradicts the chart’s actual sparsity values for layer 29.", "claim": "By step 2000, LoRA+MoD raises layer 29 sparsity to over 0.25, and this model achieves an average MT-Bench score of 6.8—0.5 points above the 6.3 scored by the LoRA-only baseline.", "label": false }, { "paperid": "2411.04732v1", "paper_path": "./SciVer/papers/2411.04732v1.json", "claim_type": "parallel", "item1": "8(a)", "item2": "8(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04732v1_figure_8(a).png", "item2_path": "./SciVer/images/2411.04732v1_figure_8(b).png", "section": [ "5.3" ], "request_id": 805, "origin_statement": "At layer 2, the model with residual initialization assigns around 80% of its logic components to the A⇐B gate, whereas the Gaussian initialization assigns fewer than 10% at the same layer.", "perturbed_statement": "At layer 2, the model with residual initialization assigns around 80% of its logic components to the A⇐B gate, whereas the Gaussian initialization assigns about 30% at the same layer.", "perturbed_explanation": "The perturbed statement overstates the Gaussian initialization frequency for the A⇐B gate at layer 2. In the Gaussian initialization heatmap, that gate appears in under 10% of cases at layer 2, not 30%.", "claim": "At layer 2, the model with residual initialization assigns around 80% of its logic components to the A⇐B gate, whereas the Gaussian initialization assigns about 30% at the same layer.", "label": false }, { "paperid": "2410.16617v1", "paper_path": "./SciVer/papers/2410.16617v1.json", "claim_type": "sequential", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.16617v1_figure_4.png", "item2_path": "./SciVer/images/2410.16617v1-Table2-1.png", "section": [ "4.2" ], "request_id": 10, "origin_statement": "Figure 4a shows fitted chikungunya cases in Catumbi peaking at about 80 cases around bi-week 33. 
Table 2 lists the intercept relative odds ratio for chikungunya versus dengue as 0.99, implying roughly equal dengue odds under average covariates. Thus, dengue would also be expected near 80 cases then.", "perturbed_statement": "Figure 4a shows fitted chikungunya cases in Catumbi peaking at about 80 cases around bi-week 33. Table 2 lists the intercept relative odds ratio for chikungunya versus dengue as 1.19, implying dengue odds are 19% higher under average covariates. Thus, dengue would be expected around 95 cases then.", "perturbed_explanation": "The perturbed statement misquotes Table 2: the intercept relative odds ratio for chikungunya versus dengue is actually 0.99 (95% CI 0.89–1.10), not 1.19. The 1.19 value refers to the Zika‐dengue intercept, so the conclusion about dengue odds and expected case count is incorrect.", "claim": "Figure 4a shows fitted chikungunya cases in Catumbi peaking at about 80 cases around bi-week 33. Table 2 lists the intercept relative odds ratio for chikungunya versus dengue as 0.99, implying roughly equal dengue odds under average covariates. Thus, dengue would also be expected near 80 cases then.", "label": true }, { "paperid": "2411.18433v1", "paper_path": "./SciVer/papers/2411.18433v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.18433v1-Table1-1.png", "item2_path": "./SciVer/images/2411.18433v1-Table2-1.png", "section": [ "5.1" ], "request_id": 141, "origin_statement": "Table 2 shows the information-sharing network has 62% mutual ties; Table 1 indicates φ<0 implies reciprocity decreases with latent distance; thus the information-sharing network likely exhibits distance-dependent reciprocity with φ<0.", "perturbed_statement": "Table 2 shows the advice network has 62% mutual ties; Table 1 indicates φ<0 implies reciprocity decreases with latent distance; thus the advice network likely exhibits distance-dependent reciprocity with φ<0.", "perturbed_explanation": "The advice network actually has 39% mutual ties, not 62% as claimed. This contradicts the premise drawn from Table 2, so the inference about the advice network’s reciprocity pattern is incorrect.", "claim": "Table 2 shows the information-sharing network has 62% mutual ties; Table 1 indicates φ<0 implies reciprocity decreases with latent distance; thus the information-sharing network likely exhibits distance-dependent reciprocity with φ<0.", "label": true }, { "paperid": "2410.13883v1", "paper_path": "./SciVer/papers/2410.13883v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13883v1_figure_1(c).png", "item2_path": "./SciVer/images/2410.13883v1_figure_3.png", "section": [ "1" ], "request_id": 142, "origin_statement": "In 2022, Transformer publications in chart understanding (≈1000) overtook CNN publications (≈600) (Figure 1). Since Figure 3 shows Chart Understanding sits at the intersection of Computer Vision and Natural Language Processing, this surge in Transformers signals stronger NLP–CV integration in CU tasks.", "perturbed_statement": "In 2020, Transformer publications in chart understanding overtook CNN publications, implying that CU now emphasizes multimodal information retrieval features due to the rise of transformers.", "perturbed_explanation": "This is wrong because in 2020 CNN still had more publications (≈950) than Transformers (≈400), so Transformers did not overtake CNN that year. 
The conclusion about emphasizing Information Retrieval from that faulty premise is therefore invalid.", "claim": "In 2022, Transformer publications in chart understanding (≈1000) overtook CNN publications (≈600) (Figure 1). Since Figure 3 shows Chart Understanding sits at the intersection of Computer Vision and Natural Language Processing, this surge in Transformers signals stronger NLP–CV integration in CU tasks.", "label": true }, { "paperid": "2411.11677v1", "paper_path": "./SciVer/papers/2411.11677v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11677v1_figure_4.png", "item2_path": "./SciVer/images/2411.11677v1_figure_5.png", "section": [ "5.5" ], "request_id": 214, "origin_statement": "From Figure 4, NARM’s N@10 accuracy improves by 4.5% between 1k and 5k queries. In Figure 5, this greater sensitivity corresponds to a 2% N@10 gain when recommendation-list length increases from 50 to 200, outpacing SASRec and BERT4Rec improvements.", "perturbed_statement": "From Figure 4, NARM’s N@10 accuracy improves by 4.5% between 1k and 5k queries. In Figure 5, this greater sensitivity corresponds to a 1% N@10 gain when recommendation-list length increases from 50 to 200, outpacing SASRec and BERT4Rec improvements.", "perturbed_explanation": "The left chart of Figure 5 shows NARM’s N@10 rising from about 0.61 at list length 50 to about 0.63 at 200, a 2% gain, not 1%. Moreover, SASRec’s N@10 also increases by roughly 2% (≈0.59→0.61). Thus claiming a 1% gain that ‘outpaces’ SASRec contradicts the plotted values.", "claim": "From Figure 4, NARM’s N@10 accuracy improves by 4.5% between 1k and 5k queries. In Figure 5, this greater sensitivity corresponds to a 2% N@10 gain when recommendation-list length increases from 50 to 200, outpacing SASRec and BERT4Rec improvements.", "label": true }, { "paperid": "2411.12355v1", "paper_path": "./SciVer/papers/2411.12355v1.json", "claim_type": "sequential", "item1": "4", "item2": "5(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.12355v1_figure_4.png", "item2_path": "./SciVer/images/2411.12355v1_figure_5(a).png", "section": [ "4.5" ], "request_id": 230, "origin_statement": "When the ratio of filtered prototypes increases from 0.4 to 0.7, MSVD accuracy rises from about 55% to about 67%; since the DPE module assigns the highest importance score to prototype 21 in Figure 4, filtering that prototype improves performance.", "perturbed_statement": "When the ratio of filtered prototypes increases from 0.4 to 0.7, MSVD accuracy rises from about 55% to about 67%; since the DPE module assigns the highest importance score to prototype 15 in Figure 4, filtering that prototype improves performance.", "perturbed_explanation": "The perturbation wrongly claims prototype 15 has the highest importance. 
In Figure 4 the peak importance score actually occurs at prototype 21, not prototype 15, so the premise is incorrect and the conclusion no longer follows.", "claim": "When the ratio of filtered prototypes increases from 0.4 to 0.7, MSVD accuracy rises from about 55% to about 67%; since the DPE module assigns the highest importance score to prototype 21 in Figure 4, filtering that prototype improves performance.", "label": true }, { "paperid": "2409.08522v1", "paper_path": "./SciVer/papers/2409.08522v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.08522v1_figure_4(a).png", "item2_path": "./SciVer/images/2409.08522v1-Table1-1.png", "section": [ "5" ], "request_id": 244, "origin_statement": "UC’s F1 mean improves from 0.78 at 1 hour to 0.83 at 32 hours (Fig 4), matching an item_per_user reliability score increase from 0.2 (2–3) to 0.5 (4–8) per Table 1.", "perturbed_statement": "UC’s F1 mean increases from 0.80 at 1 hour to 0.90 at 32 hours (Fig 4), matching an item_per_user reliability score jump from 0.5 (4–8) to 1.0 (9+) per Table 1.", "perturbed_explanation": "The perturbed claim is wrong on two counts. First, Fig 4 shows UC rising from about 0.78 to 0.83, not from 0.80 to 0.90. Second, Table 1 assigns a reliability score of 0.2 for 2–3 items per user (not 0.5) and 0.5 for 4–8 (not already 0.5 then jumping to 1.0).", "claim": "UC’s F1 mean improves from 0.78 at 1 hour to 0.83 at 32 hours (Fig 4), matching an item_per_user reliability score increase from 0.2 (2–3) to 0.5 (4–8) per Table 1.", "label": true }, { "paperid": "2410.21523v1", "paper_path": "./SciVer/papers/2410.21523v1.json", "claim_type": "sequential", "item1": "7", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21523v1_figure_7.png", "item2_path": "./SciVer/images/2410.21523v1_figure_9.png", "section": [ "6" ], "request_id": 331, "origin_statement": "Fig. 7 shows TabDAR’s Adult synthetic data has a joint peak at education_num ≈9 for ages 30–40. In Fig. 9, TabDAR’s Adult correlation error is light (<0.05), indicating precise correlation estimation.", "perturbed_statement": "Fig. 7 shows TabDAR’s Adult synthetic data has a joint peak at education_num ≈11 for ages 30–40. In Fig. 9, TabDAR’s Adult correlation error is very low (<0.03), indicating near-perfect correlation estimation.", "perturbed_explanation": "The perturbed claim is wrong because in Figure 7 the most prominent density peak for TabDAR occurs at education_num around 9 (not 11), and in Figure 9 the TabDAR Adult heatmap cells are light green corresponding to errors near 0.04–0.05, not below 0.03.", "claim": "Fig. 7 shows TabDAR’s Adult synthetic data has a joint peak at education_num ≈9 for ages 30–40. In Fig. 9, TabDAR’s Adult correlation error is light (<0.05), indicating precise correlation estimation.", "label": true }, { "paperid": "2409.00511v1", "paper_path": "./SciVer/papers/2409.00511v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "4(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.00511v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.00511v1_figure_4(a).png", "section": [ "4.1" ], "request_id": 356, "origin_statement": "From Fig. 3(a), the Diffusion model reduces cosine distance to about 0.90 by timestep 800 on AWA, enabling it to maintain roughly 72% unseen classification accuracy at 55 semantic dimensions on CUB (Fig.
4(a)).", "perturbed_statement": "From Fig. 3(a), the Diffusion model reduces cosine distance to about 0.85 by timestep 800 on AWA, enabling it to maintain roughly 80% unseen classification accuracy at 55 semantic dimensions on CUB (Fig. 4(a)).", "perturbed_explanation": "Fig. 3(a) shows the cosine distance is approximately 0.88–0.90 at t=800, not 0.85. Moreover, in Fig. 4(a) the Diffusion unseen accuracy at 55 dimensions is around 72%, not 80%.", "claim": "From Fig. 3(a), the Diffusion model reduces cosine distance to about 0.90 by timestep 800 on AWA, enabling it to maintain roughly 72% unseen classification accuracy at 55 semantic dimensions on CUB (Fig. 4(a)).", "label": true }, { "paperid": "2409.02137v1", "paper_path": "./SciVer/papers/2409.02137v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "2(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.02137v1_figure_1(c).png", "item2_path": "./SciVer/images/2409.02137v1_figure_2(a).png", "section": [ "2.4" ], "request_id": 465, "origin_statement": "Given WaypointRL’s heatmap in Figure 1 showing cube 3 with the highest visit intensity, and Figure 2’s first row showing almost all cells colored across all six depth levels of cube 3, WaypointRL achieves over 95% coverage in every depth of cube 3.", "perturbed_statement": "Given WaypointRL’s heatmap in Figure 1 showing cube 2 with the highest visit intensity, and Figure 2’s first row showing almost all cells colored across all six depth levels of cube 2, WaypointRL achieves over 95% coverage in every depth of cube 2.", "perturbed_explanation": "This is incorrect because Figure 1 actually highlights cube 3—not cube 2—as having the highest intensity under WaypointRL, and Figure 2 details only the six depth levels of cube 3, not cube 2.", "claim": "Given WaypointRL’s heatmap in Figure 1 showing cube 3 with the highest visit intensity, and Figure 2’s first row showing almost all cells colored across all six depth levels of cube 3, WaypointRL achieves over 95% coverage in every depth of cube 3.", "label": true }, { "paperid": "2410.05817v3", "paper_path": "./SciVer/papers/2410.05817v3.json", "claim_type": "sequential", "item1": "4", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.05817v3_figure_4.png", "item2_path": "./SciVer/images/2410.05817v3_figure_7.png", "section": [ "4.3" ], "request_id": 526, "origin_statement": "Phi-1.5 uses ND most (≈1350); 'part-of-continent' has the highest ND outputs (≈300); thus Phi-1.5 likely produced the most ND-based 'part-of-continent' outputs.", "perturbed_statement": "Pythia-1.4B uses ND most (≈500); 'part-of-continent' has the highest ND outputs (≈300); thus Pythia-1.4B likely produced the most ND-based 'part-of-continent' outputs.", "perturbed_explanation": "This is wrong because Figure 4 shows Phi-1.5 actually has the highest ND count (≈1350), while Pythia-1.4B only has ≈500 ND outputs. 
Pythia-1.4B cannot therefore have produced the most ND-based 'part-of-continent' outputs.", "claim": "Phi-1.5 uses ND most (≈1350); 'part-of-continent' has the highest ND outputs (≈300); thus Phi-1.5 likely produced the most ND-based 'part-of-continent' outputs.", "label": true }, { "paperid": "2409.19589v1", "paper_path": "./SciVer/papers/2409.19589v1.json", "claim_type": "sequential", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19589v1_figure_7.png", "item2_path": "./SciVer/images/2409.19589v1-Table3-1.png", "section": [ "5.2.2" ], "request_id": 608, "origin_statement": "Because ResShift-15 achieves the highest PSNR (23.83) among training-from-scratch methods in Table 3, it reconstructs brick textures more faithfully than Ours-15 in the real-world dataset shown in Figure 7.", "perturbed_statement": "Because Ours-15 achieves the highest PSNR (23.83) among training-from-scratch methods in Table 3, it reconstructs brick textures more faithfully than ResShift-15 in the real-world dataset shown in Figure 7.", "perturbed_explanation": "This is incorrect because Table 3 shows ResShift-15 has the highest PSNR of 23.83; Ours-15 actually has a lower PSNR of 23.60. Assigning the top PSNR score to Ours-15 contradicts the concrete values in the table.", "claim": "Because ResShift-15 achieves the highest PSNR (23.83) among training-from-scratch methods in Table 3, it reconstructs brick textures more faithfully than Ours-15 in the real-world dataset shown in Figure 7.", "label": true }, { "paperid": "2409.14857v2", "paper_path": "./SciVer/papers/2409.14857v2.json", "claim_type": "sequential", "item1": "2(a)", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14857v2_figure_2(a).png", "item2_path": "./SciVer/images/2409.14857v2_figure_3.png", "section": [ "6.1" ], "request_id": 667, "origin_statement": "In Fig.2, Morocco has the lowest X coordinate among DistMult embeddings; Fig.3 shows a degree 1 polynomial achieves the highest training MRR (~0.85) on COUNTRIES; thus the leftmost embedding corresponds to the best train performance.", "perturbed_statement": "Slovakia has the lowest X coordinate in DistMult embeddings (Fig.2), and Fig.3 shows a degree 3 polynomial yields the highest training MRR (~0.61) on COUNTRIES; hence the leftmost embedding corresponds to optimal train performance at degree 3.", "perturbed_explanation": "This is wrong because in Fig.2 Morocco, not Slovakia, has the lowest X (≈–2.7), while Slovakia’s X is around –0.2. Moreover, Fig.3 shows degree 1 achieves the highest training MRR (~0.85) on COUNTRIES, not degree 3 (~0.61).", "claim": "In Fig.2, Morocco has the lowest X coordinate among DistMult embeddings; Fig.3 shows a degree 1 polynomial achieves the highest training MRR (~0.85) on COUNTRIES; thus the leftmost embedding corresponds to the best train performance.", "label": true }, { "paperid": "2409.14128v2", "paper_path": "./SciVer/papers/2409.14128v2.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14128v2_figure_3(a).png", "item2_path": "./SciVer/images/2409.14128v2-Table3-1.png", "section": [ "7" ], "request_id": 727, "origin_statement": "At 1440px scale, FatFormer achieves 36.7% synthetic recall (Figure 3). Table 3 shows average patch-level accuracy on Gaussian blur is only 67.04%, the lowest among transformations. 
Therefore, detecting large-scale Gaussian-blurred images is notably challenging.", "perturbed_statement": "At 1440px scale, FatFormer achieves 40.7% synthetic recall. Table 3 shows average patch-level accuracy on Gaussian blur is 86.27%, the second-highest. Therefore, detecting large-scale Gaussian-blurred images is straightforward.", "perturbed_explanation": "This is incorrect because Figure 3 reports FatFormer’s synthetic recall at 1440px as 36.7%, not 40.7%. Moreover, Table 3’s bottom row gives the average accuracy on GBlur as 67.04%, which is the lowest, not 86.27% nor second-highest. These misstatements invalidate the conclusion.", "claim": "At 1440px scale, FatFormer achieves 36.7% synthetic recall (Figure 3). Table 3 shows average patch-level accuracy on Gaussian blur is only 67.04%, the lowest among transformations. Therefore, detecting large-scale Gaussian-blurred images is notably challenging.", "label": true }, { "paperid": "2409.08519v1", "paper_path": "./SciVer/papers/2409.08519v1.json", "claim_type": "sequential", "item1": "10", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.08519v1_figure_10.png", "item2_path": "./SciVer/images/2409.08519v1-Table1-1.png", "section": [ "5.2" ], "request_id": 783, "origin_statement": "Table 1 shows that shape matching uses ε = 0.01. In Fig. 10, the d_I matrix exhibits a distinct diagonal block for the Gorilla class. Therefore, with ε = 0.01, our merge-tree-based LSH framework achieves accurate Gorilla classification.", "perturbed_statement": "Table 1 shows that shape matching uses ε = 0.1. In Fig. 10, the d_I matrix exhibits a distinct diagonal block for the Gorilla class. Therefore, with ε = 0.1, our merge-tree-based LSH framework achieves accurate Gorilla classification.", "perturbed_explanation": "The perturbation is incorrect because Table 1 specifies ε = 0.01 for the shape-matching experiments, not ε = 0.1. Claiming ε = 0.1 contradicts the provided dataset parameters and is unsupported by the context.", "claim": "Table 1 shows that shape matching uses ε = 0.01. In Fig. 10, the d_I matrix exhibits a distinct diagonal block for the Gorilla class. Therefore, with ε = 0.01, our merge-tree-based LSH framework achieves accurate Gorilla classification.", "label": true }, { "paperid": "2411.12115v1", "paper_path": "./SciVer/papers/2411.12115v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.12115v1-Table1-1.png", "item2_path": "./SciVer/images/2411.12115v1-Table3-1.png", "section": [ "5.1" ], "request_id": 785, "origin_statement": "Pruned LD3M with DC at 128×128 IPC=1 yields a +2.0-point boost on ImageNet-B (Table 1). Applying LD3M pruning at 256×256 IPC=1 with random initialization (Table 3) corresponds to a +1.7-point gain on ImageNet-B, confirming cross-resolution consistency.", "perturbed_statement": "Pruned LD3M with DC at 128×128 IPC=1 yields a +2.5-point boost on ImageNet-B (Table 1). Applying LD3M pruning at 256×256 IPC=1 with random initialization (Table 3) corresponds to a +1.9-point gain on ImageNet-B, confirming cross-resolution consistency.", "perturbed_explanation": "The claim is wrong because Table 1 reports a +2.0 ± 0.3 improvement for pruned LD3M with DC on ImageNet-B, not +2.5. Likewise, Table 3 shows a +1.7 ± 0.0 gain for pruned LD3M (random init.) on ImageNet-B, not +1.9.", "claim": "Pruned LD3M with DC at 128×128 IPC=1 yields a +2.0-point boost on ImageNet-B (Table 1). 
Applying LD3M pruning at 256×256 IPC=1 with random initialization (Table 3) corresponds to a +1.7-point gain on ImageNet-B, confirming cross-resolution consistency.", "label": true }, { "paperid": "2409.04986v1", "paper_path": "./SciVer/papers/2409.04986v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.04986v1-Table3-1.png", "item2_path": "./SciVer/images/2409.04986v1-Table4-1.png", "section": [ "4.2" ], "request_id": 844, "origin_statement": "Table 3 shows DynamicAvg at interval b–g (β=0.3) on CIFAR10 yields 67.4% accuracy. Table 4 shows for interval b–e under Dir(0.3), CIFAR10 accuracy is 73.3% with cost ratio 0.073. Thus, switching to b–e reduces server cost from 0.252 to 0.073 (71% reduction) and increases accuracy by 5.9%.", "perturbed_statement": "Table 3 shows DynamicAvg at interval b–g (β=0.3) on CIFAR10 yields 69.7% accuracy. Table 4 shows for interval b–e under Dir(0.3), CIFAR10 accuracy is 73.3% with cost ratio 0.073. Thus, switching to b–e reduces server cost from 0.252 to 0.073 (71% reduction) and increases accuracy by 3.6%.", "perturbed_explanation": "The perturbation incorrectly cites the b–g accuracy from Table 3 as 69.7%, whereas the actual value is 67.4%. Consequently, the computed gain of 3.6% is wrong; the true improvement (73.3%–67.4%) is 5.9%.", "claim": "Table 3 shows DynamicAvg at interval b–g (β=0.3) on CIFAR10 yields 67.4% accuracy. Table 4 shows for interval b–e under Dir(0.3), CIFAR10 accuracy is 73.3% with cost ratio 0.073. Thus, switching to b–e reduces server cost from 0.252 to 0.073 (71% reduction) and increases accuracy by 5.9%.", "label": true }, { "paperid": "2410.03717v1", "paper_path": "./SciVer/papers/2410.03717v1.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.03717v1_figure_5.png", "item2_path": "./SciVer/images/2410.03717v1-Table4-1.png", "section": [ "5.2.1", "5.2" ], "request_id": 1065, "origin_statement": "From Table 4, the Base Model after Event SFT answers 65% of Direct Questions correctly; Figure 5 shows it still makes about 65 mistakes on Multihop Questions, so its Multihop accuracy remains 35%, indicating minimal reasoning improvement.", "perturbed_statement": "From Table 4, the Base Model after Event SFT answers 70% of Direct Questions correctly; Figure 5 shows it still makes about 65 mistakes on Multihop Questions, so its Multihop accuracy remains 35%, indicating minimal reasoning improvement.", "perturbed_explanation": "Table 4 lists the Base Model’s Event SFT Direct Question accuracy as 65%, not 70%, so the perturbed claim’s premise about direct accuracy is incorrect.", "claim": "From Table 4, the Base Model after Event SFT answers 65% of Direct Questions correctly; Figure 5 shows it still makes about 65 mistakes on Multihop Questions, so its Multihop accuracy remains 35%, indicating minimal reasoning improvement.", "label": true }, { "paperid": "2411.01705v1", "paper_path": "./SciVer/papers/2411.01705v1.json", "claim_type": "sequential", "item1": "2", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01705v1_figure_2.png", "item2_path": "./SciVer/images/2411.01705v1_figure_3(a).png", "section": [ "6.1.2" ], "request_id": 1071, "origin_statement": "Figure 2 shows the paraphrased extraction ASR on MedQA is 64.3%. 
Figure 3 shows adding a two-word trigger boosts the MedQA verbatim extraction ASR to 89.41%. Hence, two-word trigger verbatim attacks outperform paraphrased attacks by 25.11 points on MedQA.", "perturbed_statement": "Figure 2 shows the paraphrased extraction ASR on MedQA is 72.5%. Figure 3 shows adding a two-word trigger boosts the MedQA verbatim extraction ASR to 89.41%. Hence, two-word trigger verbatim attacks outperform paraphrased attacks by 25.11 points on MedQA.", "perturbed_explanation": "The perturbed statement is wrong because Figure 2 actually reports a 64.3% ASR for paraphrased extraction on MedQA—not 72.5%. The 72.5% value belongs to MedmcQA. Consequently, the claimed 25.11-point gap versus a 72.5% baseline is invalid.", "claim": "Figure 2 shows the paraphrased extraction ASR on MedQA is 64.3%. Figure 3 shows adding a two-word trigger boosts the MedQA verbatim extraction ASR to 89.41%. Hence, two-word trigger verbatim attacks outperform paraphrased attacks by 25.11 points on MedQA.", "label": true }, { "paperid": "2411.09902v1", "paper_path": "./SciVer/papers/2411.09902v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09902v1_figure_3(b).png", "item2_path": "./SciVer/images/2411.09902v1-Table1-1.png", "section": [ "2.4" ], "request_id": 1104, "origin_statement": "Table 1 shows NuSTAR FPMA and FPMB exposures totaling ∼158 ks; in Figure 3, the red points from NuSTAR lie systematically below the single PL model fit above ∼6 keV, indicating a spectral break at about 6 keV.", "perturbed_statement": "Table 1 shows XMM-Newton PN exposures totaling ∼158 ks; in Figure 3, the red data in the XMM-Newton spectrum lie below the single PL model fit above ∼6 keV, implying a spectral break at 6 keV in the XMM-Newton spectrum.", "perturbed_explanation": "This is incorrect because Table 1’s ∼79 ks/78 ks entries are for NuSTAR FPMA/FPMB, not XMM-Newton PN, so the ∼158 ks total refers to NuSTAR, not XMM. Moreover, in Figure 3 the red points represent NuSTAR data, not XMM-Newton spectra.", "claim": "Table 1 shows NuSTAR FPMA and FPMB exposures totaling ∼158 ks; in Figure 3, the red points from NuSTAR lie systematically below the single PL model fit above ∼6 keV, indicating a spectral break at about 6 keV.", "label": true }, { "paperid": "2411.14984v1", "paper_path": "./SciVer/papers/2411.14984v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.14984v1-Table1-1.png", "item2_path": "./SciVer/images/2411.14984v1-Table2-1.png", "section": [ "5.2" ], "request_id": 1236, "origin_statement": "Across modalities, Table1 shows AE-KD with debiased teachers yields 87.5% WGA on CelebA using a resnet18 student. Table2 shows AE-KD self-distillation with resnet50 boosts WGA to 89.4%, indicating that increasing student capacity raises worst‐group accuracy by 1.9 points.", "perturbed_statement": "Table1 shows AE-KD with debiased teachers yields 90.9% WGA on CelebA using a resnet18 student. Table2 shows AE-KD self-distillation with resnet50 boosts WGA to 92.0%, indicating that increasing student capacity raises worst‐group accuracy by 1.1 points.", "perturbed_explanation": "This claim is incorrect because Table1 actually reports AE-KD with debiased teachers at 87.5% WGA on CelebA (not 90.9%), and Table2 reports AE-KD self-distillation at 89.4% WGA (not 92.0%). 
Hence both the misquoted values and the 1.1‐point gain are false.", "claim": "Across modalities, Table1 shows AE-KD with debiased teachers yields 87.5% WGA on CelebA using a resnet18 student. Table2 shows AE-KD self-distillation with resnet50 boosts WGA to 89.4%, indicating that increasing student capacity raises worst‐group accuracy by 1.9 points.", "label": true }, { "paperid": "2409.17025v1", "paper_path": "./SciVer/papers/2409.17025v1.json", "claim_type": "sequential", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17025v1_figure_2.png", "item2_path": "./SciVer/images/2409.17025v1-Table1-1.png", "section": [ "5.1.1" ], "request_id": 1332, "origin_statement": "In fold 2, Kerrisons constitute the largest class (~840 images in Figure 2b). DeepLabV3 achieves the highest Kerrisons mIoU (73.4% in Table 1). This suggests DeepLabV3’s superior Kerrisons segmentation benefits from the relatively abundant Kerrisons training samples in fold 2.", "perturbed_statement": "In fold 2, Cup Forceps constitute the largest class (~840 images in Figure 2b). DeepLabV3 achieves the highest Cup Forceps mIoU (11.8% in Table 1). This suggests DeepLabV3’s segmentation gains most from ample Cup Forceps samples.", "perturbed_explanation": "Figure 2b shows Cup Forceps have only ~480 images in fold 2 (not ~840), and Table 1 indicates SegFormer (24.4%) — not DeepLabV3 (11.8%) — has the highest Cup Forceps mIoU. These details contradict the perturbed claim.", "claim": "In fold 2, Kerrisons constitute the largest class (~840 images in Figure 2b). DeepLabV3 achieves the highest Kerrisons mIoU (73.4% in Table 1). This suggests DeepLabV3’s superior Kerrisons segmentation benefits from the relatively abundant Kerrisons training samples in fold 2.", "label": true }, { "paperid": "2411.06916v1", "paper_path": "./SciVer/papers/2411.06916v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06916v1_figure_7(a).png", "item2_path": "./SciVer/images/2411.06916v1_figure_8.png", "section": [ "6.4" ], "request_id": 1350, "origin_statement": "Figure 7 shows ReCL achieves 40.2% ACC in the DIL scenario (dark green bar). Figure 8’s second-row images include a real dog sample. Therefore, in DIL, ReCL reconstructs dog images that closely match real dog samples.", "perturbed_statement": "Figure 7 shows ReCL achieves 40.2% ACC in the CIL scenario. Figure 8’s second-row images include a real dog sample. Therefore, in CIL, ReCL reconstructs dog images that closely match real dog samples.", "perturbed_explanation": "The 40.2% ACC value in Figure 7 corresponds to the DIL scenario (dark green bar), not CIL. Associating that accuracy with CIL contradicts the bar coloring and legend in the figure.", "claim": "Figure 7 shows ReCL achieves 40.2% ACC in the DIL scenario (dark green bar). Figure 8’s second-row images include a real dog sample. Therefore, in DIL, ReCL reconstructs dog images that closely match real dog samples.", "label": true }, { "paperid": "2411.02348v1", "paper_path": "./SciVer/papers/2411.02348v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "2(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02348v1_figure_1(a).png", "item2_path": "./SciVer/images/2411.02348v1_figure_2(a).png", "section": [ "2.1" ], "request_id": 1376, "origin_statement": "Figure1 identifies the Greek alphabet as the near-transfer domain. 
Figure2 specifies that modified instructions for the Greek alphabet were shown exclusively to adult participants. Therefore, adults, but not children, received specialized instructions for near-transfer Greek-alphabet puzzles.", "perturbed_statement": "Figure1 identifies the Greek alphabet as the near-transfer domain. Figure2 specifies that modified instructions for the Greek alphabet were shown exclusively to child participants. Therefore, children, but not adults, received specialized instructions for near-transfer Greek-alphabet puzzles.", "perturbed_explanation": "The perturbed claim misstates Figure2: the context and caption clearly state that modified instructions for Greek and Symbol alphabets were exclusive to adults and were not shown to children. Thus the premise that children alone received those instructions is false.", "claim": "Figure1 identifies the Greek alphabet as the near-transfer domain. Figure2 specifies that modified instructions for the Greek alphabet were shown exclusively to adult participants. Therefore, adults, but not children, received specialized instructions for near-transfer Greek-alphabet puzzles.", "label": true }, { "paperid": "2410.13907v1", "paper_path": "./SciVer/papers/2410.13907v1.json", "claim_type": "sequential", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13907v1_figure_5.png", "item2_path": "./SciVer/images/2410.13907v1-Table2-1.png", "section": [ "4.2.2" ], "request_id": 1394, "origin_statement": "Figure 5 shows that f_wm+t_c+sig_w yields a word error rate of 0.48; Table 2 shows that BERT with a wrong extractor yields 0.00; thus using a wrong extractor on a model with wrong signature reduces WER from 0.48 to 0.00.", "perturbed_statement": "Figure 5 shows that f_wm+t_c+sig_w yields a word error rate of 0.48; Table 2 shows that BERT with a wrong extractor yields 0.13; thus using a wrong extractor on a model with wrong signature reduces WER from 0.48 to 0.13.", "perturbed_explanation": "This is incorrect because Table 2 actually reports that BERT with the wrong extractor (E_w) has a WER of 0.00, not 0.13, so the claimed reduction to 0.13 contradicts the table data.", "claim": "Figure 5 shows that f_wm+t_c+sig_w yields a word error rate of 0.48; Table 2 shows that BERT with a wrong extractor yields 0.00; thus using a wrong extractor on a model with wrong signature reduces WER from 0.48 to 0.00.", "label": true }, { "paperid": "2410.01811v1", "paper_path": "./SciVer/papers/2410.01811v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01811v1_figure_5.png", "item2_path": "./SciVer/images/2410.01811v1_figure_6.png", "section": [ "2.2" ], "request_id": 1452, "origin_statement": "Since 52.1% of Malayalam respondents have over twenty years of work experience (Fig 5B), and 80.3% selected the High LTO option (Fig 6), it infers the most experienced cohort overwhelmingly favors long-term orientation.", "perturbed_statement": "Since only 21.1% of Malayalam respondents have over twenty years of work experience (Fig 5B), and 80.3% selected High LTO (Fig 6), it suggests a small minority drives the high long-term orientation score.", "perturbed_explanation": "The premise is incorrect: Fig 5B actually shows 52.1% (not 21.1%) of respondents report over twenty years of work experience, so it is the majority—not a small minority—driving the High LTO result.", "claim": "Since 52.1% of Malayalam respondents 
have over twenty years of work experience (Fig 5B), and 80.3% selected the High LTO option (Fig 6), it infers the most experienced cohort overwhelmingly favors long-term orientation.", "label": true }, { "paperid": "2411.10700v1", "paper_path": "./SciVer/papers/2411.10700v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10700v1_figure_1.png", "item2_path": "./SciVer/images/2411.10700v1_figure_3.png", "section": [ "3" ], "request_id": 1509, "origin_statement": "Fig.1(b) shows the SKM* force s-wave phase shift rising at about 50 keV, and Fig.3(a) then gives b_{βp}≈10^{-3} at that resonance energy.", "perturbed_statement": "Fig.1(b) shows the SKM* force s-wave phase shift rising at about 250 keV, and Fig.3(a) then gives b_{βp}≈10^{-3} at that resonance energy.", "perturbed_explanation": "In Fig.1(b) the SKM* phase shift jumps at ~50 keV (black curve), not at 250 keV. Therefore using 250 keV to read b_{βp} from Fig.3(a) mislocates the resonance and the resulting branching ratio claim is false.", "claim": "Fig.1(b) shows the SKM* force s-wave phase shift rising at about 50 keV, and Fig.3(a) then gives b_{βp}≈10^{-3} at that resonance energy.", "label": true }, { "paperid": "2409.20209v1", "paper_path": "./SciVer/papers/2409.20209v1.json", "claim_type": "sequential", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.20209v1_figure_2.png", "item2_path": "./SciVer/images/2409.20209v1_figure_4.png", "section": [ "3" ], "request_id": 1637, "origin_statement": "Since Case 2’s validation loss in Figure 2 plateaus at approximately 0.2 by epoch 30, at a test SNR of 25 dB (within its training range), Figure 4 shows its elevation RMSE falls below 1°.", "perturbed_statement": "Since Case 2’s validation loss in Figure 2 plateaus at approximately 0.2 by epoch 30, at a test SNR of 10 dB (within its training range), Figure 4 shows its elevation RMSE falls below 1°.", "perturbed_explanation": "This is wrong because Case 2 was trained on SNR values from 20–30 dB, not 10 dB, and in Figure 4 the blue dashed line (Case 2 elevation) at 10 dB is around 3°, which is well above 1°, directly contradicting the claim.", "claim": "Since Case 2’s validation loss in Figure 2 plateaus at approximately 0.2 by epoch 30, at a test SNR of 25 dB (within its training range), Figure 4 shows its elevation RMSE falls below 1°.", "label": true }, { "paperid": "2411.05345v1", "paper_path": "./SciVer/papers/2411.05345v1.json", "claim_type": "sequential", "item1": "6(c)", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05345v1_figure_6(c).png", "item2_path": "./SciVer/images/2411.05345v1_figure_8.png", "section": [ "4.2" ], "request_id": 1786, "origin_statement": "Since 'which' appears largest in the MMLU word cloud, indicating it's most edited, and Figure 7(a) shows MMLU accuracy dropping from 55% at 0 edits to 44.5% at 8 edits, we conclude that edits to 'which' contribute roughly a 10-point accuracy drop.", "perturbed_statement": "Since 'individual' appears largest in the MMLU word cloud, and MMLU accuracy drops from 55% at 0 edits to 40% at 8 edits, we conclude edits to 'individual' cause a 15-point drop.", "perturbed_explanation": "This is incorrect because the MMLU word cloud clearly shows 'which' as the most prominent edited term, not 'individual'. 
Moreover, Figure 7(a) reports MMLU accuracy at 8 edits is about 44.5%, not 40%, so the stated 15-point decline misrepresents the actual data.", "claim": "Since 'which' appears largest in the MMLU word cloud, indicating it's most edited, and Figure 7(a) shows MMLU accuracy dropping from 55% at 0 edits to 44.5% at 8 edits, we conclude that edits to 'which' contribute roughly a 10-point accuracy drop.", "label": true }, { "paperid": "2409.14217v2", "paper_path": "./SciVer/papers/2409.14217v2.json", "claim_type": "sequential", "item1": "3(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14217v2_figure_3(b).png", "item2_path": "./SciVer/images/2409.14217v2-Table3-1.png", "section": [ "5.2" ], "request_id": 1858, "origin_statement": "Figure 3 shows Cornac’s NDCG@100 on MSD rises from 0.15 at 64 dimensions to 0.31 at 1024. Table 3 marks Adaptive negative sampling as optimal. Therefore, applying Adaptive sampling with 1024-dimensional embeddings maximizes Cornac’s MSD NDCG@100.", "perturbed_statement": "Figure 3 shows Cornac’s NDCG@100 on MSD peaks at 256 dimensions then falls, and Table 3 marks Uniform negative sampling as optimal. Therefore, applying Uniform sampling with 1024-dimensional embeddings maximizes Cornac’s MSD NDCG@100.", "perturbed_explanation": "Figure 3 actually shows Cornac’s NDCG@100 steadily increases through 1024 dimensions (no peak at 256), and Table 3 bolds Adaptive (not Uniform) sampling as the best option. Both claims in the perturbed statement contradict those details.", "claim": "Figure 3 shows Cornac’s NDCG@100 on MSD rises from 0.15 at 64 dimensions to 0.31 at 1024. Table 3 marks Adaptive negative sampling as optimal. Therefore, applying Adaptive sampling with 1024-dimensional embeddings maximizes Cornac’s MSD NDCG@100.", "label": true }, { "paperid": "2410.03091v1", "paper_path": "./SciVer/papers/2410.03091v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.03091v1_figure_2.png", "item2_path": "./SciVer/images/2410.03091v1_figure_3.png", "section": [ "5" ], "request_id": 48, "origin_statement": "Fig2C shows the intervention group’s CGM follow-up durations are shorter than the control group’s. In Fig3A at day 9, the intervention group’s proposed mean TIR 70–180 mg/dL (~66%) exceeds the naive estimate (~57%) by about 8 percentage points.", "perturbed_statement": "Fig2C shows the control group’s CGM follow-up durations are shorter than the intervention group’s. In Fig3A at day 9, the intervention group’s naive mean TIR 70–180 mg/dL (~66%) exceeds the proposed estimate (~57%) by about 8 percentage points.", "perturbed_explanation": "This is incorrect because Fig2C’s magenta (intervention) CDF reaches 0.5 before the teal (control) CDF, indicating the intervention group has shorter follow-up, not the control. Moreover, in Fig3A the proposed (triangle) estimate (~66%) is above the naive (circle) (~57%), not the reverse.", "claim": "Fig2C shows the intervention group’s CGM follow-up durations are shorter than the control group’s. 
In Fig3A at day 9, the intervention group’s proposed mean TIR 70–180 mg/dL (~66%) exceeds the naive estimate (~57%) by about 8 percentage points.", "label": true }, { "paperid": "2410.12049v1", "paper_path": "./SciVer/papers/2410.12049v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.12049v1_figure_1.png", "item2_path": "./SciVer/images/2410.12049v1_figure_2.png", "section": [ "2.1" ], "request_id": 64, "origin_statement": "Since Sabiá-3 is priced at roughly $2 per million tokens (Fig 1), and it outperforms Sabiá-2 Medium on 76% of ENADE 2022–23 exams (Fig 2), Sabiá-3 offers superior performance over Sabiá-2 Medium on most disciplines at this price point.", "perturbed_statement": "Since Sabiá-3 is priced at roughly $6 per million tokens (Fig 1), and it outperforms Sabiá-2 Medium on 90% of ENADE 2022–23 exams (Fig 2), Sabiá-3 offers superior performance over Sabiá-2 Medium on most disciplines at this price point.", "perturbed_explanation": "The perturbed claim is wrong because Figure 1 shows Sabiá-3 costs about $2 per million tokens, not $6, and Figure 2 reports it outperforms Sabiá-2 Medium on 76% of exams, not 90%.", "claim": "Since Sabiá-3 is priced at roughly $2 per million tokens (Fig 1), and it outperforms Sabiá-2 Medium on 76% of ENADE 2022–23 exams (Fig 2), Sabiá-3 offers superior performance over Sabiá-2 Medium on most disciplines at this price point.", "label": true }, { "paperid": "2410.23507v1", "paper_path": "./SciVer/papers/2410.23507v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23507v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.23507v1-Table3-1.png", "section": [ "5" ], "request_id": 91, "origin_statement": "Table 3 indicates MoECE-GS-Large uses 917 M effective parameters. In Figure 3, the model at ~917 M achieves an F0.5 of about 67.8, while T5-XL at 3000 M reaches 67.65. Therefore, MoECE-GS-Large exceeds T5-XL’s F0.5 with fewer parameters.", "perturbed_statement": "Table 3 indicates MoECE-GS-Large uses 917 M effective parameters. In Figure 3, the model at ~917 M achieves an F0.5 of about 67.8, while T5-XL at 3000 M reaches 68.00. Therefore, MoECE-GS-Large falls short of T5-XL’s F0.5 despite using fewer parameters.", "perturbed_explanation": "The perturbation misstates T5-XL’s F0.5 score. Figure 3 actually shows T5-XL at 3000 M has an F0.5 around 67.65, not 68.00, so MoECE-GS-Large still outperforms T5-XL rather than falling short.", "claim": "Table 3 indicates MoECE-GS-Large uses 917 M effective parameters. In Figure 3, the model at ~917 M achieves an F0.5 of about 67.8, while T5-XL at 3000 M reaches 67.65. 
Therefore, MoECE-GS-Large exceeds T5-XL’s F0.5 with fewer parameters.", "label": true }, { "paperid": "2411.18385v1", "paper_path": "./SciVer/papers/2411.18385v1.json", "claim_type": "sequential", "item1": "2(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.18385v1_figure_2(b).png", "item2_path": "./SciVer/images/2411.18385v1-Table2-1.png", "section": [ "4.1" ], "request_id": 100, "origin_statement": "On SVHN, since FedIvon’s test loss curve falls below 0.8 by round 120 (Figure 2), it achieves the lowest negative log-likelihood (0.5303 vs 0.6423 for FedLaplace) in Table 2, reflecting its superior predictive uncertainty.", "perturbed_statement": "On SVHN, FedLaplace’s test loss dips below 0.8 by round 100 (Figure 2), so it achieves the lowest negative log-likelihood 0.5303 in Table 2, outperforming FedIvon.", "perturbed_explanation": "This is wrong because in Figure 2 the red FedLaplace curve only drops below 0.8 after about 200 rounds (not at 100), and Table 2 shows FedLaplace’s NLL is 0.6423—higher than FedIvon’s 0.5303, so it does not outperform FedIvon.", "claim": "On SVHN, since FedIvon’s test loss curve falls below 0.8 by round 120 (Figure 2), it achieves the lowest negative log-likelihood (0.5303 vs 0.6423 for FedLaplace) in Table 2, reflecting its superior predictive uncertainty.", "label": true }, { "paperid": "2410.15592v2", "paper_path": "./SciVer/papers/2410.15592v2.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.15592v2_figure_3.png", "item2_path": "./SciVer/images/2410.15592v2-Table4-1.png", "section": [ "5" ], "request_id": 131, "origin_statement": "The SSLM pre-training employed a 25% masking rate with a 9:0:1 masking distribution (Table 5), so on the validation set (Fig. 3), the structure-sequence similarity converges around 71.6%.", "perturbed_statement": "The SSLM pre-training employed a 15% masking rate with a 9:0:1 masking distribution (Table 5), so on the validation set (Fig. 3), the structure-sequence similarity converges around 71.6%.", "perturbed_explanation": "Table 5 shows that the 9:0:1 masking distribution is only used at a 25% masking rate; at 15% masking the model uses 8:1:1 ratios. Thus claiming a 15% mask rate with 9:0:1 contradicts the table.", "claim": "The SSLM pre-training employed a 25% masking rate with a 9:0:1 masking distribution (Table 5), so on the validation set (Fig. 3), the structure-sequence similarity converges around 71.6%.", "label": true }, { "paperid": "2409.07796v1", "paper_path": "./SciVer/papers/2409.07796v1.json", "claim_type": "sequential", "item1": "5", "item2": "7", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.07796v1-Table5-1.png", "item2_path": "./SciVer/images/2409.07796v1-Table7-1.png", "section": [ "4.7" ], "request_id": 144, "origin_statement": "WildFiT-All (B0) achieves 90.3% accuracy on D2 (Table 5). When trained with 700 samples/class, WildFiT reaches 90.0% on D2 (Table 7). Hence, adding BDD and CDD modules beyond fine-tuning yields only a 0.3% improvement on D2.", "perturbed_statement": "WildFiT-All (B0) achieves 90.3% accuracy on D2 (Table 5). When trained with 700 samples/class, WildFiT reaches 88.2% on D2 (Table 7). 
Hence, adding BDD and CDD modules beyond fine-tuning yields only a 2.1% improvement on D2.", "perturbed_explanation": "The perturbation misreads Table 7’s 700/class WildFiT accuracy as 88.2%, but the table shows 88.2% is for 350/class, while the correct 700/class WildFiT accuracy is 90.0%. Therefore the true improvement is 0.3%, not 2.1%.", "claim": "WildFiT-All (B0) achieves 90.3% accuracy on D2 (Table 5). When trained with 700 samples/class, WildFiT reaches 90.0% on D2 (Table 7). Hence, adding BDD and CDD modules beyond fine-tuning yields only a 0.3% improvement on D2.", "label": true }, { "paperid": "2409.20332v1", "paper_path": "./SciVer/papers/2409.20332v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.20332v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.20332v1_figure_1(b).png", "section": [ "1" ], "request_id": 171, "origin_statement": "Chart 1 shows pancreas dice decreases by 0.3% with synthetic augmentation. The synthetic CT image in Fig.1(b) displays blurred pancreas borders. Thus, suboptimal organ detail in synthetic volumes likely degrades SSL pancreas segmentation.", "perturbed_statement": "Chart 1 shows kidney dice increases by 2.6% with synthetic augmentation. The synthetic CT image in Fig.1(b) displays clear kidney borders. Thus, synthetic volumes improve SSL kidney segmentation.", "perturbed_explanation": "In Chart 1, the kidney bar is actually at –0.5% (not +2.6%, which is the liver). Furthermore, Fig.1(b) highlights the pancreas region, not the kidney, and it shows blurred rather than clear organ boundaries.", "claim": "Chart 1 shows pancreas dice decreases by 0.3% with synthetic augmentation. The synthetic CT image in Fig.1(b) displays blurred pancreas borders. Thus, suboptimal organ detail in synthetic volumes likely degrades SSL pancreas segmentation.", "label": true }, { "paperid": "2411.12846v1", "paper_path": "./SciVer/papers/2411.12846v1.json", "claim_type": "sequential", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.12846v1_figure_4.png", "item2_path": "./SciVer/images/2411.12846v1-Table1-1.png", "section": [ "3.2" ], "request_id": 173, "origin_statement": "Of the three datasets that include skin tone (PAD-UFES-20, Diverse Dermatology Images, SkinCon), all use the six-level Fitzpatrick scale, whereas the Monk scale offers ten nuanced gradations, so models trained on these datasets lack fine-grained tone differentiation.", "perturbed_statement": "Of the three datasets that include skin tone (PAD-UFES-20, DDI, SkinCon), all use the five-level Fitzpatrick scale, whereas the Monk scale offers ten nuanced gradations, so models trained on them lack fine-grained tone differentiation.", "perturbed_explanation": "The perturbed statement wrongly claims the Fitzpatrick scale has five levels. 
Figure 4 clearly shows six Fitzpatrick types (Type 1 through Type 6), so stating five levels contradicts the provided scale.", "claim": "Of the three datasets that include skin tone (PAD-UFES-20, Diverse Dermatology Images, SkinCon), all use the six-level Fitzpatrick scale, whereas the Monk scale offers ten nuanced gradations, so models trained on these datasets lack fine-grained tone differentiation.", "label": true }, { "paperid": "2411.17977v1", "paper_path": "./SciVer/papers/2411.17977v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17977v1_figure_4.png", "item2_path": "./SciVer/images/2411.17977v1_figure_5.png", "section": [ "3" ], "request_id": 190, "origin_statement": "From Fig.4’s power-law fit (CR-only model), S≈30 mJy at ν=2 GHz. In Fig.5 (DM-only) the χ² minimum for the τ channel occurs at m≈30 GeV. Thus, a ∼30 GeV DM candidate annihilating via τ⁺τ⁻ could reproduce the observed 2 GHz flux density.", "perturbed_statement": "From Fig.4’s power-law fit, S≈100 mJy at ν=2 GHz. In Fig.5 (DM-only) the χ² minimum for the τ channel occurs at m≈10 GeV. Thus, a ∼10 GeV DM candidate annihilating via τ⁺τ⁻ could reproduce the observed 2 GHz flux density.", "perturbed_explanation": "The perturbed claim misreads both figures. In Fig. 4 the red power-law line gives S≈30 mJy (not 100 mJy) at 2 GHz. In Fig. 5 the green τ-channel χ² minimum lies near m≈30 GeV (not 10 GeV). These discrepancies make the perturbed conclusion false.", "claim": "From Fig.4’s power-law fit (CR-only model), S≈30 mJy at ν=2 GHz. In Fig.5 (DM-only) the χ² minimum for the τ channel occurs at m≈30 GeV. Thus, a ∼30 GeV DM candidate annihilating via τ⁺τ⁻ could reproduce the observed 2 GHz flux density.", "label": true }, { "paperid": "2410.08847v2", "paper_path": "./SciVer/papers/2410.08847v2.json", "claim_type": "sequential", "item1": "4", "item2": "16", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.08847v2_figure_4.png", "item2_path": "./SciVer/images/2410.08847v2-Table16-1.png", "section": [ "6.3" ], "request_id": 216, "origin_statement": "Figure 4 shows that 71% of the highest length-normalized CHES samples consist of two refusals, and Table 16 reports that after filtering out these high-scoring samples, Llama-3-8B-Instruct’s preferred response log probability still decreases by 27.7.", "perturbed_statement": "Figure 4 shows that 71% of the highest length-normalized CHES samples consist of two non-refusal pairs, and Table 16 reports that after filtering out these high-scoring samples, Llama-3-8B-Instruct’s preferred response log probability increases by 27.7.", "perturbed_explanation": "The perturbation is wrong for two reasons: (1) Figure 4’s red cluster labeled “Two Refusals” corresponds to 262/370 (71%), not the “Two Non-Refusals” group. 
(2) Table 16 shows that DPO (filtered) on Llama-3-8B-Instruct yields –27.7, not +27.7.", "claim": "Figure 4 shows that 71% of the highest length-normalized CHES samples consist of two refusals, and Table 16 reports that after filtering out these high-scoring samples, Llama-3-8B-Instruct’s preferred response log probability still decreases by 27.7.", "label": true }, { "paperid": "2410.22785v1", "paper_path": "./SciVer/papers/2410.22785v1.json", "claim_type": "sequential", "item1": "4", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.22785v1_figure_4.png", "item2_path": "./SciVer/images/2410.22785v1_figure_7.png", "section": [ "4.2.5" ], "request_id": 221, "origin_statement": "In Fig. 7 the v=1 28SiO maser peak is roughly twice the v=2 peak; in Fig. 4 the black cross marking the SiO maser coincides with the infrared source, implying that the stronger v=1 SiO emission arises from the central IR-bright AGB star.", "perturbed_statement": "In Fig. 7 the v=2 28SiO maser peak is roughly twice the v=1 peak; in Fig. 4 the black cross marking the SiO maser is offset by about 1″ from the infrared source, implying that the stronger v=2 SiO emission arises from a nearby companion rather than the central AGB star.", "perturbed_explanation": "This is incorrect because Fig. 7 shows the v=1 line intensity (~1.05) is actually higher than the v=2 line (~0.65), so v=2 is not stronger. Moreover, in Fig. 4 the black cross is centered on the infrared peak, not offset by 1″, so there is no indication of a separate companion source.", "claim": "In Fig. 7 the v=1 28SiO maser peak is roughly twice the v=2 peak; in Fig. 4 the black cross marking the SiO maser coincides with the infrared source, implying that the stronger v=1 SiO emission arises from the central IR-bright AGB star.", "label": true }, { "paperid": "2411.09585v1", "paper_path": "./SciVer/papers/2411.09585v1.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.09585v1-Table6-1.png", "item2_path": "./SciVer/images/2411.09585v1-Table7-1.png", "section": [ "4.5" ], "request_id": 224, "origin_statement": "At ε=0.1, D3's average ACC is 92.12% (Table 6). Table 7 shows λ=10 also yields 92.12% ACC. Thus, setting λ to 10 replicates the clean accuracy achieved at ε=0.1.", "perturbed_statement": "At ε=0.1, D3's average ACC is 91.88% (Table 6). Table 7 shows λ=10 also yields 91.88% ACC. Thus, setting λ to 10 replicates the clean accuracy achieved at ε=0.1.", "perturbed_explanation": "This is incorrect because Table 6 reports an average ACC of 92.12% at ε=0.1 (not 91.88%), and Table 7 reports 92.12% ACC at λ=10 (not 91.88%), so the perturbed values contradict both tables.", "claim": "At ε=0.1, D3's average ACC is 92.12% (Table 6). Table 7 shows λ=10 also yields 92.12% ACC. 
Thus, setting λ to 10 replicates the clean accuracy achieved at ε=0.1.", "label": true }, { "paperid": "2410.09635v1", "paper_path": "./SciVer/papers/2410.09635v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09635v1_figure_1.png", "item2_path": "./SciVer/images/2410.09635v1_figure_2.png", "section": [ "1" ], "request_id": 294, "origin_statement": "Figure 1 lists 3 fetal risk factors; Figure 2’s risk explainer uses test data to generate counterfactuals that can alter any of these 3 features, such as suggesting removal of Bradycardia to change an abnormal risk prediction to normal.", "perturbed_statement": "Figure 1 lists 4 fetal risk factors; Figure 2’s risk explainer uses test data to generate counterfactuals that can alter any of these 4 features, such as suggesting removal of Bradycardia to change an abnormal risk prediction to normal.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 1 actually shows only 3 fetal risk factors—Bradycardia, Chorioamnionitis, and Meconium—rather than 4.", "claim": "Figure 1 lists 3 fetal risk factors; Figure 2’s risk explainer uses test data to generate counterfactuals that can alter any of these 3 features, such as suggesting removal of Bradycardia to change an abnormal risk prediction to normal.", "label": true }, { "paperid": "2410.15712v1", "paper_path": "./SciVer/papers/2410.15712v1.json", "claim_type": "sequential", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.15712v1_figure_8.png", "item2_path": "./SciVer/images/2410.15712v1_figure_9.png", "section": [ "4" ], "request_id": 316, "origin_statement": "Figure 8 shows the peak unabsorbed X-ray flux (~5×10⁻¹¹ erg cm⁻² s⁻¹) at ≈(l=16°, b=–3°) inside the Scutum Supershell. Figure 9 locates LS 5039 at (l=16.9°, b=–2.2°), adjacent to that hotspot. Therefore, LS 5039 likely powers the localized X-ray enhancement.", "perturbed_statement": "Figure 8 shows the peak unabsorbed X-ray flux (~5×10⁻¹¹ erg cm⁻² s⁻¹) at ≈(l=18°, b=+2°) inside the Scutum Supershell. Figure 9 locates LS 5039 at (l=16.9°, b=–2.2°), adjacent to that hotspot. Therefore, LS 5039 likely powers the localized X-ray enhancement.", "perturbed_explanation": "The perturbed statement misplaces the X-ray flux peak at (l=18°, b=+2°), but Figure 8 actually shows the highest flux at around (16°, –3°). At (18°, +2°) there is no matching X-ray hotspot, so LS 5039 (at 16.9°, –2.2°) cannot lie adjacent to a non-existent peak.", "claim": "Figure 8 shows the peak unabsorbed X-ray flux (~5×10⁻¹¹ erg cm⁻² s⁻¹) at ≈(l=16°, b=–3°) inside the Scutum Supershell. Figure 9 locates LS 5039 at (l=16.9°, b=–2.2°), adjacent to that hotspot. 
Therefore, LS 5039 likely powers the localized X-ray enhancement.", "label": true }, { "paperid": "2410.11566v2", "paper_path": "./SciVer/papers/2410.11566v2.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.11566v2_figure_1.png", "item2_path": "./SciVer/images/2410.11566v2-Table1-1.png", "section": [ "4.1" ], "request_id": 323, "origin_statement": "Figure 1c shows BE's uncertainty falls below 10° in about 0.1s; Table 1 reports BE's CPU time is 11.11s, over 13 times MEKF's 0.83s; thus BE achieves rapid uncertainty reduction at significantly higher computational cost.", "perturbed_statement": "Figure 1c shows BE's uncertainty falls below 10° in about 0.5s; Table 1 reports BE's CPU time is 0.83s, equal to MEKF's; thus BE achieves rapid uncertainty reduction without extra computational cost.", "perturbed_explanation": "Both intermediate premises are incorrect. In Figure 1c, BE's uncertainty actually drops below 10° at around 0.1 s, not 0.5 s. And Table 1 lists BE's CPU time as 11.11 s (not 0.83 s), so BE does incur higher computational cost than MEKF.", "claim": "Figure 1c shows BE's uncertainty falls below 10° in about 0.1s; Table 1 reports BE's CPU time is 11.11s, over 13 times MEKF's 0.83s; thus BE achieves rapid uncertainty reduction at significantly higher computational cost.", "label": true }, { "paperid": "2411.02264v1", "paper_path": "./SciVer/papers/2411.02264v1.json", "claim_type": "sequential", "item1": "2", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02264v1_figure_2.png", "item2_path": "./SciVer/images/2411.02264v1_figure_6.png", "section": [ "5.1" ], "request_id": 336, "origin_statement": "From Fig. 2, the (440) QNM’s imaginary frequency uncertainty (vertical error) is about ±50 Hz. In Fig. 6, under the dependent QQNM (blue), this uncertainty narrows to roughly ±35 Hz—about a 30 % reduction relative to the linear (black) error bar.", "perturbed_statement": "From Fig. 2, the (440) QNM’s imaginary frequency uncertainty (vertical error) is about ±50 Hz. In Fig. 6, under the dependent QQNM (blue), this uncertainty narrows to roughly ±20 Hz—a 60 % reduction relative to the linear (black) error bar.", "perturbed_explanation": "The blue error bar for the (440) mode’s imaginary frequency in Fig. 6 actually spans about ±35 Hz (from roughly –315 Hz to –235 Hz), not ±20 Hz. Therefore, the reduction is near 30 %, not 60 %, contradicting the perturbed claim.", "claim": "From Fig. 2, the (440) QNM’s imaginary frequency uncertainty (vertical error) is about ±50 Hz. In Fig. 6, under the dependent QQNM (blue), this uncertainty narrows to roughly ±35 Hz—about a 30 % reduction relative to the linear (black) error bar.", "label": true }, { "paperid": "2410.05935v1", "paper_path": "./SciVer/papers/2410.05935v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.05935v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.05935v1_figure_5(d).png", "section": [ "4.2.2" ], "request_id": 358, "origin_statement": "Table2 shows the proposed method improves unseen-class detection at thr=100 over other augmentations. 
Fig.5 then demonstrates this gain by correctly detecting the rightmost face with a different expression, which baseline methods fail to localize.", "perturbed_statement": "Table2 shows the proposed method degrades unseen-class detection at thr=100 compared to default augmentation. Fig.5 thus incorrectly shows the proposed method missing the rightmost face with a different expression, proving it cannot handle expression variations.", "perturbed_explanation": "This is incorrect because Table 2 in the context explicitly states that the proposed method \"shows an improvement over the default augmentation on both seen and unseen classes\" at all thresholds, including thr=100. Furthermore, Fig.5 actually illustrates the proposed method correctly detecting the rightmost face with a different expression, not missing it.", "claim": "Table2 shows the proposed method improves unseen-class detection at thr=100 over other augmentations. Fig.5 then demonstrates this gain by correctly detecting the rightmost face with a different expression, which baseline methods fail to localize.", "label": true }, { "paperid": "2409.07162v2", "paper_path": "./SciVer/papers/2409.07162v2.json", "claim_type": "sequential", "item1": "3(b)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07162v2_figure_3(b).png", "item2_path": "./SciVer/images/2409.07162v2_figure_3(c).png", "section": [ "5.2.2" ], "request_id": 361, "origin_statement": "ChatGPT’s neutral f1 drops from ~45% at zero-shot to ~40% at 1-shot (first chart). LLama-70B’s neutral f1 climbs from ~52% to ~60% (second chart). Thus, under 1-shot, LLama-70B outperforms ChatGPT’s neutral sentiment by approximately 20 percentage points.", "perturbed_statement": "ChatGPT’s neutral f1 drops from ~45% at zero-shot to ~43% at 1-shot (first chart). LLama-70B’s neutral f1 climbs from ~52% to ~60% (second chart). Thus, under 1-shot, LLama-70B outperforms ChatGPT’s neutral sentiment by approximately 17 percentage points.", "perturbed_explanation": "The perturbation wrongly states that ChatGPT’s neutral f1 at 1-shot is ~43%, but the first chart and context report a drop of 5 points from ~45% to ~40%. This incorrect intermediate premise makes the claimed 17-point gap incorrect.", "claim": "ChatGPT’s neutral f1 drops from ~45% at zero-shot to ~40% at 1-shot (first chart). LLama-70B’s neutral f1 climbs from ~52% to ~60% (second chart). 
Thus, under 1-shot, LLama-70B outperforms ChatGPT’s neutral sentiment by approximately 20 percentage points.", "label": true }, { "paperid": "2409.05306v2", "paper_path": "./SciVer/papers/2409.05306v2.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05306v2_figure_5.png", "item2_path": "./SciVer/images/2409.05306v2-Table4-1.png", "section": [ "3.3" ], "request_id": 382, "origin_statement": "With a test ROC AUC of 0.88 (Figure 5), the GNN model reliably predicts that using atomic layer deposition yields uniform and conformal Al₂O₃ films on Si (Table 4).", "perturbed_statement": "With a test ROC AUC of 0.78 (Figure 5), the GNN model reliably predicts that using thermal evaporation yields uniform and conformal Al₂O₃ films on Si (Table 4).", "perturbed_explanation": "This statement is wrong because Figure 5 reports a testing ROC AUC of 0.88, not 0.78, and Table 4 specifies atomic layer deposition as the solution for Si–Al₂O₃ interfaces, not thermal evaporation.", "claim": "With a test ROC AUC of 0.88 (Figure 5), the GNN model reliably predicts that using atomic layer deposition yields uniform and conformal Al₂O₃ films on Si (Table 4).", "label": true }, { "paperid": "2409.05314v2", "paper_path": "./SciVer/papers/2409.05314v2.json", "claim_type": "sequential", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05314v2_figure_6.png", "item2_path": "./SciVer/images/2409.05314v2-Table3-1.png", "section": [ "7" ], "request_id": 386, "origin_statement": "Tinyllama-1.1B begins training at a lower loss than Gemma-2B in Fig.6; accordingly, in Table 3 Tinyllama-1.1B-Tele achieves a 221.38-point reduction in Ans-PPL (230.44 to 9.06), the largest among the adapted models.", "perturbed_statement": "Gemma-2B begins training at a lower loss than Tinyllama-1.1B in Fig.6; accordingly, in Table 3 Gemma-2B-Tele achieves a 1.58-point reduction in Ans-PPL (13.31 to 11.73), the largest among the adapted models.", "perturbed_explanation": "This claim is incorrect because Figure 6 actually shows Tinyllama-1.1B starting at around 1.76 training loss versus Gemma-2B near 3.0, so Gemma-2B does not start lower. Moreover, Table 3 reports Gemma-2B-Tele’s Ans-PPL drop as only 1.58 points, which is far smaller than Tinyllama-1.1B-Tele’s 221.38-point reduction, so it is not the largest.", "claim": "Tinyllama-1.1B begins training at a lower loss than Gemma-2B in Fig.6; accordingly, in Table 3 Tinyllama-1.1B-Tele achieves a 221.38-point reduction in Ans-PPL (230.44 to 9.06), the largest among the adapted models.", "label": true }, { "paperid": "2409.00163v1", "paper_path": "./SciVer/papers/2409.00163v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.00163v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.00163v1-Table3-1.png", "section": [ "4.2" ], "request_id": 445, "origin_statement": "Figure 3 shows DeepHit’s predicted OS curves for five patients vary minimally. Table 3 reports DeepHit’s OS IBS of 0.214 exceeds CoxPH’s 0.164. Therefore, DeepHit has worse calibration performance than CoxPH.", "perturbed_statement": "Figure 3 shows DeepHit’s predicted OS curves for five patients vary minimally. Table 3 reports DeepHit’s OS IBS of 0.150, lower than CoxPH’s 0.164. 
Therefore, DeepHit has better calibration performance than CoxPH.", "perturbed_explanation": "The perturbation is incorrect because Table 3 actually lists DeepHit’s OS IBS as 0.214 (not 0.150), which is higher than CoxPH’s 0.164. Hence DeepHit’s calibration is worse, not better, than CoxPH’s.", "claim": "Figure 3 shows DeepHit’s predicted OS curves for five patients vary minimally. Table 3 reports DeepHit’s OS IBS of 0.214 exceeds CoxPH’s 0.164. Therefore, DeepHit has worse calibration performance than CoxPH.", "label": true }, { "paperid": "2411.10312v1", "paper_path": "./SciVer/papers/2411.10312v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.10312v1-Table1-1.png", "item2_path": "./SciVer/images/2411.10312v1-Table2-1.png", "section": [ "3.2" ], "request_id": 503, "origin_statement": "Table 1 shows GC-FPCA took 0.06 min versus GFAMM’s 79.83 min at I=100, a ~1 330× speedup; Table 2 shows at K=200 GC-FPCA also took 0.06 min versus GFAMM’s 61.54 min (~1 025×), indicating GC-FPCA’s speed advantage shrinks as K increases.", "perturbed_statement": "Because Table 1 reports GC-FPCA time as 0.06 min at I=100 and Table 2 lists GC-FPCA at 0.06 min for K=200, 500, and 1000, GC-FPCA computing time is unaffected by the number of observations per function.", "perturbed_explanation": "This is incorrect: Table 2 shows that GC-FPCA’s time increases to 0.13 min at K=1000 (not 0.06 min), so computing time does change with the number of observations per function.", "claim": "Table 1 shows GC-FPCA took 0.06 min versus GFAMM’s 79.83 min at I=100, a ~1 330× speedup; Table 2 shows at K=200 GC-FPCA also took 0.06 min versus GFAMM’s 61.54 min (~1 025×), indicating GC-FPCA’s speed advantage shrinks as K increases.", "label": true }, { "paperid": "2410.06765v1", "paper_path": "./SciVer/papers/2410.06765v1.json", "claim_type": "sequential", "item1": "4", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.06765v1-Table4-1.png", "item2_path": "./SciVer/images/2410.06765v1-Table6-1.png", "section": [ "4.2" ], "request_id": 514, "origin_statement": "Table 4 shows SEED-Bench’s Scene Understanding is reclassified as Coarse-grained Perception. Table 6 then lists only Scene Understanding under the Coarse category for SEED-Bench, confirming it is the sole coarse-grained sub-task from SEED-Bench.", "perturbed_statement": "Table 4 shows SEED-Bench’s Instance Attribute is reclassified as Coarse-grained Perception. Table 6 then lists both Scene Understanding and Instance Attribute under Coarse tasks for SEED-Bench, indicating two coarse tasks.", "perturbed_explanation": "The perturbation is wrong because Table 4 actually classifies Instance Attribute as Fine-grained Perception, not Coarse-grained. Additionally, Table 6 places Instance Attribute under the Fine-grained section for SEED-Bench, not under Coarse tasks.", "claim": "Table 4 shows SEED-Bench’s Scene Understanding is reclassified as Coarse-grained Perception. 
Table 6 then lists only Scene Understanding under the Coarse category for SEED-Bench, confirming it is the sole coarse-grained sub-task from SEED-Bench.", "label": true }, { "paperid": "2409.00140v1", "paper_path": "./SciVer/papers/2409.00140v1.json", "claim_type": "sequential", "item1": "5", "item2": "6(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.00140v1_figure_5.png", "item2_path": "./SciVer/images/2409.00140v1_figure_6(a).png", "section": [ "3.2" ], "request_id": 525, "origin_statement": "Figure 5 shows FQReLU-QIP-1 with QXavier init (grey) is statistically equivalent to the QHe-initialized best (blue). Figure 6 then shows the QIP-1 group (blue) yields the highest mean. Thus FQReLU-QIP-1 with QXavier init achieves top accuracy with only one-quarter of parameters.", "perturbed_statement": "Figure 5 shows SQReLU-QIP-1 with QHe init (grey) matches the best blue group. Figure 6 then shows the QIP-1-2 group (blue) yields the highest mean. Thus SQReLU-QIP-1-2 with QHe init achieves top accuracy with half the parameters.", "perturbed_explanation": "This is incorrect because in Figure 5 all SQReLU-QIP-1 variants (including QHe init) appear in red, indicating they are statistically worse than the blue top group, not grey. Moreover, in Figure 6 the blue marker corresponds to the QIP-1 group, not QIP-1-2, so SQReLU-QIP-1-2 cannot yield the highest mean.", "claim": "Figure 5 shows FQReLU-QIP-1 with QXavier init (grey) is statistically equivalent to the QHe-initialized best (blue). Figure 6 then shows the QIP-1 group (blue) yields the highest mean. Thus FQReLU-QIP-1 with QXavier init achieves top accuracy with only one-quarter of parameters.", "label": true }, { "paperid": "2409.15313v1", "paper_path": "./SciVer/papers/2409.15313v1.json", "claim_type": "sequential", "item1": "9", "item2": "11", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15313v1_figure_9.png", "item2_path": "./SciVer/images/2409.15313v1_figure_11.png", "section": [ "4.5" ], "request_id": 554, "origin_statement": "Resnet-34’s loss falls below 0.5 within the first 100 iterations (Figure 9), and Figure 11 shows its validation accuracy climbs above 0.85 by epoch 40, indicating strong generalization even in late training.", "perturbed_statement": "Resnet-34’s loss falls below 0.2 within the first 100 iterations (Figure 9), and Figure 11 shows its validation accuracy remains below 0.8 by epoch 50, indicating weak generalization throughout training.", "perturbed_explanation": "The perturbed claim is false because Figure 9 never shows the loss dropping as low as 0.2—it stabilizes around 0.4–0.5—and Figure 11 clearly plots validation accuracy exceeding 0.85 by epoch 40, contradicting the assertion that it remains below 0.8.", "claim": "Resnet-34’s loss falls below 0.5 within the first 100 iterations (Figure 9), and Figure 11 shows its validation accuracy climbs above 0.85 by epoch 40, indicating strong generalization even in late training.", "label": true }, { "paperid": "2409.16745v1", "paper_path": "./SciVer/papers/2409.16745v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16745v1_figure_1(c).png", "item2_path": "./SciVer/images/2409.16745v1_figure_2(b).png", "section": [ "3.2" ], "request_id": 555, "origin_statement": "The 2–8 keV X-ray polarization degree on 06 Dec was ~5.8% (Fig. 1 bottom left); the B-band optical polarization that night was 4.27% (Fig. 
1 bottom right); thus optical B polarization is ~1.5% lower than the X-ray.", "perturbed_statement": "The 2–8 keV X-ray polarization degree on 06 Dec was ~4.8% (Fig. 1 bottom left); the B-band optical polarization that night was 4.27% (Fig. 1 bottom right); thus optical B polarization is only ~0.5% lower than the X-ray.", "perturbed_explanation": "Fig. 1 bottom left actually shows the 2–8 keV X-ray polarization at ~5.8%, not ~4.8%. Because the X-ray degree is misquoted, the claimed ~0.5% difference is incorrect—the true drop from ~5.8% to 4.27% is ~1.5%.", "claim": "The 2–8 keV X-ray polarization degree on 06 Dec was ~5.8% (Fig. 1 bottom left); the B-band optical polarization that night was 4.27% (Fig. 1 bottom right); thus optical B polarization is ~1.5% lower than the X-ray.", "label": true }, { "paperid": "2410.20742v1", "paper_path": "./SciVer/papers/2410.20742v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20742v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.20742v1_figure_4(c).png", "section": [ "6" ], "request_id": 565, "origin_statement": "POP method achieves an SNR of about 17.9 dB in the VITS model, and the POP-protected audio’s mel-spectrogram (right) is almost blank, indicating the synthesized audio reveals no sensitive information to protect privacy.", "perturbed_statement": "EM method achieves an SNR of about 17.9 dB in the VITS model, and the EM-protected audio’s mel-spectrogram (right) is almost blank, indicating the synthesized audio reveals no sensitive information to protect privacy.", "perturbed_explanation": "The bar chart shows that the 17.9 dB SNR in VITS is achieved by the POP method, not EM (EM’s SNR is only about 10 dB). Furthermore, the blank mel-spectrogram on the right corresponds to POP-protected audio, not EM, so attributing these results to EM is incorrect.", "claim": "POP method achieves an SNR of about 17.9 dB in the VITS model, and the POP-protected audio’s mel-spectrogram (right) is almost blank, indicating the synthesized audio reveals no sensitive information to protect privacy.", "label": true }, { "paperid": "2410.00454v1", "paper_path": "./SciVer/papers/2410.00454v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "3(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.00454v1_figure_3(b).png", "item2_path": "./SciVer/images/2410.00454v1_figure_3(d).png", "section": [ "4.3" ], "request_id": 581, "origin_statement": "From Fig.3(b), generality peaks at 4 experts (0.92). At top-k=4 in Fig.3(d), generality falls to 0.865, indicating that using four routing paths erodes generalization by about 5.5%.", "perturbed_statement": "From Fig.3(b), generality peaks at 2 experts (0.88). At top-k=4 in Fig.3(d), generality rises to 0.90, indicating that using four routing paths enhances generalization by about 2%.", "perturbed_explanation": "This is incorrect because Fig.3(b) shows generality at 2 experts is about 0.86 (not 0.88), and the true peak is at 4 experts (0.92). Moreover, Fig.3(d) reports generality at top-k=4 as approximately 0.865, not 0.90, so the claimed rise contradicts the actual data.", "claim": "From Fig.3(b), generality peaks at 4 experts (0.92). 
At top-k=4 in Fig.3(d), generality falls to 0.865, indicating that using four routing paths erodes generalization by about 5.5%.", "label": true }, { "paperid": "2409.05653v3", "paper_path": "./SciVer/papers/2409.05653v3.json", "claim_type": "sequential", "item1": "4", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.05653v3-Table4-1.png", "item2_path": "./SciVer/images/2409.05653v3-Table6-1.png", "section": [ "5.2" ], "request_id": 584, "origin_statement": "SpanBERT-large’s “she” biases share no occupational overlap between nominative and accusative cases (Jaccard = 0.00), so hairdresser, a nominative-case bias, is absent from accusative biased occupations for she.", "perturbed_statement": "SpanBERT-large’s “she” biases share moderate overlap between nominative and accusative cases (Jaccard = 0.13), so hairdresser appears among both nominative and accusative biased occupations for she.", "perturbed_explanation": "This is incorrect because Table 6 reports a Nom-Acc Jaccard index of 0.00 for “she,” not 0.13, and Table 4 shows hairdresser only under the nominative form of she, not the accusative form.", "claim": "SpanBERT-large’s “she” biases share no occupational overlap between nominative and accusative cases (Jaccard = 0.00), so hairdresser, a nominative-case bias, is absent from accusative biased occupations for she.", "label": true }, { "paperid": "2411.16273v1", "paper_path": "./SciVer/papers/2411.16273v1.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16273v1_figure_3.png", "item2_path": "./SciVer/images/2411.16273v1-Table1-1.png", "section": [ "2.2" ], "request_id": 612, "origin_statement": "From Fig. 3a, the CNN trained on EMG-only data achieves roughly 94% accuracy. Table 1 reports a 93.31% recall for “Turn left.” Thus, the Turn left recall is slightly below the model’s overall EMG-only accuracy.", "perturbed_statement": "From Fig. 3a, the CNN trained on EMG-only data achieves roughly 92% accuracy. Table 1 reports a 93.31% recall for “Turn left.” Thus, the Turn left recall slightly exceeds the model’s overall EMG-only accuracy.", "perturbed_explanation": "Fig. 3a shows the EMG-only CNN actually reaches about 94% accuracy, not 92%. Since the true accuracy (∼94%) is higher than the 93.31% recall, the recall does not exceed the overall performance, invalidating the perturbed claim.", "claim": "From Fig. 3a, the CNN trained on EMG-only data achieves roughly 94% accuracy. Table 1 reports a 93.31% recall for “Turn left.” Thus, the Turn left recall is slightly below the model’s overall EMG-only accuracy.", "label": true }, { "paperid": "2411.18065v1", "paper_path": "./SciVer/papers/2411.18065v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.18065v1_figure_1.png", "item2_path": "./SciVer/images/2411.18065v1_figure_2.png", "section": [ "2.1" ], "request_id": 645, "origin_statement": "Fig.1 shows FP8 e5m2 has five exponent bits. Fig.2 shows FP16’s encoding uses five exponent bits and ten mantissa bits. Thus FP8 e5m2 shares FP16’s exponent range but with only two mantissa bits, lowering its precision.", "perturbed_statement": "Fig.1 shows FP8 e5m2 has five exponent bits. Fig.2 shows FP16’s encoding uses eight exponent bits and ten mantissa bits. 
Thus FP8 e5m2 has a smaller exponent range than FP16, explaining its reduced dynamic range.", "perturbed_explanation": "The perturbed statement misreads Figure 2: FP16’s exponent field is five bits (green), not eight. Since FP8 e5m2 also has five exponent bits, it does not have a smaller exponent range than FP16.", "claim": "Fig.1 shows FP8 e5m2 has five exponent bits. Fig.2 shows FP16’s encoding uses five exponent bits and ten mantissa bits. Thus FP8 e5m2 shares FP16’s exponent range but with only two mantissa bits, lowering its precision.", "label": true }, { "paperid": "2410.06842v1", "paper_path": "./SciVer/papers/2410.06842v1.json", "claim_type": "sequential", "item1": "10", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.06842v1_figure_10.png", "item2_path": "./SciVer/images/2410.06842v1-Table4-1.png", "section": [ "4.3" ], "request_id": 656, "origin_statement": "Table 4 shows SurGenerator (Gauss) yields an E_φ of 0.894 on COD10K, outperforming both Edge (0.890) and Uniform (0.892). Building on this improved boundary alignment, Figure 10 reveals that SACLoss with SCCT sampling produces the sharpest object boundaries in final predictions.", "perturbed_statement": "Table 4 shows SurGenerator (Gauss) yields an E_φ of 0.893 on COD10K, underperforming Uniform (0.892). Building on this, Figure 10 reveals that SACLoss with SubSample sampling produces the sharpest object boundaries.", "perturbed_explanation": "The perturbation is incorrect because Table 4 actually reports E_φ=0.894 for SurGenerator (Gauss), which is higher than Uniform’s 0.892. Moreover, Figure 10 clearly shows that SACLoss with SCCT—not SubSample—yields the most precise boundary predictions.", "claim": "Table 4 shows SurGenerator (Gauss) yields an E_φ of 0.894 on COD10K, outperforming both Edge (0.890) and Uniform (0.892). Building on this improved boundary alignment, Figure 10 reveals that SACLoss with SCCT sampling produces the sharpest object boundaries in final predictions.", "label": true }, { "paperid": "2409.13990v2", "paper_path": "./SciVer/papers/2409.13990v2.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13990v2_figure_5.png", "item2_path": "./SciVer/images/2409.13990v2-Table4-1.png", "section": [ "4.3" ], "request_id": 695, "origin_statement": "Table 4 reports that for test size m=10 at α=0.1, the coverage rate is 0.962. In Figure 5, for test size m=5 at 1−α=0.9, the coverage rate is about 0.94. Therefore, at the 90% level, m=10 achieves roughly 2.2% higher coverage than m=5.", "perturbed_statement": "Table 4 reports that for test size m=10 at α=0.1, the coverage rate is 0.942. In Figure 5, for test size m=5 at 1−α=0.9, the coverage rate is about 0.96. Therefore, at the 90% level, m=5 outperforms m=10 by approximately 2%.", "perturbed_explanation": "The statement misreads two values. Table 4 actually shows a 0.962 coverage for m=10 at α=0.1 (not 0.942), and Figure 5 shows about 0.94 coverage for m=5 at 1−α=0.9 (not 0.96). Thus m=10 still outperforms m=5, so the perturbed claim is false.", "claim": "Table 4 reports that for test size m=10 at α=0.1, the coverage rate is 0.962. In Figure 5, for test size m=5 at 1−α=0.9, the coverage rate is about 0.94. 
Therefore, at the 90% level, m=10 achieves roughly 2.2% higher coverage than m=5.", "label": true }, { "paperid": "2409.04459v1", "paper_path": "./SciVer/papers/2409.04459v1.json", "claim_type": "sequential", "item1": "4", "item2": "14", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.04459v1_figure_4.png", "item2_path": "./SciVer/images/2409.04459v1-Table14-1.png", "section": [ "4.5" ], "request_id": 715, "origin_statement": "At k=25, Fig. 4 shows MIND accuracy is about 77%. Table 14 reports MIND ACC = 77.21% at w = 1536. Thus selecting k = 25 and w = 1536 yields nearly identical ~77% utility for MIND embeddings with perfect verifiability.", "perturbed_statement": "At k=25, Fig. 4 shows MIND accuracy is about 75%. Table 14 reports MIND ACC = 75.21% at w = 1536. Thus selecting k = 25 and w = 1536 yields nearly identical ~75% utility for MIND embeddings with perfect verifiability.", "perturbed_explanation": "The perturbation is incorrect because Fig. 4 actually shows MIND accuracy around 77% at k=25, not 75%. Moreover, Table 14 records MIND ACC = 77.21% at w = 1536, not 75.21%. These discrepancies contradict the provided data.", "claim": "At k=25, Fig. 4 shows MIND accuracy is about 77%. Table 14 reports MIND ACC = 77.21% at w = 1536. Thus selecting k = 25 and w = 1536 yields nearly identical ~77% utility for MIND embeddings with perfect verifiability.", "label": true }, { "paperid": "2410.10652v1", "paper_path": "./SciVer/papers/2410.10652v1.json", "claim_type": "sequential", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.10652v1_figure_3.png", "item2_path": "./SciVer/images/2410.10652v1-Table2-1.png", "section": [ "4.2" ], "request_id": 723, "origin_statement": "Figure 3 shows SLAT’s cosine similarity map for niche Layer5_Layer6_100 dips to –0.8, indicating negative artifacts. Table 2 reports QueST’s subgraph Wasserstein distance (0.751) is lower than SLAT’s (0.803). Thus, QueST both avoids SLAT’s negative artifacts and yields a better distance, demonstrating superior niche query performance.", "perturbed_statement": "Figure 3 shows SLAT’s cosine similarity map for niche Layer5_Layer6_100 dips to –0.8, indicating negative artifacts. Table 2 reports SLAT’s subgraph Wasserstein distance (0.751) is lower than QueST’s (0.803). Thus, SLAT both avoids QueST’s negative artifacts and yields a better distance, demonstrating superior niche query performance.", "perturbed_explanation": "The perturbed statement swaps the actual distances from Table 2: in reality, QueST’s distance is 0.751 and SLAT’s is 0.803, not the other way around. It also wrongly asserts that SLAT avoids negative artifacts, whereas Figure 3 clearly shows SLAT’s map includes values down to –0.8.", "claim": "Figure 3 shows SLAT’s cosine similarity map for niche Layer5_Layer6_100 dips to –0.8, indicating negative artifacts. Table 2 reports QueST’s subgraph Wasserstein distance (0.751) is lower than SLAT’s (0.803). 
Thus, QueST both avoids SLAT’s negative artifacts and yields a better distance, demonstrating superior niche query performance.", "label": true }, { "paperid": "2410.01727v1", "paper_path": "./SciVer/papers/2410.01727v1.json", "claim_type": "sequential", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.01727v1_figure_7.png", "item2_path": "./SciVer/images/2410.01727v1-Table3-1.png", "section": [ "5.2" ], "request_id": 729, "origin_statement": "On XES3G5M, full KCQRL annotations beat the original for correctness by 93.2% (Table 3), and using KCQRL’s complete CL loss lifts AKT AUC to 83.1% at 100% training data (Fig. 7), illustrating that chain‐of‐thought grounding boosts performance.", "perturbed_statement": "On XES3G5M, KCQRL w/o solution steps annotations were chosen over the original by 93.2% for correctness, and using the full CL loss raises AKT AUC to 82.5% with 100% data, showing chain‐of‐thought grounding boosts performance.", "perturbed_explanation": "This is incorrect because Table 3 shows the 93.2% correctness margin applies to full KCQRL (with solution steps), not the variant without solution steps (which scores only 15.9%). Moreover, Fig. 7 reports AKT AUC at 100% data as about 83.1%, not 82.5%.", "claim": "On XES3G5M, full KCQRL annotations beat the original for correctness by 93.2% (Table 3), and using KCQRL’s complete CL loss lifts AKT AUC to 83.1% at 100% training data (Fig. 7), illustrating that chain‐of‐thought grounding boosts performance.", "label": true }, { "paperid": "2410.04346v1", "paper_path": "./SciVer/papers/2410.04346v1.json", "claim_type": "sequential", "item1": "4", "item2": "8", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04346v1-Table4-1.png", "item2_path": "./SciVer/images/2410.04346v1-Table8-1.png", "section": [ "4.2" ], "request_id": 749, "origin_statement": "Table 4 shows OPO without Sinkhorn scaling scores 56.20 in Scoring at β=0.1; Table 8 shows ApproxNDCG at β=0.1 achieves 58.59. Therefore, OPO without scaling trails ApproxNDCG by 2.39 points.", "perturbed_statement": "Table 4 shows OPO without Sinkhorn scaling scores 57.32 in Scoring at β=0.1; Table 8 shows ApproxNDCG at β=0.1 achieves 59.04. Therefore, OPO without scaling trails ApproxNDCG by 1.72 points.", "perturbed_explanation": "The perturbed statement misquotes both tables: Table 4 lists the w/o Scale Scoring as 56.20 (not 57.32), and Table 8 lists ApproxNDCG at β=0.1 as 58.59 (not 59.04). These factual inaccuracies invalidate the claimed 1.72-point gap.", "claim": "Table 4 shows OPO without Sinkhorn scaling scores 56.20 in Scoring at β=0.1; Table 8 shows ApproxNDCG at β=0.1 achieves 58.59. 
Therefore, OPO without scaling trails ApproxNDCG by 2.39 points.", "label": true }, { "paperid": "2409.01629v1", "paper_path": "./SciVer/papers/2409.01629v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01629v1_figure_3(b).png", "item2_path": "./SciVer/images/2409.01629v1_figure_4.png", "section": [ "5" ], "request_id": 758, "origin_statement": "The Hγ line in Figure 4 shows up to 9% deeper absorption in the plate spectrum; however, Figure 5’s Gaussian ratio distribution has σ=2.4%, indicating that deep-line discrepancies exceed typical spectrophotometric scatter by nearly a factor of four.", "perturbed_statement": "The Hγ line in Figure 4 shows up to 9% deeper absorption in the plate spectrum; however, Figure 5’s Gaussian ratio distribution has σ=5%, indicating that deep-line discrepancies exceed typical spectrophotometric scatter by nearly a factor of four.", "perturbed_explanation": "This statement is wrong because Figure 5 actually reports a standard deviation of 2.4%, not 5%. Using the incorrect σ invalidates the comparison and the claimed factor.", "claim": "The Hγ line in Figure 4 shows up to 9% deeper absorption in the plate spectrum; however, Figure 5’s Gaussian ratio distribution has σ=2.4%, indicating that deep-line discrepancies exceed typical spectrophotometric scatter by nearly a factor of four.", "label": true }, { "paperid": "2409.02719v1", "paper_path": "./SciVer/papers/2409.02719v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.02719v1_figure_2.png", "item2_path": "./SciVer/images/2409.02719v1-Table3-1.png", "section": [ "3.3" ], "request_id": 778, "origin_statement": "Figure 2 shows the model uses 8-fold CV, and Table 3 sets n_estimators=50000. Therefore, across all folds the model potentially constructs up to 8×50000=400,000 decision trees before early stopping.", "perturbed_statement": "Figure 2 shows the model uses 8-fold CV, and Table 3 sets n_estimators=256. Therefore, across all folds the model potentially constructs up to 8×256=2048 decision trees before early stopping.", "perturbed_explanation": "The perturbed statement is wrong because Table 3 actually lists n_estimators as 50000, not 256. The value 256 refers to num_leaves, not the number of estimators.", "claim": "Figure 2 shows the model uses 8-fold CV, and Table 3 sets n_estimators=50000. Therefore, across all folds the model potentially constructs up to 8×50000=400,000 decision trees before early stopping.", "label": true }, { "paperid": "2410.19231v1", "paper_path": "./SciVer/papers/2410.19231v1.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19231v1_figure_3.png", "item2_path": "./SciVer/images/2410.19231v1-Table1-1.png", "section": [ "3.5.x" ], "request_id": 784, "origin_statement": "Figure 3 shows tutors speak a median of roughly 42 words per turn, over double students' median, and Table 1 reports a tutor uptake rate of 86%, indicating lengthy tutor turns are associated with high uptake.", "perturbed_statement": "Figure 3 shows tutors speak a median of roughly 32 words per turn, and Table 1 reports a tutor uptake rate of 28%, suggesting moderate-length tutor turns are linked to low uptake.", "perturbed_explanation": "The perturbation is wrong on two counts. 
In Figure 3, the tutor’s median talk time is about 42 words, not 32. And in Table 1, the 0.28 value corresponds to the tutor’s Focusing rate, whereas the Uptake rate is actually 0.86, not 0.28.", "claim": "Figure 3 shows tutors speak a median of roughly 42 words per turn, over double students' median, and Table 1 reports a tutor uptake rate of 86%, indicating lengthy tutor turns are associated with high uptake.", "label": true }, { "paperid": "2410.12968v1", "paper_path": "./SciVer/papers/2410.12968v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.12968v1-Table2-1.png", "item2_path": "./SciVer/images/2410.12968v1-Table3-1.png", "section": [ "3.1" ], "request_id": 807, "origin_statement": "Table 2 shows N05M’s half-light radius is 1.98 pc; Table 3 gives R85% = 3.8 Rhl, so its 85th percentile projected radius equals 3.8×1.98 ≈ 7.5 pc, significantly smaller than N1M’s ~11.3 pc.", "perturbed_statement": "Table 2 shows N05M’s half-light radius is 2.50 pc; Table 3 gives R85% = 4.0 Rhl, so its 85th percentile projected radius equals 4.0×2.50 ≈ 10.0 pc.", "perturbed_explanation": "This is incorrect because Table 2 lists N05M’s half-light radius as 1.98 pc (not 2.50 pc) and Table 3 lists its R85% as 3.8 Rhl (not 4.0 Rhl). The correct 85th percentile radius is 3.8×1.98 ≈ 7.5 pc, not 10.0 pc.", "claim": "Table 2 shows N05M’s half-light radius is 1.98 pc; Table 3 gives R85% = 3.8 Rhl, so its 85th percentile projected radius equals 3.8×1.98 ≈ 7.5 pc, significantly smaller than N1M’s ~11.3 pc.", "label": true }, { "paperid": "2409.14781v4", "paper_path": "./SciVer/papers/2409.14781v4.json", "claim_type": "sequential", "item1": "3(b)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14781v4_figure_3(b).png", "item2_path": "./SciVer/images/2409.14781v4-Table4-1.png", "section": [ "5.3" ], "request_id": 846, "origin_statement": "Fig.3b shows DC-PDD’s AUC rises by 0.13 when text length increases from 64 (0.65) to 512 tokens (0.78). Table 4 shows switching the reference corpus from 1 Gb C4 (0.688) to 10 Gb C4 (0.698) yields only a 0.01 gain. Hence, text length has a larger impact than corpus size.", "perturbed_statement": "Fig.3b shows DC-PDD’s AUC rises by 0.17 when text length increases from 64 (0.63) to 512 tokens (0.80). Table 4 shows switching the reference corpus from 1 Gb C4 (0.658) to 10 Gb C4 (0.678) yields only a 0.02 gain. Hence, text length has a larger impact than corpus size.", "perturbed_explanation": "The perturbation misreports the AUC values. In Fig. 3b, the actual AUCs are 0.65 at 64 tokens and 0.78 at 512 tokens (not 0.63 and 0.80), so the true increase is 0.13. In Table 4, C4 1 Gb is 0.688 and 10 Gb is 0.698 (not 0.658 and 0.678), so the gain is 0.01. These errors invalidate the perturbed claim.", "claim": "Fig.3b shows DC-PDD’s AUC rises by 0.13 when text length increases from 64 (0.65) to 512 tokens (0.78). Table 4 shows switching the reference corpus from 1 Gb C4 (0.688) to 10 Gb C4 (0.698) yields only a 0.01 gain. 
Hence, text length has a larger impact than corpus size.", "label": true }, { "paperid": "2409.19747v1", "paper_path": "./SciVer/papers/2409.19747v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.19747v1-Table1-1.png", "item2_path": "./SciVer/images/2409.19747v1-Table2-1.png", "section": [ "2.1" ], "request_id": 849, "origin_statement": "Since Table1 lists ACL and EMNLP as major NLP venues, and Table2 shows that every ChartQA paper is marked ✓ under “Deep Learning based,” we infer that NLP conferences like ACL and EMNLP have hosted deep learning–based Chart Question Answering research.", "perturbed_statement": "Since Table1 lists ACL and EMNLP as major NLP venues, and Table2 shows that every ChartQA paper is marked ✗ under “Rule based,” we infer that NLP conferences like ACL and EMNLP have hosted rule–based Chart Question Answering research.", "perturbed_explanation": "The perturbed claim is false because Table2’s “How?” columns indicate all ChartQA works (e.g., Kim et al. [KHA20], OpenCQA [KDL*22], SciGraphQA [LT23]) are ✓ under “Deep Learning based” and ✗ under “Rule based.” In other words, they do not use rule-based methods as claimed.", "claim": "Since Table1 lists ACL and EMNLP as major NLP venues, and Table2 shows that every ChartQA paper is marked ✓ under “Deep Learning based,” we infer that NLP conferences like ACL and EMNLP have hosted deep learning–based Chart Question Answering research.", "label": true }, { "paperid": "2409.16670v1", "paper_path": "./SciVer/papers/2409.16670v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16670v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.16670v1_figure_3(b).png", "section": [ "5.2.2" ], "request_id": 852, "origin_statement": "From 2-shot to 3-shot, GRACE_t's accuracy increases by 7% (from 54% to 61%), and GraphLoRA's improvement over GRACE_t decreases by 5% (from 21% to 16%).", "perturbed_statement": "From 2-shot to 3-shot, GRACE_t's accuracy increases by 5%, and GraphLoRA's improvement over GRACE_t decreases by 5%.", "perturbed_explanation": "The perturbed claim is false because the actual accuracy of GRACE_t rises from 54% at 2-shot to 61% at 3-shot, a 7% increase, not 5% as stated.", "claim": "From 2-shot to 3-shot, GRACE_t's accuracy increases by 7% (from 54% to 61%), and GraphLoRA's improvement over GRACE_t decreases by 5% (from 21% to 16%).", "label": true }, { "paperid": "2409.10104v1", "paper_path": "./SciVer/papers/2409.10104v1.json", "claim_type": "sequential", "item1": "4(b)", "item2": "4(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10104v1_figure_4(b).png", "item2_path": "./SciVer/images/2409.10104v1_figure_4(d).png", "section": [ "4.2" ], "request_id": 858, "origin_statement": "Tripling model size from vit-base (~400 MB) to vit-large (~1200 MB) yields only a +0.004 F1 improvement (from ~0.959 to ~0.963) on the 1600-example per-category set.", "perturbed_statement": "Tripling model size from vit-base (~400 MB) to vit-large (~1200 MB) yields a −0.008 F1 change (from ~0.955 to ~0.947) on the 1600-example per-category set.", "perturbed_explanation": "The perturbed claim misreports the F₁ scores and their trend. 
In Figure 4 (1600 examples), vit-base actually scores ~0.959 and vit-large ~0.963—not 0.955 and 0.947—and the larger model outperforms the smaller by ~0.004, not underperforms by 0.008.", "claim": "Tripling model size from vit-base (~400 MB) to vit-large (~1200 MB) yields only a +0.004 F1 improvement (from ~0.959 to ~0.963) on the 1600-example per-category set.", "label": true }, { "paperid": "2411.11736v1", "paper_path": "./SciVer/papers/2411.11736v1.json", "claim_type": "sequential", "item1": "1(b)", "item2": "1(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11736v1_figure_1(b).png", "item2_path": "./SciVer/images/2411.11736v1_figure_1(c).png", "section": [ "3" ], "request_id": 888, "origin_statement": "Figure 1 indicates the Transformer Encoder weights are frozen in stage 1 then unfrozen in stage 2. Context states one CCH handles M4GT sub‐source detection (6 classes), so this head leverages encoder fine-tuning in stage 2 to enhance its 6-class classification.", "perturbed_statement": "Figure 1 indicates the Transformer Encoder weights are frozen in both training stages. Context states one CCH handles M4GT sub-source detection (6 classes), so this head struggles to learn refined 6-class features.", "perturbed_explanation": "The perturbation is incorrect because Figure 1’s caption explicitly states that the Transformer Encoder is “frozen at the first stage of training and trainable at the second one.” The claim that it remains frozen in both stages contradicts this detail, invalidating the conclusion about the M4GT head’s learning ability.", "claim": "Figure 1 indicates the Transformer Encoder weights are frozen in stage 1 then unfrozen in stage 2. Context states one CCH handles M4GT sub‐source detection (6 classes), so this head leverages encoder fine-tuning in stage 2 to enhance its 6-class classification.", "label": true }, { "paperid": "2411.06382v1", "paper_path": "./SciVer/papers/2411.06382v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.06382v1-Table1-1.png", "item2_path": "./SciVer/images/2411.06382v1-Table2-1.png", "section": [ "2.4" ], "request_id": 889, "origin_statement": "Because Izz is the smallest principal moment of inertia (4.5×10⁻⁷ g·m²) in Table 1 and the mean yaw RMSE (6.01°) in Table 2 exceeds roll (5.27°) and pitch (5.17°), the CEKF shows its largest orientation error around the z-axis, which has the least inertia.", "perturbed_statement": "Because Iyy is the smallest principal moment of inertia (1.34×10⁻⁶ g·m²) in Table 1 and the mean pitch RMSE (5.17°) in Table 2 exceeds roll (5.27°) and yaw (6.01°), the CEKF shows its largest orientation error around the y-axis, which has the least inertia.", "perturbed_explanation": "This is incorrect because Table 1 actually lists Izz (4.5×10⁻⁷ g·m²) as the smallest moment of inertia, not Iyy (1.34×10⁻⁶ g·m²). 
Furthermore, Table 2 shows yaw RMSE (6.01°) is the highest, not pitch RMSE (5.17°), so the largest error is around the z-axis, not the y-axis.", "claim": "Because Izz is the smallest principal moment of inertia (4.5×10⁻⁷ g·m²) in Table 1 and the mean yaw RMSE (6.01°) in Table 2 exceeds roll (5.27°) and pitch (5.17°), the CEKF shows its largest orientation error around the z-axis, which has the least inertia.", "label": true }, { "paperid": "2409.06178v1", "paper_path": "./SciVer/papers/2409.06178v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.06178v1-Table1-1.png", "item2_path": "./SciVer/images/2409.06178v1-Table3-1.png", "section": [ "6.5" ], "request_id": 899, "origin_statement": "DIY participants had overall accuracy SD of 20% (Table 1) and extra-hard task SD of 0.63 tasks (Table 3). Given 2 extra-hard tasks, this equates to 31.5% SD, indicating higher variability on extra-hard tasks than overall.", "perturbed_statement": "DIY participants had overall accuracy SD of 20% (Table 1) but extra-hard task SD was only 0.35 tasks (Table 3), equating to 17.5% SD, indicating lower variability on extra-hard tasks than overall.", "perturbed_explanation": "The perturbed statement misreads the 0.35 value in Table 3, which is the mean number of correctly completed extra-hard tasks, not the SD. The correct SD for DIY extra-hard tasks in Table 3 is 0.63 tasks, which corresponds to 31.5% variability over two tasks.", "claim": "DIY participants had overall accuracy SD of 20% (Table 1) and extra-hard task SD of 0.63 tasks (Table 3). Given 2 extra-hard tasks, this equates to 31.5% SD, indicating higher variability on extra-hard tasks than overall.", "label": true }, { "paperid": "2410.03399v1", "paper_path": "./SciVer/papers/2410.03399v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.03399v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.03399v1_figure_4(b).png", "section": [ "4.3" ], "request_id": 1051, "origin_statement": "On the MBD dataset with ten million sequences, CoLES and GRU both achieve around 0.82. That performance threshold is not reached on Retail, where no model exceeds 0.56 even at ten million sequences. Thus top MBD models never attain that level on Retail.", "perturbed_statement": "On the MBD dataset with ten million sequences, CoLES and GRU both achieve around 0.82. Surprisingly, on the Retail dataset, CoLES surpasses this and reaches 0.83 at ten million sequences. Thus the top MBD models also attain equivalent accuracy on Retail.", "perturbed_explanation": "This is incorrect because in the Retail plot CoLES's performance peaks at approximately 0.556, not 0.83, so it never reaches the MBD threshold of 0.82.", "claim": "On the MBD dataset with ten million sequences, CoLES and GRU both achieve around 0.82. That performance threshold is not reached on Retail, where no model exceeds 0.56 even at ten million sequences. 
Thus top MBD models never attain that level on Retail.", "label": true }, { "paperid": "2410.11851v1", "paper_path": "./SciVer/papers/2410.11851v1.json", "claim_type": "sequential", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.11851v1_figure_2.png", "item2_path": "./SciVer/images/2410.11851v1-Table1-1.png", "section": [ "2" ], "request_id": 1053, "origin_statement": "Fig2’s singularity is pointlike; using ℏ=1.05×10^−34 kg·m^2·s^−1, G=6.67×10^−11 N·m^2·kg^−2 and c=3×10^8 m·s^−1 (Table1), the Planck length √(ℏG/c^3)≈1.6×10^−35 m, so the singularity scale is far smaller than any Schwarzschild radius.", "perturbed_statement": "Fig2’s singularity is pointlike; using ℏ=1.05×10^−34 kg·m^2·s^−1, G=6.67×10^−11 N·m^2·kg^−2 and c=3×10^8 m·s^−1 (Table1), the Planck length √(ℏG/c^3)≈1.6×10^−35 m, so the singularity scale is far larger than any Schwarzschild radius.", "perturbed_explanation": "The perturbation flips the final comparison: it claims 1.6×10^−35 m exceeds Schwarzschild radii, but astrophysical Schwarzschild radii are kilometers (10^3 m) or more. Thus the Planck length is actually many orders of magnitude smaller than any horizon radius, contradicting the statement.", "claim": "Fig2’s singularity is pointlike; using ℏ=1.05×10^−34 kg·m^2·s^−1, G=6.67×10^−11 N·m^2·kg^−2 and c=3×10^8 m·s^−1 (Table1), the Planck length √(ℏG/c^3)≈1.6×10^−35 m, so the singularity scale is far smaller than any Schwarzschild radius.", "label": true }, { "paperid": "2409.11867v1", "paper_path": "./SciVer/papers/2409.11867v1.json", "claim_type": "sequential", "item1": "4", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11867v1_figure_4.png", "item2_path": "./SciVer/images/2409.11867v1-Table4-1.png", "section": [ "5.4" ], "request_id": 1088, "origin_statement": "Because Fig 4(a) interleaves one Transformer block for every seven Mamba blocks, StableMamba-T uses 1.2 G FLOPs at 224², which is 0.1 G more than VideoMamba-T’s 1.1 G.", "perturbed_statement": "Because Fig 4(a) interleaves one Transformer block for every seven Mamba blocks, StableMamba-T uses 1.1 G FLOPs at 224², which is 0.1 G less than VideoMamba-T’s 1.2 G.", "perturbed_explanation": "Table 4 actually reports StableMamba-T at 1.2 G FLOPs (not 1.1 G) and VideoMamba-T at 1.1 G (not 1.2 G), so both the reported FLOPs and their relative difference are incorrect.", "claim": "Because Fig 4(a) interleaves one Transformer block for every seven Mamba blocks, StableMamba-T uses 1.2 G FLOPs at 224², which is 0.1 G more than VideoMamba-T’s 1.1 G.", "label": true }, { "paperid": "2410.00034v1", "paper_path": "./SciVer/papers/2410.00034v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.00034v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.00034v1_figure_2(b).png", "section": [ "2.5" ], "request_id": 1094, "origin_statement": "Chart (a) shows there were 11 papers in 2020, and chart (b) shows MDPI published 17 papers; therefore, MDPI’s total publications exceed the entire 2020 output by six.", "perturbed_statement": "Chart (a) shows there were 11 papers in 2020, and chart (b) shows MDPI published 15 papers; therefore, MDPI’s total publications exceed the entire 2020 output by four.", "perturbed_explanation": "The perturbed statement misreads chart (b): MDPI actually published 17 papers, not 15. 
Thus the premise ‘MDPI published 15 papers’ is incorrect and the true difference from 2020’s 11 papers is six, not four.", "claim": "Chart (a) shows there were 11 papers in 2020, and chart (b) shows MDPI published 17 papers; therefore, MDPI’s total publications exceed the entire 2020 output by six.", "label": true }, { "paperid": "2409.03771v1", "paper_path": "./SciVer/papers/2409.03771v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03771v1_figure_4(a).png", "item2_path": "./SciVer/images/2409.03771v1_figure_6(b).png", "section": [ "4.1" ], "request_id": 1103, "origin_statement": "At 4 nodes, DPC alone finishes a 512³ Perlin Noise grid in about 1.6 s (Fig 4). Meanwhile, computing connected components with DPC on the same grid and nodes takes about 18 s (Fig 6), making the CC stage roughly 11× slower than the main DPC pass.", "perturbed_statement": "At 4 nodes, DPC alone finishes a 512³ Perlin Noise grid in about 2.4 s (Fig 4). Meanwhile, computing connected components with DPC on the same grid and nodes takes about 18 s (Fig 6), making the CC stage roughly 7× slower than the main DPC pass.", "perturbed_explanation": "The perturbation misreads the DPC runtime at 4 nodes: Fig 4 shows ~1.6 s, not 2.4 s. Using the correct 1.6 s gives a ratio of 18 s/1.6 s≈11×, so the CC stage is ~11× slower, not ~7×.", "claim": "At 4 nodes, DPC alone finishes a 512³ Perlin Noise grid in about 1.6 s (Fig 4). Meanwhile, computing connected components with DPC on the same grid and nodes takes about 18 s (Fig 6), making the CC stage roughly 11× slower than the main DPC pass.", "label": true }, { "paperid": "2410.08737v1", "paper_path": "./SciVer/papers/2410.08737v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.08737v1-Table3-1.png", "item2_path": "./SciVer/images/2410.08737v1-Table4-1.png", "section": [ "5.3" ], "request_id": 1120, "origin_statement": "End users contribute about 84% of filtered UPnP endpoint exposures: they have 107 UPnP services (Table 4) out of 127 filtered UPnP endpoints (Table 3).", "perturbed_statement": "End users contribute only about 56% of filtered UPnP endpoint exposures: they have 71 UPnP services out of 127 filtered UPnP endpoints.", "perturbed_explanation": "The perturbed claim misstates the number of end-user UPnP services. Table 4 actually lists 107 end-user UPnP services, not 71. Using the correct 107 (not 71) against 127 filtered endpoints yields ≈84%, so the 56% figure is incorrect.", "claim": "End users contribute about 84% of filtered UPnP endpoint exposures: they have 107 UPnP services (Table 4) out of 127 filtered UPnP endpoints (Table 3).", "label": true }, { "paperid": "2409.19173v1", "paper_path": "./SciVer/papers/2409.19173v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19173v1_figure_2.png", "item2_path": "./SciVer/images/2409.19173v1_figure_3.png", "section": [ "5" ], "request_id": 1157, "origin_statement": "Beta(1.2,2) in Figure 2 is right‐skewed, concentrating sample densities below 0.4. Figure 3 shows the highest merged F1-score (~0.85) in the 0.2–0.4 density range. 
Thus, the skewed sampling effectively targeted the optimal density region for improved performance.", "perturbed_statement": "Beta(1.2,2) in Figure 2 is left-skewed, concentrating sample densities above 0.6. Figure 3 shows the highest merged F1-score (~0.85) in the 0.6–0.8 density range. Thus, the sampling strategy emphasized high-density regions for peak performance.", "perturbed_explanation": "This is incorrect because Figure 2’s Beta(1.2,2) distribution is right-skewed (heavier mass near zero), not left-skewed, and Figure 3 actually shows the best F1 (~0.85) occurs in the 0.2–0.4 density range, not at 0.6–0.8.", "claim": "Beta(1.2,2) in Figure 2 is right‐skewed, concentrating sample densities below 0.4. Figure 3 shows the highest merged F1-score (~0.85) in the 0.2–0.4 density range. Thus, the skewed sampling effectively targeted the optimal density region for improved performance.", "label": true }, { "paperid": "2410.01159v2", "paper_path": "./SciVer/papers/2410.01159v2.json", "claim_type": "sequential", "item1": "2", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01159v2_figure_2.png", "item2_path": "./SciVer/images/2410.01159v2_figure_5.png", "section": [ "5.1" ], "request_id": 1165, "origin_statement": "Figure 2 shows Type 1’s proportion in covariate group 5 rising from ~0.4 in 1976 to ~0.75 by 2013; consequently, Figure 5 shows CLR lower bounds for Type 1 in group 5 remain above zero until around the mid-1990s, indicating an initial positive gender gap in high-wage group.", "perturbed_statement": "Figure 2 shows Type 1’s proportion in covariate group 5 dropping from ~0.75 in 1976 to ~0.4 by 2013; consequently, Figure 5 shows CLR lower bounds for Type 1 in group 5 remain above zero throughout the entire period, indicating a persistent gender gap.", "perturbed_explanation": "The perturbation is wrong because Figure 2’s green series (covariate group 5) clearly increases from about 0.4 in 1976 to about 0.75 in 2013, not decreases. Moreover, in Figure 5 the CLR lower bound for Type 1 in group 5 falls below zero after the late 1990s, contradicting the claim that it stays positive.", "claim": "Figure 2 shows Type 1’s proportion in covariate group 5 rising from ~0.4 in 1976 to ~0.75 by 2013; consequently, Figure 5 shows CLR lower bounds for Type 1 in group 5 remain above zero until around the mid-1990s, indicating an initial positive gender gap in high-wage group.", "label": true }, { "paperid": "2411.14355v1", "paper_path": "./SciVer/papers/2411.14355v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.14355v1-Table1-1.png", "item2_path": "./SciVer/images/2411.14355v1-Table3-1.png", "section": [ "4" ], "request_id": 1176, "origin_statement": "KK capture, at 72.7% of 2νDEC events (Table 1), imparts an absolute uncertainty of 0.2%×72.7%≈0.15% on the half-life via the 2νDEC model (Table 3), which is negligible compared to the 14.1% statistical uncertainty.", "perturbed_statement": "KK capture, at 32.3% of 2νDEC events (Table 1), imparts an absolute uncertainty of 2.2%×32.3%≈0.71% on the half-life via the fit range model (Table 3), which is negligible compared to the 14.1% statistical uncertainty.", "perturbed_explanation": "Table 1 lists the KK branching fraction as 72.7%, not 32.3%. Table 3 shows the 2νDEC model uncertainty is 0.2% (not 2.2%) and the fit range uncertainty applies to the ROI, not the model itself. 
Thus the claimed 0.71% uncertainty is incorrect.", "claim": "KK capture, at 72.7% of 2νDEC events (Table 1), imparts an absolute uncertainty of 0.2%×72.7%≈0.15% on the half-life via the 2νDEC model (Table 3), which is negligible compared to the 14.1% statistical uncertainty.", "label": true }, { "paperid": "2409.07703v1", "paper_path": "./SciVer/papers/2409.07703v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.07703v1-Table5-1.png", "item2_path": "./SciVer/images/2409.07703v1-Table6-1.png", "section": [ "3.2.1" ], "request_id": 1193, "origin_statement": "Table 5 shows AutoGen with GPT-4o achieves 31.73% task-level accuracy, and Figure 6 shows its accuracy falls to about 28% when input length exceeds the largest bin, indicating its performance degrades for very long contexts.", "perturbed_statement": "Table 5 shows AutoGen with GPT-4o achieves 28.68% accuracy, and Figure 6 shows its accuracy falls to about 20% when input length exceeds the largest bin, indicating its performance degrades for very long contexts.", "perturbed_explanation": "The perturbed claim misreports two concrete values: Table 5 lists 31.73% as the task-level accuracy for AutoGen with GPT-4o (28.68% is the competition-level accuracy), and Figure 6 actually shows a drop to around 28%, not to 20%.", "claim": "Table 5 shows AutoGen with GPT-4o achieves 31.73% task-level accuracy, and Figure 6 shows its accuracy falls to about 28% when input length exceeds the largest bin, indicating its performance degrades for very long contexts.", "label": true }, { "paperid": "2411.12852v1", "paper_path": "./SciVer/papers/2411.12852v1.json", "claim_type": "sequential", "item1": "6(b)", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.12852v1_figure_6(b).png", "item2_path": "./SciVer/images/2411.12852v1-Table6-1.png", "section": [ "4.2.1" ], "request_id": 1207, "origin_statement": "From the Proposed method’s confusion matrix for DEAP→SEED, there are 7904 false negatives and 41551 true positives for positive emotions. Dividing 41551 by their sum yields 84.02% sensitivity, matching the Se value in Table 6.", "perturbed_statement": "From the Proposed method’s confusion matrix for DEAP→SEED, there are 8872 false negatives and 40583 true positives for positive emotions. Dividing 40583 by their sum yields 82.06% sensitivity, matching the Se value in Table 6.", "perturbed_explanation": "The perturbed statement conflates the Proposed method with Model D’s counts. The actual Proposed method confusion matrix shows 7904 false negatives and 41551 true positives, not 8872 and 40583. Consequently, the true sensitivity is 84.02%, not 82.06%, contradicting Table 6.", "claim": "From the Proposed method’s confusion matrix for DEAP→SEED, there are 7904 false negatives and 41551 true positives for positive emotions. Dividing 41551 by their sum yields 84.02% sensitivity, matching the Se value in Table 6.", "label": true }, { "paperid": "2410.03039v1", "paper_path": "./SciVer/papers/2410.03039v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.03039v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.03039v1-Table2-1.png", "section": [ "5.2" ], "request_id": 1224, "origin_statement": "At 10 training images, FineXtract achieves an AS of approximately 0.55. 
This exceeds the Direct Text2Img+Clustering AS on AltDiffusion (0.282) by about 0.27, demonstrating stronger extraction performance across DM architectures.", "perturbed_statement": "At 10 training images, FineXtract achieves an AS of approximately 0.55. This exceeds the Direct Text2Img+Clustering AS on AltDiffusion (0.40) by about 0.15, demonstrating stronger extraction performance across DM architectures.", "perturbed_explanation": "The perturbed statement misreports the Direct Text2Img+Clustering AS on AltDiffusion as 0.40, whereas Table 2 shows it is actually 0.282. This inflated baseline makes the claimed improvement and difference incorrect.", "claim": "At 10 training images, FineXtract achieves an AS of approximately 0.55. This exceeds the Direct Text2Img+Clustering AS on AltDiffusion (0.282) by about 0.27, demonstrating stronger extraction performance across DM architectures.", "label": true },
{ "paperid": "2410.08207v1", "paper_path": "./SciVer/papers/2410.08207v1.json", "claim_type": "sequential", "item1": "3", "item2": "7(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.08207v1_figure_3.png", "item2_path": "./SciVer/images/2410.08207v1_figure_7(d).png", "section": [ "3.2" ], "request_id": 7, "origin_statement": "After t exceeds 500 where mutual information between z_t and x0 drops to about 1% of its initial value (figure 3), using a λ₁=0.7 exponential schedule yields the lowest structure distance (~0.005) at CLIP similarity ~20.4 compared to linear and uniform (figure 7).", "perturbed_statement": "After t exceeds 500 where mutual information between z_t and x0 drops to about 1% of its initial value (figure 3), using a λ₁=0.7 uniform schedule yields the lowest structure distance (~0.005) at CLIP similarity ~20.4 compared to linear and exponential (figure 7).", "perturbed_explanation": "Figure 7 shows that the uniform schedule (red squares) at CLIP ~20.4 corresponds to a structure distance above ~0.003, whereas the exponential schedule (black crosses) achieves the lowest structure distance (~0.0008–0.006). Thus the uniform schedule does not yield the minimal distortion.", "claim": "After t exceeds 500 where mutual information between z_t and x0 drops to about 1% of its initial value (figure 3), using a λ₁=0.7 exponential schedule yields the lowest structure distance (~0.005) at CLIP similarity ~20.4 compared to linear and uniform (figure 7).", "label": true },
{ "paperid": "2410.07073v2", "paper_path": "./SciVer/papers/2410.07073v2.json", "claim_type": "sequential", "item1": "11", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07073v2_figure_11.png", "item2_path": "./SciVer/images/2410.07073v2-Table2-1.png", "section": [ "3" ], "request_id": 9, "origin_statement": "Figure 11 shows Pixtral’s response earns a judge rating of 8 versus QwenVL-7B’s 3. Table 2 then reveals Pixtral’s average MM-MT-Bench score of 6.05 exceeds Qwen-2-VL 7B’s 5.45, indicating Pixtral consistently outperforms QwenVL across benchmarks.", "perturbed_statement": "Figure 11 shows Pixtral’s response earns a judge rating of 8 versus QwenVL-7B’s 3. Table 2 then reveals Pixtral’s average MM-MT-Bench score of 5.45, which is lower than Qwen-2-VL 7B’s 6.05, suggesting Pixtral underperforms QwenVL overall.", "perturbed_explanation": "This is incorrect because Table 2 actually lists Pixtral’s average MM-MT-Bench score as 6.05 and Qwen-2-VL 7B’s as 5.45. The perturbed claim swaps these two values, contradicting the table’s concrete figures.", "claim": "Figure 11 shows Pixtral’s response earns a judge rating of 8 versus QwenVL-7B’s 3. Table 2 then reveals Pixtral’s average MM-MT-Bench score of 6.05 exceeds Qwen-2-VL 7B’s 5.45, indicating Pixtral consistently outperforms QwenVL across benchmarks.", "label": true },
{ "paperid": "2409.03911v1", "paper_path": "./SciVer/papers/2409.03911v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(m)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03911v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.03911v1_figure_1(m).png", "section": [ "1" ], "request_id": 12, "origin_statement": "The historical car’s separate front and rear fenders and running board indicate varied early automobile forms. The modern car’s smooth, integrated fenders and unified body reveal standardized design. Therefore, modern cars exhibit less structural diversity than vintage cars.", "perturbed_statement": "The historical car’s smooth, integrated fenders suggest early vehicles shared uniform body designs. The modern car’s pronounced separate wheel arches and exposed fenders imply increased stylistic variation. Thus, modern cars display higher structural diversity than vintage cars.", "perturbed_explanation": "In the historical image, the car clearly has distinct front and rear fenders mounted above the wheels and a running board—these are not smooth, integrated fenders. In the modern image, the vehicle shows a seamless body with integrated fenders and no pronounced exposed wheel arches. Both mischaracterizations contradict the actual visual details.", "claim": "The historical car’s separate front and rear fenders and running board indicate varied early automobile forms. The modern car’s smooth, integrated fenders and unified body reveal standardized design. Therefore, modern cars exhibit less structural diversity than vintage cars.", "label": true },
{ "paperid": "2410.15939v1", "paper_path": "./SciVer/papers/2410.15939v1.json", "claim_type": "sequential", "item1": "10", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.15939v1_figure_10.png", "item2_path": "./SciVer/images/2410.15939v1-Table1-1.png", "section": [ "4.1" ], "request_id": 13, "origin_statement": "From Figure 10, when graph edges double to 2×, the sink query F1 drops to approximately 0.27. Table 1 shows Mistral’s GraphViz encoding sink performance is exactly 0.27. Therefore, Figure 10 likely depicts GraphViz‐encoded sink queries across complexities.", "perturbed_statement": "From Figure 10, when graph edges double to 2×, the sink query F1 drops to approximately 0.27. Table 1 shows Mistral’s Adjacency encoding sink performance is exactly 0.27. Therefore, Figure 10 likely depicts Adjacency‐encoded sink queries across complexities.", "perturbed_explanation": "This claim is incorrect because Table 1 actually lists Mistral’s Adjacency encoding sink F1 as 0.15, not 0.27. The 0.27 value corresponds to the GraphViz encoding, so attributing it to Adjacency encoding contradicts the table data.", "claim": "From Figure 10, when graph edges double to 2×, the sink query F1 drops to approximately 0.27. Table 1 shows Mistral’s GraphViz encoding sink performance is exactly 0.27. Therefore, Figure 10 likely depicts GraphViz‐encoded sink queries across complexities.", "label": true },
{ "paperid": "2410.09982v3", "paper_path": "./SciVer/papers/2410.09982v3.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09982v3_figure_4(a).png", "item2_path": "./SciVer/images/2410.09982v3_figure_4(b).png", "section": [ "3.1", "3.3" ], "request_id": 15, "origin_statement": "At prune block size 4, Self-Data FT recovers 93.29% average performance (table), and its mean embedding similarity of 0.89 (image) exceeds SFT’s 0.85, confirming reduced distribution shift.", "perturbed_statement": "At prune block size 4, SFT recovers 93.29% average performance (table), and its mean embedding similarity of 0.89 exceeds Self-Data FT’s 0.85, confirming reduced distribution shift.", "perturbed_explanation": "The table lists SFT’s average recovery at block size 4 as 84.52%, not 93.29%. The histogram shows SFT’s mean similarity is 0.85 (purple dot), not 0.89. These errors directly contradict the context.", "claim": "At prune block size 4, Self-Data FT recovers 93.29% average performance (table), and its mean embedding similarity of 0.89 (image) exceeds SFT’s 0.85, confirming reduced distribution shift.", "label": true },
{ "paperid": "2409.01901v1", "paper_path": "./SciVer/papers/2409.01901v1.json", "claim_type": "sequential", "item1": "7(c)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01901v1_figure_7(c).png", "item2_path": "./SciVer/images/2409.01901v1-Table2-1.png", "section": [ "3.3" ], "request_id": 22, "origin_statement": "Because StretchSense gloves frequently confuse the neutral ‘five’ handshape (Fig.7) and SEMLEX offers 49,274 ASL reference videos versus SB NGT’s 1,000, models trained on SEMLEX have over 48 k additional ‘five’ samples likely improving its disambiguation.", "perturbed_statement": "Because StretchSense gloves frequently confuse the neutral ‘five’ handshape (Fig.7) and SB NGT offers 49,274 NGT reference videos versus SEMLEX’s 1,000, models trained on SB NGT have over 48 k additional ‘five’ samples likely improving its disambiguation.", "perturbed_explanation": "The perturbed claim wrongly attributes 49,274 reference videos to SB NGT. Table 2 shows that the 49,274 videos belong to SEMLEX (ASL), while SB NGT only provides 1,000 reference videos, making the premise invalid.", "claim": "Because StretchSense gloves frequently confuse the neutral ‘five’ handshape (Fig.7) and SEMLEX offers 49,274 ASL reference videos versus SB NGT’s 1,000, models trained on SEMLEX have over 48 k additional ‘five’ samples likely improving its disambiguation.", "label": true },
{ "paperid": "2409.08851v1", "paper_path": "./SciVer/papers/2409.08851v1.json", "claim_type": "sequential", "item1": "6(b)", "item2": "6(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08851v1_figure_6(b).png", "item2_path": "./SciVer/images/2409.08851v1_figure_6(c).png", "section": [ "3.3" ], "request_id": 43, "origin_statement": "At Reτ=395, RK3 exhibits significant damping at kx>10; in Reτ=590, this damping difference reduces; hence RK3’s underprediction of inertial-range energy lessens as Reynolds increases.", "perturbed_statement": "At Reτ=395, RK3 exhibits significant damping at kx>10; in Reτ=590 this damping difference increases; hence RK3’s underprediction of inertial-range energy worsens as Reynolds increases.", "perturbed_explanation": "The perturbed statement claims the damping difference grows at Reτ=590, but Figure 6(c) (CFL=0.5, Reτ=590) shows the energy spectra curves for RK3 and the higher-order schemes converge more closely than in Figure 6(b) (Reτ=395). Thus the damping difference actually decreases, contradicting the perturbation.", "claim": "At Reτ=395, RK3 exhibits significant damping at kx>10; in Reτ=590, this damping difference reduces; hence RK3’s underprediction of inertial-range energy lessens as Reynolds increases.", "label": true },
{ "paperid": "2410.19472v1", "paper_path": "./SciVer/papers/2410.19472v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19472v1_figure_7(a).png", "item2_path": "./SciVer/images/2410.19472v1_figure_7(b).png", "section": [ "3.1" ], "request_id": 50, "origin_statement": "The outflow relative error at mesh level l=1 is 1.41% (from the volumetric flow rate plot), and the integrated wall shear stress error at l=1 is 7.01% (from the WSS plot), making the wall shear stress error about five times larger than the flow error.", "perturbed_statement": "The outflow relative error at mesh level l=1 is 1.41% (from the volumetric flow rate plot), and the integrated wall shear stress error at l=1 is 3.50% (from the WSS plot), making the wall shear stress error about two and a half times larger than the flow error.", "perturbed_explanation": "The perturbed claim misreads the WSS legend: the actual ε_rel for l=1 in the integrated wall shear stress plot is 0.0701 (7.01%), not 0.0350 (3.50%). Therefore the ratio is about five, not two and a half.", "claim": "The outflow relative error at mesh level l=1 is 1.41% (from the volumetric flow rate plot), and the integrated wall shear stress error at l=1 is 7.01% (from the WSS plot), making the wall shear stress error about five times larger than the flow error.", "label": true },
{ "paperid": "2410.13376v1", "paper_path": "./SciVer/papers/2410.13376v1.json", "claim_type": "sequential", "item1": "10", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13376v1_figure_10.png", "item2_path": "./SciVer/images/2410.13376v1-Table6-1.png", "section": [ "4.2" ], "request_id": 51, "origin_statement": "At Re* = 180, Figure 10 shows ε(Re*) ≈ 0.004. Table 6 lists a mean error ε_mean = 5.498×10⁻⁵. Therefore, the error at Re* = 180 is about 73 times larger than the average testing error.", "perturbed_statement": "At Re* = 180, Figure 10 shows ε(Re*) ≈ 0.004. Table 6 lists a mean error ε_mean = 5.498×10⁻³. Therefore, the error at Re* = 180 is about 0.73 times the average testing error.", "perturbed_explanation": "This is wrong because Table 6 actually gives ε_mean = 5.498×10⁻⁵, not 5.498×10⁻³. With the correct mean, 0.004/0.00005498 ≈ 73, not 0.73.", "claim": "At Re* = 180, Figure 10 shows ε(Re*) ≈ 0.004. Table 6 lists a mean error ε_mean = 5.498×10⁻⁵. Therefore, the error at Re* = 180 is about 73 times larger than the average testing error.", "label": true },
{ "paperid": "2409.10343v1", "paper_path": "./SciVer/papers/2409.10343v1.json", "claim_type": "sequential", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10343v1_figure_7.png", "item2_path": "./SciVer/images/2409.10343v1_figure_8.png", "section": [ "7.4.3" ], "request_id": 52, "origin_statement": "Figure 7’s prompt lists NOT INTERESTED BOOKS, indicating a false-positive update; Figure 8’s prompt lists INTERESTED BOOKS, indicating a false-negative update; thus the system both prunes false likes and recovers missed true likes in user profiles.", "perturbed_statement": "Figure 7’s prompt lists INTERESTED BOOKS, indicating a false-negative update; Figure 8’s prompt lists NOT INTERESTED BOOKS, indicating a false-positive update; thus the system both prunes false likes and recovers missed true likes in user profiles.", "perturbed_explanation": "The perturbation is wrong because Figure 7 is explicitly captioned as the false-positive case with a NOT INTERESTED BOOKS section (not INTERESTED BOOKS), and Figure 8 is the false-negative case with an INTERESTED BOOKS section (not NOT INTERESTED BOOKS).", "claim": "Figure 7’s prompt lists NOT INTERESTED BOOKS, indicating a false-positive update; Figure 8’s prompt lists INTERESTED BOOKS, indicating a false-negative update; thus the system both prunes false likes and recovers missed true likes in user profiles.", "label": true },
{ "paperid": "2410.22387v1", "paper_path": "./SciVer/papers/2410.22387v1.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22387v1_figure_5.png", "item2_path": "./SciVer/images/2410.22387v1-Table4-1.png", "section": [ "4.2" ], "request_id": 55, "origin_statement": "At z-score threshold 5, the PID FOXM1 Pathway is significantly enriched among Gleason 9 random forest features (Figure 5); Table 4 shows it has NES = 2.1334 for Gleason 9, indicating concordant up-regulation in GSEA.", "perturbed_statement": "At z-score threshold 5, the PID FOXM1 Pathway is significantly enriched among Gleason 9 random forest features; Table 4 shows it has NES = –2.1334 for Gleason 9, indicating down-regulation in GSEA.", "perturbed_explanation": "The perturbed statement is incorrect because Table 4 reports a NES of +2.1334 (not –2.1334) for the PID FOXM1 Pathway in Gleason 9, so it is up-regulated, not down-regulated.", "claim": "At z-score threshold 5, the PID FOXM1 Pathway is significantly enriched among Gleason 9 random forest features (Figure 5); Table 4 shows it has NES = 2.1334 for Gleason 9, indicating concordant up-regulation in GSEA.", "label": true },
{ "paperid": "2411.01074v1", "paper_path": "./SciVer/papers/2411.01074v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.01074v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.01074v1-Table2-1.png", "section": [ "5.2" ], "request_id": 57, "origin_statement": "From Figure 3(a), MODA’s average reuse accuracy on CIFAR10 2–10-class sub-tasks is 94.48%; Table 2 reports that MODA improves target-class accuracy by 10.11% in overfitted LeNet5, indicating modules are both highly reusable and effectively enhance weak models.", "perturbed_statement": "From Figure 3(a), MODA’s average reuse accuracy on CIFAR10 sub-tasks is 85%; Table 2 reports that MODA improves target-class accuracy by 15% in overfitted LeNet5, indicating modules are both highly reusable and effectively enhance weak models.", "perturbed_explanation": "The perturbed reuse accuracy (85%) contradicts the 94.48% average reuse accuracy reported in Figure 3(a). The perturbed target-class improvement (15%) contradicts the 10.11% average boost for overfitted LeNet5 shown in Table 2.", "claim": "From Figure 3(a), MODA’s average reuse accuracy on CIFAR10 2–10-class sub-tasks is 94.48%; Table 2 reports that MODA improves target-class accuracy by 10.11% in overfitted LeNet5, indicating modules are both highly reusable and effectively enhance weak models.", "label": true },
{ "paperid": "2411.10399v1", "paper_path": "./SciVer/papers/2411.10399v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10399v1_figure_3.png", "item2_path": "./SciVer/images/2411.10399v1_figure_4.png", "section": [ "2.4" ], "request_id": 63, "origin_statement": "Figure 3’s red curve depicts a crossing trade over two atomic intervals. By Figure 4, each fee f₁ and f₂ is allocated proportional to K₁,m, so LP1’s total reward = (K₁,₁/(K₁,₁+K₂,₁))·f₁ + (K₁,₂/(K₁,₂+K₂,₂))·f₂.", "perturbed_statement": "Figure 3’s red curve depicts a non-crossing trade, so fees f₁ and f₂ are pooled, giving LP1 (K₁,₁+K₁,₂)/(K₁,₁+K₁,₂+K₂,₁+K₂,₂)·(f₁+f₂).", "perturbed_explanation": "This is incorrect because Figure 3 explicitly labels the red trajectory as Case 2: crossing, not non-crossing. Also, Figure 4 shows that fees f₁ and f₂ are distributed separately per atomic interval, not pooled into a single share across both intervals.", "claim": "Figure 3’s red curve depicts a crossing trade over two atomic intervals. By Figure 4, each fee f₁ and f₂ is allocated proportional to K₁,m, so LP1’s total reward = (K₁,₁/(K₁,₁+K₂,₁))·f₁ + (K₁,₂/(K₁,₂+K₂,₂))·f₂.", "label": true },
{ "paperid": "2409.10031v1", "paper_path": "./SciVer/papers/2409.10031v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "2(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10031v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.10031v1_figure_2(a).png", "section": [ "3.1" ], "request_id": 65, "origin_statement": "Figure 2a shows China has 13 BTC-related sanctioned entities. Figure 2b reveals 12 individuals and 1 company among these. Therefore, individuals represent about 92% of China’s sanctioned BTC entities.", "perturbed_statement": "Figure 2a shows China has 13 BTC-related sanctioned entities. Figure 2b reveals 11 individuals and 2 companies among these. Therefore, individuals represent about 85% of China’s sanctioned BTC entities.", "perturbed_explanation": "The perturbed statement incorrectly claims there are 11 individuals and 2 companies in China. According to Figure 2b, there are actually 12 individuals and only 1 company among the 13 BTC-related sanctioned entities in China.", "claim": "Figure 2a shows China has 13 BTC-related sanctioned entities. Figure 2b reveals 12 individuals and 1 company among these. Therefore, individuals represent about 92% of China’s sanctioned BTC entities.", "label": true },
{ "paperid": "2411.07533v1", "paper_path": "./SciVer/papers/2411.07533v1.json", "claim_type": "sequential", "item1": "9", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07533v1_figure_9.png", "item2_path": "./SciVer/images/2411.07533v1-Table2-1.png", "section": [ "5.3" ], "request_id": 66, "origin_statement": "Because German is categorized as low-resource (Table 2), its models achieve form competence around 0.6 (Figure 9), which corresponds to a meaning competence of approximately 0.56.", "perturbed_statement": "Because Chinese is classified as low-resource (Table 2), its models achieve form competence around 0.6 (Figure 9), corresponding to a meaning competence of ~0.56.", "perturbed_explanation": "The perturbed statement mislabels Chinese as \"low-resource\": Table 2 actually shows Chinese as \"Mid\" resource for Llama2 and Llama3 (and \"High\" for Qwen). Moreover, Figure 9’s red points (Chinese) cluster at form competence ~0.85–0.9, not ~0.6.", "claim": "Because German is categorized as low-resource (Table 2), its models achieve form competence around 0.6 (Figure 9), which corresponds to a meaning competence of approximately 0.56.", "label": true },
{ "paperid": "2411.06018v1", "paper_path": "./SciVer/papers/2411.06018v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.06018v1_figure_2.png", "item2_path": "./SciVer/images/2411.06018v1-Table3-1.png", "section": [ "3.2" ], "request_id": 74, "origin_statement": "Figure2 shows that under zero-shot time-series reasoning, Qwen2.5-72B achieves the highest normalized gain in HAR. Table3 reports its absolute HAR accuracy as 29.17%, which is 12.5 percentage points above the random guessing baseline of 16.67%.", "perturbed_statement": "Figure2 shows that under zero-shot time-series reasoning, Qwen2.5-72B achieves the highest normalized gain in HAR. Table3 reports its absolute HAR accuracy as 25.00%, which is 8.33 percentage points above the random guessing baseline of 16.67%.", "perturbed_explanation": "The perturbation is wrong because Table 3 actually lists Qwen2.5-72B’s zero-shot HAR accuracy as 29.17%, not 25.00%. Consequently, the improvement over the 16.67% random baseline is 12.5 points, not 8.33 points.", "claim": "Figure2 shows that under zero-shot time-series reasoning, Qwen2.5-72B achieves the highest normalized gain in HAR. Table3 reports its absolute HAR accuracy as 29.17%, which is 12.5 percentage points above the random guessing baseline of 16.67%.", "label": true },
{ "paperid": "2411.16964v2", "paper_path": "./SciVer/papers/2411.16964v2.json", "claim_type": "sequential", "item1": "6(b)", "item2": "11", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16964v2_figure_6(b).png", "item2_path": "./SciVer/images/2411.16964v2_figure_11.png", "section": [ "4.5.1", "4.5" ], "request_id": 79, "origin_statement": "Because Fig.6 shows MotionWavelet’s dashed blue x-velocity closely tracks the red GT wrist deceleration near frame 40, its wavelet-based model in Fig.11 generates predicted poses at 0.6 s whose limb orientations (black) align more accurately with the blue ground truth skeleton than the phase-based method (top row).", "perturbed_statement": "Since Fig.6 shows DLow’s green velocity curve closest aligning with the red GT wrist deceleration around frame 40, its wavelet-based model in Fig.11 generates predicted poses at 0.6 s whose limbs (black) align less accurately with the blue ground truth skeleton than the phase-based method (top row).", "perturbed_explanation": "The perturbation is wrong because in Fig.6 the green line (DLow) actually deviates more from the red GT curve around frame 40, whereas the blue dashed line (MotionWavelet) aligns best. Moreover, in Fig.11 the bottom-row wavelet predictions visibly match the blue ground truth skeleton more closely than the phase-based (top row), not less accurately.", "claim": "Because Fig.6 shows MotionWavelet’s dashed blue x-velocity closely tracks the red GT wrist deceleration near frame 40, its wavelet-based model in Fig.11 generates predicted poses at 0.6 s whose limb orientations (black) align more accurately with the blue ground truth skeleton than the phase-based method (top row).", "label": true },
{ "paperid": "2409.01988v1", "paper_path": "./SciVer/papers/2409.01988v1.json", "claim_type": "sequential", "item1": "8", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01988v1_figure_8.png", "item2_path": "./SciVer/images/2409.01988v1-Table2-1.png", "section": [ "5.3" ], "request_id": 82, "origin_statement": "Since the classifier baseline achieves 92.09% test accuracy on clean OV-MNIST (Table 2), and its accuracy under AWGN severity level 2 drops to about 48% (Figure 8 left), the classifier loses roughly 44 percentage points due to moderate noise.", "perturbed_statement": "Since the classifier baseline achieves 92.09% test accuracy on clean OV-MNIST, and its accuracy under AWGN severity level 2 drops to only 22%, the classifier loses roughly 70 percentage points due to moderate noise.", "perturbed_explanation": "The perturbation misreads Figure 8: at AWGN severity level 2 the classifier’s accuracy is about 48%, not 22% (22% corresponds to severity 4). Hence the claimed drop to 22% and the 70-point loss contradict the actual data.", "claim": "Since the classifier baseline achieves 92.09% test accuracy on clean OV-MNIST (Table 2), and its accuracy under AWGN severity level 2 drops to about 48% (Figure 8 left), the classifier loses roughly 44 percentage points due to moderate noise.", "label": true },
{ "paperid": "2409.19025v1", "paper_path": "./SciVer/papers/2409.19025v1.json", "claim_type": "sequential", "item1": "2(c)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19025v1_figure_2(c).png", "item2_path": "./SciVer/images/2409.19025v1-Table4-1.png", "section": [ "4.1" ], "request_id": 85, "origin_statement": "Figure 2 shows frustration is one of the highest-rated emotions in the radial plot. Table 4 indicates frustration correlates most strongly with the attack strategy. Therefore, participants predominantly felt frustration when role-playing the attack coping strategy.", "perturbed_statement": "Figure 2 shows frustration is one of the highest-rated emotions in the radial plot. Table 4 indicates frustration correlates most strongly with the contact strategy. Therefore, participants predominantly felt frustration when role-playing the contact coping strategy.", "perturbed_explanation": "This claim is false because Table 4 clearly marks frustration as correlated with the attack strategy (it is bold under “attack”), not with contact. Reassigning frustration’s correlation to contact contradicts the table.", "claim": "Figure 2 shows frustration is one of the highest-rated emotions in the radial plot. Table 4 indicates frustration correlates most strongly with the attack strategy. Therefore, participants predominantly felt frustration when role-playing the attack coping strategy.", "label": true },
{ "paperid": "2409.18903v2", "paper_path": "./SciVer/papers/2409.18903v2.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.18903v2-Table2-1.png", "item2_path": "./SciVer/images/2409.18903v2-Table3-1.png", "section": [ "6" ], "request_id": 87, "origin_statement": "Table 2 shows EOC_k(3/2) at k=4 is 1.03, so before wave breaking the scheme converges at first order. Then Table 3 shows EOC_k(3/5) at k=6 drops to 0.53, indicating less regular initial data yields deteriorating convergence at higher k.", "perturbed_statement": "Table 2 shows EOC_k(3/2) at k=4 is 1.30, so before wave breaking the scheme converges superlinearly. Then Table 3 shows EOC_k(3/5) at k=6 remains above 0.80, indicating even less regular initial data maintains high convergence at higher k.", "perturbed_explanation": "Both premises are false. In Table 2 the EOC_k(3/2) at k=4 is actually 1.03, not 1.30. In Table 3 the EOC_k(3/5) at k=6 is 0.53, not above 0.80. Therefore the claim of superlinear and stable convergence for less regular data contradicts the actual values.", "claim": "Table 2 shows EOC_k(3/2) at k=4 is 1.03, so before wave breaking the scheme converges at first order. Then Table 3 shows EOC_k(3/5) at k=6 drops to 0.53, indicating less regular initial data yields deteriorating convergence at higher k.", "label": true },
{ "paperid": "2409.15951v1", "paper_path": "./SciVer/papers/2409.15951v1.json", "claim_type": "sequential", "item1": "6", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15951v1_figure_6.png", "item2_path": "./SciVer/images/2409.15951v1-Table4-1.png", "section": [ "4" ], "request_id": 90, "origin_statement": "Table4 reports Teff = 7900 ± 100 K; using this, Fig. 6’s purple spectrum aligns with models deeper than both solar and ±0.2 dex (green band), indicating WASP-189 has [Fe/H] ≈ +0.5 dex, confirming its metal-rich nature.", "perturbed_statement": "Table4 reports Teff = 7900 ± 100 K; using this, Fig. 6’s purple spectrum falls within the ±0.2 dex green band, indicating WASP-189 has [Fe/H] ≈ 0 dex, showing it is not metal-rich.", "perturbed_explanation": "In Fig. 6’s inset, the purple best-fit line clearly lies below the entire ±0.2 dex green region at the absorption cores, so it cannot fall within that band. Additionally, Table 4 gives [Fe/H] = 0.5 ± 0.1 dex, not 0 dex, contradicting the claim.", "claim": "Table4 reports Teff = 7900 ± 100 K; using this, Fig. 6’s purple spectrum aligns with models deeper than both solar and ±0.2 dex (green band), indicating WASP-189 has [Fe/H] ≈ +0.5 dex, confirming its metal-rich nature.", "label": true },
{ "paperid": "2410.20797v1", "paper_path": "./SciVer/papers/2410.20797v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20797v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.20797v1_figure_1(c).png", "section": [ "4.4" ], "request_id": 94, "origin_statement": "At α=0.3, Rplg achieves the highest test accuracy (~87.5%) on CIFAR-10, and the convergence curve shows the pseudo-label difference norm drops below 5 by epoch 50, indicating the meta-learned pseudo-labels rapidly converge to the Bayes optimal classifier.", "perturbed_statement": "At α=0.3, Rplg achieves the highest test accuracy (~87.5%) on CIFAR-10, and the convergence curve shows the pseudo-label difference norm drops below 5 by epoch 10, indicating the meta-learned pseudo-labels rapidly converge to the Bayes optimal classifier.", "perturbed_explanation": "The perturbation is incorrect because in the convergence plot, at epoch 10 the norm difference ‖Q⁽ᵗ⁾–Q*‖ is still above ~15, not below 5. It only falls below 5 around epoch 50, so the claim about epoch 10 contradicts the displayed curve.", "claim": "At α=0.3, Rplg achieves the highest test accuracy (~87.5%) on CIFAR-10, and the convergence curve shows the pseudo-label difference norm drops below 5 by epoch 50, indicating the meta-learned pseudo-labels rapidly converge to the Bayes optimal classifier.", "label": true },
{ "paperid": "2410.21705v1", "paper_path": "./SciVer/papers/2410.21705v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21705v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.21705v1_figure_4(b).png", "section": [ "4.4" ], "request_id": 96, "origin_statement": "Figure 4(a) shows that under the route assignment constraint (L_ra), Expert 4 is assigned 20% of new-class samples. Figure 4(b) shows that this model correctly predicts 3277 new-class samples, an increase of 267 compared to the 3010 correct predictions under balanced loss alone (L_bl).", "perturbed_statement": "Figure 4(a) shows that under L_ra, Expert 4 is assigned 25% of new-class samples. Figure 4(b) shows that this model correctly predicts 3377 new-class samples, an increase of 367 compared to the 3010 correct predictions under balanced loss alone (L_bl).", "perturbed_explanation": "The perturbation is incorrect because Figure 4(a) reports Expert 4’s new-class routing probability as 0.20, not 0.25, and Figure 4(b) shows 3277 true new-class predictions under the constraint, not 3377.", "claim": "Figure 4(a) shows that under the route assignment constraint (L_ra), Expert 4 is assigned 20% of new-class samples. Figure 4(b) shows that this model correctly predicts 3277 new-class samples, an increase of 267 compared to the 3010 correct predictions under balanced loss alone (L_bl).", "label": true },
{ "paperid": "2410.13636v1", "paper_path": "./SciVer/papers/2410.13636v1.json", "claim_type": "sequential", "item1": "4", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13636v1_figure_4.png", "item2_path": "./SciVer/images/2410.13636v1_figure_7.png", "section": [ "4.2" ], "request_id": 101, "origin_statement": "The M[γ1γ2] distribution shows a sharp peak at ∼125 GeV (Fig. 4), enabling a tight diphoton mass cut that suppresses background. Consequently, in the 27 TeV HE-LHC at L=3 ab⁻¹ (Fig. 7 middle), virtually all samples with α between –1.6 and –0.8 at tanβ≈3–5 reach s≥5.", "perturbed_statement": "The M[γ1γ2] distribution shows a sharp peak at ∼120 GeV (Fig. 4), enabling a tight diphoton mass cut that suppresses background. Consequently, in the 27 TeV HE-LHC at L=3 ab⁻¹ (Fig. 7 middle), virtually all samples with α between –1.6 and –0.8 at tanβ≈3–5 reach s≥5.", "perturbed_explanation": "The perturbed claim is wrong because Fig. 4 clearly shows the signal M[γ1γ2] peak at about 125 GeV, not 120 GeV. The incorrect peak value undermines the premise for the diphoton mass cut efficiency, invalidating the subsequent inference about achieving s≥5 in the specified α range.", "claim": "The M[γ1γ2] distribution shows a sharp peak at ∼125 GeV (Fig. 4), enabling a tight diphoton mass cut that suppresses background. Consequently, in the 27 TeV HE-LHC at L=3 ab⁻¹ (Fig. 7 middle), virtually all samples with α between –1.6 and –0.8 at tanβ≈3–5 reach s≥5.", "label": true },
{ "paperid": "2411.01006v2", "paper_path": "./SciVer/papers/2411.01006v2.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01006v2_figure_3.png", "item2_path": "./SciVer/images/2411.01006v2_figure_4.png", "section": [ "5.2" ], "request_id": 102, "origin_statement": "From Figure 4’s CW‐circle histogram, code s61 peaks in variate 1. The decoded segment plot for s61 shows an increasing ramp shape. Hence, CW‐circle samples typically include upward ramp segments in variate 1.", "perturbed_statement": "From Figure 4’s CCW‐circle histogram, code s61 peaks in variate 1. The decoded segment plot for s61 shows an increasing ramp shape. Hence, CCW‐circle samples typically include upward ramp segments in variate 1.", "perturbed_explanation": "This is incorrect because in Figure 4’s CCW‐circle row, code s61 does not have the highest frequency in variate 1 (the peak in variate 1 is actually s7), so CCW‐circle samples do not primarily exhibit s61’s upward ramp in variate 1.", "claim": "From Figure 4’s CW‐circle histogram, code s61 peaks in variate 1. The decoded segment plot for s61 shows an increasing ramp shape. Hence, CW‐circle samples typically include upward ramp segments in variate 1.", "label": true },
{ "paperid": "2411.07253v1", "paper_path": "./SciVer/papers/2411.07253v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07253v1_figure_1(a).png", "item2_path": "./SciVer/images/2411.07253v1_figure_1(b).png", "section": [ "4.1", "7.2" ], "request_id": 108, "origin_statement": "At 50 iterations (Fig.1a), ASPGMO-sc's blue points cluster near F1≈0–1000 and F2≈–50–50, closer to the Pareto front than SPGMO or ASPGMO. With 500 iterations (Fig.1b), ASPGMO-sc still yields the lowest F1 and F2, showing faster convergence.", "perturbed_statement": "At 50 iterations (Fig.1a), SPGMO’s red triangles cluster near F1≈0–1000 and F2≈–50–50, closer to the Pareto front than ASPGMO or ASPGMO-sc. With 500 iterations (Fig.1b), SPGMO still yields the lowest F1 and F2, showing fastest convergence.", "perturbed_explanation": "In Fig.1a, SPGMO’s red triangles actually span much larger F1 (up to 8×10^6) and F2 values (tens of thousands), whereas the blue ASPGMO-sc circles lie around F1<1000 and F2≈–50–50. Therefore SPGMO is not closest to the Pareto front in the first stage, invalidating the premise and final claim.", "claim": "At 50 iterations (Fig.1a), ASPGMO-sc's blue points cluster near F1≈0–1000 and F2≈–50–50, closer to the Pareto front than SPGMO or ASPGMO. With 500 iterations (Fig.1b), ASPGMO-sc still yields the lowest F1 and F2, showing faster convergence.", "label": true },
{ "paperid": "2411.13607v2", "paper_path": "./SciVer/papers/2411.13607v2.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13607v2_figure_5.png", "item2_path": "./SciVer/images/2411.13607v2_figure_6.png", "section": [ "4.3" ], "request_id": 109, "origin_statement": "Figure 5 shows VioPose’s red wrist trajectory closely follows subtle ~10 mm ground truth vibrato oscillations, unlike other SoTAs’ straight lines. In Figure 6, VioPose (with audio) exhibits no red circles on wrist predictions across frames, demonstrating perfect vibrato capture.", "perturbed_statement": "Figure 5 shows MixSTE’s red wrist trajectory closely follows subtle ~10 mm ground truth vibrato oscillations, unlike other SoTAs’ straight lines. In Figure 6, VioPose (with audio) shows three red circles on wrist predictions across frames, demonstrating underperforming vibrato capture.", "perturbed_explanation": "This is incorrect because Figure 5 clearly shows MixSTE’s estimated trajectories as largely straight with no fine oscillations—the subtle vibrato is only captured by VioPose. Furthermore, in Figure 6 the VioPose row contains zero red circles around the wrist; all error highlights belong to other models.", "claim": "Figure 5 shows VioPose’s red wrist trajectory closely follows subtle ~10 mm ground truth vibrato oscillations, unlike other SoTAs’ straight lines. In Figure 6, VioPose (with audio) exhibits no red circles on wrist predictions across frames, demonstrating perfect vibrato capture.", "label": true },
{ "paperid": "2411.00513v1", "paper_path": "./SciVer/papers/2411.00513v1.json", "claim_type": "sequential", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00513v1_figure_2.png", "item2_path": "./SciVer/images/2411.00513v1_figure_4.png", "section": [ "3" ], "request_id": 111, "origin_statement": "At the onset of Case BB RLO (vertical dot-dashed line in Fig.2), the He star’s mass loss rate spikes to ~10⁻⁴ M⊙/yr. That moment aligns with the wine triangle in Fig.4 at central T≃6×10⁸ K and ρ≃10⁶ g/cm³, just below the C-ignition curve.", "perturbed_statement": "At the onset of Case BB RLO (vertical dot-dashed line in Fig.2), the He star’s mass loss rate spikes to ~10⁻⁵ M⊙/yr. That moment aligns with the wine triangle in Fig.4 at central T≃7×10⁸ K and ρ≃3×10⁶ g/cm³, above the C-ignition curve, implying C-burning begins before RLO.", "perturbed_explanation": "This is incorrect because Fig.2 shows the RLO-driven mass loss rate rises to about 10⁻⁴ M⊙/yr, not 10⁻⁵. Moreover, in Fig.4 the wine triangle (onset of Case BB RLO) lies at T≈6×10⁸ K and ρ≈10⁶ g/cm³, below (not above) the C-ignition line.", "claim": "At the onset of Case BB RLO (vertical dot-dashed line in Fig.2), the He star’s mass loss rate spikes to ~10⁻⁴ M⊙/yr. That moment aligns with the wine triangle in Fig.4 at central T≃6×10⁸ K and ρ≃10⁶ g/cm³, just below the C-ignition curve.", "label": true },
{ "paperid": "2411.00049v1", "paper_path": "./SciVer/papers/2411.00049v1.json", "claim_type": "sequential", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00049v1_figure_5.png", "item2_path": "./SciVer/images/2411.00049v1-Table1-1.png", "section": [ "5.2" ], "request_id": 117, "origin_statement": "Because FOIL-iter achieves 95.1% accuracy on the Car Eval dataset (Figure 5) and increases Reuters accuracy from 72.21% to 81.74% (Table 1), FOIL-iter consistently boosts accuracy across distinct domains.", "perturbed_statement": "Because FOIL-iter achieves 96% accuracy on the Car Eval dataset and reduces Reuters accuracy from 72.21% to 68.39%, FOIL-iter improves nominal classification but harms text classification.", "perturbed_explanation": "Both premises are incorrect: Figure 5 shows FOIL-iter on Car Eval reaches 95.1%, not 96%. And Table 1 reports FOIL-iter on Reuters raises accuracy to 81.74%, not reducing it to 68.39%.", "claim": "Because FOIL-iter achieves 95.1% accuracy on the Car Eval dataset (Figure 5) and increases Reuters accuracy from 72.21% to 81.74% (Table 1), FOIL-iter consistently boosts accuracy across distinct domains.", "label": true },
{ "paperid": "2409.06166v1", "paper_path": "./SciVer/papers/2409.06166v1.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06166v1_figure_5.png", "item2_path": "./SciVer/images/2409.06166v1-Table4-1.png", "section": [ "4.1" ], "request_id": 124, "origin_statement": "From Figure 5, PromptSRC+RPP improves average 1-shot accuracy by 4.19% over PromptSRC. Table 4 shows RPP raises PromptSRC’s Harmonic Mean from 79.97 to 81.10, a 1.13% gain. Therefore, RPP significantly boosts both few-shot and base-to-new performance.", "perturbed_statement": "From Figure 5, PromptSRC+RPP improves average 1-shot accuracy by 4.19% over PromptSRC. Table 4 shows RPP raises PromptSRC’s Harmonic Mean by 2.43%, claiming >2% HM gain alongside the 4.19% few-shot boost.", "perturbed_explanation": "Table 4 actually reports PromptSRC+RPP’s HM rising from 79.97 to 81.10, which is a 1.13% increase, not 2.43%. The perturbed claim thus misstates the base-to-new HM improvement.", "claim": "From Figure 5, PromptSRC+RPP improves average 1-shot accuracy by 4.19% over PromptSRC. Table 4 shows RPP raises PromptSRC’s Harmonic Mean from 79.97 to 81.10, a 1.13% gain. Therefore, RPP significantly boosts both few-shot and base-to-new performance.", "label": true },
{ "paperid": "2411.06184v1", "paper_path": "./SciVer/papers/2411.06184v1.json", "claim_type": "sequential", "item1": "4(b)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06184v1_figure_4(b).png", "item2_path": "./SciVer/images/2411.06184v1_figure_4(c).png", "section": [ "4.4" ], "request_id": 129, "origin_statement": "From the STBO surface (Fig 4b), the lowest loss is about 0.22 at log C≈−1.5, log γ≈−1. Then the MTBO surface (Fig 4c) shows a deeper valley with minimum loss around 0.18 at similar hyperparameters.", "perturbed_statement": "From the STBO surface (Fig 4b), the lowest loss is about 0.15 at log C≈−1.5, log γ≈−1. Then the MTBO surface (Fig 4c) shows a minimum loss around 0.18, so STBO outperforms MTBO for N=64.", "perturbed_explanation": "The perturbed statement misreads the STBO minimum loss. In Fig 4b the lowest loss is ∼0.22, not 0.15. This incorrect premise invalidates the conclusion that STBO outperforms MTBO, since MTBO actually reaches ∼0.18, lower than STBO’s true ∼0.22.", "claim": "From the STBO surface (Fig 4b), the lowest loss is about 0.22 at log C≈−1.5, log γ≈−1. Then the MTBO surface (Fig 4c) shows a deeper valley with minimum loss around 0.18 at similar hyperparameters.", "label": true },
{ "paperid": "2411.18243v1", "paper_path": "./SciVer/papers/2411.18243v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.18243v1_figure_1(c).png", "item2_path": "./SciVer/images/2411.18243v1-Table1-1.png", "section": [ "2.1" ], "request_id": 132, "origin_statement": "Figure 1 (bottom) shows the PDS built for times >5.8 s revealing two QPO peaks. Table 1 lists their frequencies as 2.8 ± 0.3 Hz and 5.2 ± 0.5 Hz. The near 2:1 ratio implies the 5.2 Hz QPO is the first harmonic of the 2.8 Hz QPO.", "perturbed_statement": "Figure 1 (bottom) shows the PDS built for times >5.8 s revealing two QPO peaks. Table 1 lists their frequencies as 2.8 ± 0.3 Hz and 6.2 ± 0.5 Hz. The near 2:1 ratio implies the 6.2 Hz QPO is the first harmonic of the 2.8 Hz QPO.", "perturbed_explanation": "The perturbation misquotes the second QPO frequency. Table 1 actually reports the second QPO at 5.2 ± 0.5 Hz, not 6.2 ± 0.5 Hz. Therefore the claimed ratio and harmonic interpretation no longer match the data.", "claim": "Figure 1 (bottom) shows the PDS built for times >5.8 s revealing two QPO peaks. Table 1 lists their frequencies as 2.8 ± 0.3 Hz and 5.2 ± 0.5 Hz. The near 2:1 ratio implies the 5.2 Hz QPO is the first harmonic of the 2.8 Hz QPO.", "label": true },
{ "paperid": "2410.02479v1", "paper_path": "./SciVer/papers/2410.02479v1.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02479v1_figure_4.png", "item2_path": "./SciVer/images/2410.02479v1-Table3-1.png", "section": [ "5.4" ], "request_id": 136, "origin_statement": "CrossDex-9-E achieves about 90% success on training hands (Fig4), and DexPilot retargeting yields 89.2% training success (Table3). Therefore, using 9 eigengrasps with DexPilot will achieve approximately 90% training success.", "perturbed_statement": "CrossDex-9-E achieves about 90% success on training hands (Fig4), and DexPilot retargeting yields only 48.2% training success (Table3). Therefore, using 9 eigengrasps with DexPilot will achieve only about 48% training success.", "perturbed_explanation": "The perturbed statement misreads Table 3: the 48.2% figure corresponds to DexPilot’s unseen-hand success, not its training-hand success. Table 3 actually shows DexPilot’s training-hand success is 89.2%, so the claim of 48.2% training success is incorrect.", "claim": "CrossDex-9-E achieves about 90% success on training hands (Fig4), and DexPilot retargeting yields 89.2% training success (Table3). Therefore, using 9 eigengrasps with DexPilot will achieve approximately 90% training success.", "label": true },
{ "paperid": "2410.13995v2", "paper_path": "./SciVer/papers/2410.13995v2.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13995v2_figure_4(a).png", "item2_path": "./SciVer/images/2410.13995v2_figure_4(b).png", "section": [ "6.1" ], "request_id": 139, "origin_statement": "By 4M steps, Q-Incept’s purple line in the ASR plot exceeds 90% success compared to under 10% for Q-Vanilla (cyan). In the return plot, the purple curve also converges around −50, similar to other methods—showing Q-Incept achieves high stealthy attack success without harming performance.", "perturbed_statement": "By 4M steps, Q-Incept’s purple line in the ASR plot only reaches about 80%, and in the return plot its curve converges near −30—suggesting Q-Incept sacrifices task performance for moderate attack success.", "perturbed_explanation": "The perturbed statement is wrong because Figure 4 shows Q-Incept ASR climbs above 90% (not 80%) by 4M steps, and the return plot shows its purple curve stabilizes around −50 (not −30). Both misrepresentations contradict the displayed metrics.", "claim": "By 4M steps, Q-Incept’s purple line in the ASR plot exceeds 90% success compared to under 10% for Q-Vanilla (cyan). In the return plot, the purple curve also converges around −50, similar to other methods—showing Q-Incept achieves high stealthy attack success without harming performance.", "label": true },
{ "paperid": "2411.05689v1", "paper_path": "./SciVer/papers/2411.05689v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05689v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.05689v1_figure_4.png", "section": [ "4.3" ], "request_id": 145, "origin_statement": "Because Figure 3's cumulative histogram shows proposed_5 yields nearly zero negative differences while dual_annealing has a wide negative tail, and Figure 4 shows dual_annealing's CPU time exceeds 100× that of the baseline on many problems, proposed_5 is both more accurate and faster than dual_annealing.", "perturbed_statement": "Because Figure 3's cumulative histogram shows dual_annealing yields almost no negative differences while proposed_5 has a wide negative tail, and Figure 4 shows dual_annealing's CPU time is lower than the baseline for most problems, dual_annealing is both more accurate and faster than proposed_5.", "perturbed_explanation": "This is wrong because in Figure 3 the dual_annealing curve actually accumulates a large negative tail (indicating many worse solutions), whereas proposed_5 hardly ever falls below the baseline. Moreover, Figure 4's left plot shows dual_annealing CPU ratios are always above 1 (often >100), so it is much slower, not faster.", "claim": "Because Figure 3's cumulative histogram shows proposed_5 yields nearly zero negative differences while dual_annealing has a wide negative tail, and Figure 4 shows dual_annealing's CPU time exceeds 100× that of the baseline on many problems, proposed_5 is both more accurate and faster than dual_annealing.", "label": true },
{ "paperid": "2411.07664v1", "paper_path": "./SciVer/papers/2411.07664v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07664v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.07664v1_figure_3(b).png", "section": [ "3.4" ], "request_id": 150, "origin_statement": "Image (b) depicts multiple cubes around a sphere, so it falls under the multiple spatial relations evaluation, which uses batches of 90 images with 9 control examples requiring at least 8 correct control ratings for acceptance.", "perturbed_statement": "Image (b) depicts multiple cubes around a sphere, so it falls under the single spatial relations evaluation, which uses batches of 80 images with 8 control examples requiring at least 7 correct control ratings for acceptance.", "perturbed_explanation": "This is incorrect because Figure 3’s examples are labeled as complex prompts, which are evaluated under the multiple spatial relations setup. That setup uses 90 images per batch with 9 control examples (10%), and requires at least 8 correct control ratings—not the single‐relation setup of 80 images, 8 controls, and 7 correct.", "claim": "Image (b) depicts multiple cubes around a sphere, so it falls under the multiple spatial relations evaluation, which uses batches of 90 images with 9 control examples requiring at least 8 correct control ratings for acceptance.", "label": true },
{ "paperid": "2410.13114v1", "paper_path": "./SciVer/papers/2410.13114v1.json", "claim_type": "sequential", "item1": "7", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13114v1_figure_7.png", "item2_path": "./SciVer/images/2410.13114v1-Table2-1.png", "section": [ "3.1" ], "request_id": 155, "origin_statement": "Mozilla Common Voice provides over 2000 hours of Spanish audio (Figure 8) and documents language metadata (Table 2), so researchers can reliably filter Spanish recordings for linguistic analysis.", "perturbed_statement": "LibriVox provides over 400 hours of Spanish audio (Figure 8) and documents language metadata (Table 2), so researchers can reliably filter Spanish recordings for linguistic analysis.", "perturbed_explanation": "Figure 8 shows LibriVox has only about 100 hours of Spanish audio, not over 400 hours; thus the claim that it provides 400+ Spanish hours is incorrect.", "claim": "Mozilla Common Voice provides over 2000 hours of Spanish audio (Figure 8) and documents language metadata (Table 2), so researchers can reliably filter Spanish recordings for linguistic analysis.", "label": true },
{ "paperid": "2411.17640v1", "paper_path": "./SciVer/papers/2411.17640v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17640v1_figure_2.png", "item2_path": "./SciVer/images/2411.17640v1-Table3-1.png", "section": [ "3.3" ], "request_id": 157, "origin_statement": "From Figure 2, the red dotted line shows an SNR detection threshold at 0.135. Table 3 labels TOI-790.01 as “Very clear,” implying its SNR exceeds 0.135. Indeed, its SNR (~0.5 on the plot) yields an identification rate ≈0.98.", "perturbed_statement": "From Figure 2, the red dotted line shows an SNR detection threshold at 0.135. Table 3 labels TOI-790.01 as “Very clear,” implying its SNR exceeds 0.135. Yet TOI-790.01’s SNR (~0.08 on the plot) only yields an identification rate ≈0.42.", "perturbed_explanation": "The perturbation is wrong because Figure 2 clearly places the vertical dashed mark for TOI-790.01 around a transit SNR of 0.5, not 0.08, and shows its cross-marker at an identification rate near 0.98. Additionally, Table 3’s “Very clear” note confirms a high SNR well above the 0.135 threshold.", "claim": "From Figure 2, the red dotted line shows an SNR detection threshold at 0.135. Table 3 labels TOI-790.01 as “Very clear,” implying its SNR exceeds 0.135. Indeed, its SNR (~0.5 on the plot) yields an identification rate ≈0.98.", "label": true },
{ "paperid": "2411.18328v1", "paper_path": "./SciVer/papers/2411.18328v1.json", "claim_type": "sequential", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.18328v1_figure_5.png", "item2_path": "./SciVer/images/2411.18328v1_figure_7.png", "section": [ "4.5" ], "request_id": 163, "origin_statement": "In Figure 5, ExACT’s features in (b) remain entangled while ours in (c) form distinct clusters; Figure 7 shows EventCrab has 8M fewer parameters than ExACT, so EventCrab achieves superior feature separability with a more compact model.", "perturbed_statement": "In Figure 5, ExACT’s features in (b) appear more separable while ours in (c) overlap; Figure 7 shows EventCrab has 12M fewer parameters than ExACT, so EventCrab underperforms in separability despite a lighter model.", "perturbed_explanation": "This is incorrect because Fig. 5(c) actually shows that our method’s features form more distinct clusters compared to ExACT’s in Fig. 5(b), and Fig. 7 indicates an 8M parameter reduction, not 12M.", "claim": "In Figure 5, ExACT’s features in (b) remain entangled while ours in (c) form distinct clusters; Figure 7 shows EventCrab has 8M fewer parameters than ExACT, so EventCrab achieves superior feature separability with a more compact model.", "label": true },
{ "paperid": "2411.01960v1", "paper_path": "./SciVer/papers/2411.01960v1.json", "claim_type": "sequential", "item1": "8(b)", "item2": "12", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01960v1_figure_8(b).png", "item2_path": "./SciVer/images/2411.01960v1_figure_12.png", "section": [ "7.1" ], "request_id": 166, "origin_statement": "In the IC 348 MMS core the non-debiased polarization–intensity slope is 0.34 (Fig. 8) and the mean grain alignment size is 0.57 μm (Fig. 12), implying dust grains in that core have grown beyond 0.57 μm, larger than typical diffuse ISM sizes.", "perturbed_statement": "In the IC 348 MMS core the non-debiased polarization–intensity slope is 0.34 (Fig. 8) and the mean grain alignment size is 0.27 μm, implying dust grains in that core have grown beyond 0.27 μm, larger than typical diffuse ISM sizes.", "perturbed_explanation": "The alignment size for IC 348 MMS is actually 0.57 μm, not 0.27 μm, as shown in Figure 12 and stated in Section 7.1. Moreover, 0.27 μm lies within the typical diffuse ISM grain size range (0.25–0.3 μm), so grains larger than 0.27 μm are not necessarily indicative of additional growth.", "claim": "In the IC 348 MMS core the non-debiased polarization–intensity slope is 0.34 (Fig. 8) and the mean grain alignment size is 0.57 μm (Fig. 12), implying dust grains in that core have grown beyond 0.57 μm, larger than typical diffuse ISM sizes.", "label": true },
{ "paperid": "2411.01543v1", "paper_path": "./SciVer/papers/2411.01543v1.json", "claim_type": "sequential", "item1": "8", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.01543v1_figure_8.png", "item2_path": "./SciVer/images/2411.01543v1-Table4-1.png", "section": [ "3.2" ], "request_id": 168, "origin_statement": "Figure 8’s iodine images show that ring artifacts persist when using SCFM with ring correction but are largely gone for A-SWIFT with ring correction. Table 4 then shows that the mean iodine E_RMSE for A-SWIFT & RC (0.6 mg/ml) is half that of SCFM & RC (1.2 mg/ml).", "perturbed_statement": "Figure 8’s iodine images show that ring artifacts are fully removed when using SCFM with ring correction but remain for A-SWIFT with ring correction. Table 4 then shows both SCFM & RC and A-SWIFT & RC have the same mean iodine E_RMSE of 0.6 mg/ml.", "perturbed_explanation": "This is incorrect because in Figure 8 the SCFM & RC iodine image still exhibits severe ring artifacts, while the A-SWIFT & RC image is largely artifact-free. Furthermore, Table 4 lists the mean iodine E_RMSE as 1.2 mg/ml for SCFM & RC and 0.6 mg/ml for A-SWIFT & RC, not the same value.", "claim": "Figure 8’s iodine images show that ring artifacts persist when using SCFM with ring correction but are largely gone for A-SWIFT with ring correction. Table 4 then shows that the mean iodine E_RMSE for A-SWIFT & RC (0.6 mg/ml) is half that of SCFM & RC (1.2 mg/ml).", "label": true },
{ "paperid": "2409.02285v1", "paper_path": "./SciVer/papers/2409.02285v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.02285v1-Table4-1.png", "item2_path": "./SciVer/images/2409.02285v1-Table5-1.png", "section": [ "4.2" ], "request_id": 172, "origin_statement": "COVID-19 stringency increased Malawi's FS index by 0.015 (p<0.05; Table 4), and Table 5 shows Baseline FS × Baseline HHI = 0.412* in Malawi, implying that higher initial food insecurity combined with greater specialization led to worse food insecurity post-outbreak.", "perturbed_statement": "COVID-19 stringency decreased Malawi's FS index by 0.015 (p<0.05; Table 4), and Table 5 shows Baseline FS × Baseline HHI = –0.412* in Malawi, implying that higher initial food insecurity combined with greater specialization led to reduced food insecurity post-outbreak.", "perturbed_explanation": "Table 4 actually reports the COVID-19 stringency coefficient for Malawi’s FS index as +0.015* (not a decrease), and Table 5 reports the Baseline FS × Baseline HHI interaction in Malawi as +0.412* (not –0.412). Both sign reversals contradict the tabulated results.", "claim": "COVID-19 stringency increased Malawi's FS index by 0.015 (p<0.05; Table 4), and Table 5 shows Baseline FS × Baseline HHI = 0.412* in Malawi, implying that higher initial food insecurity combined with greater specialization led to worse food insecurity post-outbreak.", "label": true },
{ "paperid": "2410.02001v2", "paper_path": "./SciVer/papers/2410.02001v2.json", "claim_type": "sequential", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02001v2_figure_5.png", "item2_path": "./SciVer/images/2410.02001v2-Table2-1.png", "section": [ "4" ], "request_id": 177, "origin_statement": "At an SNR threshold of 30 dB, CFBS with cvs_th=0.92 yields a minimal spectral angle of about 0.38 (from Fig. 5) and averages 222 wrongly classified objects (Table 2).", "perturbed_statement": "At an SNR threshold of 30 dB, CFBS with cvs_th=0.92 yields a minimal spectral angle of about 0.38 (from Fig. 5) and averages 174 wrongly classified objects.", "perturbed_explanation": "The perturbed statement misreports the number of misclassifications for CFBS_0.92. Table 2 shows CFBS_0.92 has 222 wrongly classified objects, not 174.", "claim": "At an SNR threshold of 30 dB, CFBS with cvs_th=0.92 yields a minimal spectral angle of about 0.38 (from Fig. 5) and averages 222 wrongly classified objects (Table 2).", "label": true },
{ "paperid": "2410.05046v1", "paper_path": "./SciVer/papers/2410.05046v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.05046v1-Table2-1.png", "item2_path": "./SciVer/images/2410.05046v1-Table3-1.png", "section": [ "3.4" ], "request_id": 178, "origin_statement": "Table 2 defines Measurements as laboratory tests or vital signs. Table 3 shows CHIA is the sole dataset that includes Measurement annotations, while NCBI, BC5CDR, and BIORED omit them. Hence, CHIA uniquely captures quantitative clinical observations in our benchmark.", "perturbed_statement": "Table 2 defines Measurements as laboratory tests or vital signs. Table 3 shows BC5CDR is the sole dataset that includes Measurement annotations, while CHIA, NCBI, and BIORED omit them. Hence, BC5CDR uniquely captures quantitative clinical observations in our benchmark.", "perturbed_explanation": "The perturbed statement contradicts Table 3: BC5CDR’s listed entity types are Condition and Drug only, and it does not include Measurements. Conversely, CHIA does include Measurement annotations, so BC5CDR cannot be the sole dataset annotating quantitative clinical observations.", "claim": "Table 2 defines Measurements as laboratory tests or vital signs. Table 3 shows CHIA is the sole dataset that includes Measurement annotations, while NCBI, BC5CDR, and BIORED omit them. Hence, CHIA uniquely captures quantitative clinical observations in our benchmark.", "label": true },
{ "paperid": "2410.20579v1", "paper_path": "./SciVer/papers/2410.20579v1.json", "claim_type": "sequential", "item1": "13", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20579v1_figure_13.png", "item2_path": "./SciVer/images/2410.20579v1-Table2-1.png", "section": [ "5.2" ], "request_id": 182, "origin_statement": "Table 2 shows CSD-iPOT outperforms CSD in marginal calibration in 68 of 104 comparisons; in Figure 13a, the orange CSD curve underestimates elder HFCR survival at predicted 0.4 while the green CSD-iPOT aligns with the ideal; this illustrates CSD-iPOT’s superior subgroup calibration.", "perturbed_statement": "Table 2 shows CSD-iPOT outperforms CSD in marginal calibration in only 20 of 104 comparisons; in Figure 13a, the orange CSD curve underestimates elder HFCR survival at predicted 0.4 while the green CSD-iPOT aligns with the ideal; this reveals inconsistent subgroup performance.", "perturbed_explanation": "The perturbed statement misreports Table 2: under “Compare with CSD,” CSD-iPOT actually wins marginal calibration in 68 out of 104 comparisons (not 20). This incorrect count contradicts the 68 wins shown in Table 2’s Cal_margin Win entry.", "claim": "Table 2 shows CSD-iPOT outperforms CSD in marginal calibration in 68 of 104 comparisons; in Figure 13a, the orange CSD curve underestimates elder HFCR survival at predicted 0.4 while the green CSD-iPOT aligns with the ideal; this illustrates CSD-iPOT’s superior subgroup calibration.", "label": true },
{ "paperid": "2411.17459v1", "paper_path": "./SciVer/papers/2411.17459v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17459v1_figure_7(a).png", "item2_path": "./SciVer/images/2411.17459v1_figure_7(b).png", "section": [ "4.3" ], "request_id": 186, "origin_statement": "At 60K steps, the 32-channel setting achieves a validation LPIPS of about 0.034 (Fig.7a), whereas even the best WL loss weight (λWL≈0.01) only lowers LPIPS to around 0.080 at the same step (Fig.7b), implying latent dimension has a stronger effect on reconstruction quality.", "perturbed_statement": "At 60K steps, the 16-channel setting achieves a validation LPIPS of about 0.034, and the optimal WL loss weight (λWL≈0.01) also lowers LPIPS to around 0.030 by the same point, demonstrating that adjusting WL weight alone can match the effect of increasing latent channels.", "perturbed_explanation": "Figure 7(a) shows that at 60K the 32-channel model (not the 16-channel one) yields LPIPS ≈0.034, while the 16-channel curve is around 0.040. Figure 7(b) also demonstrates the best WL weight only reduces LPIPS to ≈0.080, not ≈0.030, so WL weighting cannot match the latent‐channel improvement.", "claim": "At 60K steps, the 16-channel setting achieves a validation LPIPS of about 0.034, and the optimal WL loss weight (λWL≈0.01) also lowers LPIPS to around 0.030 by the same point, demonstrating that adjusting WL weight alone can match the effect of increasing latent channels.", "label": false },
{ "paperid": "2411.00249v1", "paper_path": "./SciVer/papers/2411.00249v1.json", "claim_type": "sequential", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00249v1_figure_6.png", "item2_path": "./SciVer/images/2411.00249v1-Table2-1.png", "section": [ "7" ], "request_id": 188, "origin_statement": "Figure 6 shows that at the 10th Harary split on WikiElec, neg_out reaches approximately 0.83. Table 2 indicates GraphC executed 25 splits on the WikiElec graph. Therefore, GraphC’s final clustering similarly achieves around 83% of negative edges between communities on its 7,066-node graph.", "perturbed_statement": "Figure 6 shows that at the 10th Harary split on WikiElec, neg_out reaches approximately 0.83. Table 2 indicates GraphC executed only 10 splits on the WikiElec graph. Therefore, GraphC’s final clustering similarly achieves around 83% of negative edges between communities on its 7,066-node graph.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 actually reports that GraphC performed 25 splits on the WikiElec dataset, not 10 splits. Thus the claim about executing only 10 splits contradicts the table’s ‘# splits = 25’ entry for WikiElec.", "claim": "Figure 6 shows that at the 10th Harary split on WikiElec, neg_out reaches approximately 0.83. Table 2 indicates GraphC executed only 10 splits on the WikiElec graph. Therefore, GraphC’s final clustering similarly achieves around 83% of negative edges between communities on its 7,066-node graph.", "label": false },
{ "paperid": "2409.12428v1", "paper_path": "./SciVer/papers/2409.12428v1.json", "claim_type": "sequential", "item1": "2", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.12428v1_figure_2.png", "item2_path": "./SciVer/images/2409.12428v1_figure_5.png", "section": [ "5.1" ], "request_id": 189, "origin_statement": "Figure 2 shows credit_risk_score drifts of about 0.25 for P and 0.22 for UP, implying DCD≈0.03. In Figure 5's NPV-DIFF plot, DCD≈0.03 maps to unfairness around 0.01, indicating low negative predictive value disparity between age groups.", "perturbed_statement": "Figure 2 shows credit_risk_score drifts of about 0.20 for P and 0.30 for UP, implying DCD≈−0.10. In Figure 5's NPV-DIFF plot, a negative DCD of −0.10 corresponds to unfairness around 0.01, suggesting minimal bias across age groups.", "perturbed_explanation": "This is incorrect because Figure 2's credit_risk_score bars are ~0.25 (P) and ~0.22 (UP), so DCD is ~0.03 and positive, not −0.10. Also, Figure 5 does not show any NPV-DIFF point at DCD=−0.10.", "claim": "Figure 2 shows credit_risk_score drifts of about 0.20 for P and 0.30 for UP, implying DCD≈−0.10. In Figure 5's NPV-DIFF plot, a negative DCD of −0.10 corresponds to unfairness around 0.01, suggesting minimal bias across age groups.", "label": false },
{ "paperid": "2411.03177v1", "paper_path": "./SciVer/papers/2411.03177v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03177v1_figure_2.png", "item2_path": "./SciVer/images/2411.03177v1-Table3-1.png", "section": [ "3.3" ], "request_id": 196, "origin_statement": "Figure 2 shows cosine weighting removes size-based semantic drift at 64–1024 pixels. Table 3 shows replicate padding with β_txt = 0.02 yields the lowest FID (6.79). Therefore, combining cosine size weighting with β_txt = 0.02 padding likely yields the strongest image quality.", "perturbed_statement": "Figure 2 shows constant weighting removes size-based semantic drift at 64–1024 pixels. Table 3 shows replicate padding with β_txt = 0.1 yields the lowest FID (7.01). Therefore, combining constant weighting with β_txt = 0.1 padding likely yields the strongest image quality.", "perturbed_explanation": "This is incorrect because Figure 2’s caption explicitly states that constant weighting introduces undesirable correlations, whereas cosine weighting (not constant) disentangles size from content. Moreover, Table 3 reports the lowest FID (6.79) at β_txt = 0.02, not at β_txt = 0.1 (which has FID = 7.01).", "claim": "Figure 2 shows cosine weighting removes size-based semantic drift at 64–1024 pixels. Table 3 shows replicate padding with β_txt = 0.02 yields the lowest FID (6.79). Therefore, combining cosine size weighting with β_txt = 0.02 padding likely yields the strongest image quality.", "label": false },
{ "paperid": "2411.06665v1", "paper_path": "./SciVer/papers/2411.06665v1.json", "claim_type": "sequential", "item1": "6", "item2": "8", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.06665v1-Table6-1.png", "item2_path": "./SciVer/images/2411.06665v1-Table8-1.png", "section": [ "4.3" ], "request_id": 201, "origin_statement": "Table 6 shows adding adaptive weights raises mean accuracy from 83.5% to 85.1%.
Table 8 shows mixup contrast learning with patch-level mixup yields highest mean accuracy of 87.4%. Thus, combining adaptive weights in PWC and patch-level mixup in RMC gives top performance.", "perturbed_statement": "Table 6 shows probability contrast alone raises mean accuracy from 77.1% to 85.1%. Table 8 shows mixup contrast learning with image-level mixup peaks at 87.4%. Thus, combining probability contrast in PWC and image-level mixup in RMC yields the highest overall performance.", "perturbed_explanation": "This is incorrect because in Table 6 probability contrast alone (row 2) yields a mean of 80.0%, not 85.1% (85.1% requires adaptive weighting in row 4). Also, Table 8 shows image-level mixup with mixup contrast reaches only 85.1%, not 87.4% (87.4% is from patch-level mixup in the last row).", "claim": "Table 6 shows probability contrast alone raises mean accuracy from 77.1% to 85.1%. Table 8 shows mixup contrast learning with image-level mixup peaks at 87.4%. Thus, combining probability contrast in PWC and image-level mixup in RMC yields the highest overall performance.", "label": false }, { "paperid": "2409.16911v2", "paper_path": "./SciVer/papers/2409.16911v2.json", "claim_type": "sequential", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16911v2_figure_2.png", "item2_path": "./SciVer/images/2409.16911v2_figure_4.png", "section": [ "4.2" ], "request_id": 204, "origin_statement": "Dimension 218 is the third-largest magnitude feature at the 27th layer (Fig. 2). In Fig. 4, at layer 27, the lower-left quadrant’s averaged overlap (~8%) exceeds the upper-left’s (~5%). This implies translation demonstrations evoke more consistent features than monolingual demonstrations in that layer.", "perturbed_statement": "Dimension 218 is the fifth-largest magnitude feature at the 27th layer, and in Fig. 4 at layer 27 the upper-left quadrant’s overlap (~8%) exceeds the lower-left’s (~5%), suggesting monolingual demonstrations activate more consistent features than translation ones.", "perturbed_explanation": "This is incorrect because Fig. 2 clearly lists 218 as the third, not fifth, largest magnitude. Moreover, Fig. 4 shows at layer 27 the lower-left quadrant (~8%) actually exceeds the upper-left (~5%), opposite to the perturbed claim.", "claim": "Dimension 218 is the fifth-largest magnitude feature at the 27th layer, and in Fig. 4 at layer 27 the upper-left quadrant’s overlap (~8%) exceeds the lower-left’s (~5%), suggesting monolingual demonstrations activate more consistent features than translation ones.", "label": false }, { "paperid": "2411.14318v1", "paper_path": "./SciVer/papers/2411.14318v1.json", "claim_type": "sequential", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.14318v1_figure_3.png", "item2_path": "./SciVer/images/2411.14318v1-Table2-1.png", "section": [ "3.3" ], "request_id": 210, "origin_statement": "Because Velocitune stabilizes Arxiv domain weight at ~0.5 after 3000 steps (Figure 3), Mistral-Velocitune achieves a 4.4% higher overall accuracy than Mistral-CPT on CmdGen-NVIDIA (Table 2).", "perturbed_statement": "Because Velocitune stabilizes Arxiv domain weight at ~0.65 after 5000 steps (Figure 3), Mistral-Velocitune achieves a 3.1% higher overall accuracy than Mistral-CPT on CmdGen-NVIDIA (Table 2).", "perturbed_explanation": "Figure 3 actually shows the Arxiv domain weight peaks around 0.5 by roughly 3000 steps, not 0.65 at 5000 steps. 
Moreover, Table 2 reports a 4.4% accuracy boost for Mistral-Velocitune over Mistral-CPT on CmdGen-NVIDIA, not 3.1%.", "claim": "Because Velocitune stabilizes Arxiv domain weight at ~0.65 after 5000 steps (Figure 3), Mistral-Velocitune achieves a 3.1% higher overall accuracy than Mistral-CPT on CmdGen-NVIDIA (Table 2).", "label": false }, { "paperid": "2409.06784v1", "paper_path": "./SciVer/papers/2409.06784v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06784v1_figure_5(a).png", "item2_path": "./SciVer/images/2409.06784v1_figure_6(b).png", "section": [ "3" ], "request_id": 223, "origin_statement": "In Fig. 5 for a=3 at K=14, the ground state energy is E_R≈1.2 with E_I=0, indicating a purely real spectrum under unbroken PT symmetry. In Fig. 6 for a=5 at K=15, E_R≈5.7 and E_I≈3.8, indicating a complex spectrum due to PT symmetry breaking.", "perturbed_statement": "In Fig. 5 for a=3 at K=14, the energy is E_R≈1.2 with E_I=0, and in Fig. 6 for a=5 at K=15, E_R≈5.7 with E_I=0, indicating both spectra are purely real despite PT symmetry breaking at a=5.", "perturbed_explanation": "This is incorrect because Figure 6 clearly shows the imaginary part E_I converging around 3.8 for a=5 (not zero), and the caption states the eigenfunction does not respect PT symmetry, yielding a complex spectrum rather than a purely real one.", "claim": "In Fig. 5 for a=3 at K=14, the energy is E_R≈1.2 with E_I=0, and in Fig. 6 for a=5 at K=15, E_R≈5.7 with E_I=0, indicating both spectra are purely real despite PT symmetry breaking at a=5.", "label": false }, { "paperid": "2411.06786v1", "paper_path": "./SciVer/papers/2411.06786v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06786v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.06786v1_figure_5(b).png", "section": [ "1" ], "request_id": 225, "origin_statement": "ViT-L/14’s DCT zero-frequency (0,0) response is about 1200. Swin-L’s zero-frequency response is about 3200, over 2.5× larger. Therefore, Swin-L’s pre-trained feature distribution is even more heavily dominated by the direct component than ViT-L/14.", "perturbed_statement": "ViT-L/14’s DCT zero-frequency (0,0) response is about 1200. Swin-L’s zero-frequency response is about 3200, over 2.5× larger, while its second-frequency bin is lower (≈200 vs ≈300 in ViT). This suggests Swin-L’s direct-component dominance is actually less pronounced.", "perturbed_explanation": "The perturbed claim is false because Swin-L’s second-frequency bin is not lower than ViT-L/14’s. In the plots, Swin-L’s second-frequency height is around 700, significantly higher than ViT-L/14’s ≈300, so its direct-component dominance remains more pronounced.", "claim": "ViT-L/14’s DCT zero-frequency (0,0) response is about 1200. Swin-L’s zero-frequency response is about 3200, over 2.5× larger, while its second-frequency bin is lower (≈200 vs ≈300 in ViT). This suggests Swin-L’s direct-component dominance is actually less pronounced.", "label": false }, { "paperid": "2411.17181v2", "paper_path": "./SciVer/papers/2411.17181v2.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17181v2_figure_5.png", "item2_path": "./SciVer/images/2411.17181v2-Table4-1.png", "section": [ "5", "1" ], "request_id": 232, "origin_statement": "From Fig. 
5(c), WSbSC achieves about 0.60 ROUGE-LCS on the Kaggle dataset; Table 4 reports Turkish at 0.47 ROUGE-LCS. Thus, WSbSC on Kaggle outperforms Turkish by roughly 0.13 in ROUGE-LCS.", "perturbed_statement": "From Fig. 5(c), WSbSC achieves about 0.45 ROUGE-LCS on the Kaggle dataset; Table 4 reports Turkish at 0.47 ROUGE-LCS. Thus, WSbSC on Kaggle underperforms Turkish by 0.02 in ROUGE-LCS.", "perturbed_explanation": "The perturbed statement misquotes the Kaggle ROUGE-LCS score as 0.45. In Fig. 5(c), the actual ROUGE-LCS for WSbSC on Kaggle is approximately 0.60, not 0.45, so WSbSC still outperforms Turkish (0.47).", "claim": "From Fig. 5(c), WSbSC achieves about 0.45 ROUGE-LCS on the Kaggle dataset; Table 4 reports Turkish at 0.47 ROUGE-LCS. Thus, WSbSC on Kaggle underperforms Turkish by 0.02 in ROUGE-LCS.", "label": false }, { "paperid": "2411.17593v1", "paper_path": "./SciVer/papers/2411.17593v1.json", "claim_type": "sequential", "item1": "4", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17593v1_figure_4.png", "item2_path": "./SciVer/images/2411.17593v1_figure_8.png", "section": [ "3.3" ], "request_id": 233, "origin_statement": "Figure 4 shows educators can load 'A Christmas Carol' via Demo1, so the example input is Dickens' text; Figure 8 then reports 'sir' among the top‐10 Oxford 3000 tokens, indicating that 'sir' was highly weighted in the model's analysis of Dickens' excerpt.", "perturbed_statement": "Figure 4 shows educators can load 'The Iliad' via Demo3, so the example input is Homer's text; Figure 8 then reports 'sir' among the top‐10 Oxford 3000 tokens, indicating that 'sir' was highly weighted in the model's analysis of Homer's Iliad.", "perturbed_explanation": "This is incorrect because Figure 4’s Demo3 loads The Iliad only if selected—but the example in Figure 8 follows Demo1 (A Christmas Carol by Dickens), not Demo3. 
Therefore, 'sir' reflects Dickens’ text, not Homer's Iliad.", "claim": "Figure 4 shows educators can load 'The Iliad' via Demo3, so the example input is Homer's text; Figure 8 then reports 'sir' among the top‐10 Oxford 3000 tokens, indicating that 'sir' was highly weighted in the model's analysis of Homer's Iliad.", "label": false }, { "paperid": "2411.07825v1", "paper_path": "./SciVer/papers/2411.07825v1.json", "claim_type": "sequential", "item1": "1(b)", "item2": "2(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07825v1_figure_1(b).png", "item2_path": "./SciVer/images/2411.07825v1_figure_2(a).png", "section": [ "5" ], "request_id": 239, "origin_statement": "In Algorithm1, Fig1 shows ||K_i − K_{i-1}|| peaks at iteration 4, signaling the switch to Steps 6–7; Fig2 then shows ||P_i − P_{i-1}|| peaks at iteration 5, i.e., one iteration later in Algorithm2.", "perturbed_statement": "In Algorithm1, Fig1 shows ||K_i − K_{i-1}|| peaks at iteration 3, signaling the switch to Steps 6–7; Fig2 then shows ||P_i − P_{i-1}|| peaks at iteration 5, i.e., two iterations later in Algorithm2.", "perturbed_explanation": "This is incorrect because Fig1 clearly shows the maximum ||K_i − K_{i-1}|| at iteration 4 (about 3.1), not at iteration 3, so the premise of a peak at iteration 3 contradicts the plotted data.", "claim": "In Algorithm1, Fig1 shows ||K_i − K_{i-1}|| peaks at iteration 3, signaling the switch to Steps 6–7; Fig2 then shows ||P_i − P_{i-1}|| peaks at iteration 5, i.e., two iterations later in Algorithm2.", "label": false }, { "paperid": "2411.06965v1", "paper_path": "./SciVer/papers/2411.06965v1.json", "claim_type": "sequential", "item1": "6", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.06965v1_figure_6.png", "item2_path": "./SciVer/images/2411.06965v1-Table1-1.png", "section": [ "4.4.1" ], "request_id": 248, "origin_statement": "On the Humanoid task, mCWAE-WGAIL-Bonus reaches about 6.3×10^6 QD-Score at 2000 iterations (Figure 6) while PPGA-trueReward reports 5.71×10^6 (Table 1), so mCWAE-WGAIL-Bonus outperforms the expert by roughly 10.3%.", "perturbed_statement": "On the Humanoid task, mCWAE-WGAIL-Bonus reaches only about 5.0×10^6 QD-Score at 2000 iterations (Figure 6) while PPGA-trueReward reports 5.71×10^6 (Table 1), so mCWAE-WGAIL-Bonus underperforms the expert by roughly 12%.", "perturbed_explanation": "The perturbation misreads Figure 6: the red curve for mCWAE-WGAIL-Bonus on Humanoid is actually near 6.3×10^6 at 2000 iterations, not 5.0×10^6, so it still exceeds the 5.71×10^6 reported for PPGA-trueReward.", "claim": "On the Humanoid task, mCWAE-WGAIL-Bonus reaches only about 5.0×10^6 QD-Score at 2000 iterations (Figure 6) while PPGA-trueReward reports 5.71×10^6 (Table 1), so mCWAE-WGAIL-Bonus underperforms the expert by roughly 12%.", "label": false }, { "paperid": "2411.16516v1", "paper_path": "./SciVer/papers/2411.16516v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.16516v1-Table2-1.png", "item2_path": "./SciVer/images/2411.16516v1-Table3-1.png", "section": [ "4" ], "request_id": 251, "origin_statement": "Table2 shows DP-Sniper selects Ŝ so that Pr[M(a′)∈Ŝ]=c and computes ξ* via Eq(4); Table3 defines ε*(θ)=ξ(a,a′,Ŝ) in Eq(4), hence DP-Sniper’s ξ* equals ε*(θ) for M_θ.", "perturbed_statement": "Table2 shows DP-Sniper selects Ŝ so that Pr[M(a)∈Ŝ]=c and computes ξ* via Eq(3); Table3 defines 
ε*(θ)=ξ(a,a′,S*), hence DP-Sniper’s ξ* equals ε*(θ) for M_θ.", "perturbed_explanation": "The perturbation is incorrect because in Table2 DP-Sniper actually chooses Ŝ so that Pr[M(a′)∈Ŝ] = c, not Pr[M(a)∈Ŝ], and computes ξ* according to Eq(4), not Eq(3). Moreover, Table3 defines ε*(θ)=ξ(a,a′,Ŝ) using the empirical set Ŝ (Eq 4), not the theoretical S*. These details contradict the perturbed claim.", "claim": "Table2 shows DP-Sniper selects Ŝ so that Pr[M(a)∈Ŝ]=c and computes ξ* via Eq(3); Table3 defines ε*(θ)=ξ(a,a′,S*), hence DP-Sniper’s ξ* equals ε*(θ) for M_θ.", "label": false }, { "paperid": "2410.22534v1", "paper_path": "./SciVer/papers/2410.22534v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22534v1_figure_1(c).png", "item2_path": "./SciVer/images/2410.22534v1-Table4-1.png", "section": [ "5" ], "request_id": 252, "origin_statement": "Table4 shows that moving from one to two latent classes reduces LOOIC by 181 points (z=5.62, p=0), indicating significant improvement; Figure1’s top panel (G=2) reveals one subgroup with steep normMMSE decline, confirming cognitive trajectory heterogeneity.", "perturbed_statement": "Table4 shows that increasing from two to three classes yields the largest WAIC decrease (z=6.97, p=0), supporting G=3; Figure1’s bottom panel (G=3) displays two distinct normMMSE decline patterns, confirming two subgroup heterogeneity.", "perturbed_explanation": "This is incorrect because Table4 actually reports the largest WAIC drop when going from G=1 to G=2 (z_WAIC=6.97, p=0), not from G=2 to G=3 (where z_WAIC=0.63, p=0.264). Moreover, Figure1’s bottom panel for G=3 shows three subgroup trajectories (red, blue, yellow), not just two patterns.", "claim": "Table4 shows that increasing from two to three classes yields the largest WAIC decrease (z=6.97, p=0), supporting G=3; Figure1’s bottom panel (G=3) displays two distinct normMMSE decline patterns, confirming two subgroup heterogeneity.", "label": false }, { "paperid": "2410.09850v1", "paper_path": "./SciVer/papers/2410.09850v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09850v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.09850v1_figure_3.png", "section": [ "4.3.1" ], "request_id": 253, "origin_statement": "Fig2 shows TCN has the highest JS divergence for Jump Diffusion returns at length 100; consequently, in Fig3 the TCN-generated Brownian Motion type 1 at length 100 exhibits the largest interquartile range among generators.", "perturbed_statement": "Fig2 shows LSTM has the highest JS divergence for Jump Diffusion returns at length 100; consequently, in Fig3 the LSTM-generated Brownian Motion type 1 at length 100 exhibits the largest interquartile range among generators.", "perturbed_explanation": "In Fig2’s heatmap, the darkest cell in the Jump Diffusion row at length 100 corresponds to TCN, not LSTM, so LSTM does not have the highest divergence. 
Additionally, in Fig3 the purple box (TCN, length 100) is visibly wider than the yellow box (LSTM, length 100), contradicting the perturbed claim.", "claim": "Fig2 shows LSTM has the highest JS divergence for Jump Diffusion returns at length 100; consequently, in Fig3 the LSTM-generated Brownian Motion type 1 at length 100 exhibits the largest interquartile range among generators.", "label": false }, { "paperid": "2410.14148v3", "paper_path": "./SciVer/papers/2410.14148v3.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.14148v3_figure_5.png", "item2_path": "./SciVer/images/2410.14148v3-Table4-1.png", "section": [ "4.3" ], "request_id": 259, "origin_statement": "Figure 5 shows the token-level score for “umbrellas” is 0.718, and Table 4 shows that with fine-grained rewards and PPO the MME^P score reaches 1522.6, confirming fine-grained feedback boosts comprehensive benchmark performance.", "perturbed_statement": "Figure 5 shows the token-level score for “umbrellas” is 0.636, and Table 4 shows that with fine-grained rewards and PPO the MME^P score reaches 1431.9, confirming fine-grained feedback boosts comprehensive benchmark performance.", "perturbed_explanation": "This is incorrect because Figure 5 actually reports a token-level score of 0.718 for “umbrellas,” not 0.636. Additionally, Table 4 lists the MME^P score for the ✓ fine-grained + ✓ PPO configuration as 1522.6, not 1431.9.", "claim": "Figure 5 shows the token-level score for “umbrellas” is 0.636, and Table 4 shows that with fine-grained rewards and PPO the MME^P score reaches 1431.9, confirming fine-grained feedback boosts comprehensive benchmark performance.", "label": false }, { "paperid": "2411.14751v1", "paper_path": "./SciVer/papers/2411.14751v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14751v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.14751v1_figure_3(b).png", "section": [ "4.3" ], "request_id": 261, "origin_statement": "By noise level-6, the baseline’s mAP falls under 0.1, and from the AP_ls plot its AP_ls also collapses at level-6 while rot10_std5_prob1 retains ~0.26; thus, noise-trained models maintain lane localization under severe SDMap noise.", "perturbed_statement": "By noise level-5, the baseline’s mAP falls under 0.1, and from the AP_ls plot its AP_ls also collapses at level-5 while rot10_std5_prob1 retains ~0.26; thus, noise-trained models maintain lane localization under severe SDMap noise.", "perturbed_explanation": "The perturbation is wrong because in the mAP plot the baseline’s mAP at level-5 is about 0.26—not below 0.1—and in the AP_ls plot the baseline’s AP_ls does not collapse until level-6, not level-5.", "claim": "By noise level-5, the baseline’s mAP falls under 0.1, and from the AP_ls plot its AP_ls also collapses at level-5 while rot10_std5_prob1 retains ~0.26; thus, noise-trained models maintain lane localization under severe SDMap noise.", "label": false }, { "paperid": "2409.11212v1", "paper_path": "./SciVer/papers/2409.11212v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11212v1_figure_5(a).png", "item2_path": "./SciVer/images/2409.11212v1_figure_5(c).png", "section": [ "5.4" ], "request_id": 274, "origin_statement": "The UltraFeedback plot shows Uncertainty sampling averages about 
20.7% noise, a 40.6-point drop from Random’s 61.3% (modality1). In the MATH-StepDPO plot, Uncertainty sampling lowers noise from 9.3% to 5.1%, a 4.2-point reduction (modality2).", "perturbed_statement": "The UltraFeedback plot shows Uncertainty sampling averages about 18% noise, a 43.3-point drop from Random’s 61.3% (modality1). In the MATH-StepDPO plot, Uncertainty sampling lowers noise from 9.3% to 5.1%, a 4.2-point reduction (modality2).", "perturbed_explanation": "The perturbation incorrectly uses 18% as the average Uncertainty noise rate on UltraFeedback, but the figure’s ‘Avg.’ bar for Uncertainty is ~20.7%, not 18%. This makes the stated 43.3-point drop from 61.3% false; the real drop is ~40.6 points.", "claim": "The UltraFeedback plot shows Uncertainty sampling averages about 18% noise, a 43.3-point drop from Random’s 61.3% (modality1). In the MATH-StepDPO plot, Uncertainty sampling lowers noise from 9.3% to 5.1%, a 4.2-point reduction (modality2).", "label": false }, { "paperid": "2411.09702v1", "paper_path": "./SciVer/papers/2411.09702v1.json", "claim_type": "sequential", "item1": "1", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09702v1_figure_1.png", "item2_path": "./SciVer/images/2411.09702v1_figure_6.png", "section": [ "1" ], "request_id": 277, "origin_statement": "Attention Copy directly achieves 85.1% top-1 accuracy on ImageNet-1K (Figure 1), and when ensembled with the fine-tuned MAE model, its accuracy improves by 1.1 points to 86.2%, surpassing fine-tuning alone at 85.7% (Figure 6).", "perturbed_statement": "Attention Copy directly achieves only 83.0% top-1 accuracy on ImageNet-1K (Figure 1), and when ensembled with the fine-tuned MAE model, its accuracy unexpectedly jumps by 2.5 points to 85.5%, matching fine-tuning alone at 85.7% (Figure 6).", "perturbed_explanation": "The perturbed claim misreads Figure 1: Attention Copy actually scores 85.1%, not 83.0%. It also misstates Figure 6: the ensemble gain for Attention Copy is +1.1 (to 86.2%), not +2.5 (to 85.5%), so the resulting accuracy is incorrect.", "claim": "Attention Copy directly achieves only 83.0% top-1 accuracy on ImageNet-1K (Figure 1), and when ensembled with the fine-tuned MAE model, its accuracy unexpectedly jumps by 2.5 points to 85.5%, matching fine-tuning alone at 85.7% (Figure 6).", "label": false }, { "paperid": "2409.14063v1", "paper_path": "./SciVer/papers/2409.14063v1.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14063v1_figure_4.png", "item2_path": "./SciVer/images/2409.14063v1-Table3-1.png", "section": [ "5.2" ], "request_id": 292, "origin_statement": "Fig.4a shows FedAvg mixes missing and owning classes indistinctly; Table 3 reports FedAvg’s Fruit accuracy at only 11.9%; thus, this class mixing likely causes FedAvg’s poor Fruit performance.", "perturbed_statement": "Fig.4a shows FedAvg mixes missing and owning classes indistinctly; Table 3 reports FedAvg’s Fruit accuracy at 29.7%; thus, this class mixing likely causes FedAvg’s poor Fruit performance.", "perturbed_explanation": "The perturbed statement misreads Table 3: 29.7% is FedAvg’s Cars accuracy, not Fruit. 
FedAvg’s Fruit accuracy is actually 11.9%, so the claimed 29.7% contradicts the table data.", "claim": "Fig.4a shows FedAvg mixes missing and owning classes indistinctly; Table 3 reports FedAvg’s Fruit accuracy at 29.7%; thus, this class mixing likely causes FedAvg’s poor Fruit performance.", "label": false }, { "paperid": "2409.10604v4", "paper_path": "./SciVer/papers/2409.10604v4.json", "claim_type": "sequential", "item1": "1", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10604v4_figure_1.png", "item2_path": "./SciVer/images/2409.10604v4_figure_4.png", "section": [ "1" ], "request_id": 293, "origin_statement": "Figure 1 shows image 200.8 (green) is a LyC non-leaking region. In Figure 4, the relensed positions of Source 200 (green crosses) lie adjacent to both Perturber I and II, implying that dark subhalos distort the lensing of the non-leaking knot.", "perturbed_statement": "Figure 1 shows image 200.8 (green) is a LyC-leaking region. In Figure 4, the relensed positions of Source 200 (green crosses) lie adjacent to both Perturber I and II, indicating dark subhalos distort the lensing of a LyC-leaking knot.", "perturbed_explanation": "This claim fails because image 200.8 is explicitly identified in the caption as a non-leaking region, whereas only images 1.4 and 1.10 are the multiply-imaged LyC leakers. Labeling 200.8 as leaking contradicts the context.", "claim": "Figure 1 shows image 200.8 (green) is a LyC-leaking region. In Figure 4, the relensed positions of Source 200 (green crosses) lie adjacent to both Perturber I and II, indicating dark subhalos distort the lensing of a LyC-leaking knot.", "label": false }, { "paperid": "2411.04281v1", "paper_path": "./SciVer/papers/2411.04281v1.json", "claim_type": "sequential", "item1": "1", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04281v1_figure_1.png", "item2_path": "./SciVer/images/2411.04281v1_figure_4.png", "section": [ "3.4" ], "request_id": 296, "origin_statement": "CorGAN generates phecode prevalences with a median around 0.01 matching real MIMIC-III. Then, as synthetic sample size rises beyond 100,000, the MMD falls below 0.049, indicating closer distribution alignment. Therefore, CorGAN maintains realistic prevalence while improving fidelity at scale.", "perturbed_statement": "CorGAN generates phecode prevalences with a median around 0.02 matching MIMIC-IV. Then, as synthetic sample size rises beyond 100,000, the MMD falls below 0.01, indicating a near-perfect distribution alignment. Therefore, CorGAN achieves near-perfect fidelity and prevalence accuracy at scale.", "perturbed_explanation": "First, Figure 1 shows CorGAN’s median phecode prevalence is about 0.01, not 0.02. Second, Figure 4 demonstrates that MMD stabilizes around 0.048–0.049 and never drops below 0.01, so the claim of MMD<0.01 is incorrect.", "claim": "CorGAN generates phecode prevalences with a median around 0.02 matching MIMIC-IV. Then, as synthetic sample size rises beyond 100,000, the MMD falls below 0.01, indicating a near-perfect distribution alignment. 
Therefore, CorGAN achieves near-perfect fidelity and prevalence accuracy at scale.", "label": false }, { "paperid": "2411.14914v1", "paper_path": "./SciVer/papers/2411.14914v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14914v1_figure_2.png", "item2_path": "./SciVer/images/2411.14914v1_figure_3.png", "section": [ "4.2" ], "request_id": 301, "origin_statement": "Figure 2 shows gpt-3.5-turbo-1106 has a median recall of about 0.3 for query q4. Figure 3 shows its median precision for q4 is roughly 0.009, indicating it yields high recall but very low precision on q4.", "perturbed_statement": "Figure 2 shows gpt-3.5-turbo-1106 has a median recall of about 0.6 for query q4. Figure 3 shows its median precision for q4 is roughly 0.02, indicating it yields high recall and moderate precision on q4.", "perturbed_explanation": "This is incorrect because Figure 2 actually reports a median recall near 0.3 for q4 (not 0.6), and Figure 3 reports a median precision near 0.009 for q4 (not 0.02). Both values contradict the stated perturbed premises.", "claim": "Figure 2 shows gpt-3.5-turbo-1106 has a median recall of about 0.6 for query q4. Figure 3 shows its median precision for q4 is roughly 0.02, indicating it yields high recall and moderate precision on q4.", "label": false }, { "paperid": "2411.15173v1", "paper_path": "./SciVer/papers/2411.15173v1.json", "claim_type": "sequential", "item1": "2(b)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15173v1_figure_2(b).png", "item2_path": "./SciVer/images/2411.15173v1_figure_2(c).png", "section": [ "4.1" ], "request_id": 304, "origin_statement": "Figure 2b shows class 0 and class 1 features in domain C4 overlap significantly; thus in Figure 2c, pretrained model features from centers C2–C5 are completely mixed, demonstrating that model-derived features cannot separate target subdomains under multiple distribution shifts.", "perturbed_statement": "Figure 2b shows class 0 and class 1 features in domain C4 are clearly separated; thus in Figure 2c, pretrained model features from centers C2–C5 form four distinct clusters, demonstrating effective domain separation under multiple shifts.", "perturbed_explanation": "The perturbed claim is false because in Figure 2b the purple (Class 0) and red (Class 1) points for C4 overlap heavily rather than being clearly separated. Additionally, Figure 2c shows that points from centers C2–C5 are intermixed and indistinguishable, not forming four distinct clusters.", "claim": "Figure 2b shows class 0 and class 1 features in domain C4 are clearly separated; thus in Figure 2c, pretrained model features from centers C2–C5 form four distinct clusters, demonstrating effective domain separation under multiple shifts.", "label": false }, { "paperid": "2410.21088v1", "paper_path": "./SciVer/papers/2410.21088v1.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.21088v1_figure_3.png", "item2_path": "./SciVer/images/2410.21088v1-Table1-1.png", "section": [ "1" ], "request_id": 306, "origin_statement": "Figure 3 indicates Shallow Diffuse embeds the watermark within the null space of Jₙₜ. Table 1 shows Shallow Diffuse achieves 1.00 AUC in JPEG robustness under the server scenario. 
Therefore, embedding the watermark in the null space yields perfect JPEG robustness.", "perturbed_statement": "Figure 3 indicates Shallow Diffuse embeds the watermark within the range of Jₙₜ. Table 1 shows Shallow Diffuse achieves 0.98 AUC in JPEG robustness under the server scenario. Therefore, embedding the watermark in the range yields near-perfect JPEG robustness.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 3 explicitly shows the watermark being applied in the null space of Jₙₜ, not its range. Moreover, Table 1 reports a JPEG robustness AUC of 1.00 for Shallow Diffuse, not 0.98 as claimed.", "claim": "Figure 3 indicates Shallow Diffuse embeds the watermark within the range of Jₙₜ. Table 1 shows Shallow Diffuse achieves 0.98 AUC in JPEG robustness under the server scenario. Therefore, embedding the watermark in the range yields near-perfect JPEG robustness.", "label": false }, { "paperid": "2409.01490v1", "paper_path": "./SciVer/papers/2409.01490v1.json", "claim_type": "sequential", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01490v1_figure_6.png", "item2_path": "./SciVer/images/2409.01490v1-Table3-1.png", "section": [ "4.2" ], "request_id": 309, "origin_statement": "Based on Fig.6’s five orbital loops and the 3534 days TOF in Table 3, the spacecraft’s average revolution period around the Sun is about 3534/5 ≈ 707 days.", "perturbed_statement": "Based on Fig.6’s four orbital loops and the 3534 days TOF in Table 3, the spacecraft’s average revolution period around the Sun is about 3534/4 ≈ 884 days.", "perturbed_explanation": "Figure 6 actually depicts five concentric loops (five revolutions), not four. Dividing the 3534 day TOF by five gives ≈707 days per revolution, so the 884 day figure is incorrect.", "claim": "Based on Fig.6’s four orbital loops and the 3534 days TOF in Table 3, the spacecraft’s average revolution period around the Sun is about 3534/4 ≈ 884 days.", "label": false }, { "paperid": "2409.08158v1", "paper_path": "./SciVer/papers/2409.08158v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08158v1_figure_3.png", "item2_path": "./SciVer/images/2409.08158v1_figure_4.png", "section": [ "3.1", "2" ], "request_id": 312, "origin_statement": "In 2015, the field’s average social cost of carbon was about $160/tC (Figure 3), so Energy Policy’s papers, averaging $72/tC (Figure 4), report estimates roughly $88/tC lower than the contemporary field average.", "perturbed_statement": "In 2015, the field’s average social cost of carbon was about $280/tC (Figure 3), so Energy Policy’s papers, averaging $72/tC (Figure 4), report estimates roughly $208/tC lower than the contemporary field average.", "perturbed_explanation": "Figure 3 actually shows the average SCC in 2015 at roughly $160/tC, not $280/tC. 
The perturbed premise of a $280/tC average misreads the first figure, so the derived $208/tC difference is invalid.", "claim": "In 2015, the field’s average social cost of carbon was about $280/tC (Figure 3), so Energy Policy’s papers, averaging $72/tC (Figure 4), report estimates roughly $208/tC lower than the contemporary field average.", "label": false }, { "paperid": "2409.03044v1", "paper_path": "./SciVer/papers/2409.03044v1.json", "claim_type": "sequential", "item1": "1", "item2": "2(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03044v1_figure_1.png", "item2_path": "./SciVer/images/2409.03044v1_figure_2(a).png", "section": [ "4.2" ], "request_id": 314, "origin_statement": "Phones are the most frequently used authentication device (Fig 1). Touchscreens are the single most common input modality (Fig 2). Therefore, most password or PIN entries occur on phones via touchscreen interfaces.", "perturbed_statement": "Laptops are the most frequently used authentication device (Fig 1), and keyboards are the most common input modality (Fig 2); therefore, most password or PIN entries occur on laptops using keyboard interfaces.", "perturbed_explanation": "Figure 1’s blue bar for Phone (~450) is higher than Laptop (~200), so phones—not laptops—are the top device. Figure 2 shows Touchscreen has the largest “most frequent” count (≈650) compared to Keyboard (≈550), so keyboards are not the single most common input modality.", "claim": "Laptops are the most frequently used authentication device (Fig 1), and keyboards are the most common input modality (Fig 2); therefore, most password or PIN entries occur on laptops using keyboard interfaces.", "label": false }, { "paperid": "2409.11428v1", "paper_path": "./SciVer/papers/2409.11428v1.json", "claim_type": "sequential", "item1": "6(b)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11428v1_figure_6(b).png", "item2_path": "./SciVer/images/2409.11428v1-Table4-1.png", "section": [ "5.4" ], "request_id": 319, "origin_statement": "Figure 6(b) shows Affinity Propagation selects 10 traps in a 100-file directory; Table 4 reports AP file loss rising from 0.6523% on EP-1 to 0.9415% on EP-2; thus, AP’s file loss increases by ~0.2892 percentage points in larger directories.", "perturbed_statement": "Figure 6(b) shows Affinity Propagation selects 20 traps in a 100-file directory; Table 4 reports AP file loss dropping from 0.6523% on EP-1 to 0.4515% on EP-2; thus, AP’s file loss decreases by ~0.2008 percentage points in larger directories.", "perturbed_explanation": "This is incorrect because Figure 6(b) actually shows Affinity Propagation selecting 10 traps (not 20) in a 100-file directory, and Table 4 shows AP file loss rising to 0.9415% on EP-2 (not dropping to 0.4515%).", "claim": "Figure 6(b) shows Affinity Propagation selects 20 traps in a 100-file directory; Table 4 reports AP file loss dropping from 0.6523% on EP-1 to 0.4515% on EP-2; thus, AP’s file loss decreases by ~0.2008 percentage points in larger directories.", "label": false }, { "paperid": "2411.06286v1", "paper_path": "./SciVer/papers/2411.06286v1.json", "claim_type": "sequential", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.06286v1_figure_3.png", "item2_path": "./SciVer/images/2411.06286v1-Table2-1.png", "section": [ "3.2" ], "request_id": 321, "origin_statement": "SPIKAN(b) at n_cp = 100² yields the smallest velocity discrepancies: Figure 3 
shows its green curve aligns closest to the FVM reference on both centerlines, and Table 2 reports L2 errors of 5.83% (u) and 7.18% (v), outperforming PIKAN and SPIKAN(a).", "perturbed_statement": "PIKAN at n_cp = 50² yields the smallest velocity discrepancies: Figure 3 shows its red curve aligns closest to the FVM reference on both centerlines, and Table 2 reports L2 errors of 9.73% (u) and 11.80% (v), outperforming the SPIKANs.", "perturbed_explanation": "The perturbed claim is false because Figure 3 reveals that the green curve (SPIKAN(b))—not the red curve (PIKAN)—lies closest to the FVM reference on both centerlines. Moreover, Table 2 shows SPIKAN(b) has lower L2(u,v) errors (5.83%, 7.18%) than PIKAN’s higher errors (9.73%, 11.80%).", "claim": "PIKAN at n_cp = 50² yields the smallest velocity discrepancies: Figure 3 shows its red curve aligns closest to the FVM reference on both centerlines, and Table 2 reports L2 errors of 9.73% (u) and 11.80% (v), outperforming the SPIKANs.", "label": false }, { "paperid": "2411.00311v1", "paper_path": "./SciVer/papers/2411.00311v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00311v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.00311v1_figure_2(b).png", "section": [ "2.2", "5.5" ], "request_id": 330, "origin_statement": "C2A fine-tunes only 0.028% of the parameters (table) and still keeps CKA similarity above 97% under non-IID after nine local epochs (second image), demonstrating parameter-efficient yet stable federated learning.", "perturbed_statement": "C2A fine-tunes only 0.28% of the parameters (table) and still keeps CKA similarity above 97% under non-IID after nine local epochs (second image), demonstrating parameter-efficient yet stable federated learning.", "perturbed_explanation": "The perturbed claim is wrong because the table reports that C2A uses 0.028% of the model parameters, not 0.28%. This tenfold exaggeration contradicts the exact figure in the context.", "claim": "C2A fine-tunes only 0.28% of the parameters (table) and still keeps CKA similarity above 97% under non-IID after nine local epochs (second image), demonstrating parameter-efficient yet stable federated learning.", "label": false }, { "paperid": "2411.01228v1", "paper_path": "./SciVer/papers/2411.01228v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.01228v1-Table3-1.png", "item2_path": "./SciVer/images/2411.01228v1-Table4-1.png", "section": [ "8.3.2" ], "request_id": 338, "origin_statement": "Table 3 shows NurtureBot v3 achieved the highest CUQ score (91.3). Table 4 highlights “Emotional Design” as crucial for success. Therefore, NurtureBot v3’s top CUQ performance likely reflects its effective emotional design enhancements in user interactions.", "perturbed_statement": "Table 3 shows NurtureBot v2 achieved the highest CUQ score (87.2). Table 4 highlights “Multi-modal Interactions” as crucial for success. Therefore, NurtureBot v2’s top CUQ performance likely reflects its advanced multi-modal interaction features.", "perturbed_explanation": "The perturbation is incorrect because Table 3 actually reports NurtureBot v3 as having the highest CUQ score (91.3), not v2. NurtureBot v2’s CUQ score is 87.2, which is lower than v3’s score, so v2 did not achieve the top usability performance.", "claim": "Table 3 shows NurtureBot v2 achieved the highest CUQ score (87.2). 
Table 4 highlights “Multi-modal Interactions” as crucial for success. Therefore, NurtureBot v2’s top CUQ performance likely reflects its advanced multi-modal interaction features.", "label": false }, { "paperid": "2411.05322v1", "paper_path": "./SciVer/papers/2411.05322v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05322v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.05322v1_figure_4(b).png", "section": [ "4.2" ], "request_id": 340, "origin_statement": "Fig 4a shows adaptive quantization spreads explicit representation values over [-150,150]. Since Fig 4b shows fixed quantization only covers [-20,20] at similar bitrate, adaptive quantization thus preserves more high-frequency information, improving reconstruction detail.", "perturbed_statement": "Fig 4a shows adaptive quantization spans values over [-150,150]. Because Fig 4b shows fixed quantization also covers [-150,150] at similar bitrate, fixed quantization thus preserves as much high-frequency information as adaptive.", "perturbed_explanation": "The perturbed claim is false because Fig 4b’s histogram clearly confines fixed-quantized values within [-20,20], not [-150,150], so fixed quantization cannot preserve the same high-frequency range as adaptive quantization.", "claim": "Fig 4a shows adaptive quantization spans values over [-150,150]. Because Fig 4b shows fixed quantization also covers [-150,150] at similar bitrate, fixed quantization thus preserves as much high-frequency information as adaptive.", "label": false }, { "paperid": "2411.07273v2", "paper_path": "./SciVer/papers/2411.07273v2.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07273v2_figure_3(a).png", "item2_path": "./SciVer/images/2411.07273v2_figure_3(c).png", "section": [ "5.2" ], "request_id": 351, "origin_statement": "At its peak ply (around 32 plies), the 6×6 board exhibits roughly 10^14 distinct positions; mapping these positions via the DFA representation yields about 10^7 states, implying a compression ratio of 10^7:1.", "perturbed_statement": "At its peak ply (around 32 plies), the 6×6 board exhibits roughly 10^12 distinct positions; mapping these positions via the DFA representation yields about 10^7 states, implying a compression ratio of 10^5:1.", "perturbed_explanation": "Figure 3’s grey curve for 6×6 actually peaks near 10^14 positions, not 10^12. The misread 10^12 base makes the 10^5:1 ratio invalid—using the correct 10^14 positions gives 10^14/10^7≈10^7:1 compression, not 10^5:1.", "claim": "At its peak ply (around 32 plies), the 6×6 board exhibits roughly 10^12 distinct positions; mapping these positions via the DFA representation yields about 10^7 states, implying a compression ratio of 10^5:1.", "label": false }, { "paperid": "2411.15633v1", "paper_path": "./SciVer/papers/2411.15633v1.json", "claim_type": "sequential", "item1": "1", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15633v1_figure_1.png", "item2_path": "./SciVer/images/2411.15633v1-Table4-1.png", "section": [ "4.3" ], "request_id": 355, "origin_statement": "Fig. 
1 shows that fake loss drops to near zero by 200 iterations, implying rapid overfitting to fake patterns; correspondingly, Table 4 reports that the baseline detector’s linear-prob variant, with only 0.002 M trainable parameters, achieves an average AUC of 0.767.", "perturbed_statement": "Fig. 1 shows that fake loss drops to near zero by 1000 iterations, implying slow overfitting to fake patterns; correspondingly, Table 4 reports that the baseline detector’s linear-prob variant, with only 0.002 M trainable parameters, achieves an average AUC of 0.859.", "perturbed_explanation": "The perturbation is incorrect because in Fig. 1 the fake loss actually falls to essentially zero by the 200th iteration (not 1000). Moreover, Table 4 shows the linear-prob variant’s average AUC is 0.767, not 0.859 (0.859 is the FFT baseline’s AUC).", "claim": "Fig. 1 shows that fake loss drops to near zero by 1000 iterations, implying slow overfitting to fake patterns; correspondingly, Table 4 reports that the baseline detector’s linear-prob variant, with only 0.002 M trainable parameters, achieves an average AUC of 0.859.", "label": false }, { "paperid": "2409.01466v1", "paper_path": "./SciVer/papers/2409.01466v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.01466v1-Table3-1.png", "item2_path": "./SciVer/images/2409.01466v1-Table4-1.png", "section": [ "4.3" ], "request_id": 357, "origin_statement": "Table 3 shows that GPT labels the first ad as Promote, contradicting the human Contrast label. After removing such controversial samples, Table 4 reports a Promote F1 of 0.9704 under CoT prompting, implying highly consistent detection of promotional tone post-adjustment.", "perturbed_statement": "Table 3 shows that GPT labels the first ad as Attack, contradicting the human Contrast label. After removing such controversial samples, Table 4 reports an Attack F1 of 0.9721 under Judge prompting, implying highly consistent detection of attack tone post-adjustment.", "perturbed_explanation": "This is incorrect because in Table 3 GPT actually labels the first ad as Promote (not Attack). Moreover, Table 4 lists an Attack F1 of 0.8078 (Judge prompting), while 0.9721 is the Promote F1 score, not the Attack F1 score.", "claim": "Table 3 shows that GPT labels the first ad as Attack, contradicting the human Contrast label. 
After removing such controversial samples, Table 4 reports an Attack F1 of 0.9721 under Judge prompting, implying highly consistent detection of attack tone post-adjustment.", "label": false }, { "paperid": "2410.20483v2", "paper_path": "./SciVer/papers/2410.20483v2.json", "claim_type": "sequential", "item1": "5(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20483v2_figure_5(a).png", "item2_path": "./SciVer/images/2410.20483v2_figure_6(b).png", "section": [ "6.2" ], "request_id": 359, "origin_statement": "Fig5(a) shows that for the Adult dataset, the SEV-C+F (flex=0.05) method on GBDT achieves mean SEV ≈1.25 and L∞≈2; accordingly, Fig6(b) indicates the corresponding SEV-C explanation changes only 1 feature while securing log‐likelihood above 100.", "perturbed_statement": "Fig5(a) shows that for the Adult dataset, the SEV-C+F (flex=0.05) method on GBDT achieves mean SEV ≈1.25 and L∞≈2; accordingly, Fig6(b) indicates the corresponding SEV-C explanation changes only 1 feature while securing log‐likelihood above 1000.", "perturbed_explanation": "The perturbed statement is wrong because in Fig6(b) the orange diamond (SEV-C) on the Adult panel has a log-likelihood just above 10^2, not anywhere near 10^3, so it does not exceed 1000.", "claim": "Fig5(a) shows that for the Adult dataset, the SEV-C+F (flex=0.05) method on GBDT achieves mean SEV ≈1.25 and L∞≈2; accordingly, Fig6(b) indicates the corresponding SEV-C explanation changes only 1 feature while securing log‐likelihood above 1000.", "label": false }, { "paperid": "2411.17616v1", "paper_path": "./SciVer/papers/2411.17616v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17616v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.17616v1_figure_3.png", "section": [ "3.3" ], "request_id": 370, "origin_statement": "At DDPM step 10, Skip-DiT’s skip branches keep normalized feature similarity above 0.95 (Fig 2); because Skip-DiT connects shallow and deep blocks via residual paths (Fig 3b), Skip-Cache can safely bypass blocks 2…L−1 during inference, relying on these stable cached features.", "perturbed_statement": "At DDPM step 10, Skip-DiT’s skip branches keep normalized feature similarity above 0.95 (Fig 2); however, Skip-Cache only caches the final DiT block output (Fig 3c), so it cannot bypass intermediate blocks 2…L−1 despite this high similarity.", "perturbed_explanation": "The perturbed statement misreads Fig 3c: Skip-Cache actually caches the first-layer output x₀′ᵗ and the (L−1)-th layer output x_{L−1}′ᵗ (see red arrows in Fig 3c), not just the final block output. Therefore, it can bypass intermediate blocks using these two cached feature maps.", "claim": "At DDPM step 10, Skip-DiT’s skip branches keep normalized feature similarity above 0.95 (Fig 2); however, Skip-Cache only caches the final DiT block output (Fig 3c), so it cannot bypass intermediate blocks 2…L−1 despite this high similarity.", "label": false }, { "paperid": "2409.11599v1", "paper_path": "./SciVer/papers/2409.11599v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11599v1_figure_3(b).png", "item2_path": "./SciVer/images/2409.11599v1-Table2-1.png", "section": [ "5.2" ], "request_id": 371, "origin_statement": "Figure 3 shows novice gaze markers (orange) clustered on the internal micrometer region; Table 2 reports for Char 15 (internal micrometer) a t-statistic of 2.548 and p = 0.020; thus novices spent significantly longer scanning the internal micrometer than experts.", "perturbed_statement": "Figure 3 shows novice gaze markers (orange) clustered on the height gauge; Table 2 reports for Char 35 (height gauge) a t-statistic of 1.633 and p = 0.120; thus novices spent significantly longer scanning the height gauge than experts.", "perturbed_explanation": "This is incorrect because Figure 3 does not display an orange gaze marker on the height gauge (the orange markers appear on the internal micrometer, electronic caliper, and depth gauge). Moreover, Table 2’s p = 0.120 for the height gauge indicates no significant difference in scanning time.", "claim": "Figure 3 shows novice gaze markers (orange) clustered on the height gauge; Table 2 reports for Char 35 (height gauge) a t-statistic of 1.633 and p = 0.120; thus novices spent significantly longer scanning the height gauge than experts.", "label": false }, { "paperid": "2411.03500v1", "paper_path": "./SciVer/papers/2411.03500v1.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03500v1_figure_5.png", "item2_path": "./SciVer/images/2411.03500v1-Table4-1.png", "section": [ "6.3" ], "request_id": 373, "origin_statement": "LambdaTune evaluates only five configurations (Table 4), while UDO runs 617 trials. Yet in Query 13 (Figure 5), LambdaTune reduces execution time from 2.36 s to 1.38 s—about a 41% speedup compared to default—demonstrating high efficiency per trial.", "perturbed_statement": "LambdaTune evaluates only three configurations (Table 4), while UDO runs 617 trials. Yet in Query 13 (Figure 5), LambdaTune reduces execution time from 2.36 s to 1.38 s—about a 41% speedup compared to default—demonstrating high efficiency per trial.", "perturbed_explanation": "Table 4 shows that LambdaTune evaluates five configurations for TPC-H 1 GB with initial indexes, not three. Therefore the statement that it runs only three configurations is incorrect and contradicts the data in Table 4.", "claim": "LambdaTune evaluates only three configurations (Table 4), while UDO runs 617 trials. Yet in Query 13 (Figure 5), LambdaTune reduces execution time from 2.36 s to 1.38 s—about a 41% speedup compared to default—demonstrating high efficiency per trial.", "label": false }, { "paperid": "2411.14533v1", "paper_path": "./SciVer/papers/2411.14533v1.json", "claim_type": "sequential", "item1": "11(b)", "item2": "15", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.14533v1_figure_11(b).png", "item2_path": "./SciVer/images/2411.14533v1-Table15-1.png", "section": [ "6.1" ], "request_id": 380, "origin_statement": "7 of 42 DIMACS instances in Table 15 show a positive max-color improvement (diff_fx > 0), and Figure 11(b) displays a median best-solution deviation around 2.5%, indicating BRKGA+R+LS generally outperforms BRKGA-G on DIMACS graphs.", "perturbed_statement": "20 of 42 DIMACS instances in Table 15 show a positive max-color improvement, and Figure 11(b) displays a median best-solution deviation around 0.5%, indicating BRKGA+R+LS generally outperforms BRKGA-G on DIMACS graphs.", "perturbed_explanation": "The table actually reports only 7 instances with diff_fx > 0, not 20, and the boxplot’s median deviation for DIMACS best solutions is about 2.5%, not 0.5%, so both figures in the perturbed claim contradict the context.", "claim": "20 of 42 DIMACS instances in Table 15 show a positive max-color improvement, and Figure 11(b) displays a median best-solution deviation around 0.5%, indicating BRKGA+R+LS generally outperforms BRKGA-G on DIMACS graphs.", "label": false }, { "paperid": "2409.12887v2", "paper_path": "./SciVer/papers/2409.12887v2.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.12887v2-Table4-1.png", "item2_path": "./SciVer/images/2409.12887v2-Table5-1.png", "section": [ "4.3" ], "request_id": 384, "origin_statement": "Since removing general data (‘w/o general’) decreases average performance by 1.06 points (81.21 vs 80.15) in Table 4, and GCSE achieves a 93.77% Spearman correlation in Table 5, we conclude that including general data contributes significantly to its high correlation on synthetic STS-B.", "perturbed_statement": "Since removing general data (‘w/o general’) decreases average performance by 2.06 points (81.21 vs 79.15) in Table 4, and GCSE achieves an 89.77% Spearman correlation in Table 5, we conclude that including general data only marginally boosts its STS-B correlation to around 90%.", "perturbed_explanation": "Both premises are incorrect. Table 4 shows the drop is only 1.06 points (81.21 – 80.15), not 2.06, and Table 5 reports GCSE’s Spearman correlation as 93.77%, not 89.77%.", "claim": "Since removing general data (‘w/o general’) decreases average performance by 2.06 points (81.21 vs 79.15) in Table 4, and GCSE achieves an 89.77% Spearman correlation in Table 5, we conclude that including general data only marginally boosts its STS-B correlation to around 90%.", "label": false }, { "paperid": "2409.09549v1", "paper_path": "./SciVer/papers/2409.09549v1.json", "claim_type": "sequential", "item1": "9(b)", "item2": "10(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.09549v1_figure_9(b).png", "item2_path": "./SciVer/images/2409.09549v1_figure_10(b).png", "section": [ "5.2" ], "request_id": 385, "origin_statement": "On the MHDeep task, COMFORT w LoRA achieves its highest accuracy (92.8%) when fine-tuned with 60% of the training data, and under PEFT this further improves to 93.8% when using a rank value of 8.", "perturbed_statement": "On the MHDeep task, COMFORT w LoRA achieves its highest accuracy (93.4%) when fine-tuned with 70% of the training data, and under PEFT this further improves to 93.7% when using a rank value of 16.", "perturbed_explanation": "Figure 9 shows that COMFORT w LoRA actually peaks at 60% data with 92.8%, not at 70% with 93.4%. Figure 10 shows that at rank 16, LoRA yields about 92.55%, not 93.7%. Both premises contradict the plotted values.", "claim": "On the MHDeep task, COMFORT w LoRA achieves its highest accuracy (93.4%) when fine-tuned with 70% of the training data, and under PEFT this further improves to 93.7% when using a rank value of 16.", "label": false }, { "paperid": "2410.13343v1", "paper_path": "./SciVer/papers/2410.13343v1.json", "claim_type": "sequential", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13343v1_figure_7.png", "item2_path": "./SciVer/images/2410.13343v1-Table3-1.png", "section": [ "5.2.4" ], "request_id": 395, "origin_statement": "Figure 7 shows an LLM wrongly infers “the judge knew the lawyer” from “The judge knew the lawyer thanked the actor,” oversimplifying reasoning. Table 3 reveals GPT-3.5-Turbo’s hypothesis accuracy drops to 48.0% when tautologies appear at the start, confirming a start-position bias.", "perturbed_statement": "Figure 7 shows an LLM wrongly infers “the judge knew the lawyer” from “The judge knew the lawyer thanked the actor,” oversimplifying reasoning. Table 3 reveals GPT-4’s hypothesis accuracy drops to 71.2% when tautologies appear at the start, confirming a start-position bias.", "perturbed_explanation": "The perturbed statement misassigns GPT-4’s accuracy figures. In Table 3, GPT-4’s hypothesis accuracy at the start is actually 76.4%, not 71.2%. The 71.2% value corresponds to end-position tautologies, so the claim about a start-position drop to 71.2% contradicts the table.", "claim": "Figure 7 shows an LLM wrongly infers “the judge knew the lawyer” from “The judge knew the lawyer thanked the actor,” oversimplifying reasoning. Table 3 reveals GPT-4’s hypothesis accuracy drops to 71.2% when tautologies appear at the start, confirming a start-position bias.", "label": false }, { "paperid": "2409.14032v1", "paper_path": "./SciVer/papers/2409.14032v1.json", "claim_type": "sequential", "item1": "2(b)", "item2": "2(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14032v1_figure_2(b).png", "item2_path": "./SciVer/images/2409.14032v1_figure_2(f).png", "section": [ "4" ], "request_id": 408, "origin_statement": "At subsample r=200 for case 2, the OSP ASE from Figure 2b (linear model) is about 0.35 and from Figure 2f (Cox model) about 0.42, so OSP in the linear model reduces ASE by roughly 0.07 compared to the Cox model.", "perturbed_statement": "At subsample r=200 for case 2, the OSP ASE from Figure 2b (linear model) is about 0.38 and from Figure 2f (Cox model) about 0.45, so OSP in the linear model reduces ASE by roughly 0.07 compared to the Cox model.", "perturbed_explanation": "Figure 2b actually shows the OSP ASE at r=200 near 0.35, not 0.38. Figure 2f shows it near 0.42, not 0.45. Thus the perturbed values (0.38 and 0.45) contradict the plotted ASEs and make the final claim unsupported.", "claim": "At subsample r=200 for case 2, the OSP ASE from Figure 2b (linear model) is about 0.38 and from Figure 2f (Cox model) about 0.45, so OSP in the linear model reduces ASE by roughly 0.07 compared to the Cox model.", "label": false }, { "paperid": "2409.06924v1", "paper_path": "./SciVer/papers/2409.06924v1.json", "claim_type": "sequential", "item1": "2", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06924v1_figure_2.png", "item2_path": "./SciVer/images/2409.06924v1_figure_6.png", "section": [ "4" ], "request_id": 425, "origin_statement": "The observed O–C for cycle 8 in Fig 2 is about –120 minutes, roughly four times deeper than the TTV model’s predicted –30 minute trough at cycle 8 in Fig 6.", "perturbed_statement": "The observed O–C for cycle 8 in Fig 2 is about –30 minutes, slightly shallower than the TTV model’s predicted –40 minute trough at cycle 8 in Fig 6.", "perturbed_explanation": "This is incorrect because in Fig 2 the data point at cycle 8 is at –120 minutes (not –30 minutes), and in Fig 6 the model trough at cycle 8 is around –30 minutes (not –40 minutes).", "claim": "The observed O–C for cycle 8 in Fig 2 is about –30 minutes, slightly shallower than the TTV model’s predicted –40 minute trough at cycle 8 in Fig 6.", "label": false }, { "paperid": "2409.05878v2", "paper_path": "./SciVer/papers/2409.05878v2.json", "claim_type": "sequential", "item1": "6(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05878v2_figure_6(a).png", "item2_path": "./SciVer/images/2409.05878v2_figure_6(b).png", "section": [ "3.4" ], "request_id": 426, "origin_statement": "In ML-1M pruning, Jumanji has the thickest edge to Toy Story; given CF-KAN’s genre-based tracing, the Anime model similarly shows the Demon Slayer series strongly influences the recommendation of Demon Slayer the Movie: Mugen Train.", "perturbed_statement": "In ML-1M pruning, Grumpier Old Men has the thickest edge to Toy Story; given CF-KAN’s genre-based tracing, the Anime model similarly shows that Kimi no Na wa strongly influences recommendation of Demon Slayer the Movie: Mugen Train.", "perturbed_explanation": "The perturbation is wrong because the ML-1M visualization clearly shows Jumanji, not Grumpier Old Men, as having the thickest connection to Toy Story. Likewise, the Anime model’s explanation traces from the Demon Slayer series, not from Kimi no Na wa, to the movie recommendation.", "claim": "In ML-1M pruning, Grumpier Old Men has the thickest edge to Toy Story; given CF-KAN’s genre-based tracing, the Anime model similarly shows that Kimi no Na wa strongly influences recommendation of Demon Slayer the Movie: Mugen Train.", "label": false }, { "paperid": "2411.07140v2", "paper_path": "./SciVer/papers/2411.07140v2.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07140v2_figure_3.png", "item2_path": "./SciVer/images/2411.07140v2-Table3-1.png", "section": [ "3.2" ], "request_id": 430, "origin_statement": "Because o1-preview’s overall correctness (CO) is 63.8, exceeding GLM-4-Plus’s 58.7 (Table 3), its ETAS CO of 70.4 also surpasses GLM-4-Plus’s 64.1 (Figure 3).", "perturbed_statement": "Because o1-preview’s overall CO is 63.8, exceeding GLM-4-Plus’s 58.7 (Table 3), its ETAS CO of 70.4 also surpasses GLM-4-Plus’s 72.4 (Figure 3).", "perturbed_explanation": "The perturbed statement misreads the ETAS CO for GLM-4-Plus as 72.4. In Figure 3 the ETAS CO for GLM-4-Plus is actually 64.1, not 72.4, so o1-preview’s 70.4 does not exceed GLM-4-Plus’s ETAS CO.", "claim": "Because o1-preview’s overall CO is 63.8, exceeding GLM-4-Plus’s 58.7 (Table 3), its ETAS CO of 70.4 also surpasses GLM-4-Plus’s 72.4 (Figure 3).", "label": false }, { "paperid": "2410.22046v2", "paper_path": "./SciVer/papers/2410.22046v2.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.22046v2-Table4-1.png", "item2_path": "./SciVer/images/2410.22046v2-Table5-1.png", "section": [ "4.1.2" ], "request_id": 431, "origin_statement": "Observing that chord accuracy drops from 61.23% in the shortest bin to 58.44% in the medium bin, the corresponding note accuracy only slightly decreases from 75.85% to 74.80%, demonstrating the model’s robustness in note-level predictions even as chord-level accuracy declines.", "perturbed_statement": "Observing that chord accuracy drops from 61.23% in the shortest bin to 53.46% in the medium bin, the corresponding note accuracy plunges from 75.85% to 72.28%, indicating note predictions degrade as sharply as chords.", "perturbed_explanation": "The perturbed claim mislabels the medium-length bin values. In Table 4, the medium bin (50.8–76.2) shows a chord accuracy of 58.44%, not 53.46% (which is for the longest bin). Likewise, Table 5 reports 74.80% note accuracy for the medium bin, not 72.28% (which again is for the longest bin).", "claim": "Observing that chord accuracy drops from 61.23% in the shortest bin to 53.46% in the medium bin, the corresponding note accuracy plunges from 75.85% to 72.28%, indicating note predictions degrade as sharply as chords.", "label": false }, { "paperid": "2409.04290v1", "paper_path": "./SciVer/papers/2409.04290v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04290v1_figure_3.png", "item2_path": "./SciVer/images/2409.04290v1_figure_4.png", "section": [ "4.1.4" ], "request_id": 440, "origin_statement": "Since CoxKAN prunes ε1 and ε2 in the ‘difficult’ dataset (Fig3d) and Fig4a shows a symmetric wedge shape around x2=0, we infer θ(x) depends on |x2| rather than signed x2.", "perturbed_statement": "Since CoxKAN prunes x2 and ε1 in the ‘difficult’ dataset (Fig3d), only x1 remains; Fig4a’s monotonic increase along x1 implies θ(x)=tanh(5 log(x1)).", "perturbed_explanation": "This is wrong because in Fig3d the model actually prunes only ε1 and ε2, not x2—both x1 and x2 feed into the final operator. Moreover, Fig4a clearly shows variation with x2 (the symmetric wedge around x2=0), so θ cannot depend solely on x1.", "claim": "Since CoxKAN prunes x2 and ε1 in the ‘difficult’ dataset (Fig3d), only x1 remains; Fig4a’s monotonic increase along x1 implies θ(x)=tanh(5 log(x1)).", "label": false }, { "paperid": "2409.07019v1", "paper_path": "./SciVer/papers/2409.07019v1.json", "claim_type": "sequential", "item1": "6", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.07019v1_figure_6.png", "item2_path": "./SciVer/images/2409.07019v1-Table6-1.png", "section": [ "4" ], "request_id": 442, "origin_statement": "Table 6 shows a significant RV slope (γ̇ = 0.00239 m/s/day). This long-term acceleration cannot be explained by K2-2 b’s ~10 M⊕ mass (Figure 6 places it near the 50% H₂O track), implying the slope arises from an additional outer companion.", "perturbed_statement": "Table 6 shows an RV slope of γ̇ = 0.000239 ± 0.00039 m/s/day. This acceleration is consistent with zero and can be explained by K2-2 b’s ~10 M⊕ mass (per Figure 6), negating the need for an additional companion.", "perturbed_explanation": "This is false because Table 6 actually lists γ̇ = 0.00239 ± 0.00039 m/s/day (not 0.000239), making the RV slope significant at >6σ. Such a strong, non-zero slope cannot be produced by K2-2 b’s ∼10 M⊕ orbit alone, so an additional outer companion is required.", "claim": "Table 6 shows an RV slope of γ̇ = 0.000239 ± 0.00039 m/s/day. This acceleration is consistent with zero and can be explained by K2-2 b’s ~10 M⊕ mass (per Figure 6), negating the need for an additional companion.", "label": false }, { "paperid": "2411.01739v2", "paper_path": "./SciVer/papers/2411.01739v2.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.01739v2-Table5-1.png", "item2_path": "./SciVer/images/2411.01739v2-Table6-1.png", "section": [ "5.5" ], "request_id": 446, "origin_statement": "Table5 shows GeM fusion achieves an average accuracy of 89.21 on Split-Clothing. Table6 shows training with all loss functions also yields 89.21. Thus, GeM prompt fusion performance matches the performance obtained by combining all loss terms.", "perturbed_statement": "Table5 shows Mean pooling achieves 87.80 average accuracy on Split-Clothing. Table6 shows training with all loss functions also yields 87.80. Thus, Mean pooling performance matches the performance obtained by combining all loss terms.", "perturbed_explanation": "This is incorrect because Table6 indicates that combining all loss terms yields an average accuracy of 89.21, not 87.80 as stated in the perturbed claim.", "claim": "Table5 shows Mean pooling achieves 87.80 average accuracy on Split-Clothing. Table6 shows training with all loss functions also yields 87.80. Thus, Mean pooling performance matches the performance obtained by combining all loss terms.", "label": false }, { "paperid": "2411.01537v1", "paper_path": "./SciVer/papers/2411.01537v1.json", "claim_type": "sequential", "item1": "5(c)", "item2": "5(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01537v1_figure_5(c).png", "item2_path": "./SciVer/images/2411.01537v1_figure_5(d).png", "section": [ "4.6" ], "request_id": 447, "origin_statement": "Image (c) shows LinRec+SASRec layer 1 distributes attention across both recent and distant items. Building on that, image (d) displays an even smoother spread of attention scores across the entire sequence, indicating layer 2 further enhances capture of long-term dependencies.", "perturbed_statement": "Image (c) shows LinRec+SASRec layer 1 concentrates attention almost exclusively on the first five recent items. Based on this, image (d) retains peaks at the very beginning, suggesting layer 2 fails to improve long-term attention.", "perturbed_explanation": "The perturbation is wrong because image (c) actually exhibits a relatively smooth attention pattern across the whole sequence, not an exclusive focus on the first five items. Moreover, image (d) further smooths attention over both early and late items, rather than retaining a peak at the start.", "claim": "Image (c) shows LinRec+SASRec layer 1 concentrates attention almost exclusively on the first five recent items. Based on this, image (d) retains peaks at the very beginning, suggesting layer 2 fails to improve long-term attention.", "label": false }, { "paperid": "2411.09586v1", "paper_path": "./SciVer/papers/2411.09586v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09586v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.09586v1_figure_4(b).png", "section": [ "5.1", "6.3" ], "request_id": 451, "origin_statement": "Districts in southern Peru shaded dark red on the mining output map (mining output >1,000,000 USD p.c.) also appear dark red on the windfalls map, implying windfalls exceed 300 USD p.c. in those high-output areas.", "perturbed_statement": "Districts shaded orange on the mining output map (20,000–100,000 USD p.c.) also appear dark red on the windfalls map, implying these medium-output areas received windfalls exceeding 300 USD p.c.", "perturbed_explanation": "The perturbed claim misidentifies orange-shaded districts (20,000–100,000 USD p.c.) as receiving dark red windfalls (>300 USD p.c.). In the windfalls map, those same orange districts are mostly yellow (0–50 USD) or brown (150–300 USD), not dark red, so they did not exceed 300 USD p.c.", "claim": "Districts shaded orange on the mining output map (20,000–100,000 USD p.c.) also appear dark red on the windfalls map, implying these medium-output areas received windfalls exceeding 300 USD p.c.", "label": false }, { "paperid": "2410.11716v1", "paper_path": "./SciVer/papers/2410.11716v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.11716v1_figure_1.png", "item2_path": "./SciVer/images/2410.11716v1_figure_2.png", "section": [ "4.1" ], "request_id": 453, "origin_statement": "Figure 1 shows the beta model peaks at about 0.8 mean response around dose 25; setting pk–1=0.8, Figure 2 reports the population-based test has ~0.6 power, whereas all residual-based randomization tests achieve ~0.95 power at pk=0.8.", "perturbed_statement": "Figure 1 shows the beta model peaks at about 0.85 mean response around dose 25; setting pk–1=0.85, Figure 2 reports the population-based test has ~0.65 power, whereas all residual-based randomization tests achieve ~0.98 power at pk=0.85.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 1’s beta model actually peaks at ~0.8 (not 0.85) and Figure 2 shows population-based test power ≈0.6 and residual-based power ≈0.95 at pk=0.8; there are no data at pk=0.85 showing 0.65/0.98 power.", "claim": "Figure 1 shows the beta model peaks at about 0.85 mean response around dose 25; setting pk–1=0.85, Figure 2 reports the population-based test has ~0.65 power, whereas all residual-based randomization tests achieve ~0.98 power at pk=0.85.", "label": false }, { "paperid": "2410.06442v1", "paper_path": "./SciVer/papers/2410.06442v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.06442v1_figure_3(b).png", "item2_path": "./SciVer/images/2410.06442v1_figure_3(c).png", "section": [ "4.2.3" ], "request_id": 457, "origin_statement": "In diffusion (b), SFM’s L2 error stays below 0.05 even for ν>20 extrapolation, outperforming Hyper-LR-PINN’s >0.8 error. In reaction (c), SFM’s error remains under 0.15 across all ρ, so SFMs consistently deliver lower extrapolation error than both baselines.", "perturbed_statement": "In diffusion (b), SFM’s L2 error stays below 0.3 even for ν>20 extrapolation, outperforming Hyper-LR-PINN’s >0.8 error. In reaction (c), SFM’s error remains under 0.05 across all ρ, so SFMs consistently deliver lower extrapolation error than both baselines.", "perturbed_explanation": "This is incorrect because in the diffusion plot (b) the SFM error (the bottom curve) is actually under 0.05, not under 0.3, and in the reaction plot (c) the SFM error hovers around 0.1–0.15, not below 0.05, as shown by the pink line in the shaded regions.", "claim": "In diffusion (b), SFM’s L2 error stays below 0.3 even for ν>20 extrapolation, outperforming Hyper-LR-PINN’s >0.8 error. In reaction (c), SFM’s error remains under 0.05 across all ρ, so SFMs consistently deliver lower extrapolation error than both baselines.", "label": false }, { "paperid": "2410.05522v1", "paper_path": "./SciVer/papers/2410.05522v1.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.05522v1_figure_6.png", "item2_path": "./SciVer/images/2410.05522v1_figure_7.png", "section": [ "3.1" ], "request_id": 458, "origin_statement": "The worst-case Combined Set testing mesh in Fig.6 has an R² of 0.551, which still exceeds the lowest out-of-distribution R² of about –0.25 shown in Fig.7’s boxplot.", "perturbed_statement": "The worst-case Combined Set testing mesh in Fig.6 has an R² of 0.551, which is below the minimum out-of-distribution R² of about 0.13 shown in Fig.7’s boxplot.", "perturbed_explanation": "Fig.7’s out-of-distribution R² distribution actually extends down to around –0.25, not up to 0.13, so 0.551 is greater than the true minimum R², making the perturbed claim incorrect.", "claim": "The worst-case Combined Set testing mesh in Fig.6 has an R² of 0.551, which is below the minimum out-of-distribution R² of about 0.13 shown in Fig.7’s boxplot.", "label": false }, { "paperid": "2409.17201v2", "paper_path": "./SciVer/papers/2409.17201v2.json", "claim_type": "sequential", "item1": "2(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17201v2_figure_2(a).png", "item2_path": "./SciVer/images/2409.17201v2_figure_3(b).png", "section": [ "6.2" ], "request_id": 459, "origin_statement": "At iteration 100 in Fig 2, SIFL M1 attains ≈0.97 accuracy with MLP-SGD; then in Fig 3, its CNN2-Momentum loss on Fashion-MNIST converges to ≈1.63 by iteration 60. Hence, SIFL M1 maintains high accuracy and low loss across both MLP and CNN scenarios.", "perturbed_statement": "At iteration 100 in Fig 2, SIFL M1 attains ≈0.99 accuracy with MLP-SGD; then in Fig 3, its CNN2-Momentum loss on Fashion-MNIST converges to ≈1.8 by iteration 60. Hence, SIFL M1 maintains high accuracy and low loss across both MLP and CNN scenarios.", "perturbed_explanation": "Fig 2’s MLP-SGD curves show SIFL M1 reaches about 0.97 accuracy at iteration 100, not 0.99. Fig 3’s CNN2-Momentum loss plot shows it converges near 1.63 by iteration 60, not 1.8. Thus the perturbed values contradict the actual plotted data.", "claim": "At iteration 100 in Fig 2, SIFL M1 attains ≈0.99 accuracy with MLP-SGD; then in Fig 3, its CNN2-Momentum loss on Fashion-MNIST converges to ≈1.8 by iteration 60. Hence, SIFL M1 maintains high accuracy and low loss across both MLP and CNN scenarios.", "label": false }, { "paperid": "2410.14508v1", "paper_path": "./SciVer/papers/2410.14508v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.14508v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.14508v1_figure_3(b).png", "section": [ "5.3" ], "request_id": 460, "origin_statement": "Since 54.8% of participants are male and 73.8% are aged 18-30, by the pigeonhole principle, at least 28.6% must be male and aged 18-30.", "perturbed_statement": "Since 54.8% of participants are male and 80% are aged 18-30, by the pigeonhole principle, at least 34.8% must be male and aged 18-30.", "perturbed_explanation": "The perturbed statement misreads the age distribution: the age chart reports 73.8% aged 18-30, not 80%. Because the premise '80% are aged 18-30' contradicts the actual 73.8%, the calculated overlap of 34.8% is invalid.", "claim": "Since 54.8% of participants are male and 80% are aged 18-30, by the pigeonhole principle, at least 34.8% must be male and aged 18-30.", "label": false }, { "paperid": "2410.12261v1", "paper_path": "./SciVer/papers/2410.12261v1.json", "claim_type": "sequential", "item1": "4(d)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.12261v1_figure_4(d).png", "item2_path": "./SciVer/images/2410.12261v1-Table3-1.png", "section": [ "4.2" ], "request_id": 473, "origin_statement": "At testing patch size 32, GECCO’s AUC-ROC reaches its maximum (~0.978) in Figure 4(d); based on Table 3, CATCH then achieves 0.970 AUC on GECCO, surpassing the Channel Dependent baseline (0.955).", "perturbed_statement": "At testing patch size 64, GECCO’s AUC-ROC peaks (~0.980) in Figure 4(d); leveraging this, CATCH delivers 0.970 AUC on GECCO in Table 3, outperforming the Channel Independent baseline (0.912).", "perturbed_explanation": "The perturbation is incorrect because Figure 4(d) actually shows GECCO’s highest AUC-ROC at patch size 32 (not 64), and Table 3 reports the Channel Dependent baseline on GECCO as 0.955, not the Channel Independent score of 0.912.", "claim": "At testing patch size 64, GECCO’s AUC-ROC peaks (~0.980) in Figure 4(d); leveraging this, CATCH delivers 0.970 AUC on GECCO in Table 3, outperforming the Channel Independent baseline (0.912).", "label": false }, { "paperid": "2411.14034v1", "paper_path": "./SciVer/papers/2411.14034v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14034v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.14034v1_figure_4.png", "section": [ "5.1" ], "request_id": 474, "origin_statement": "Because Figure 3 shows clusters 2 and 3 overlapping around PCA1 ≈ 0.1–0.3 and PCA2 ≈ –0.1–0.4, the RF model in Figure 4 exhibits increased conductivity prediction spread for actual log₁₀(σ) between –5 and 0, indicating overlapping chemical spaces raise prediction variance.", "perturbed_statement": "Because Figure 3 shows clusters 1 and 4 overlapping around PCA1 ≈ 0.1–0.3 and PCA2 ≈ –0.1–0.4, the RF model in Figure 4 exhibits increased conductivity prediction spread for actual log₁₀(σ) between –5 and 0, indicating overlapping chemical spaces raise prediction variance.", "perturbed_explanation": "This is incorrect because in Figure 3 cluster 1 (magenta, centered near PCA1 ≈ –0.4) and cluster 4 (light green) do not overlap at PCA1 ≈ 0.1–0.3 and PCA2 ≈ –0.1–0.4. The actual overlapping clusters in that region are clusters 2 and 3, so the premise fails.", "claim": "Because Figure 3 shows clusters 1 and 4 overlapping around PCA1 ≈ 0.1–0.3 and PCA2 ≈ –0.1–0.4, the RF model in Figure 4 exhibits increased conductivity prediction spread for actual log₁₀(σ) between –5 and 0, indicating overlapping chemical spaces raise prediction variance.", "label": false }, { "paperid": "2409.19028v1", "paper_path": "./SciVer/papers/2409.19028v1.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19028v1_figure_1.png", "item2_path": "./SciVer/images/2409.19028v1-Table1-1.png", "section": [ "5.1" ], "request_id": 477, "origin_statement": "Tinyllama’s mean score is the lowest (1.87), and Figure 1 shows it has the highest count of 1.0 scores, so Tinyllama produced the most systematically wrong explanations compared to Gpt3.5 and Llama2.", "perturbed_statement": "Gpt3.5’s mean score is the lowest (2.50), and Figure 1 shows it has the highest count of 1.0 scores, so Gpt3.5 produced the most systematically wrong explanations compared to Llama2 and Tinyllama.", "perturbed_explanation": "This is wrong because Table 1 shows Gpt3.5’s mean (2.50) is higher than Tinyllama’s (1.87), and Figure 1 indicates Tinyllama (≈90) has far more 1.0 scores than Gpt3.5 (≈35), so Gpt3.5 does not have the lowest mean or the most 1.0 scores.", "claim": "Gpt3.5’s mean score is the lowest (2.50), and Figure 1 shows it has the highest count of 1.0 scores, so Gpt3.5 produced the most systematically wrong explanations compared to Llama2 and Tinyllama.", "label": false }, { "paperid": "2411.01076v2", "paper_path": "./SciVer/papers/2411.01076v2.json", "claim_type": "sequential", "item1": "2", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01076v2_figure_2.png", "item2_path": "./SciVer/images/2411.01076v2_figure_3(b).png", "section": [ "4.2" ], "request_id": 483, "origin_statement": "Figure3’s reproducible spikes of 4 tokens at iteration 6, 6 tokens at iteration 32, and 6 tokens at iteration 50 indicate stable token-per-iteration patterns; using Figure2’s methodology, an attacker can match observed packet‐size spikes in an encrypted stream to uniquely identify the prompt.", "perturbed_statement": "Figure3’s reproducible spikes of 4 tokens at iteration 8, 6 tokens at iteration 28, and 6 tokens at iteration 50 indicate stable token-per-iteration patterns; using Figure2’s methodology, an attacker can match observed packet‐size spikes in an encrypted stream to uniquely identify the prompt.", "perturbed_explanation": "This is wrong because Figure 3 actually shows the first reproducible spike of 4 tokens at iteration 6 (not iteration 8), and the large blue-region spike of 6 tokens occurs around iteration 32 (not iteration 28). These mismatched iteration IDs contradict the plotted data, invalidating the fingerprint claim.", "claim": "Figure3’s reproducible spikes of 4 tokens at iteration 8, 6 tokens at iteration 28, and 6 tokens at iteration 50 indicate stable token-per-iteration patterns; using Figure2’s methodology, an attacker can match observed packet‐size spikes in an encrypted stream to uniquely identify the prompt.", "label": false }, { "paperid": "2410.23893v3", "paper_path": "./SciVer/papers/2410.23893v3.json", "claim_type": "sequential", "item1": "3(a)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23893v3_figure_3(a).png", "item2_path": "./SciVer/images/2410.23893v3-Table1-1.png", "section": [ "3.1" ], "request_id": 484, "origin_statement": "DiffBatt achieves the lowest RMSE of 125 cycles on the SNL dataset (Table 1), and in Fig. 3 the cyan points (low σ_RUL) tightly cluster along the diagonal, indicating that low‐uncertainty predictions correlate with high accuracy in RUL estimation.", "perturbed_statement": "DiffBatt achieves the lowest RMSE of 202 cycles on the MIX dataset (Table 1), and in Fig. 3 the magenta points (high σ_RUL) tightly cluster along the diagonal, indicating that high‐uncertainty predictions correlate with high accuracy in RUL estimation.", "perturbed_explanation": "The perturbation is incorrect because Table 1 shows that DiffBatt’s RMSE on MIX is 202 but the best benchmark RMSE is 197 (Random Forest), so 202 is not the lowest. Additionally, in Fig. 3 the magenta points (high σ_RUL) are scattered away from the diagonal; it is the cyan points (low σ_RUL) that cluster tightly along the reference line.", "claim": "DiffBatt achieves the lowest RMSE of 202 cycles on the MIX dataset (Table 1), and in Fig. 3 the magenta points (high σ_RUL) tightly cluster along the diagonal, indicating that high‐uncertainty predictions correlate with high accuracy in RUL estimation.", "label": false }, { "paperid": "2409.05224v1", "paper_path": "./SciVer/papers/2409.05224v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.05224v1-Table1-1.png", "item2_path": "./SciVer/images/2409.05224v1-Table2-1.png", "section": [ "6.3" ], "request_id": 489, "origin_statement": "Table 1 shows that the 2;2;8+WL+GPS(0.9) method achieves 33.13 spBLEU on H2H. Table 2 shows that 2;2;8;8+WL+GPS(0.9) achieves 29.92 spBLEU on H2H. Thus adding a low-resource module reduces H2H performance by 3.21 spBLEU.", "perturbed_statement": "Table 1 shows that the 2;2;8+WL+GPS(0.9) method achieves 33.13 spBLEU on H2H. Table 2 shows that 2;2;8;8+WL+GPS(0.9) achieves 32.92 spBLEU on H2H. Thus adding a low-resource module reduces H2H performance by only 0.21 spBLEU.", "perturbed_explanation": "The perturbed claim misreports Table 2’s H2H spBLEU for 2;2;8;8+WL+GPS(0.9) as 32.92, but the actual value in Table 2 is 29.92. Therefore the stated 32.92 score and the computed 0.21-point reduction are incorrect.", "claim": "Table 1 shows that the 2;2;8+WL+GPS(0.9) method achieves 33.13 spBLEU on H2H. Table 2 shows that 2;2;8;8+WL+GPS(0.9) achieves 32.92 spBLEU on H2H. Thus adding a low-resource module reduces H2H performance by only 0.21 spBLEU.", "label": false }, { "paperid": "2409.11088v1", "paper_path": "./SciVer/papers/2409.11088v1.json", "claim_type": "sequential", "item1": "1", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11088v1_figure_1.png", "item2_path": "./SciVer/images/2409.11088v1_figure_5.png", "section": [ "3.1" ], "request_id": 502, "origin_statement": "Fig.1’s z=3 slice shows filaments with typical Lyα surface brightness ≃1×10^-20 erg s^-1 cm^-2 arcsec^-2. Fig.5 reveals that at z=2 a SB cut of 1×10^-20 selects low-mass halos and halo surroundings but not diffuse filaments. Thus at z=3 the same threshold would detect only dense halo regions, missing true filamentary structures.", "perturbed_statement": "Fig.1’s z=3 slice shows filaments with typical Lyα surface brightness ≃1×10^-19 erg s^-1 cm^-2 arcsec^-2. Fig.5 reveals that at z=2 a SB cut of 1×10^-20 displays clear diffuse filaments. Hence at z=3 the same threshold would uncover all filamentary structures.", "perturbed_explanation": "The perturbation misreads Fig.1’s color bar and filament shading: the typical filament SB in the white-boxed region is around 10^-20 (orange), not 10^-19. It also misinterprets Fig.5: the SB>10^-20 panel highlights only halos and their surroundings, not the diffuse filaments (diffuse structures appear only at the lower SB>10^-21 threshold). Hence a 10^-20 cut would not reveal all filaments at z=3.", "claim": "Fig.1’s z=3 slice shows filaments with typical Lyα surface brightness ≃1×10^-19 erg s^-1 cm^-2 arcsec^-2. Fig.5 reveals that at z=2 a SB cut of 1×10^-20 displays clear diffuse filaments. Hence at z=3 the same threshold would uncover all filamentary structures.", "label": false }, { "paperid": "2411.09410v2", "paper_path": "./SciVer/papers/2411.09410v2.json", "claim_type": "sequential", "item1": "4(b)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09410v2_figure_4(b).png", "item2_path": "./SciVer/images/2411.09410v2_figure_4(c).png", "section": [ "4.4" ], "request_id": 515, "origin_statement": "Since the Grocery dataset yields its highest Recall@50 (0.2856) at p = –5, and the Beauty dataset reaches its highest Recall@50 (0.3636) at p = –10, the larger Beauty dataset benefits from a smaller preference p than the smaller Grocery dataset.", "perturbed_statement": "Since the Grocery dataset yields its highest Recall@50 at p = –7, and the Beauty dataset reaches its highest Recall@50 at p = –2, the larger Beauty dataset benefits from a larger preference p than the smaller Grocery dataset.", "perturbed_explanation": "Both premises are incorrect. In the Grocery plot, the peak Recall@50 (0.2856) actually occurs at p = –5, not p = –7 (where Recall@50 is only 0.2843). In the Beauty plot, the maximum Recall@50 (0.3636) occurs at p = –10, not at p = –2 (where Recall@50 is 0.3422). Therefore the perturbed claim contradicts the data points in both plots.", "claim": "Since the Grocery dataset yields its highest Recall@50 at p = –7, and the Beauty dataset reaches its highest Recall@50 at p = –2, the larger Beauty dataset benefits from a larger preference p than the smaller Grocery dataset.", "label": false }, { "paperid": "2409.10132v1", "paper_path": "./SciVer/papers/2409.10132v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10132v1_figure_1.png", "item2_path": "./SciVer/images/2409.10132v1_figure_2.png", "section": [ "1" ], "request_id": 516, "origin_statement": "Figure 1 shows StruEdit achieves the highest editing accuracy and lowest inference latency; from this, we infer that its structured output logic (Figure 2) directly removes outdated parametric knowledge, preventing hallucinations in multi-hop edits.", "perturbed_statement": "Figure 1 shows StruEdit achieves only moderate editing accuracy and the highest inference latency; therefore, its editing pipeline relies on adjusting natural-language reasoning steps (like ICE) to reduce hallucinations in multi-hop edits.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 1 actually places StruEdit at the top-right, indicating it has both the highest accuracy and the lowest latency—not moderate accuracy or highest latency. Moreover, Figure 2 clarifies that StruEdit uses structured output logic to remove parametric knowledge, rather than editing natural-language reasoning steps like ICE.", "claim": "Figure 1 shows StruEdit achieves only moderate editing accuracy and the highest inference latency; therefore, its editing pipeline relies on adjusting natural-language reasoning steps (like ICE) to reduce hallucinations in multi-hop edits.", "label": false }, { "paperid": "2411.06225v1", "paper_path": "./SciVer/papers/2411.06225v1.json", "claim_type": "sequential", "item1": "8", "item2": "8", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.06225v1_figure_8.png", "item2_path": "./SciVer/images/2411.06225v1-Table8-1.png", "section": [ "5.2" ], "request_id": 518, "origin_statement": "Figure 8 uses a spatial grid with Nₓ=Nᵧ=235, yielding d=110450. Table 8 reports that for d=110450 (N=512), RandNet-Parareal achieves Sₐₗg≈124.52. Hence the t=20 snapshot was computed about 125× faster than the fine solver.", "perturbed_statement": "Figure 8 uses a spatial grid with Nₓ=Nᵧ=235, yielding d=110450. Table 8 reports that for d=110450 (N=512), RandNet-Parareal achieves Sₐₗg≈97.40. Hence the t=20 snapshot was computed about 97× faster than the fine solver.", "perturbed_explanation": "The perturbation misreads the speed-up: Table 8 shows Sₐₗg=124.52 for d=110450, not 97.40. The 97.40× speed-up corresponds to the d=25538 row, so attributing it to d=110450 is incorrect.", "claim": "Figure 8 uses a spatial grid with Nₓ=Nᵧ=235, yielding d=110450. Table 8 reports that for d=110450 (N=512), RandNet-Parareal achieves Sₐₗg≈97.40. Hence the t=20 snapshot was computed about 97× faster than the fine solver.", "label": false }, { "paperid": "2410.17831v1", "paper_path": "./SciVer/papers/2410.17831v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17831v1_figure_7(a).png", "item2_path": "./SciVer/images/2410.17831v1_figure_7(b).png", "section": [ "5.3" ], "request_id": 522, "origin_statement": "In the 3D view, CHOMP with one GPDF (red) climbs above z=2 m whereas PRM stays near z=2 m; the top view shows this red path veers more laterally around the cow obstacle than PRM, indicating extra clearance over ground contact.", "perturbed_statement": "In the 3D view, CHOMP with one GPDF (red) remains below z=1.5 m and in the top view it aligns more closely with the PRM trajectory, implying it maintains ground contact with minimal lateral deviation.", "perturbed_explanation": "This is incorrect because the 3D view clearly shows the red trajectory rising above z=2 m (not below 1.5 m), and the top view actually shows the red path detouring more laterally around obstacles rather than aligning tightly with the PRM path.", "claim": "In the 3D view, CHOMP with one GPDF (red) remains below z=1.5 m and in the top view it aligns more closely with the PRM trajectory, implying it maintains ground contact with minimal lateral deviation.", "label": false }, { "paperid": "2410.23701v1", "paper_path": "./SciVer/papers/2410.23701v1.json", "claim_type": "sequential", "item1": "18", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23701v1_figure_18.png", "item2_path": "./SciVer/images/2410.23701v1-Table2-1.png", "section": [ "1" ], "request_id": 524, "origin_statement": "Table 2 lists 4.3 K unique objects. Figure 18 shows 3.26 M total grasps. Hence, on average there are roughly 758 grasps per object.", "perturbed_statement": "Table 2 lists 5.3 K unique objects. Figure 18 shows 3.26 M total grasps. Hence, on average there are roughly 615 grasps per object.", "perturbed_explanation": "The perturbed statement erroneously uses 5.3 K objects—Table 2 actually reports 4.3 K objects for “Get a Grip (Ours)”. Using 5.3 K instead contradicts the context and yields an incorrect average grasps per object.", "claim": "Table 2 lists 5.3 K unique objects. Figure 18 shows 3.26 M total grasps. Hence, on average there are roughly 615 grasps per object.", "label": false }, { "paperid": "2409.00119v2", "paper_path": "./SciVer/papers/2409.00119v2.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.00119v2_figure_4.png", "item2_path": "./SciVer/images/2409.00119v2-Table5-1.png", "section": [ "4.1" ], "request_id": 531, "origin_statement": "Figure 4 (right) shows RoAd’s throughput on LLaMA-7B at batch size 16 is about 270 tokens/sec, and Table 5 shows RoAd₁ uses just 0.02% trainable parameters. Thus, RoAd₁ achieves roughly 2.7× higher throughput than LoRA while using minimal parameters.", "perturbed_statement": "Figure 4 (right) shows RoAd’s throughput on LLaMA-7B at batch size 16 is about 270 tokens/sec, and Table 5 shows RoAd₁ uses 0.2% trainable parameters. Thus, RoAd₁ achieves roughly 2.7× higher throughput than LoRA while using minimal parameters.", "perturbed_explanation": "This is incorrect because Table 5 reports that RoAd₁ uses 0.02% trainable parameters, not 0.2%.", "claim": "Figure 4 (right) shows RoAd’s throughput on LLaMA-7B at batch size 16 is about 270 tokens/sec, and Table 5 shows RoAd₁ uses 0.2% trainable parameters. Thus, RoAd₁ achieves roughly 2.7× higher throughput than LoRA while using minimal parameters.", "label": false }, { "paperid": "2410.05729v1", "paper_path": "./SciVer/papers/2410.05729v1.json", "claim_type": "sequential", "item1": "6(b)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.05729v1_figure_6(b).png", "item2_path": "./SciVer/images/2410.05729v1-Table4-1.png", "section": [ "4.3" ], "request_id": 532, "origin_statement": "The LRFT configuration with rank/output 35/128 produces the smallest model size (~41.8 MB) (Fig. 6). When combined with Ball Query Graph Construction (Table 4, row 8), this yields an RE of 1.67° and F1 of 94.35%, showcasing a compact yet accurate pipeline.", "perturbed_statement": "The LRFT configuration with rank/output 131/256 produces the smallest model size (~43.9 MB) (Fig. 6). When combined with Ball Query Graph Construction (Table 4, row 8), this yields an RE of 1.72° and F1 of 94.35%, showcasing a compact yet accurate pipeline.", "perturbed_explanation": "The perturbed statement misidentifies the smallest model size configuration: Fig. 6 shows rank/output 35/128 yields ~41.8 MB, not 131/256 (~43.9 MB). It also misattributes the RE for Ball Query Graph Construction: Table 4 row 8 reports RE=1.67°, not 1.72°.", "claim": "The LRFT configuration with rank/output 131/256 produces the smallest model size (~43.9 MB) (Fig. 6). When combined with Ball Query Graph Construction (Table 4, row 8), this yields an RE of 1.72° and F1 of 94.35%, showcasing a compact yet accurate pipeline.", "label": false }, { "paperid": "2411.05009v1", "paper_path": "./SciVer/papers/2411.05009v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.05009v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.05009v1-Table6-1.png", "section": [ "4.2" ], "request_id": 538, "origin_statement": "At 1,024,000 particles, Kokkos achieves ~15× speedup on 16 GPUs (Fig. 3). Table 6 shows Kokkos has 96% performance portability across GPUs. Therefore, Kokkos demonstrates near-ideal strong scaling and very high portability on large multi‐GPU runs.", "perturbed_statement": "At 1,024,000 particles, Kokkos achieves only ~8× speedup on 16 GPUs (Fig. 3). Table 6 shows Kokkos has 96% performance portability across GPUs. Therefore, Kokkos demonstrates moderate strong scaling and high portability on large multi‐GPU runs.", "perturbed_explanation": "The perturbed statement misreads Fig. 3: the green line for 1,024,000 particles at 16 GPUs reaches about 15× speedup, not 8×. This incorrect premise invalidates the conclusion about only moderate strong scaling.", "claim": "At 1,024,000 particles, Kokkos achieves only ~8× speedup on 16 GPUs (Fig. 3). Table 6 shows Kokkos has 96% performance portability across GPUs. Therefore, Kokkos demonstrates moderate strong scaling and high portability on large multi‐GPU runs.", "label": false }, { "paperid": "2411.17502v1", "paper_path": "./SciVer/papers/2411.17502v1.json", "claim_type": "sequential", "item1": "4", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17502v1_figure_4.png", "item2_path": "./SciVer/images/2411.17502v1_figure_6.png", "section": [ "4.1.x" ], "request_id": 548, "origin_statement": "Figure 4 shows that the building prediction network uses a numerical embedding layer. Figure 6 shows that numerical embeddings use quantile-based piecewise-linear bins plus a linear layer. Therefore, the building prediction network encodes numerical features via QL quantile-based piecewise-linear embeddings before prediction.", "perturbed_statement": "Figure 4 shows that the building prediction network uses a numerical embedding layer. Figure 6 shows that numerical embeddings use periodic functions with trainable frequencies plus a linear layer. Therefore, the building prediction network encodes numerical features via Fourier-based PLR periodic embeddings before prediction.", "perturbed_explanation": "Figure 6 actually describes QL embeddings based on empirical quantile bins and a linear layer, not periodic Fourier-based PLR mappings. There is no periodic function or trainable frequency step in the QL embedding shown, so the claim of PLR periodic embeddings contradicts the context.", "claim": "Figure 4 shows that the building prediction network uses a numerical embedding layer. Figure 6 shows that numerical embeddings use periodic functions with trainable frequencies plus a linear layer. Therefore, the building prediction network encodes numerical features via Fourier-based PLR periodic embeddings before prediction.", "label": false }, { "paperid": "2409.06624v1", "paper_path": "./SciVer/papers/2409.06624v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06624v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.06624v1_figure_2(d).png", "section": [ "5.1" ], "request_id": 552, "origin_statement": "Between 5K and 10K CPT steps, C-Eval rises from 67.43 to 67.85, indicating improved disciplinary knowledge, and concurrently, HumanEval climbs from 27.44 to 29.27, demonstrating enhanced coding ability.", "perturbed_statement": "Between 5K and 10K CPT steps, C-Eval drops from 67.43 to 67.85, indicating deteriorated disciplinary knowledge, and concurrently, HumanEval falls from 29.27 to 27.44, showing coding skills decline.", "perturbed_explanation": "Both claimed decreases contradict the plotted metrics: the C-Eval score actually increases from 67.43 at 5K steps to 67.85 at 10K steps, and the HumanEval score actually increases from 27.44 to 29.27 over the same interval.", "claim": "Between 5K and 10K CPT steps, C-Eval drops from 67.43 to 67.85, indicating deteriorated disciplinary knowledge, and concurrently, HumanEval falls from 29.27 to 27.44, showing coding skills decline.", "label": false }, { "paperid": "2411.08343v1", "paper_path": "./SciVer/papers/2411.08343v1.json", "claim_type": "sequential", "item1": "9", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.08343v1_figure_9.png", "item2_path": "./SciVer/images/2411.08343v1-Table4-1.png", "section": [ "4" ], "request_id": 557, "origin_statement": "Figure 9 shows the highest average beta magnitude for “Index in sentence” in the temporal lobe. Table 4 identifies “Index in sentence” as a language feature. Thus temporal electrodes are most strongly modulated by word position in a sentence.", "perturbed_statement": "Figure 9 shows the highest average beta magnitude for “Index in sentence” in the insula. Table 4 identifies “Index in sentence” as a language feature. Thus insula electrodes are most strongly modulated by word position in a sentence.", "perturbed_explanation": "The claim is wrong because in Figure 9 the bar for “Index in sentence” is highest in the Temporal region, not the Insula. The insula’s coefficient is lower, so insula electrodes are not the most strongly modulated by word position.", "claim": "Figure 9 shows the highest average beta magnitude for “Index in sentence” in the insula. Table 4 identifies “Index in sentence” as a language feature. Thus insula electrodes are most strongly modulated by word position in a sentence.", "label": false }, { "paperid": "2411.09443v1", "paper_path": "./SciVer/papers/2411.09443v1.json", "claim_type": "sequential", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09443v1_figure_8.png", "item2_path": "./SciVer/images/2411.09443v1_figure_9.png", "section": [ "4.3.2" ], "request_id": 558, "origin_statement": "Fig. 8 shows that 25% of its 33 associated systems are classified as AGN—the single largest class—while Fig. 9 shows that 76% of its 107 offset systems are classified as star-forming—the single largest class; thus, associated matches are AGN-dominated and offset matches are star-formation-dominated.", "perturbed_statement": "Fig. 8 shows that only 15% of its 33 associated systems are classified as AGN—the single largest class—while Fig. 9 shows that 76% of its 107 offset systems are classified as star-forming—the single largest class; thus, associated matches are AGN-dominated and offset matches are star-formation-dominated.", "perturbed_explanation": "The caption for Figure 8 states that 25% of the associated systems are AGN, not 15%. Moreover, if only 15% were AGN, then both the star-forming and composite categories (each exceeding 15%) would outnumber AGN, so AGN would no longer be the single largest class, contradicting the context.", "claim": "Fig. 8 shows that only 15% of its 33 associated systems are classified as AGN—the single largest class—while Fig. 9 shows that 76% of its 107 offset systems are classified as star-forming—the single largest class; thus, associated matches are AGN-dominated and offset matches are star-formation-dominated.", "label": false }, { "paperid": "2411.03788v1", "paper_path": "./SciVer/papers/2411.03788v1.json", "claim_type": "sequential", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03788v1_figure_6.png", "item2_path": "./SciVer/images/2411.03788v1-Table2-1.png", "section": [ "5.2.1", "5.2" ], "request_id": 560, "origin_statement": "Table 2 gives λ₁=1.28×10⁻² s⁻¹ and λ₆=2.60 s⁻¹, so family 1 is long-lived. Fig 6 shows family 1’s adv-only profile (orange) is strongly flattened by diffusivity (blue/green), while family 6’s profile shows only slight smoothing, confirming long-lived precursors are most affected.", "perturbed_statement": "Table 2 gives λ₁=1.28×10⁻² s⁻¹ and λ₆=2.60 s⁻¹, so family 6 is long-lived. Fig 6 shows family 6’s adv-only profile is strongly flattened by diffusivity, while family 1’s shows only slight smoothing, confirming short-lived precursors are most unaffected.", "perturbed_explanation": "This is incorrect because Table 2 actually lists λ₆=2.60 s⁻¹ as the largest decay constant, making family 6 the shortest-lived, not long-lived. Moreover, Fig 6 shows family 6’s curve is only minimally smoothed by diffusivity, not strongly flattened.", "claim": "Table 2 gives λ₁=1.28×10⁻² s⁻¹ and λ₆=2.60 s⁻¹, so family 6 is long-lived. Fig 6 shows family 6’s adv-only profile is strongly flattened by diffusivity, while family 1’s shows only slight smoothing, confirming short-lived precursors are most unaffected.", "label": false }, { "paperid": "2409.20002v2", "paper_path": "./SciVer/papers/2409.20002v2.json", "claim_type": "sequential", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.20002v2_figure_5.png", "item2_path": "./SciVer/images/2409.20002v2-Table1-1.png", "section": [ "4.3" ], "request_id": 563, "origin_statement": "Figure 5(a) at threshold 0.8 shows a true positive rate of about 0.95 at a false positive rate below 0.1. Table 1’s healthcare template includes both name and medical condition. Thus, PNA can recover both private attributes with over 95% accuracy.", "perturbed_statement": "Figure 5(a) at threshold 0.8 shows a true positive rate of only 0.8 at a false positive rate below 0.1. Table 1’s healthcare template includes both name and medical condition. Thus, PNA can recover both private attributes with only 80% accuracy.", "perturbed_explanation": "The perturbation is incorrect because Figure 5(a) actually shows a TPR of around 0.95 at threshold 0.8 (not 0.8). Therefore, the recovery accuracy is over 95%, not just 80%, contradicting the perturbed claim.", "claim": "Figure 5(a) at threshold 0.8 shows a true positive rate of only 0.8 at a false positive rate below 0.1. Table 1’s healthcare template includes both name and medical condition. Thus, PNA can recover both private attributes with only 80% accuracy.", "label": false }, { "paperid": "2410.02799v1", "paper_path": "./SciVer/papers/2410.02799v1.json", "claim_type": "sequential", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02799v1_figure_3.png", "item2_path": "./SciVer/images/2410.02799v1-Table5-1.png", "section": [ "3.3.x" ], "request_id": 566, "origin_statement": "Table 5 reports that Black recipients have a subdistribution hazard ratio exp(δ) of 2.359, indicating over twice the risk of graft rejection; accordingly, Figure 3 shows Black recipients’ cumulative incidence reaches about 0.21 by 4000 days, the highest among groups.", "perturbed_statement": "Table 5 reports that Hispanic recipients have a subdistribution hazard ratio exp(δ) of 2.359, indicating over twice the risk of graft rejection; accordingly, Figure 3 shows Hispanic recipients’ cumulative incidence reaches about 0.21 by 4000 days, the highest among groups.", "perturbed_explanation": "The perturbation is incorrect because Table 5 assigns exp(δ)=2.359 to Black recipients, not Hispanic (Hispanic exp(δ)=1.313). Moreover, Figure 3 shows Hispanic cumulative incidence peaks near 0.16, not 0.21, so they do not have the highest incidence.", "claim": "Table 5 reports that Hispanic recipients have a subdistribution hazard ratio exp(δ) of 2.359, indicating over twice the risk of graft rejection; accordingly, Figure 3 shows Hispanic recipients’ cumulative incidence reaches about 0.21 by 4000 days, the highest among groups.", "label": false }, { "paperid": "2411.06175v1", "paper_path": "./SciVer/papers/2411.06175v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.06175v1_figure_3(b).png", "item2_path": "./SciVer/images/2411.06175v1-Table6-1.png", "section": [ "6" ], "request_id": 577, "origin_statement": "For WoS documents originally around 250 words (Figure 3), RAG augmentation increases their length to about 270 words; given that RAG augmentation yields a Jaccard similarity of only 40.91% for WoS (Table 6), this moderate length increase corresponds to substantial vocabulary changes.", "perturbed_statement": "For WoS documents originally around 250 words (Figure 3), RAG augmentation increases their length to about 310 words; given that RAG augmentation yields a Jaccard similarity of 90.84% for WoS (Table 6), this significant length increase corresponds to minor vocabulary changes.", "perturbed_explanation": "This is incorrect because Figure 3 shows that at an original length of ~250 words, the smoothed RAG-augmented length is about 270 words—not 310. Additionally, Table 6 lists 40.91% as the Jaccard similarity among generated WoS samples for RAG augment, while 90.84% is the embedding similarity, not Jaccard overlap.", "claim": "For WoS documents originally around 250 words (Figure 3), RAG augmentation increases their length to about 310 words; given that RAG augmentation yields a Jaccard similarity of 90.84% for WoS (Table 6), this significant length increase corresponds to minor vocabulary changes.", "label": false }, { "paperid": "2411.07539v1", "paper_path": "./SciVer/papers/2411.07539v1.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07539v1_figure_4.png", "item2_path": "./SciVer/images/2411.07539v1-Table3-1.png", "section": [ "4.3.2" ], "request_id": 596, "origin_statement": "The model’s melody-only control on FilmScoreDB yields 57.6% accuracy (Table 3); Fig. 4’s generated melody heatmap resembles the control pattern; thus, the model reliably transfers melody patterns with improved accuracy.", "perturbed_statement": "The model’s melody-only control on FilmScoreDB yields 67.6% accuracy (Table 3); Fig. 4’s generated melody heatmap perfectly replicates the control pattern; thus, the model achieves near-perfect melody transfer.", "perturbed_explanation": "The statement is wrong because Table 3 reports 57.6% melody accuracy for the model under melody-only control, not 67.6%. Furthermore, Fig. 4 shows only approximate similarity between generated and control melodies, not a perfect replication.", "claim": "The model’s melody-only control on FilmScoreDB yields 67.6% accuracy (Table 3); Fig. 4’s generated melody heatmap perfectly replicates the control pattern; thus, the model achieves near-perfect melody transfer.", "label": false }, { "paperid": "2409.08980v1", "paper_path": "./SciVer/papers/2409.08980v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.08980v1-Table2-1.png", "item2_path": "./SciVer/images/2409.08980v1-Table3-1.png", "section": [ "3.2.1" ], "request_id": 603, "origin_statement": "Because Driving Style and Need For Closure both use a 1–5 Likert scale, their mean scores can be directly compared without rescaling between scales.", "perturbed_statement": "Because Driving Style and Need For Closure both use a 1–4 Likert scale, their mean scores can be directly compared without rescaling between scales.", "perturbed_explanation": "This is incorrect because, per Table 2, Driving Style is measured on a 1–5 Strongly Disagree to Strongly Agree scale (not 1–4). Likewise, Table 3 shows Need For Closure uses a 1–5 scale, so neither variable uses a 1–4 range and direct comparison on a 1–4 basis is invalid.", "claim": "Because Driving Style and Need For Closure both use a 1–4 Likert scale, their mean scores can be directly compared without rescaling between scales.", "label": false }, { "paperid": "2409.13873v1", "paper_path": "./SciVer/papers/2409.13873v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.13873v1-Table2-1.png", "item2_path": "./SciVer/images/2409.13873v1-Table3-1.png", "section": [ "7.3" ], "request_id": 617, "origin_statement": "For n=100, Table 2 reports the joint model’s μω bias as −0.099 (≈20% censoring). Table 3 then shows μω bias of −0.177 (≈50% censoring). Hence, raising censoring increases bias magnitude for μω in the n=100 joint model.", "perturbed_statement": "For n=100, Table 2 reports the joint model’s μω bias as −0.099 (≈20% censoring). Table 3 then shows μω bias of −0.050 (≈50% censoring). Hence, raising censoring decreases bias magnitude for μω in the n=100 joint model.", "perturbed_explanation": "The perturbed statement misquotes Table 3: the actual joint model μω bias at n=100 and ≈50% censoring is −0.177, not −0.050. Therefore, bias magnitude actually increases, so the conclusion that it decreases is false.", "claim": "For n=100, Table 2 reports the joint model’s μω bias as −0.099 (≈20% censoring). Table 3 then shows μω bias of −0.050 (≈50% censoring). Hence, raising censoring decreases bias magnitude for μω in the n=100 joint model.", "label": false }, { "paperid": "2410.21563v1", "paper_path": "./SciVer/papers/2410.21563v1.json", "claim_type": "sequential", "item1": "6", "item2": "10", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.21563v1-Table6-1.png", "item2_path": "./SciVer/images/2410.21563v1-Table10-1.png", "section": [ "4" ], "request_id": 619, "origin_statement": "Since Table 6 gives g^(1)_ZH u_L u_L = e v/(c_w s_w)(c_Hq^(3) − c_Hq^(1)) and Table 10 gives g^(1)_ZH u_R u_R = −e v/(c_w s_w)c_Hu, choosing c_Hu = −(c_Hq^(3) − c_Hq^(1)) makes their magnitudes equal.", "perturbed_statement": "Table 6 shows g^(1)_ZH u_L u_L = −e v/(c_w s_w)(c_Hq^(3) + c_Hq^(1)) and Table 10 shows g^(1)_ZH u_R u_R = e v/(c_w s_w)c_Hu; thus setting c_Hu = c_Hq^(3) + c_Hq^(1) equalizes them.", "perturbed_explanation": "The perturbation misquotes both tables. In Table 6 the coupling is +e v/(c_w s_w)(c_Hq^(3) − c_Hq^(1)), not −e v/(c_w s_w)(c_Hq^(3) + c_Hq^(1)). Moreover, Table 10’s g^(1)_ZH u_R u_R has a negative sign, not positive. Hence the premise fails.", "claim": "Table 6 shows g^(1)_ZH u_L u_L = −e v/(c_w s_w)(c_Hq^(3) + c_Hq^(1)) and Table 10 shows g^(1)_ZH u_R u_R = e v/(c_w s_w)c_Hu; thus setting c_Hu = c_Hq^(3) + c_Hq^(1) equalizes them.", "label": false }, { "paperid": "2409.09586v1", "paper_path": "./SciVer/papers/2409.09586v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.09586v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.09586v1_figure_1(b).png", "section": [ "3.2" ], "request_id": 620, "origin_statement": "The figure displays 11 starred fundamental values specific to AI; the methods section reports 11 merges of similar value statements; therefore one can infer each AI-specific starred value resulted from one merged pair.", "perturbed_statement": "The figure displays 11 starred fundamental values specific to AI; the methods section reports 12 merges of similar value statements; therefore one can infer each AI-specific starred value resulted from one merged pair.", "perturbed_explanation": "The methods section clearly states the authors performed 11 merges, not 12. Claiming 12 merges contradicts the documented number of merges and breaks the one-to-one correspondence with the 11 starred values.", "claim": "The figure displays 11 starred fundamental values specific to AI; the methods section reports 12 merges of similar value statements; therefore one can infer each AI-specific starred value resulted from one merged pair.", "label": false }, { "paperid": "2411.15839v1", "paper_path": "./SciVer/papers/2411.15839v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15839v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.15839v1_figure_4(c).png", "section": [ "4.2" ], "request_id": 624, "origin_statement": "VaLiD reduces numerical calculation accuracy from about 85% to 60% on LLaVA-v1.5 (image1), then raises it from about 60% to 85% on Qwen-VL (image2), indicating opposite effects on numerical calculation across these LVLM architectures.", "perturbed_statement": "VaLiD increases numerical calculation accuracy from 60% to 85% on LLaVA-v1.5 (image1) and from 60% to 85% on Qwen-VL (image2), showing consistent improvements on numerical calculation across both models.", "perturbed_explanation": "The perturbed claim is incorrect because in image1 (LLaVA-v1.5) the yellow (Vanilla) curve sits at about 85% for Numerical Calculation while the blue (VaLiD) curve drops to about 60%, so VaLiD actually reduces rather than increases accuracy on that task.", "claim": "VaLiD increases numerical calculation accuracy from 60% to 85% on LLaVA-v1.5 (image1) and from 60% to 85% on Qwen-VL (image2), showing consistent improvements on numerical calculation across both models.", "label": false }, { "paperid": "2411.03359v1", "paper_path": "./SciVer/papers/2411.03359v1.json", "claim_type": "sequential", "item1": "3", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.03359v1-Table3-1.png", "item2_path": "./SciVer/images/2411.03359v1-Table6-1.png", "section": [ "4.2" ], "request_id": 626, "origin_statement": "SCT’s 1-shot ID accuracy is 68.80% (Table 5); despite this, on the hardest OOD task
(ImageNet-100 vs ImageNet-10), SCT still achieves 82.60% AUROC (Table 3), showing robust OOD detection even under low-shot ID performance.", "perturbed_statement": "SCT’s 1-shot ID accuracy is 69.70% (Table 5); despite this, on the hardest OOD task (ImageNet-100 vs ImageNet-10), SCT still achieves only 81.97% AUROC (Table 3), showing weaker OOD detection under low-shot ID performance.", "perturbed_explanation": "The perturbed statement misreports SCT’s 1-shot ID accuracy as 69.70%, but Table 5 shows SCT itself achieves 68.80%; 69.70% is actually the IDLike+SCT variant. It also attributes an AUROC of 81.97% to SCT from Table 3, but 81.97% is LoCoOp’s AUROC on that split—SCT’s AUROC is 82.60%.", "claim": "SCT’s 1-shot ID accuracy is 69.70% (Table 5); despite this, on the hardest OOD task (ImageNet-100 vs ImageNet-10), SCT still achieves only 81.97% AUROC (Table 3), showing weaker OOD detection under low-shot ID performance.", "label": false }, { "paperid": "2411.09903v1", "paper_path": "./SciVer/papers/2411.09903v1.json", "claim_type": "sequential", "item1": "7", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09903v1_figure_7.png", "item2_path": "./SciVer/images/2411.09903v1-Table4-1.png", "section": [ "5.2" ], "request_id": 628, "origin_statement": "From Table 4, the combined sample’s characteristic mass log(M_s/h_{70}^{-2}M_⊙)=9.86, and ALFALFA’s is 9.87; this 0.01 dex lower M_s shifts the black open‐circle HIMF slightly left of the purple ALFALFA curve around log(M_HI)≈9.9 in Figure 7.", "perturbed_statement": "From Table 4, the combined sample’s characteristic mass log(M_s/h_{70}^{-2}M_⊙)=9.96, and ALFALFA’s is 9.87; this 0.09 dex higher M_s shifts the black open‐circle HIMF slightly right of the purple ALFALFA curve around log(M_HI)≈9.9 in Figure 7.", "perturbed_explanation": "The perturbed statement misquotes the Table 4 value: the combined sample’s log(M_s) is 9.86±0.01, not 9.96, so it is actually 0.01 dex lower than ALFALFA’s 9.87. Therefore the HIMF cannot shift right in Figure 7 as claimed.", "claim": "From Table 4, the combined sample’s characteristic mass log(M_s/h_{70}^{-2}M_⊙)=9.96, and ALFALFA’s is 9.87; this 0.09 dex higher M_s shifts the black open‐circle HIMF slightly right of the purple ALFALFA curve around log(M_HI)≈9.9 in Figure 7.", "label": false }, { "paperid": "2410.22451v1", "paper_path": "./SciVer/papers/2410.22451v1.json", "claim_type": "sequential", "item1": "8", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22451v1_figure_8.png", "item2_path": "./SciVer/images/2410.22451v1-Table1-1.png", "section": [ "6" ], "request_id": 633, "origin_statement": "Table 1 shows Cutie+ achieves a 98.5 J&F on full 128-frame videos; consequently, Figure 8 reveals Cutie+ also records an 89 J&F suffix at 128-frame interjections, indicating the interjection classifier preserves both overall and suffix segmentation quality.", "perturbed_statement": "Table 1 shows Cutie+ achieves a 95.3 J&F on full 128-frame videos; consequently, Figure 8 reveals Cutie+ also records a 91 J&F suffix at 128-frame interjections, indicating the interjection classifier preserves both overall and suffix segmentation quality.", "perturbed_explanation": "The perturbation is incorrect because Table 1 lists Cutie+ with a 98.5 J&F at 128 frames (not 95.3), and Figure 8 shows Cutie+ suffix performance at 128-frame interjections is about 89 J&F (not 91). 
These values directly contradict the context data.", "claim": "Table 1 shows Cutie+ achieves a 95.3 J&F on full 128-frame videos; consequently, Figure 8 reveals Cutie+ also records a 91 J&F suffix at 128-frame interjections, indicating the interjection classifier preserves both overall and suffix segmentation quality.", "label": false }, { "paperid": "2411.16129v1", "paper_path": "./SciVer/papers/2411.16129v1.json", "claim_type": "sequential", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16129v1_figure_7.png", "item2_path": "./SciVer/images/2411.16129v1-Table3-1.png", "section": [ "4.1" ], "request_id": 636, "origin_statement": "Figure 10 shows ScanSSC yielding more complete car contours than CGFormer. Table 3 reports ScanSSC car IoU at 27.10 versus CGFormer’s 26.10. Consequently, ScanSSC outperforms CGFormer in car completion by a full IoU point.", "perturbed_statement": "Figure 10 shows CGFormer yielding more complete car contours than ScanSSC. Table 3 reports ScanSSC car IoU at 27.10 versus CGFormer’s 26.10. Consequently, CGFormer outperforms ScanSSC in car completion by a full IoU point.", "perturbed_explanation": "The perturbed claim is wrong because Figure 10’s red insets under the ScanSSC row clearly display sharper and more accurate car shapes compared to CGFormer. Moreover, Table 3 lists CGFormer’s car IoU as 26.10, which is lower than ScanSSC’s 27.10, so CGFormer does not outperform ScanSSC.", "claim": "Figure 10 shows CGFormer yielding more complete car contours than ScanSSC. Table 3 reports ScanSSC car IoU at 27.10 versus CGFormer’s 26.10. Consequently, CGFormer outperforms ScanSSC in car completion by a full IoU point.", "label": false }, { "paperid": "2410.17193v1", "paper_path": "./SciVer/papers/2410.17193v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.17193v1-Table2-1.png", "item2_path": "./SciVer/images/2410.17193v1-Table3-1.png", "section": [ "4.2" ], "request_id": 648, "origin_statement": "Under IPC300, EDF achieves 84.2% accuracy on ImageYellow; its Car-Easy recovery ratio at IPC50 is 87.4%, exceeding 84.2% and demonstrating stronger recovery performance on complex subsets.", "perturbed_statement": "Under IPC300, EDF achieves 84.2% accuracy on ImageYellow; its Car-Easy recovery ratio at IPC10 is 76.2%, exceeding 84.2% and demonstrating stronger recovery performance on complex subsets.", "perturbed_explanation": "The perturbed claim is false because the recovery ratio for Car-Easy at IPC10 is 76.2%, which is lower than the IPC300 ImageYellow accuracy of 84.2%, so it does not exceed 84.2% as stated.", "claim": "Under IPC300, EDF achieves 84.2% accuracy on ImageYellow; its Car-Easy recovery ratio at IPC10 is 76.2%, exceeding 84.2% and demonstrating stronger recovery performance on complex subsets.", "label": false }, { "paperid": "2409.03203v2", "paper_path": "./SciVer/papers/2409.03203v2.json", "claim_type": "sequential", "item1": "5(b)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03203v2_figure_5(b).png", "item2_path": "./SciVer/images/2409.03203v2_figure_5(c).png", "section": [ "4.3" ], "request_id": 649, "origin_statement": "From the Spanish curve (b), at 20% data usage DiffusionCLS achieves about 0.43 macro-F1. From the French curve (c), at 20% it achieves about 0.35. 
Therefore, Spanish performance is roughly 0.08 higher than French at 20%.", "perturbed_statement": "From the Spanish curve (b), at 20% data usage DiffusionCLS achieves about 0.40 macro-F1. From the French curve (c), at 20% it achieves about 0.38. Therefore, Spanish performance is only about 0.02 higher than French at 20%.", "perturbed_explanation": "The actual scores are 0.43 for Spanish and 0.35 for French at 20% usage, yielding a 0.08 gap. The perturbation wrongly lowers Spanish to 0.40 and raises French to 0.38, understating the true difference.", "claim": "From the Spanish curve (b), at 20% data usage DiffusionCLS achieves about 0.40 macro-F1. From the French curve (c), at 20% it achieves about 0.38. Therefore, Spanish performance is only about 0.02 higher than French at 20%.", "label": false }, { "paperid": "2410.12831v1", "paper_path": "./SciVer/papers/2410.12831v1.json", "claim_type": "sequential", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.12831v1_figure_5.png", "item2_path": "./SciVer/images/2410.12831v1-Table3-1.png", "section": [ "4.3" ], "request_id": 651, "origin_statement": "Because in Table 3, FLanS achieves a 0.855 Dice score on the WORD dataset compared to 0.534 for SAM2 using point prompts, FLanS text-based prompts more accurately segment the right kidney than SAM2’s point-based prompts, as evidenced in the right panel of Fig. 5.", "perturbed_statement": "Because in Table 3, FLanS achieves a 0.534 Dice score on the WORD dataset compared to 0.855 for SAM2 using point prompts, FLanS text-based prompts are less accurate at segmenting the right kidney than SAM2’s point-based prompts, as evidenced in the right panel of Fig. 5.", "perturbed_explanation": "The perturbation erroneously swapped the Dice scores from Table 3. In reality, FLanS scores 0.855 on WORD and SAM2 (point-prompt) scores 0.534, so FLanS actually outperforms SAM2 with text-based prompts rather than underperforms.", "claim": "Because in Table 3, FLanS achieves a 0.534 Dice score on the WORD dataset compared to 0.855 for SAM2 using point prompts, FLanS text-based prompts are less accurate at segmenting the right kidney than SAM2’s point-based prompts, as evidenced in the right panel of Fig. 5.", "label": false }, { "paperid": "2411.13005v1", "paper_path": "./SciVer/papers/2411.13005v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.13005v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.13005v1-Table2-1.png", "section": [ "4.3" ], "request_id": 652, "origin_statement": "From the Wireframe sAP10 PR curve, DT-LSD's precision exceeds LETR's beyond 0.1 recall. Table 2 confirms DT-LSD achieves 71.7% sAP10 against LETR's 65.2% on Wireframe, indicating DT-LSD outperforms LETR in overall sAP10.", "perturbed_statement": "From the Wireframe sAP10 PR curve, DT-LSD's precision exceeds LETR's beyond 0.3 recall. Table 2 confirms DT-LSD achieves 73.9% sAP10 against LETR's 65.2% on Wireframe, indicating DT-LSD outperforms LETR in overall sAP10.", "perturbed_explanation": "The PR curve (Figure 5) shows DT-LSD first surpasses LETR around 0.1 recall, not 0.3. Also, Table 2 reports DT-LSD’s sAP10 as 71.7%, not 73.9%, so both the recall threshold and the reported sAP10 are incorrect.", "claim": "From the Wireframe sAP10 PR curve, DT-LSD's precision exceeds LETR's beyond 0.3 recall. 
Table 2 confirms DT-LSD achieves 73.9% sAP10 against LETR's 65.2% on Wireframe, indicating DT-LSD outperforms LETR in overall sAP10.", "label": false }, { "paperid": "2409.12210v1", "paper_path": "./SciVer/papers/2409.12210v1.json", "claim_type": "sequential", "item1": "3", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.12210v1_figure_3.png", "item2_path": "./SciVer/images/2409.12210v1-Table6-1.png", "section": [ "4.3" ], "request_id": 662, "origin_statement": "Figure 3(d) shows that in MoDSE’s final epoch the smallest expert (0.5) receives the most tokens. However, Table 6 shows difficult tokens are routed more than twice as often to larger experts for the top-1 slot (6215 vs. 3085), indicating hard cases still favor larger experts.", "perturbed_statement": "Figure 3(d) shows that in MoDSE’s final epoch expert 2.0 receives the most tokens. And Table 6 shows difficult tokens are routed more often to smaller experts (sum(S)=8326 vs. sum(L)=10473), indicating they’re preferred for harder tokens.", "perturbed_explanation": "The perturbed claim is wrong on two counts. First, Figure 3(d) actually highlights expert 0.5 (not 2.0) as the most-routed expert in the final epoch. Second, Table 6 reports sum(L)=10473 > sum(S)=8326, so larger experts receive more difficult-token routings, not smaller ones.", "claim": "Figure 3(d) shows that in MoDSE’s final epoch expert 2.0 receives the most tokens. And Table 6 shows difficult tokens are routed more often to smaller experts (sum(S)=8326 vs. sum(L)=10473), indicating they’re preferred for harder tokens.", "label": false }, { "paperid": "2410.16255v1", "paper_path": "./SciVer/papers/2410.16255v1.json", "claim_type": "sequential", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.16255v1_figure_6.png", "item2_path": "./SciVer/images/2410.16255v1-Table2-1.png", "section": [ "4.2" ], "request_id": 663, "origin_statement": "Because the global map in Figure 6 prominently highlights the removed labels on the juice bottle logical anomaly, and MVTec-LOCO contains logical anomalies, ULSAD’s high AUPRO of 73.73% on MVTec-LOCO stems from its global branch’s strong logical anomaly detection.", "perturbed_statement": "Because the local map in Figure 6 prominently highlights the removed labels on the juice bottle logical anomaly, and MVTec-LOCO contains logical anomalies, ULSAD’s high AUPRO of 73.73% on MVTec-LOCO stems from its local branch’s strong logical anomaly detection.", "perturbed_explanation": "The perturbation is wrong because Figure 6 and the paper text both state that the global branch— not the local branch— is more suited to detecting logical anomalies (e.g., the missing labels). 
The local map shows less focused activation on those logical regions, so attributing ULSAD’s logical anomaly performance to the local branch contradicts the context.", "claim": "Because the local map in Figure 6 prominently highlights the removed labels on the juice bottle logical anomaly, and MVTec-LOCO contains logical anomalies, ULSAD’s high AUPRO of 73.73% on MVTec-LOCO stems from its local branch’s strong logical anomaly detection.", "label": false }, { "paperid": "2410.20421v1", "paper_path": "./SciVer/papers/2410.20421v1.json", "claim_type": "sequential", "item1": "8", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20421v1_figure_8.png", "item2_path": "./SciVer/images/2410.20421v1-Table3-1.png", "section": [ "4.1" ], "request_id": 670, "origin_statement": "Figure 8 shows that ADTrack’s AUC drops from 62.3 on existing benchmarks to 31.38 on NT-VOT211. From Table 3, ProContEXT fine-tuning only increases overall AUC from 51.2 to 52.3. Thus, ADTrack’s performance loss (≈30.9 points) far exceeds ProContEXT’s fine-tuning gain (1.1 points).", "perturbed_statement": "Figure 8 shows that ADTrack’s AUC drops from 62.3 on existing benchmarks to 39.23 on NT-VOT211. From Table 3, ProContEXT fine-tuning only increases overall AUC from 51.2 to 52.3. Thus, ADTrack’s performance loss (≈23.1 points) far exceeds ProContEXT’s fine-tuning gain (1.1 points).", "perturbed_explanation": "The perturbed statement misreports ADTrack’s NT-VOT211 AUC as 39.23. In Figure 8, the 39.23 value actually belongs to Mixformer, while ADTrack’s NT-VOT211 AUC is 31.38. Thus the calculated drop of ≈23.1 points is incorrect.", "claim": "Figure 8 shows that ADTrack’s AUC drops from 62.3 on existing benchmarks to 39.23 on NT-VOT211. From Table 3, ProContEXT fine-tuning only increases overall AUC from 51.2 to 52.3. Thus, ADTrack’s performance loss (≈23.1 points) far exceeds ProContEXT’s fine-tuning gain (1.1 points).", "label": false }, { "paperid": "2410.04927v2", "paper_path": "./SciVer/papers/2410.04927v2.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.04927v2_figure_1.png", "item2_path": "./SciVer/images/2410.04927v2_figure_2.png", "section": [ "1" ], "request_id": 677, "origin_statement": "Because Figure 1 shows FELLAS retains FedSeqRec’s original model update flow without fine-tuning LLM, and Figure 2 depicts clients only sending perturbed sequences to the LLM server under d_x-privacy, FELLAS maintains both low communication overhead and user privacy.", "perturbed_statement": "Because Figure 1 shows FELLAS still fine-tunes LLM within FedSeqRec, and Figure 2 shows clients upload raw interaction sequences to the LLM server, FELLAS both increases communication overhead and fails to protect user privacy.", "perturbed_explanation": "This is wrong because Figure 1 explicitly labels the right-hand side ‘FedSeqRec using LLM as a service’, indicating no client-side LLM fine-tuning or extra parameter uploads—clients still only exchange lightweight model updates. 
Moreover, Figure 2 shows a d_x-privacy perturbation applied to sequences before querying the LLM server, so raw interaction data is never sent.", "claim": "Because Figure 1 shows FELLAS still fine-tunes LLM within FedSeqRec, and Figure 2 shows clients upload raw interaction sequences to the LLM server, FELLAS both increases communication overhead and fails to protect user privacy.", "label": false }, { "paperid": "2409.15139v2", "paper_path": "./SciVer/papers/2409.15139v2.json", "claim_type": "sequential", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15139v2_figure_5.png", "item2_path": "./SciVer/images/2409.15139v2_figure_7.png", "section": [ "4.2" ], "request_id": 685, "origin_statement": "Because the final converged string contains 35 interpolated fields (Nst=34) in Figure 5, projecting these fields onto the PCA subspace shows that 87.76% of their variance lies along v1 (Figure 7), indicating the top-manifold path is predominantly oriented along the first principal component.", "perturbed_statement": "Because the final converged string contains 20 interpolated fields (Nst=19), projecting these fields onto the PCA subspace shows that 95% of their variance lies along v2 (Figure 7), indicating the top-manifold path is predominantly oriented along the second principal component.", "perturbed_explanation": "This statement is incorrect because Figure 5 specifies Nst=34 (yielding 35 fields), not 19, and Figure 7 reports PCA variances of 87.76% for v1 and only 10.61% for v2, not 95% on v2.", "claim": "Because the final converged string contains 20 interpolated fields (Nst=19), projecting these fields onto the PCA subspace shows that 95% of their variance lies along v2 (Figure 7), indicating the top-manifold path is predominantly oriented along the second principal component.", "label": false }, { "paperid": "2411.15351v1", "paper_path": "./SciVer/papers/2411.15351v1.json", "claim_type": "sequential", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15351v1_figure_7.png", "item2_path": "./SciVer/images/2411.15351v1-Table3-1.png", "section": [ "3" ], "request_id": 690, "origin_statement": "Since Figure 7’s Pt–W phase diagram was generated with ORB and Table 3 shows ORB is over 1000× faster than DFT for Cr–Mo SQS calculations, the Pt–W diagram generation also required over a thousandfold less CPU time than ab initio.", "perturbed_statement": "Since Figure 7’s Pt–W phase diagram was generated with ORB and Table 3 shows ORB is over 100× faster than DFT for Cr–Mo SQS calculations, the Pt–W diagram generation also required over a hundredfold less CPU time than ab initio.", "perturbed_explanation": "Table 3 in the context explicitly states that ORB achieves speedups of over 1000× compared to DFT for Cr–Mo SQS structures. 
By claiming only a 100× speedup, the perturbed statement contradicts the documented >1000× factor shown in Table 3.", "claim": "Since Figure 7’s Pt–W phase diagram was generated with ORB and Table 3 shows ORB is over 100× faster than DFT for Cr–Mo SQS calculations, the Pt–W diagram generation also required over a hundredfold less CPU time than ab initio.", "label": false }, { "paperid": "2409.11040v1", "paper_path": "./SciVer/papers/2409.11040v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.11040v1-Table2-1.png", "item2_path": "./SciVer/images/2409.11040v1-Table3-1.png", "section": [ "5.1" ], "request_id": 691, "origin_statement": "Table 2 reports β2 = 2.326. Table 3 (20% loss) estimates β2 at 2.019 with 95% CI [1.403, 2.549]. Since 2.326 lies within this interval, the imputation algorithm accurately recovers β2 under 20% data loss.", "perturbed_statement": "Table 2 reports β2 = 2.326. Table 3 (20% loss) estimates β2 at 2.019 with 95% CI [1.503, 2.549]. Since 2.326 lies below the interval’s lower bound, the imputation algorithm fails to recover β2 under 20% data loss.", "perturbed_explanation": "The perturbed statement misquotes the lower bound of the 95% CI for β2 under 20% loss. The actual interval is [1.403, 2.549], not [1.503, 2.549], so 2.326 does lie within the true interval and the algorithm does recover β2.", "claim": "Table 2 reports β2 = 2.326. Table 3 (20% loss) estimates β2 at 2.019 with 95% CI [1.503, 2.549]. Since 2.326 lies below the interval’s lower bound, the imputation algorithm fails to recover β2 under 20% data loss.", "label": false }, { "paperid": "2410.10054v1", "paper_path": "./SciVer/papers/2410.10054v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.10054v1_figure_3.png", "item2_path": "./SciVer/images/2410.10054v1-Table3-1.png", "section": [ "4.2" ], "request_id": 694, "origin_statement": "MoLA(2468) assigns 4 experts to layers 9–16 (Figure 3); this mid-layer allocation yields 0.45% lower GSM8K accuracy (43.59% vs 44.04%) on LLaMA compared to uniform MoLA(5555), indicating fewer experts in those layers correlates with reduced zero-shot math performance.", "perturbed_statement": "MoLA(2468) assigns 6 experts to layers 9–16 (Figure 3); this mid-layer allocation yields 0.45% lower GSM8K accuracy (43.59% vs 44.04%) on LLaMA compared to uniform MoLA(5555), indicating excess experts in those layers still correlates with reduced zero-shot math performance.", "perturbed_explanation": "The perturbation is wrong because Figure 3 actually shows MoLA(2468) allocates 4 experts to layers 9–16, not 6. 
Misstating this intermediate premise invalidates the resulting claim about mid-layer expert redundancy affecting GSM8K accuracy.", "claim": "MoLA(2468) assigns 6 experts to layers 9–16 (Figure 3); this mid-layer allocation yields 0.45% lower GSM8K accuracy (43.59% vs 44.04%) on LLaMA compared to uniform MoLA(5555), indicating excess experts in those layers still correlates with reduced zero-shot math performance.", "label": false }, { "paperid": "2409.19521v1", "paper_path": "./SciVer/papers/2409.19521v1.json", "claim_type": "sequential", "item1": "6", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19521v1_figure_6.png", "item2_path": "./SciVer/images/2409.19521v1_figure_8.png", "section": [ "5.4" ], "request_id": 709, "origin_statement": "GenTel-Shield’s French jailbreak detection accuracy is nearly 100% (Fig. 6), while its French prompt leakage accuracy is about 90% (Fig. 8), indicating a roughly 10% performance drop when shifting from jailbreak detection to prompt leakage in French.", "perturbed_statement": "GenTel-Shield’s French jailbreak detection accuracy is about 92% (Fig. 6), while its French prompt leakage accuracy is about 90% (Fig. 8), indicating only a 2% performance drop when shifting tasks in French.", "perturbed_explanation": "Figure 6 shows that GenTel-Shield’s French jailbreak detection bar actually reaches nearly 100%, not around 92%. By understating the true baseline accuracy, the claimed 2% drop is incorrect.", "claim": "GenTel-Shield’s French jailbreak detection accuracy is about 92% (Fig. 6), while its French prompt leakage accuracy is about 90% (Fig. 8), indicating only a 2% performance drop when shifting tasks in French.", "label": false }, { "paperid": "2409.15588v1", "paper_path": "./SciVer/papers/2409.15588v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15588v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.15588v1_figure_1(b).png", "section": [ "3.1", "3.2" ], "request_id": 710, "origin_statement": "At δ = 1.3, for model (3.1) with (n,p)=(600,50), the new test’s empirical power is ≈0.53 (triangle); for (n,p)=(600,80) it’s ≈0.995. Thus, increasing p from 50 to 80 boosts power by about 0.465.", "perturbed_statement": "At δ = 1.3, for model (3.1) with (n,p)=(600,50), the new test’s empirical power is ≈0.245 (diamond); for (n,p)=(600,80) it’s ≈0.525. Thus, increasing p from 50 to 80 boosts power by about 0.28.", "perturbed_explanation": "The perturbed statement reads off the diamond markers (0.245 and 0.525), which represent the conservative Ryan and Killick test, not the triangle markers for the new test. The correct new-test powers at δ=1.3 are ≈0.53 and ≈0.995.", "claim": "At δ = 1.3, for model (3.1) with (n,p)=(600,50), the new test’s empirical power is ≈0.245 (diamond); for (n,p)=(600,80) it’s ≈0.525. 
Thus, increasing p from 50 to 80 boosts power by about 0.28.", "label": false }, { "paperid": "2409.13980v1", "paper_path": "./SciVer/papers/2409.13980v1.json", "claim_type": "sequential", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13980v1_figure_5.png", "item2_path": "./SciVer/images/2409.13980v1-Table3-1.png", "section": [ "4.5" ], "request_id": 713, "origin_statement": "Because the context-aware description details that the burger is made of chicken, cheese, lettuce, tomato, onion, and pickles (Figure 5), GPT-4 judges these descriptions superior 75.5% of the time on the Whoops dataset (Table 3).", "perturbed_statement": "Because the context-aware description details that the burger is vegan, GPT-4 judges these descriptions superior 85.5% of the time on the Whoops dataset.", "perturbed_explanation": "This claim is wrong for two reasons: Figure 5’s context-aware description explicitly lists chicken, cheese, lettuce, tomato, onion, and pickles—so the burger is not vegan. And Table 3 reports a 75.5% ‘Description Better’ average on Whoops, not 85.5%.", "claim": "Because the context-aware description details that the burger is vegan, GPT-4 judges these descriptions superior 85.5% of the time on the Whoops dataset.", "label": false }, { "paperid": "2410.19599v2", "paper_path": "./SciVer/papers/2410.19599v2.json", "claim_type": "sequential", "item1": "9", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19599v2_figure_9.png", "item2_path": "./SciVer/images/2410.19599v2-Table1-1.png", "section": [ "4" ], "request_id": 714, "origin_statement": "GPT-4 in the costless variant chooses 19 most frequently (Fig. 9) and in Table 1 cites loss-aversion when explaining its choice of 19, indicating loss-aversion drives its selection even when losses are eliminated.", "perturbed_statement": "GPT-4 in the costless variant chooses 20 most frequently (Fig. 9) and in Table 1 cites loss-aversion when explaining its choice of 19, implying loss-aversion drives its choice even when losses are fixed.", "perturbed_explanation": "This is wrong because Fig. 9 shows GPT-4 (Costless) peaks at 19, not 20, and Table 1 only lists GPT-4’s loss-aversion rationale for the 19 choice. Claiming it chose 20 contradicts both the figure and the table.", "claim": "GPT-4 in the costless variant chooses 20 most frequently (Fig. 
9) and in Table 1 cites loss-aversion when explaining its choice of 19, implying loss-aversion drives its choice even when losses are fixed.", "label": false }, { "paperid": "2410.19288v1", "paper_path": "./SciVer/papers/2410.19288v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19288v1_figure_1.png", "item2_path": "./SciVer/images/2410.19288v1_figure_2.png", "section": [ "2.2" ], "request_id": 721, "origin_statement": "Fig 1(b) shows that Gaussian noise is only added within the quality-filtering mask, and Fig 2 shows this mask M is concatenated into the Denoising UNet input, so the network focuses denoising only on masked ROI, skipping background voxels.", "perturbed_statement": "Fig 1(b) shows that Gaussian noise is added uniformly across the entire image, and Fig 2 shows this mask M is concatenated into the Denoising UNet input, so the network applies denoising across the whole image, ignoring ROI boundaries.", "perturbed_explanation": "The perturbed claim is false because Fig 1(b) explicitly states that noise is only added to the region defined by the quality-filtering mask, not uniformly across the image. Therefore the conclusion that the network denoises the whole image (ignoring ROI) contradicts the fact that background regions remain noise-free by design.", "claim": "Fig 1(b) shows that Gaussian noise is added uniformly across the entire image, and Fig 2 shows this mask M is concatenated into the Denoising UNet input, so the network applies denoising across the whole image, ignoring ROI boundaries.", "label": false }, { "paperid": "2409.08224v1", "paper_path": "./SciVer/papers/2409.08224v1.json", "claim_type": "sequential", "item1": "8", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.08224v1_figure_8.png", "item2_path": "./SciVer/images/2409.08224v1-Table3-1.png", "section": [ "6.2" ], "request_id": 724, "origin_statement": "In Week 06, general knowledge accounted for 84% of student statements (Figure 8). Table 3’s Week 06 prompt solely asked for brainstorming solutions without physics content. This lack of science scaffolding likely forced students to draw overwhelmingly on general knowledge in their initial design discussions.", "perturbed_statement": "In Week 06, students relied 84% on physics knowledge. Table 3’s Week 06 prompt instructed detailed spring experiments, so intensive lab scaffolding explains their physics knowledge emphasis.", "perturbed_explanation": "Figure 8 shows that only 15% of Week 06 statements drew on physics knowledge (not 84%), while 84% drew on general knowledge. Moreover, Table 3’s Week 06 prompt was a brainstorming task without any mention of spring experiments or physics‐content scaffolds, contradicting the perturbed claim.", "claim": "In Week 06, students relied 84% on physics knowledge. Table 3’s Week 06 prompt instructed detailed spring experiments, so intensive lab scaffolding explains their physics knowledge emphasis.", "label": false }, { "paperid": "2411.13093v1", "paper_path": "./SciVer/papers/2411.13093v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.13093v1_figure_4.png", "item2_path": "./SciVer/images/2411.13093v1-Table5-1.png", "section": [ "4.4" ], "request_id": 725, "origin_statement": "Figure 4 shows that sampling 64 frames yields the highest overall accuracy with Video-RAG (≈62.3%). 
Table 5 indicates that at RAG threshold t=0.3, the overall accuracy reaches 62.1%. Thus, applying a similarity threshold of 0.3 nearly matches the top performance achieved by doubling frame sampling to 64 frames.", "perturbed_statement": "Figure 4 shows that sampling 64 frames yields the highest overall accuracy with Video-RAG (≈61.8%). Table 5 indicates that at RAG threshold t=0.3, overall accuracy reaches 63.0%. Therefore, threshold 0.3 outperforms the 64-frame configuration.", "perturbed_explanation": "The perturbed statement misreports two key numbers: Figure 4 actually shows the peak overall accuracy at 64 frames is about 62.3%, not 61.8%. And Table 5 reports an overall accuracy of 62.1% at t=0.3, not 63.0%. These discrepancies invalidate the claim that thresholding at 0.3 outperforms the 64-frame setup.", "claim": "Figure 4 shows that sampling 64 frames yields the highest overall accuracy with Video-RAG (≈61.8%). Table 5 indicates that at RAG threshold t=0.3, overall accuracy reaches 63.0%. Therefore, threshold 0.3 outperforms the 64-frame configuration.", "label": false }, { "paperid": "2410.20847v1", "paper_path": "./SciVer/papers/2410.20847v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20847v1_figure_4.png", "item2_path": "./SciVer/images/2410.20847v1_figure_5.png", "section": [ "3.1" ], "request_id": 745, "origin_statement": "Fig.4’s XZ projection places the spacecraft at the Sgr A* observation moment nearly on the ecliptic plane (Z≈0). Given Fig.5’s 20° maximum downward tilt in the XZ plane, the antenna can easily maintain line-of-sight during that observation.", "perturbed_statement": "Fig.4’s XZ projection places the spacecraft at the Sgr A* observation moment about Z≈−350 000 km below the ecliptic. Since Fig.5 allows only a 20° downward tilt, the antenna cannot achieve this line-of-sight at that moment.", "perturbed_explanation": "In Fig.4’s XZ projection the red dot for Sgr A* clearly lies at Z very close to zero (on the ecliptic plane), not at −350 000 km, so the actual tilt needed is minimal and well within the 20° limit.", "claim": "Fig.4’s XZ projection places the spacecraft at the Sgr A* observation moment about Z≈−350 000 km below the ecliptic. Since Fig.5 allows only a 20° downward tilt, the antenna cannot achieve this line-of-sight at that moment.", "label": false }, { "paperid": "2411.16312v1", "paper_path": "./SciVer/papers/2411.16312v1.json", "claim_type": "sequential", "item1": "6(e)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16312v1_figure_6(e).png", "item2_path": "./SciVer/images/2411.16312v1-Table1-1.png", "section": [ "4.2" ], "request_id": 746, "origin_statement": "Table1 reports that EPS reduces selected patches to 17.5% for 2× scale while achieving 34.61 PSNR with ESPCN. In Fig. 6, EPS’s super-resolved text appears crisper than LiveNAS. Thus, EPS’s patch selection yields both sparse data and sharper text restoration than LiveNAS at 2× scale.", "perturbed_statement": "Table1 reports that EPS reduces selected patches to 17.5% for 2× scale while achieving 33.27 PSNR with ESPCN. In Fig. 6, EPS’s super-resolved text appears crisper than LiveNAS. Thus, EPS’s patch selection yields both sparse data and sharper text restoration than LiveNAS at 2× scale.", "perturbed_explanation": "The perturbed statement misquotes the PSNR value of EPS with ESPCN at 2× scale. 
According to Table 1, EPS actually achieves 34.61 PSNR, not 33.27 PSNR (33.27 PSNR belongs to LiveNAS). This incorrect premise undermines the claim that EPS yields sharper restoration based on quantitative superiority.", "claim": "Table1 reports that EPS reduces selected patches to 17.5% for 2× scale while achieving 33.27 PSNR with ESPCN. In Fig. 6, EPS’s super-resolved text appears crisper than LiveNAS. Thus, EPS’s patch selection yields both sparse data and sharper text restoration than LiveNAS at 2× scale.", "label": false }, { "paperid": "2411.13545v2", "paper_path": "./SciVer/papers/2411.13545v2.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.13545v2_figure_4.png", "item2_path": "./SciVer/images/2411.13545v2-Table3-1.png", "section": [ "4.3.1" ], "request_id": 752, "origin_statement": "DyReLU phasing yields sustained gradient norms >0 at 99.99% sparsity (Fig.4 top), whereas standard ReLU collapses to zero. Correspondingly, Table 3 shows RigL with DyReLU achieves 54.58% accuracy on ResNet34 at 99.99% sparsity, compared to 10.03% with ReLU, confirming better gradient flow drives accuracy gains.", "perturbed_statement": "Fig.4 top shows DyReLU phasing yields non-zero gradient norms at 99.99% sparsity, whereas standard ReLU collapses to zero. Yet Table 3 lists RigL with DyReLU at 10.03% accuracy on ResNet34 at 99.99% sparsity, equal to ReLU, indicating DyReLU phasing fails to improve performance.", "perturbed_explanation": "The perturbed claim misreports Table 3: DyReLU’s accuracy on ResNet34 at 99.99% sparsity is 54.58%, not 10.03%. The 10.03% figure corresponds to standard ReLU, so the statement that DyReLU matches ReLU’s 10.03% is incorrect.", "claim": "Fig.4 top shows DyReLU phasing yields non-zero gradient norms at 99.99% sparsity, whereas standard ReLU collapses to zero. Yet Table 3 lists RigL with DyReLU at 10.03% accuracy on ResNet34 at 99.99% sparsity, equal to ReLU, indicating DyReLU phasing fails to improve performance.", "label": false }, { "paperid": "2410.23828v2", "paper_path": "./SciVer/papers/2410.23828v2.json", "claim_type": "sequential", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23828v2_figure_5.png", "item2_path": "./SciVer/images/2410.23828v2-Table2-1.png", "section": [ "5.2" ], "request_id": 753, "origin_statement": "Figure 5’s second failure case shows the model misclassifies the faded playground as non-vegetated ground. Table 2 reports an SC oIoU of 24.78% for VisTA, indicating its lower accuracy in capturing finer spatial changes compared to coarser categories like CN (oIoU 44.18%).", "perturbed_statement": "Figure 5’s second failure case shows the model correctly classifies the faded playground as vegetated ground. Table 2 reports an SC oIoU of 59.89% for VisTA, indicating its high accuracy in capturing finer spatial changes compared to CN (oIoU 44.18%).", "perturbed_explanation": "This statement is wrong because Figure 5 explicitly describes the playground being misclassified as non-vegetated ground, not correctly labeled as vegetation. Additionally, Table 2 lists VisTA’s SC oIoU as 24.78%, not 59.89%, so its fine-grained spatial performance remains low.", "claim": "Figure 5’s second failure case shows the model correctly classifies the faded playground as vegetated ground. 
Table 2 reports an SC oIoU of 59.89% for VisTA, indicating its high accuracy in capturing finer spatial changes compared to CN (oIoU 44.18%).", "label": false }, { "paperid": "2411.06866v1", "paper_path": "./SciVer/papers/2411.06866v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06866v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.06866v1_figure_3(b).png", "section": [ "5.8" ], "request_id": 756, "origin_statement": "Since CommonsenseQA accuracy peaks at k=70 (≈74.8%) in Figure 3a, examining Figure 3b at n=50 shows a near-matching accuracy (≈74.6%), suggesting half the nodes suffice to approach peak performance.", "perturbed_statement": "Since CommonsenseQA accuracy peaks at k=50 (≈74.7%) in Figure 3a, examining Figure 3b at n=50 shows a matching accuracy (≈74.6%), suggesting half the nodes suffice to reach peak performance.", "perturbed_explanation": "The perturbed statement incorrectly claims the peak CommonsenseQA accuracy in Figure 3a occurs at k=50 (~74.7%). In reality, the highest accuracy (≈74.8%) occurs at k=70, not k=50, so the premise is false.", "claim": "Since CommonsenseQA accuracy peaks at k=50 (≈74.7%) in Figure 3a, examining Figure 3b at n=50 shows a matching accuracy (≈74.6%), suggesting half the nodes suffice to reach peak performance.", "label": false } ]