[ { "paperid": "2409.07233v1", "paper_path": "./SciVer/papers/2409.07233v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.4", "4.5" ], "image_path": "./SciVer/images/2409.07233v1-Table1-1.png", "request_id": 1, "origin_statement": "CN’s Grade coefficient (−0.982) is more negative than N’s (−0.844), reflecting attenuation bias reversal: incorporating boundary censoring via a log‐σ link reduces variance at 0 and 1, thus amplifying mean‐parameter effects compared to ordinary least squares.", "perturbed_statement": "CN’s Grade coefficient (−0.982) is less negative than N’s (−0.844), reflecting attenuation bias reversal: incorporating boundary censoring via an identity σ link reduces variance at 0.5, thus amplifying mean‐parameter effects compared to ordinary least squares.", "perturbed_explanation": "The perturbation is wrong because (1) CN’s Grade coefficient (−0.982) is actually more negative (in absolute magnitude) than N’s (−0.844), not less negative. (2) The CN model uses a log‐link for σ, not an identity link. (3) Boundary censoring affects 0 and 1, not 0.5.", "claim": "CN’s Grade coefficient (−0.982) is more negative than N’s (−0.844), reflecting attenuation bias reversal: incorporating boundary censoring via a log‐σ link reduces variance at 0 and 1, thus amplifying mean‐parameter effects compared to ordinary least squares.", "label": true }, { "paperid": "2409.06994v2", "paper_path": "./SciVer/papers/2409.06994v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "4.2.2" ], "image_path": "./SciVer/images/2409.06994v2_figure_2.png", "request_id": 2, "origin_statement": "In setting 10, increasing α from 0.0 to 0.3 reduces DFS performance by approximately 0.18 AUC due to core-periphery mixing, while BFS, DN, RE, and RW remain around 0.85 AUC, highlighting that depth-first sampling is uniquely sensitive to core size expansion.", "perturbed_statement": "In setting 10, increasing α from 0.0 to 0.3 yields a steady 0.1 AUC increase for the RN sampler, while DFS maintains a stable AUC around 0.9 and all other methods drop below 0.8.", "perturbed_explanation": "This is incorrect because in Figure 2 setting 10, RN’s AUC remains roughly constant at about 0.60 (no 0.1 increase), and DFS actually declines from ~0.93 to ~0.75 AUC rather than holding at ~0.9. 
Therefore both claims contradict the plotted values.", "claim": "In setting 10, increasing α from 0.0 to 0.3 reduces DFS performance by approximately 0.18 AUC due to core-periphery mixing, while BFS, DN, RE, and RW remain around 0.85 AUC, highlighting that depth-first sampling is uniquely sensitive to core size expansion.", "label": true }, { "paperid": "2410.09504v1", "paper_path": "./SciVer/papers/2410.09504v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.09504v1-Table2-1.png", "request_id": 5, "origin_statement": "Double BPS, with α=0.75 and φ=4.11, attains the lowest RMSPE (0.900), showing that ensemble stacking mitigates bias and variance by blending models, thus correcting for slight range misspecifications to achieve optimal spatial smoothing.", "perturbed_statement": "Double BPS, with α=0.75 and φ=14.11, attains the lowest RMSPE (0.900), showing that ensemble stacking mitigates bias and variance by blending models, thus correcting for slight range misspecifications to achieve optimal spatial smoothing.", "perturbed_explanation": "This statement is incorrect because Table 2 specifies the range parameter φ for the double BPS model as 4.11, not 14.11, so the reported φ value contradicts the context.", "claim": "Double BPS, with α=0.75 and φ=4.11, attains the lowest RMSPE (0.900), showing that ensemble stacking mitigates bias and variance by blending models, thus correcting for slight range misspecifications to achieve optimal spatial smoothing.", "label": true }, { "paperid": "2411.15223v1", "paper_path": "./SciVer/papers/2411.15223v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "3.3.3" ], "image_path": "./SciVer/images/2411.15223v1_figure_7.png", "request_id": 10, "origin_statement": "Increasing the number of attention heads from 1 to 2 enhances feature diversity and mitigates overfitting, raising AUC to 0.7850 and reducing Logloss to 0.4628; beyond 2 heads, gradient interference increases, causing AUC to drop and Logloss to rise.", "perturbed_statement": "When using four attention heads, the model achieves the highest AUC of 0.7850 and the lowest Logloss of 0.4628, indicating that four heads deliver optimal prediction performance.", "perturbed_explanation": "The perturbed statement incorrectly identifies four heads as optimal. According to the context and Figure 7, the highest AUC (0.7850) and lowest Logloss (0.4628) occur at two attention heads, not four.", "claim": "Increasing the number of attention heads from 1 to 2 enhances feature diversity and mitigates overfitting, raising AUC to 0.7850 and reducing Logloss to 0.4628; beyond 2 heads, gradient interference increases, causing AUC to drop and Logloss to rise.", "label": true }, { "paperid": "2409.10066v1", "paper_path": "./SciVer/papers/2409.10066v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.10066v1-Table3-1.png", "request_id": 15, "origin_statement": "By leveraging semi-formal interactive pattern sequences, LeGEND uncovers 11 critical scenario types—over five times more than LeGEND–—while cutting the average simulations for first-type detection by 86% (11 vs. 
81), illustrating that structured representations significantly expedite diverse fault exposure.", "perturbed_statement": "By leveraging semi-formal interactive pattern sequences, LeGEND uncovers 11 critical scenario types—over five times more than LeGEND–—while cutting the average simulations for first-type detection by 56% (11 vs. 81), illustrating that structured representations significantly expedite diverse fault exposure.", "perturbed_explanation": "The perturbation misstates the reduction percentage: the drop from 81 to 11 simulations corresponds to an approximately 86% decrease, not 56%, contradicting the actual average #SimForFirstType values in Table 3.", "claim": "By leveraging semi-formal interactive pattern sequences, LeGEND uncovers 11 critical scenario types—over five times more than LeGEND–—while cutting the average simulations for first-type detection by 86% (11 vs. 81), illustrating that structured representations significantly expedite diverse fault exposure.", "label": true }, { "paperid": "2409.16902v1", "paper_path": "./SciVer/papers/2409.16902v1.json", "claim_type": "analytical", "type": "chart", "item": "2(d)", "section": [ "3" ], "image_path": "./SciVer/images/2409.16902v1_figure_2(d).png", "request_id": 16, "origin_statement": "SAM2-large achieves 0.653 AUC on UW-COT, surpassing ARTrack (0.615 AUC) by 6.3%, highlighting how enhanced temporal consistency and robust feature embeddings in SAM2 notably improve tracking accuracy under underwater occlusion and deformation.", "perturbed_statement": "SAM2-large achieves 0.653 AUC on UW-COT, surpassing ARTrack (0.575 AUC) by 7.8%, highlighting how enhanced temporal consistency and robust feature embeddings in SAM2 notably improve tracking accuracy under underwater occlusion and deformation.", "perturbed_explanation": "The perturbed statement misreports ARTrack’s AUC as 0.575 instead of the actual 0.615 shown in Figure 2. Consequently, the calculated performance gap of 7.8% is also incorrect, since the true difference is about 6.3%.", "claim": "SAM2-large achieves 0.653 AUC on UW-COT, surpassing ARTrack (0.615 AUC) by 6.3%, highlighting how enhanced temporal consistency and robust feature embeddings in SAM2 notably improve tracking accuracy under underwater occlusion and deformation.", "label": true }, { "paperid": "2409.14672v1", "paper_path": "./SciVer/papers/2409.14672v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "2.2" ], "image_path": "./SciVer/images/2409.14672v1-Table3-1.png", "request_id": 17, "origin_statement": "The decrease in audio responses’ understandability (1.77 vs. 1.84) and informativeness (1.59 vs. 1.72) reflects increased cognitive load during speech processing in interactive ITLM dialogue, underscoring the need for streamlined content tailored to auditory comprehension.", "perturbed_statement": "The decrease in audio responses’ understandability (1.64 vs. 1.84) and informativeness (1.59 vs. 1.72) reflects increased cognitive load during speech processing in interactive ITLM dialogue, underscoring the need for streamlined content tailored to auditory comprehension.", "perturbed_explanation": "The statement misreports the audio understandability score: Table 3 lists it as 1.77 for audio, not 1.64, so the referenced change in understandability is incorrect.", "claim": "The decrease in audio responses’ understandability (1.77 vs. 1.84) and informativeness (1.59 vs. 
1.72) reflects increased cognitive load during speech processing in interactive ITLM dialogue, underscoring the need for streamlined content tailored to auditory comprehension.", "label": true }, { "paperid": "2409.06367v1", "paper_path": "./SciVer/papers/2409.06367v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.06367v1-Table4-1.png", "request_id": 21, "origin_statement": "EfficientAD achieves the highest image-level AUROC (63.30%) on metal plate textures but shows a 13.82% drop in pixel-level AUROC for subclass3 compared to its image-level performance, indicating that uniform reflective surfaces challenge fine-grained anomaly segmentation despite strong global detection.", "perturbed_statement": "In metal plate textures, MSFlow achieves the highest image-level AUROC (63.30%), outperforming EfficientAD by 4.56%, but its pixel-level AUROC on subclass3 drops 13.82% relative to its image-level performance, indicating uniform metallic surfaces hinder anomaly localization precision.", "perturbed_explanation": "The perturbed statement incorrectly attributes a 63.30% image-level AUROC to MSFlow and claims it outperforms EfficientAD by 4.56%. The table shows MSFlow’s image-level average is only 58.74%, while EfficientAD’s is 63.30%, so MSFlow does not have the highest image-level performance.", "claim": "EfficientAD achieves the highest image-level AUROC (63.30%) on metal plate textures but shows a 13.82% drop in pixel-level AUROC for subclass3 compared to its image-level performance, indicating that uniform reflective surfaces challenge fine-grained anomaly segmentation despite strong global detection.", "label": true }, { "paperid": "2409.18042v2", "paper_path": "./SciVer/papers/2409.18042v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.18042v2-Table1-1.png", "request_id": 23, "origin_statement": "By employing a continuous vision encoder and a semantic-acoustic disentangled speech tokenizer, EMOVA synergistically aligns vision-language and speech modalities via text-centric training, obviating the need for scarce tri-modal datasets and achieving state-of-the-art, real-time emotional dialogue without external TTS tools.", "perturbed_statement": "By employing a discrete vision encoder and an entangled speech tokenizer, EMOVA requires scarce tri-modal image-text-speech datasets for joint optimization, relying on external TTS tools and failing to achieve state-of-the-art vision-language performance despite supporting real-time emotional dialogue.", "perturbed_explanation": "The perturbation is incorrect because EMOVA uses a continuous (not discrete) vision encoder and a semantic-acoustic disentangled (not entangled) speech tokenizer. 
It also does not require tri-modal datasets—text-centric bi-modal data suffice—and it generates speech end-to-end without external TTS tools, while achieving state-of-the-art vision-language performance.", "claim": "By employing a continuous vision encoder and a semantic-acoustic disentangled speech tokenizer, EMOVA synergistically aligns vision-language and speech modalities via text-centric training, obviating the need for scarce tri-modal datasets and achieving state-of-the-art, real-time emotional dialogue without external TTS tools.", "label": true }, { "paperid": "2411.14321v1", "paper_path": "./SciVer/papers/2411.14321v1.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "4.5" ], "image_path": "./SciVer/images/2411.14321v1_figure_4(a).png", "request_id": 24, "origin_statement": "In the ablation, incrementally expanding the latent dimension keeps the K-step prediction error increasing nearly linearly from ~0.088 to ~0.145 over 1 to 15 steps, substantially below the ~0.21–0.35 range without data increment, indicating richer subspace capacity reduces linearization-induced drift.", "perturbed_statement": "Incrementally expanding the dataset yields K-step errors that rise linearly from ~0.088 to ~0.145 over 1–15 steps, substantially lower than the ~0.21–0.35 range when dimension increment is removed; this implies more data rather than latent capacity primarily reduces linearization error.", "perturbed_explanation": "This is incorrect because the figure shows that the ~0.088–0.145 error curve corresponds to omitting dimension increment (not dataset increment), while the ~0.21–0.35 curve is the result of removing data increment. Thus, low errors arise from dimension increment, not dataset expansion.", "claim": "In the ablation, incrementally expanding the latent dimension keeps the K-step prediction error increasing nearly linearly from ~0.088 to ~0.145 over 1 to 15 steps, substantially below the ~0.21–0.35 range without data increment, indicating richer subspace capacity reduces linearization-induced drift.", "label": true }, { "paperid": "2410.07110v1", "paper_path": "./SciVer/papers/2410.07110v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "1" ], "image_path": "./SciVer/images/2410.07110v1-Table5-1.png", "request_id": 25, "origin_statement": "Due to contrastive replay focusing on misclassified boundary samples, our method achieves a runtime of 3.49h on Split CIFAR-100 with a Quadro RTX 8000—nearly 83% faster than GEM—by reducing buffer search overhead, leading to efficient memory updates without compromising stability or plasticity.", "perturbed_statement": "Due to contrastive replay focusing on misclassified boundary samples, our method achieves a runtime of 1.49h on Split CIFAR-100 with a Quadro RTX 8000—nearly 90% faster than GSS—by reducing buffer search overhead, leading to efficient memory updates without compromising stability or plasticity.", "perturbed_explanation": "The perturbed statement incorrectly states our method’s runtime as 1.49h instead of the actual 3.49h reported for Split CIFAR-100. 
It also misrepresents the baseline comparison by claiming a 90% speedup over GSS, whereas the context specifies a 3.49h runtime versus GSS’s 15.56h (a roughly 78% reduction compared to GSS).", "claim": "Due to contrastive replay focusing on misclassified boundary samples, our method achieves a runtime of 3.49h on Split CIFAR-100 with a Quadro RTX 8000—nearly 83% faster than GEM—by reducing buffer search overhead, leading to efficient memory updates without compromising stability or plasticity.", "label": true }, { "paperid": "2411.10948v1", "paper_path": "./SciVer/papers/2411.10948v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.10948v1_figure_6.png", "request_id": 26, "origin_statement": "ShiftMM’s quantized 4-bit implementation leverages reduced data transfer to improve cache utilization, yielding up to a 186% throughput increase over FP16 at the (4,256,512) layer (30.3 vs. 10.6 GFLOPS), illustrating how bit-level reduction shifts the compute–memory balance in favor of compute-bound operations.", "perturbed_statement": "ShiftMM’s quantized 4-bit implementation leverages reduced data transfer to improve cache utilization, yielding up to a 94% throughput increase over FP16 at the (4,256,512) layer (30.3 vs. 15.6 GFLOPS), illustrating how bit-level reduction shifts the compute–memory balance in favor of compute-bound operations.", "perturbed_explanation": "The statement misreports the FP16 throughput at (4,256,512). The actual FP16 performance is 10.6 GFLOPS (not 15.6 GFLOPS), so the claimed 94% increase is incorrect. Based on the correct numbers (30.3 vs. 10.6), the true speedup is about 186%, not 94%.", "claim": "ShiftMM’s quantized 4-bit implementation leverages reduced data transfer to improve cache utilization, yielding up to a 186% throughput increase over FP16 at the (4,256,512) layer (30.3 vs. 10.6 GFLOPS), illustrating how bit-level reduction shifts the compute–memory balance in favor of compute-bound operations.", "label": true }, { "paperid": "2409.02218v1", "paper_path": "./SciVer/papers/2409.02218v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "3.2.3" ], "image_path": "./SciVer/images/2409.02218v1-Table4-1.png", "request_id": 27, "origin_statement": "The composite contract shows that DSN and CHRG orientation shifts repeatedly inject trajectory estimation noise bounded by hyperparameters, which SBO autonomously compensates for within an improvement interval, whereas TCM_dv maneuvers amplify uncertainty further but guarantee proportional trajectory progress.", "perturbed_statement": "The composite contract shows that DSN and CHRG orientation shifts sequentially inject trajectory estimation noise bounded by hyperparameters, while SBO autonomously amplifies uncertainty within an improvement interval; subsequent TCM_dv maneuvers further increase uncertainty but leave trajectory distance unchanged.", "perturbed_explanation": "This is incorrect because SBO tasks reduce estimation uncertainty within the specified improvement interval (imp_min…imp_max × duration), not amplify it. 
Also, TCM_dv maneuvers do not leave relative trajectory distance unchanged; they decrease (advance) distance proportional to the improvement range (imp_min…imp_max × ΔT_TCM_dv).", "claim": "The composite contract shows that DSN and CHRG orientation shifts repeatedly inject trajectory estimation noise bounded by hyperparameters, which SBO autonomously compensates for within an improvement interval, whereas TCM_dv maneuvers amplify uncertainty further but guarantee proportional trajectory progress.", "label": true }, { "paperid": "2409.02664v1", "paper_path": "./SciVer/papers/2409.02664v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.4.2" ], "image_path": "./SciVer/images/2409.02664v1_figure_6.png", "request_id": 28, "origin_statement": "t-SNE plots reveal that visual prompts create distinct FF real/fake clusters by amplifying subtle pixel-level inconsistencies, effectively utilizing CLIP’s frozen embedding space; this modulation also transfers to CDF data, indicating domain-agnostic detection is driven by alignment of latent representations rather than specialized deepfake features.", "perturbed_statement": "t-SNE plots show visual prompts completely eliminate overlap between FF real and fake clusters by amplifying coarse semantic differences, and this modulation fails to generalize to CDF data, suggesting that domain-general detection is not achieved without dataset-specific prompts.", "perturbed_explanation": "This statement is incorrect because Figure 6 shows that visual prompts do not remove all cluster overlap—they improve but do not perfectly separate FF real and fake features—and the context explicitly states that prompts trained on FF successfully transfer to the unseen CDF dataset, contradicting the claim of failed generalization.", "claim": "t-SNE plots reveal that visual prompts create distinct FF real/fake clusters by amplifying subtle pixel-level inconsistencies, effectively utilizing CLIP’s frozen embedding space; this modulation also transfers to CDF data, indicating domain-agnostic detection is driven by alignment of latent representations rather than specialized deepfake features.", "label": true }, { "paperid": "2410.06795v1", "paper_path": "./SciVer/papers/2410.06795v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.06795v1_figure_2(a).png", "request_id": 30, "origin_statement": "By fine-tuning soft prompt tokens (PATCH), object recognition accuracy jumps from 49.0% to 87.0% on MiniGPT-v2, illustrating how adaptive semantic anchoring reduces cross-modal hallucinations by over 38%, thus enhancing tasks that rely on precise visual feature alignment.", "perturbed_statement": "By fine-tuning soft prompt tokens (PATCH), object recognition accuracy jumps from 49.0% to 97.0% on MiniGPT-v2, illustrating how adaptive semantic anchoring reduces cross-modal hallucinations by over 48%, thus enhancing tasks that rely on precise visual feature alignment.", "perturbed_explanation": "The perturbed statement misreports the object recognition result and reduction percentage. 
Figure 2 shows accuracy rises from 49.01% to 87.01% with PATCH, not 97.0%, and the absolute improvement is ~38% rather than 48%.", "claim": "By fine-tuning soft prompt tokens (PATCH), object recognition accuracy jumps from 49.0% to 87.0% on MiniGPT-v2, illustrating how adaptive semantic anchoring reduces cross-modal hallucinations by over 38%, thus enhancing tasks that rely on precise visual feature alignment.", "label": true }, { "paperid": "2410.09356v1", "paper_path": "./SciVer/papers/2410.09356v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.09356v1-Table2-1.png", "request_id": 31, "origin_statement": "FMPESTF achieves consistently lowest RMSE across PEMS03, PEMS04, PEMS07, and PEMS08 by combining dynamic-static graph fusion to capture non-linear spatial dependencies and multi-head temporal attention to model nonstationary traffic demand fluctuations.", "perturbed_statement": "FMPESTF achieves consistently lowest MAPE across PEMS03, PEMS04, PEMS07, and PEMS08 by combining dynamic-static graph fusion to capture non-linear spatial dependencies and multi-head temporal attention to model nonstationary traffic demand fluctuations.", "perturbed_explanation": "The perturbed claim is incorrect because FMPESTF’s MAPE on PEMS03 is 15.11%, which is higher than ASTGNN’s 14.79%, so FMPESTF does not achieve the lowest MAPE on all listed datasets.", "claim": "FMPESTF achieves consistently lowest RMSE across PEMS03, PEMS04, PEMS07, and PEMS08 by combining dynamic-static graph fusion to capture non-linear spatial dependencies and multi-head temporal attention to model nonstationary traffic demand fluctuations.", "label": true }, { "paperid": "2411.08298v1", "paper_path": "./SciVer/papers/2411.08298v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "5" ], "image_path": "./SciVer/images/2411.08298v1_figure_3.png", "request_id": 32, "origin_statement": "TM theory's g(r) for PE24r exhibits a first peak at ~4.9Å with height ~1.01, closely matching MD, whereas PRISM-PY underestimates local ordering, placing its first peak at ~5.2Å with a lower amplitude (~0.90), due to its coarse-grained potential's inability to capture precise intermolecular packing.", "perturbed_statement": "In PE24r, TM theory predicts the first g(r) peak at ~3.5Å with height ~1.2, perfectly matching MD, while PRISM-PY theory locates its peak at ~6.0Å with amplitude ~0.80, indicating PRISM-PY drastically underestimates solvation shell ordering.", "perturbed_explanation": "TM theory's actual first peak occurs at ~4.9Å with height ~1.01, not at 3.5Å or 1.2. 
PRISM-PY's first peak lies near ~5.2Å with amplitude ~0.92, not at 6.0Å or 0.80, contradicting the simulated and theoretical data reported in Figure 3(a).", "claim": "TM theory's g(r) for PE24r exhibits a first peak at ~4.9Å with height ~1.01, closely matching MD, whereas PRISM-PY underestimates local ordering, placing its first peak at ~5.2Å with a lower amplitude (~0.90), due to its coarse-grained potential's inability to capture precise intermolecular packing.", "label": true }, { "paperid": "2409.15727v1", "paper_path": "./SciVer/papers/2409.15727v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.15727v1-Table1-1.png", "request_id": 33, "origin_statement": "By leveraging LMM shape variance to guide the PnP solver, LaPose achieves a 6.1% absolute boost in NIoU75 over DMSR, indicating enhanced fine-grained surface alignment under scale-agnostic evaluation on NOCS-REAL275.", "perturbed_statement": "By leveraging LMM shape variance to guide the PnP solver, LaPose achieves a 16.1% absolute boost in NIoU75 over DMSR, indicating enhanced fine-grained surface alignment under scale-agnostic evaluation on NOCS-REAL275.", "perturbed_explanation": "The claimed 16.1% boost is incorrect: DMSR’s NIoU75 is 9.7% and LaPose’s is 15.8%, for an actual improvement of 6.1%, not 16.1% as stated.", "claim": "By leveraging LMM shape variance to guide the PnP solver, LaPose achieves a 6.1% absolute boost in NIoU75 over DMSR, indicating enhanced fine-grained surface alignment under scale-agnostic evaluation on NOCS-REAL275.", "label": true }, { "paperid": "2411.09289v1", "paper_path": "./SciVer/papers/2411.09289v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.09289v1_figure_3.png", "request_id": 36, "origin_statement": "StreamAdapter’s unseen-task accuracy steadily rises with up to 30 demonstrations, e.g., from 48% to 57% on ARC-C, suggesting that its parameterized adaptation transforms in-context examples into weight updates, enhancing generalization by embedding sample-specific semantics rather than relying solely on token-level retrieval.", "perturbed_statement": "StreamAdapter’s unseen-task accuracy plateaus after 10 demonstrations at around 90%, implying diminishing returns as additional in-context examples fail to convert semantics into further weight updates.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 3 shows unseen-task accuracies peaking between 50% and 57% (not 90%) and continuing to rise beyond 10 demonstrations rather than plateauing at that point.", "claim": "StreamAdapter’s unseen-task accuracy steadily rises with up to 30 demonstrations, e.g., from 48% to 57% on ARC-C, suggesting that its parameterized adaptation transforms in-context examples into weight updates, enhancing generalization by embedding sample-specific semantics rather than relying solely on token-level retrieval.", "label": true }, { "paperid": "2409.14067v1", "paper_path": "./SciVer/papers/2409.14067v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.14067v1-Table3-1.png", "request_id": 37, "origin_statement": "By leveraging multi-view consistency regularization, our approach improves average PSNR by 4.3 dB over PNeRFLoc, enhances SSIM by 0.074, and reduces LPIPS by 0.163, indicating superior signal fidelity and perceptual similarity through better high-frequency detail 
reconstruction.", "perturbed_statement": "By leveraging multi-view consistency regularization, our approach improves average PSNR by 1.5 dB over PNeRFLoc and reduces LPIPS by 0.24, indicating superior signal fidelity and perceptual similarity through better high-frequency detail reconstruction.", "perturbed_explanation": "The perturbed statement misreports the improvements: Table 3 shows an actual PSNR gain of 30.14−25.84=4.30 dB (not 1.5 dB) and an LPIPS reduction of 0.2440−0.0810=0.163 (not 0.24).", "claim": "By leveraging multi-view consistency regularization, our approach improves average PSNR by 4.3 dB over PNeRFLoc, enhances SSIM by 0.074, and reduces LPIPS by 0.163, indicating superior signal fidelity and perceptual similarity through better high-frequency detail reconstruction.", "label": true }, { "paperid": "2411.03966v1", "paper_path": "./SciVer/papers/2411.03966v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.03966v1_figure_8.png", "request_id": 40, "origin_statement": "Because the anxiety–valence correlation is moderately negative while anxiety–arousal correlation is mild, anxiety lexicons are shaped more by perceived unpleasantness than by physiological activation, indicating that valence moderation plays a larger role than arousal in predicting anxiety-related word processing inefficiencies.", "perturbed_statement": "Because the anxiety–arousal correlation is strong while anxiety–valence correlation is weak, anxiety lexicons are shaped more by physiological activation than by perceived unpleasantness, indicating that arousal moderation plays a larger role than valence in predicting anxiety-related word processing inefficiencies.", "perturbed_explanation": "The perturbation incorrectly labels the anxiety–arousal correlation as strong, whereas the context describes it as only mild, and it calls the anxiety–valence correlation weak, despite it being described as moderate negative. 
These details directly contradict the reported correlation magnitudes.", "claim": "Because the anxiety–valence correlation is moderately negative while anxiety–arousal correlation is mild, anxiety lexicons are shaped more by perceived unpleasantness than by physiological activation, indicating that valence moderation plays a larger role than arousal in predicting anxiety-related word processing inefficiencies.", "label": true }, { "paperid": "2409.05755v1", "paper_path": "./SciVer/papers/2409.05755v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.05755v1-Table1-1.png", "request_id": 41, "origin_statement": "Hyperparameter fine-tuning boosts GCN accuracy by up to 17.51 percentage points on heterophilic datasets like 'questions', indicating that optimizing learning rates and regularization is critical to capture non-smooth label distributions.", "perturbed_statement": "Hyperparameter fine-tuning boosts GCN accuracy by up to 25.51 percentage points on heterophilic datasets like 'questions', indicating that optimizing learning rates and regularization is critical to capture non-smooth label distributions.", "perturbed_explanation": "This claim is incorrect because Table 1 shows the maximum GCN accuracy improvement on the 'questions' dataset is 17.51 percentage points, not 25.51, so the stated 25.51-point boost contradicts the actual results.", "claim": "Hyperparameter fine-tuning boosts GCN accuracy by up to 17.51 percentage points on heterophilic datasets like 'questions', indicating that optimizing learning rates and regularization is critical to capture non-smooth label distributions.", "label": true }, { "paperid": "2410.15355v1", "paper_path": "./SciVer/papers/2410.15355v1.json", "claim_type": "analytical", "type": "chart", "item": "7(b)", "section": [ "6.6" ], "image_path": "./SciVer/images/2410.15355v1_figure_7(b).png", "request_id": 42, "origin_statement": "LAC’s classification accuracy varies by less than 2.0% when the mask ratio increases from 0.1 to 0.9, implying that the masking operation functions as a regularizer by introducing controlled noise without degrading the contrastive representation of graph topology.", "perturbed_statement": "LAC’s classification accuracy fluctuates by up to 5.0% as the mask ratio varies from 0.1 to 0.9, suggesting that the mask mechanism introduces substantial noise that can significantly alter contrastive graph representations.", "perturbed_explanation": "The statement is incorrect because the context specifies the accuracy variation remains below 2.0% when the mask ratio changes from 0.1 to 0.9, not as high as 5.0%, contradicting the claimed fluctuation magnitude.", "claim": "LAC’s classification accuracy varies by less than 2.0% when the mask ratio increases from 0.1 to 0.9, implying that the masking operation functions as a regularizer by introducing controlled noise without degrading the contrastive representation of graph topology.", "label": true }, { "paperid": "2410.09729v2", "paper_path": "./SciVer/papers/2410.09729v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2410.09729v2-Table3-1.png", "request_id": 43, "origin_statement": "Applying spaced targets on 10k samples (V) yields a 14% average F1—half of the 28% achieved by standard fine-tuning on 100k records (W)—demonstrating that improved target formatting can deliver efficiency gains comparable to a tenfold increase in training data.", 
"perturbed_statement": "Applying spaced targets on 10k samples (V) yields a 14% average F1—one-third of the 42% achieved by standard fine-tuning on 100k records (W)—demonstrating that improved target formatting can deliver efficiency gains comparable to a tenfold increase in training data.", "perturbed_explanation": "The table actually reports an average F1 of 28% for W, not 42%, and 14% is half of 28%, not one-third, making both the performance value and the stated ratio incorrect.", "claim": "Applying spaced targets on 10k samples (V) yields a 14% average F1—half of the 28% achieved by standard fine-tuning on 100k records (W)—demonstrating that improved target formatting can deliver efficiency gains comparable to a tenfold increase in training data.", "label": true }, { "paperid": "2411.15665v1", "paper_path": "./SciVer/papers/2411.15665v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4" ], "image_path": "./SciVer/images/2411.15665v1_figure_6.png", "request_id": 48, "origin_statement": "The sharp decrease in the g(r) peak at 0.75 Å between 188 and 190 GPa at 1800 K reflects abrupt H–H bond breaking, signaling a pressure-induced first-order molecular-to-atomic liquid transition consistent with a density-driven destabilization of the molecular state.", "perturbed_statement": "The sharp decrease in the g(r) peak at 0.85 Å between 188 and 192 GPa at 1800 K reflects abrupt H–H bond breaking, signaling a pressure-induced first-order molecular-to-atomic liquid transition consistent with a density-driven destabilization of the molecular state.", "perturbed_explanation": "This statement is incorrect because the primary molecular bond peak occurs at 0.75 Å (not 0.85 Å), and the pressure increase studied is from 188 to 190 GPa (not to 192 GPa). These details contradict the reported context.", "claim": "The sharp decrease in the g(r) peak at 0.75 Å between 188 and 190 GPa at 1800 K reflects abrupt H–H bond breaking, signaling a pressure-induced first-order molecular-to-atomic liquid transition consistent with a density-driven destabilization of the molecular state.", "label": true }, { "paperid": "2411.11912v1", "paper_path": "./SciVer/papers/2411.11912v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "5.2", "6.3" ], "image_path": "./SciVer/images/2411.11912v1_figure_7.png", "request_id": 50, "origin_statement": "Server-level multi-objective optimization minimizes the variance of layer selection counts, boosting mid-range layers (e.g., layers 15, 19, 23) that were underutilized at the client level, thereby promoting balanced depth-wise feature extraction and reducing overfitting.", "perturbed_statement": "Server-level multi-objective optimization increases the variance of layer selection counts, boosting only top-range layers (e.g., layers 28, 30) at the expense of mid-range diversity, to concentrate feature extraction in deeper representations.", "perturbed_explanation": "This statement is wrong because the optimization objective is to minimize layer-selection variance, not increase it, and Fig. 
7 shows enhanced selection of mid-range layers (15, 19, 23) rather than concentrating on layers 28 and 30.", "claim": "Server-level multi-objective optimization minimizes the variance of layer selection counts, boosting mid-range layers (e.g., layers 15, 19, 23) that were underutilized at the client level, thereby promoting balanced depth-wise feature extraction and reducing overfitting.", "label": true }, { "paperid": "2410.21603v2", "paper_path": "./SciVer/papers/2410.21603v2.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.3.1", "5.3.2" ], "image_path": "./SciVer/images/2410.21603v2_figure_6.png", "request_id": 52, "origin_statement": "Applying the 0.1% distance threshold elevates the median posterior probability of M₃ to approximately 0.7 for both ABC-CvM and ABC-Wass(log), reflecting enhanced model discrimination but increased variance due to stricter acceptance criteria.", "perturbed_statement": "At the 1% quantile threshold, ABC-Stat attains a median posterior probability of approximately 0.8 for M₃, indicating superior model discrimination with minimal variance.", "perturbed_explanation": "The perturbed statement is incorrect because the threshold in Figure 6 is 0.1%, not 1%, and ABC-Stat never achieves the highest median posterior (around 0.6)—ABC-CvM and ABC-Wass(log) outperform ABC-Stat and show greater variance under stricter thresholds.", "claim": "Applying the 0.1% distance threshold elevates the median posterior probability of M₃ to approximately 0.7 for both ABC-CvM and ABC-Wass(log), reflecting enhanced model discrimination but increased variance due to stricter acceptance criteria.", "label": true }, { "paperid": "2409.15218v1", "paper_path": "./SciVer/papers/2409.15218v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.15218v1_figure_3.png", "request_id": 54, "origin_statement": "One-loop Δaₘᵤ in the ALRM linearly decreases from ≈1.8×10⁻¹² at M_{W_R}=1200 GeV to ≈1.0×10⁻¹² at 1600 GeV, reflecting gauge-loop suppression with heavier W_R, while larger M_{H₂^±} enhances scalar-loop contributions via increased Higgs VEV couplings.", "perturbed_statement": "In the ALRM, one-loop Δaₘᵤ grows quadratically from ≈1.0×10⁻¹² at M_{W_R}=1200 GeV to ≈1.8×10⁻¹² at 1600 GeV, indicating heavier W_R amplifies gauge-loop contributions, while increasing M_{H₂^±} reduces scalar-loop effects.", "perturbed_explanation": "Figure 3 shows Δaₘᵤ actually decreases with increasing M_{W_R}, not grows, and the colour coding indicates that larger M_{H₂^±} values correspond to higher Δaₘᵤ, not reduced scalar contributions, contradicting the perturbed statement.", "claim": "One-loop Δaₘᵤ in the ALRM linearly decreases from ≈1.8×10⁻¹² at M_{W_R}=1200 GeV to ≈1.0×10⁻¹² at 1600 GeV, reflecting gauge-loop suppression with heavier W_R, while larger M_{H₂^±} enhances scalar-loop contributions via increased Higgs VEV couplings.", "label": true }, { "paperid": "2409.13587v1", "paper_path": "./SciVer/papers/2409.13587v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.3" ], "image_path": "./SciVer/images/2409.13587v1-Table3-1.png", "request_id": 55, "origin_statement": "Coefficient-based compression reduces challenge Hamiltonians to less than 1% of their original terms, while open-source molecular Hamiltonians retain more than 80%, reflecting the sparser operator spectrum of larger quantum challenge systems that enables more aggressive small-coefficient cutoff compared to 
denser molecular operators.", "perturbed_statement": "Coefficient-based compression reduces challenge Hamiltonians to less than 5% of their original terms, while open-source molecular Hamiltonians retain over 95%, reflecting the uniform operator spectrum of molecular systems that requires preserving almost all coefficients.", "perturbed_explanation": "The perturbed statement misstates the compression ratios. In Table 3, challenge Hamiltonians compress to well under 1% of their original term counts (e.g., 20qubits_00: 63636 → 91, ~0.14%), not under 5%. Likewise, open-source molecular Hamiltonians retain about 82–91% of terms (46/56≈82%), not over 95%.", "claim": "Coefficient-based compression reduces challenge Hamiltonians to less than 1% of their original terms, while open-source molecular Hamiltonians retain more than 80%, reflecting the sparser operator spectrum of larger quantum challenge systems that enables more aggressive small-coefficient cutoff compared to denser molecular operators.", "label": true }, { "paperid": "2409.01239v1", "paper_path": "./SciVer/papers/2409.01239v1.json", "claim_type": "analytical", "type": "chart", "item": "10(a)", "section": [ "6.1" ], "image_path": "./SciVer/images/2409.01239v1_figure_10(a).png", "request_id": 60, "origin_statement": "Figure 10 shows that super-Jupiter exoplanets (Mₚ > 3 M_J) around low-mass stars (M* ≤ 0.65 M⊙) occur only for hosts with metallicities above +0.2 dex, consistent with core-accretion theory requiring metal-rich disks to form massive gas giants in low-mass systems.", "perturbed_statement": "Figure 10 shows that super-Jupiter exoplanets (Mₚ > 2 M_J) around low-mass stars (M* ≤ 0.7 M⊙) occur only for hosts with metallicities above +0.1 dex, consistent with core-accretion theory requiring metal-rich disks to form massive gas giants in low-mass systems.", "perturbed_explanation": "The statement misdefines super-Jupiters as Mₚ > 2 M_J, whereas the context and figure use Mₚ > 3 M_J for ‘super-Jupiter.’ It also incorrectly sets the low-mass star cutoff at M* ≤ 0.7 M⊙ instead of 0.65 M⊙, and lowers the metallicity threshold to +0.1 dex rather than the observed +0.2 dex.", "claim": "Figure 10 shows that super-Jupiter exoplanets (Mₚ > 3 M_J) around low-mass stars (M* ≤ 0.65 M⊙) occur only for hosts with metallicities above +0.2 dex, consistent with core-accretion theory requiring metal-rich disks to form massive gas giants in low-mass systems.", "label": true }, { "paperid": "2410.17406v1", "paper_path": "./SciVer/papers/2410.17406v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.2.1" ], "image_path": "./SciVer/images/2410.17406v1-Table1-1.png", "request_id": 61, "origin_statement": "Although chunking with multiple references yields near-perfect exploitation detection (98% TP), its mitigation accuracy drops due to information overload, whereas summarization distills cross-sourced defenses, boosting mitigation TP by 32% (from 47% to 79%), highlighting summary-based retrieval’s efficacy under context-window constraints.", "perturbed_statement": "Although chunking with multiple references yields near-perfect exploitation detection (95% TP), its mitigation accuracy drops due to information overload, whereas summarization distills cross-sourced defenses, boosting mitigation TP by 35% (from 47% to 82%), highlighting summary-based retrieval’s efficacy under context-window constraints.", "perturbed_explanation": "The perturbed statement misreports two metrics: chunking’s exploitation TP is listed as 95%, but 
Table 1 shows 98% for the NVD+CWE+Refs condition. It also claims summarization’s mitigation TP rises to 82% (a 35% boost), whereas the actual increase is from 47% to 79% (a 32% boost).", "claim": "Although chunking with multiple references yields near-perfect exploitation detection (98% TP), its mitigation accuracy drops due to information overload, whereas summarization distills cross-sourced defenses, boosting mitigation TP by 32% (from 47% to 79%), highlighting summary-based retrieval’s efficacy under context-window constraints.", "label": true }, { "paperid": "2411.01021v1", "paper_path": "./SciVer/papers/2411.01021v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.01021v1_figure_6.png", "request_id": 62, "origin_statement": "Leveraging a 10 s EKF update interval, the nominal 4 h mission attains a maximum position covariance eigenvalue ≈7 m (0.0209% of range), evidencing robust observability, and requires merely 11.5 m/s total Δv—8.5 m/s less than prior approach-cone–constrained solutions.", "perturbed_statement": "Leveraging a 10 s EKF update interval, the nominal 4 h mission attains a maximum position covariance eigenvalue ≈70 m (0.21% of range), evidencing robust observability, and requires merely 12.5 m/s total Δv—8.5 m/s less than baseline.", "perturbed_explanation": "The perturbed statement is wrong because the actual maximum position covariance eigenvalue at mission end is approximately 7 m (not 70 m), corresponding to a 0.0209% error (not 0.21%). Additionally, the nominal trajectory Δv is 11.5 m/s, not 12.5 m/s.", "claim": "Leveraging a 10 s EKF update interval, the nominal 4 h mission attains a maximum position covariance eigenvalue ≈7 m (0.0209% of range), evidencing robust observability, and requires merely 11.5 m/s total Δv—8.5 m/s less than prior approach-cone–constrained solutions.", "label": true }, { "paperid": "2411.07200v1", "paper_path": "./SciVer/papers/2411.07200v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.07200v1-Table2-1.png", "request_id": 63, "origin_statement": "Removing cluster 1 trajectories reduces Seaquest’s initial state value from 3.569 to 2.679, a 25% decrease, suggesting that cluster 1 contributes disproportionately to cultivating high-value exploratory paths crucial for robust policy performance.", "perturbed_statement": "Removing cluster 3 trajectories reduces Seaquest’s initial state value from 3.569 to 2.679, a 25% decrease, suggesting that cluster 3 is vital for high-value exploration.", "perturbed_explanation": "The perturbation is wrong because the 3.569→2.679 drop corresponds to removing cluster 1 trajectories, not cluster 3. 
Removing cluster 3 actually yields an ISV of 3.061±0.01, not 2.679, so cluster 3 does not cause the reported 25% decrease.", "claim": "Removing cluster 1 trajectories reduces Seaquest’s initial state value from 3.569 to 2.679, a 25% decrease, suggesting that cluster 1 contributes disproportionately to cultivating high-value exploratory paths crucial for robust policy performance.", "label": true }, { "paperid": "2411.18383v1", "paper_path": "./SciVer/papers/2411.18383v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.18383v1_figure_5.png", "request_id": 64, "origin_statement": "Spikes in Topic No.5 video counts in April 2021 and August 2023 align with government treated-water release announcements, showing that surges in public-facing nuclear discourse are driven by heightened radiological safety concerns amplified through risk perception mechanisms.", "perturbed_statement": "Spikes in Topic No.5 video counts in May 2021 and September 2023 align with government Fukushima reactor decommissioning plans, showing that surges in public-facing nuclear discourse are driven by heightened radiological safety concerns amplified through risk perception mechanisms.", "perturbed_explanation": "The perturbed statement incorrectly dates the spikes as occurring in May 2021 and September 2023 instead of April 2021 and August 2023, and misattributes them to Fukushima reactor decommissioning plans rather than treated-water release announcements, contradicting Figure 5 and the context description.", "claim": "Spikes in Topic No.5 video counts in April 2021 and August 2023 align with government treated-water release announcements, showing that surges in public-facing nuclear discourse are driven by heightened radiological safety concerns amplified through risk perception mechanisms.", "label": true }, { "paperid": "2410.10442v1", "paper_path": "./SciVer/papers/2410.10442v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.10442v1-Table4-1.png", "request_id": 65, "origin_statement": "Our DCT approach achieves 79.3% average accuracy on the Office-Home benchmark, outperforming SAR by 2.0% and yielding the lowest overall standard deviation (±0.1%), which suggests that domain-specific channel transformations stabilize predictions across diverse domain shifts.", "perturbed_statement": "Our DCT approach achieves 79.3% average accuracy on the Office-Home benchmark, outperforming SAR by 2.0% and yielding the lowest overall standard deviation (±0.05%), which suggests that domain-specific channel transformations stabilize predictions across diverse domain shifts.", "perturbed_explanation": "The perturbed statement misreports the overall standard deviation: Table 4 indicates the lowest average standard deviation is ±0.1%, not ±0.05%.", "claim": "Our DCT approach achieves 79.3% average accuracy on the Office-Home benchmark, outperforming SAR by 2.0% and yielding the lowest overall standard deviation (±0.1%), which suggests that domain-specific channel transformations stabilize predictions across diverse domain shifts.", "label": true }, { "paperid": "2409.14762v1", "paper_path": "./SciVer/papers/2409.14762v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.14762v1_figure_8.png", "request_id": 66, "origin_statement": "GPT4 and Claude2 exhibit strong positive correlations between accuracy and rethink 
ability (68.4 vs. 75.0 and 61.0 vs. 74.4, respectively), suggesting that robust error-detection directly enhances their final judgments, in line with iterative refinement theory in cognitive systems.", "perturbed_statement": "GPT4 and Claude2 exhibit strong positive correlations between accuracy and rethink ability (68.4 vs. 85.0 and 61.0 vs. 74.4, respectively), suggesting that robust error-detection directly enhances their final judgments, consistent with Spearman correlation principles.", "perturbed_explanation": "The perturbed statement incorrectly reports GPT4’s rethink correlation as 85.0 instead of the actual 75.0, and it wrongly refers to Spearman correlation when the analysis explicitly uses the Pearson correlation coefficient.", "claim": "GPT4 and Claude2 exhibit strong positive correlations between accuracy and rethink ability (68.4 vs. 75.0 and 61.0 vs. 74.4, respectively), suggesting that robust error-detection directly enhances their final judgments, in line with iterative refinement theory in cognitive systems.", "label": true }, { "paperid": "2410.03437v2", "paper_path": "./SciVer/papers/2410.03437v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.03437v2-Table3-1.png", "request_id": 67, "origin_statement": "Zebra’s 73.3% relative L2 reduction on the Wave 2D dataset compared to CODA (from 0.777 to 0.207) indicates its in-context sampling mechanism robustly models high-dimensional spatial dependencies without gradient-based adaptation.", "perturbed_statement": "Zebra’s 85% relative L2 reduction on the Wave 2D dataset compared to CODA (from 0.777 to 0.207) indicates its in-context sampling mechanism robustly models high-dimensional spatial dependencies without gradient-based adaptation.", "perturbed_explanation": "The stated 85% reduction is incorrect. 
Table 3 shows CODA’s error at 0.777 and Zebra’s at 0.207, so the actual reduction is (0.777−0.207)/0.777 ≈ 73.3%, not 85%.", "claim": "Zebra’s 73.3% relative L2 reduction on the Wave 2D dataset compared to CODA (from 0.777 to 0.207) indicates its in-context sampling mechanism robustly models high-dimensional spatial dependencies without gradient-based adaptation.", "label": true }, { "paperid": "2410.17032v1", "paper_path": "./SciVer/papers/2410.17032v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.17032v1-Table1-1.png", "request_id": 69, "origin_statement": "The normalized Mann–Whitney U of 0.55 between women and men indicates women raters assign higher harm ratings in 55% of pairwise comparisons, reflecting significant gender-based differences in perceptual sensitivity to potentially offensive imagery.", "perturbed_statement": "The normalized Mann–Whitney U of 0.45 between women and men indicates women raters assign higher harm ratings in 55% of pairwise comparisons, reflecting significant gender-based differences in perceptual sensitivity to potentially offensive imagery.", "perturbed_explanation": "The reported Mann–Whitney U statistic between women and men is 0.55 (not 0.45) according to Table 1 and the study text; stating it as 0.45 contradicts the context’s statistical result.", "claim": "The normalized Mann–Whitney U of 0.55 between women and men indicates women raters assign higher harm ratings in 55% of pairwise comparisons, reflecting significant gender-based differences in perceptual sensitivity to potentially offensive imagery.", "label": true }, { "paperid": "2410.17694v1", "paper_path": "./SciVer/papers/2410.17694v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "5" ], "image_path": "./SciVer/images/2410.17694v1_figure_4.png", "request_id": 70, "origin_statement": "The narrower like distribution of SynthRAG responses (25% unrated, 69.79% in 1–10 likes) compared to humans (57.74% unrated, 34.47% in 1–10 likes) indicates automated content yields more consistent mid-range engagement, likely due to uniform quality control, whereas human answers show greater variability.", "perturbed_statement": "The narrower like distribution of SynthRAG responses (30% unrated, 60% in 1–10 likes) compared to humans (57.74% unrated, 34.47% in 1–10 likes) indicates automated content yields more consistent mid-range engagement, likely due to uniform quality control, whereas human answers show greater variability.", "perturbed_explanation": "The perturbed statement is incorrect because the actual proportion of unrated SynthRAG responses is 25% (not 30%) and the true share in the 1–10 likes category is 69.79% (not 60%), as reported in Fig. 
4 and the context.", "claim": "The narrower like distribution of SynthRAG responses (25% unrated, 69.79% in 1–10 likes) compared to humans (57.74% unrated, 34.47% in 1–10 likes) indicates automated content yields more consistent mid-range engagement, likely due to uniform quality control, whereas human answers show greater variability.", "label": true }, { "paperid": "2410.02401v5", "paper_path": "./SciVer/papers/2410.02401v5.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "3.2", "4.3" ], "image_path": "./SciVer/images/2410.02401v5_figure_3.png", "request_id": 74, "origin_statement": "MoCo with temperature τ=0.07 yields 67% Top-1 accuracy by epoch 100, outperforming MoCo τ=0.2 by 6% and MoCo-v2 by 20%, illustrating that sharper contrastive distributions (lower τ) accelerate representation convergence by emphasizing harder negatives.", "perturbed_statement": "MoCo with temperature τ=0.07 achieves only 55% Top-1 accuracy by epoch 100, underperforming MoCo τ=0.2 by about 5%, suggesting that lower τ reduces emphasis on hard negatives and slows convergence.", "perturbed_explanation": "The perturbation is incorrect because, according to Figure 3, MoCo τ=0.07 actually reaches around 67% accuracy at 100 epochs, and it outperforms MoCo τ=0.2 (≈61%), rather than underperforming by 5%.", "claim": "MoCo with temperature τ=0.07 yields 67% Top-1 accuracy by epoch 100, outperforming MoCo τ=0.2 by 6% and MoCo-v2 by 20%, illustrating that sharper contrastive distributions (lower τ) accelerate representation convergence by emphasizing harder negatives.", "label": true }, { "paperid": "2409.06448v1", "paper_path": "./SciVer/papers/2409.06448v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2.1", "2.2" ], "image_path": "./SciVer/images/2409.06448v1-Table1-1.png", "request_id": 75, "origin_statement": "Combined RC-band polarimetric observations from phase angles 2.7° to 80.4° using MSI, WFGS2, and HONIR produced a polarization–phase curve exhibiting a negative branch below ≈20°, an inversion near 25°, and a positive maximum near 80°, characteristic of low-albedo C-complex dust scattering.", "perturbed_statement": "Combined RC-band polarimetric observations from phase angles 2.7° to 80.4° using MSI, WFGS2, and HONIR produced a polarization–phase curve exhibiting a negative branch below ≈30°, an inversion near 35°, and a positive maximum near 75°, characteristic of low-albedo C-complex dust scattering.", "perturbed_explanation": "The perturbed statement misstates key phase‐angle thresholds: the negative polarization branch for WH only extends up to ≈20°, not 30°; the inversion (zero‐crossing) occurs near ≈25°, not 35°; and the peak positive polarization is observed near 80°, not 75°.", "claim": "Combined RC-band polarimetric observations from phase angles 2.7° to 80.4° using MSI, WFGS2, and HONIR produced a polarization–phase curve exhibiting a negative branch below ≈20°, an inversion near 25°, and a positive maximum near 80°, characteristic of low-albedo C-complex dust scattering.", "label": true }, { "paperid": "2409.16400v1", "paper_path": "./SciVer/papers/2409.16400v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.5" ], "image_path": "./SciVer/images/2409.16400v1_figure_6.png", "request_id": 76, "origin_statement": "CAPTAIN's recall rises from 36.67% to 64.74%, a 77% relative gain over the DLNN baseline, illustrating that sequence-based pattern matching more effectively captures attacker TTP patterns than 
presence-only features when threat-group data is limited.", "perturbed_statement": "CAPTAIN's recall rises from 36.67% to 64.74%, a 120% relative gain over the DLNN baseline, confirming that sequence-based pattern matching is twice as effective as presence-only features in scarce data conditions.", "perturbed_explanation": "This claim is incorrect because the actual recall improvement from 36.67% to 64.74% corresponds to approximately a 77% relative increase, not 120%. Therefore, stating a 120% gain and that CAPTAIN is twice as effective contradicts the reported metrics.", "claim": "CAPTAIN's recall rises from 36.67% to 64.74%, a 77% relative gain over the DLNN baseline, illustrating that sequence-based pattern matching more effectively captures attacker TTP patterns than presence-only features when threat-group data is limited.", "label": true }, { "paperid": "2409.03735v1", "paper_path": "./SciVer/papers/2409.03735v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.03735v1_figure_3.png", "request_id": 78, "origin_statement": "The variance in prompt responses for quantized and aligned models, such as tulu-2-7B-AWQ, is markedly broader for COPPA, reflecting lower response quality; this heightened inconsistency underlines the need for majority-based consistency checks to robustly assess encoded norms.", "perturbed_statement": "The variance in prompt responses for quantized and aligned models, such as tulu-2-7B-AWQ, is remarkably narrower for COPPA, reflecting higher response quality; this reduced inconsistency suggests minority-based consistency checks suffice to assess encoded norms.", "perturbed_explanation": "This statement contradicts the context in three ways: Figure 3 shows tulu-2-7B-AWQ exhibits broader, not narrower, variance for COPPA. The paper attributes that broader variance to lower, not higher, response quality. 
Finally, the methodology uses a simple majority threshold (50%), not minority-based checks.", "claim": "The variance in prompt responses for quantized and aligned models, such as tulu-2-7B-AWQ, is markedly broader for COPPA, reflecting lower response quality; this heightened inconsistency underlines the need for majority-based consistency checks to robustly assess encoded norms.", "label": true }, { "paperid": "2410.19886v1", "paper_path": "./SciVer/papers/2410.19886v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.19886v1-Table4-1.png", "request_id": 79, "origin_statement": "By modeling C-rate dynamics with the C kernel, temperature trends with the T kernel, and depth-of-discharge with the RBF kernel, the combined additive kernel decomposition captures cross-feature nonlinearities, yielding the lowest RMSE (24.57 cycles) and predictive covariance (2.60), reflecting robust, low-variance EOL predictions.", "perturbed_statement": "By modeling C-rate dynamics with the C kernel, temperature trends with the T kernel, and depth-of-discharge with the RBF kernel, the combined additive kernel decomposition captures cross-feature nonlinearities, yielding the lowest RMSE (20.42 cycles) and predictive covariance (2.60), reflecting robust, low-variance EOL predictions.", "perturbed_explanation": "The combined kernel’s RMSE is incorrectly stated as 20.42 cycles; Table 4 reports an average RMSE of 24.57 cycles for the combined kernel, making the perturbed value inconsistent with the documented test-set performance.", "claim": "By modeling C-rate dynamics with the C kernel, temperature trends with the T kernel, and depth-of-discharge with the RBF kernel, the combined additive kernel decomposition captures cross-feature nonlinearities, yielding the lowest RMSE (24.57 cycles) and predictive covariance (2.60), reflecting robust, low-variance EOL predictions.", "label": true }, { "paperid": "2411.07133v2", "paper_path": "./SciVer/papers/2411.07133v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.07133v2-Table2-1.png", "request_id": 81, "origin_statement": "Qwen2.5-72B-Instruct achieves a win-rate score of 21.01% compared to GPT-4’s 5.70%, indicating that open-source LLMs generate more evaluator-preferred responses, thereby providing superior synthetic training data that enhance base-model alignment and instruction following.", "perturbed_statement": "Qwen2.5-72B-Instruct achieves a win-rate score of 15.01% compared to GPT-4’s 6.70%, indicating that open-source LLMs generate more evaluator-preferred responses, thereby providing superior synthetic training data that enhance base-model alignment and instruction following.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 shows Qwen2.5-72B-Instruct’s win-rate (WR) is 21.01%, not 15.01%, and GPT-4’s WR is 5.70%, not 6.70%.", "claim": "Qwen2.5-72B-Instruct achieves a win-rate score of 21.01% compared to GPT-4’s 5.70%, indicating that open-source LLMs generate more evaluator-preferred responses, thereby providing superior synthetic training data that enhance base-model alignment and instruction following.", "label": true }, { "paperid": "2411.10018v1", "paper_path": "./SciVer/papers/2411.10018v1.json", "claim_type": "analytical", "type": "chart", "item": "1(a)", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.10018v1_figure_1(a).png", "request_id": 82, "origin_statement": "The 
rising trend in performance emotionality—from about 0.42 at 10% to nearly 0.50 at 90% narrative time—indicates that characters’ affective expressions intensify toward the climax, aligning with theories of narrative tension that predict escalating emotional arousal as a story progresses.", "perturbed_statement": "The rising trend in performance emotionality—from about 0.42 at 10% to nearly 0.60 at 90% narrative time—indicates that characters’ affective expressions intensify toward the climax, aligning with theories of narrative tension that predict escalating emotional arousal as a story progresses.", "perturbed_explanation": "The perturbed statement misstates the emotionality at 90% narrative time. Figure 1(a) shows an emotionality value just under 0.50 (around 0.493), not 0.60, making the 0.60 figure inconsistent with the actual plotted data.", "claim": "The rising trend in performance emotionality—from about 0.42 at 10% to nearly 0.50 at 90% narrative time—indicates that characters’ affective expressions intensify toward the climax, aligning with theories of narrative tension that predict escalating emotional arousal as a story progresses.", "label": true }, { "paperid": "2411.01703v1", "paper_path": "./SciVer/papers/2411.01703v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.01703v1-Table3-1.png", "request_id": 85, "origin_statement": "Integrating image and optimized text guardrails in UniGuard reduces the attack success ratio on LLaVA 1.5 from 81.61% to 25.17%, a 69.2% relative decrease, indicating complementary mitigation effects from cross-modal defenses that intercept adversarial perturbations at both visual and textual levels.", "perturbed_statement": "Integrating image and optimized text guardrails in UniGuard reduces the attack success ratio on LLaVA 1.5 from 81.61% to 15.17%, an 81% relative decrease, indicating complementary mitigation effects from cross-modal defenses that intercept adversarial perturbations at both visual and textual levels.", "perturbed_explanation": "The perturbed claim incorrectly reports the post-defense attack success ratio and relative decrease. According to Table 3, UniGuard with image & optimized text guardrails achieves a 25.17% attack success ratio (not 15.17%), corresponding to a 69.2% relative reduction (not 81%).", "claim": "Integrating image and optimized text guardrails in UniGuard reduces the attack success ratio on LLaVA 1.5 from 81.61% to 25.17%, a 69.2% relative decrease, indicating complementary mitigation effects from cross-modal defenses that intercept adversarial perturbations at both visual and textual levels.", "label": true }, { "paperid": "2409.01672v2", "paper_path": "./SciVer/papers/2409.01672v2.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.4.1", "4.4.2" ], "image_path": "./SciVer/images/2409.01672v2_figure_5.png", "request_id": 86, "origin_statement": "At k=5, FMR yields approximately 11% overlap between training and testing top-5 weighted features, whereas the fine-tuned baseline remains near 0%. This early divergence indicates that magnitude regularization effectively filters out dataset-specific noise, promoting inherently generalizable discriminative features.", "perturbed_statement": "At k=5, FMR yields approximately 18% overlap between training and testing top-5 weighted features, whereas the fine-tuned baseline remains near 0%. 
This suggests that magnitude regularization amplifies dataset-specific spurious features rather than filtering them out.", "perturbed_explanation": "The perturbed statement is incorrect because the actual overlap at k=5 for FMR is about 11%, not 18%, as shown in Figure 5. Furthermore, FMR is designed to reduce spurious, dataset-specific features through magnitude regularization, so it does not amplify such features.", "claim": "At k=5, FMR yields approximately 11% overlap between training and testing top-5 weighted features, whereas the fine-tuned baseline remains near 0%. This early divergence indicates that magnitude regularization effectively filters out dataset-specific noise, promoting inherently generalizable discriminative features.", "label": true }, { "paperid": "2410.04803v2", "paper_path": "./SciVer/papers/2410.04803v2.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.04803v2_figure_4.png", "request_id": 88, "origin_statement": "By leveraging fine-grained patch-wise temporal dependencies and explicit inter-series modeling, Timer-XL reduces global temperature forecast MSE to 7.172, 46.7% lower than numerical simulation (ERA5) and 5.6% lower than the best deep baseline (UniRepLKNet), illustrating improved temporal error control.", "perturbed_statement": "By leveraging fine-grained patch-wise temporal dependencies and explicit inter-series modeling, Timer-XL reduces global temperature forecast MSE to 8.172, 39.2% lower than numerical simulation (ERA5) and 7.1% lower than the best deep baseline (UniRepLKNet), illustrating improved temporal error control.", "perturbed_explanation": "The perturbed statement incorrectly cites an MSE of 8.172, whereas Figure 4 reports 7.172 for Timer-XL. 
Consequently, the percentage improvements are wrong: the actual reduction from ERA5 (13.448) is ~46.7%, not 39.2%, and against UniRepLKNet (7.602) is ~5.6%, not 7.1%.", "claim": "By leveraging fine-grained patch-wise temporal dependencies and explicit inter-series modeling, Timer-XL reduces global temperature forecast MSE to 7.172, 46.7% lower than numerical simulation (ERA5) and 5.6% lower than the best deep baseline (UniRepLKNet), illustrating improved temporal error control.", "label": true }, { "paperid": "2409.11491v1", "paper_path": "./SciVer/papers/2409.11491v1.json", "claim_type": "analytical", "type": "chart", "item": "5(d)", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.11491v1_figure_5(d).png", "request_id": 90, "origin_statement": "The strong pairwise Pearson correlations (0.46–0.63) among closed-source LLMs on age prediction imply that these models share systematic biases from pretraining, leading to highly redundant error patterns that limit the potential gains from ensemble averaging.", "perturbed_statement": "The strong pairwise Pearson correlations (0.85–0.92) among closed-source LLMs on age prediction imply that these models share systematic biases from pretraining, leading to highly redundant error patterns that limit the potential gains from ensemble averaging.", "perturbed_explanation": "The perturbed statement misreports the correlation values as 0.85–0.92, whereas Figure 5 indicates that closed-source LLMs actually show correlations between 0.46 and 0.63, making the stated range incorrect.", "claim": "The strong pairwise Pearson correlations (0.46–0.63) among closed-source LLMs on age prediction imply that these models share systematic biases from pretraining, leading to highly redundant error patterns that limit the potential gains from ensemble averaging.", "label": true }, { "paperid": "2411.12812v1", "paper_path": "./SciVer/papers/2411.12812v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.7" ], "image_path": "./SciVer/images/2411.12812v1-Table3-1.png", "request_id": 91, "origin_statement": "Extending the sliding window to 8 hours reduces the titration model’s MAE from 0.0827 to 0.0641, a 22.5% improvement over the 6-hour window, because the longer historical insulin-glucose context captures delayed physiological responses more effectively.", "perturbed_statement": "Extending the sliding window to 8 hours reduces the titration model’s MAE from 0.0827 to 0.0641, a 50% improvement over the 6-hour window, because the longer historical insulin-glucose context captures delayed physiological responses more effectively.", "perturbed_explanation": "The claimed 50% improvement is incorrect: MAE decreases from 0.0827 to 0.0641, which corresponds to a 22.5% reduction relative to the 6-hour window, not 50%, as reported in Table 3.", "claim": "Extending the sliding window to 8 hours reduces the titration model’s MAE from 0.0827 to 0.0641, a 22.5% improvement over the 6-hour window, because the longer historical insulin-glucose context captures delayed physiological responses more effectively.", "label": true }, { "paperid": "2410.20348v1", "paper_path": "./SciVer/papers/2410.20348v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.5.1", "5" ], "image_path": "./SciVer/images/2410.20348v1_figure_6.png", "request_id": 92, "origin_statement": "In Fig.6, ConvexAdam yields a median Dice score of 0.88 for liver segmentation, outperforming all other methods, while UTSRMorph produces the second-highest 
median of 0.86. This suggests that hand-crafted MIND and thin-plate spline registration provides more robust alignment for organs with low tissue contrast.", "perturbed_statement": "In Fig.6, RDP yields a median Dice score of 0.90 for liver segmentation, outperforming all other methods, while UTSRMorph produces a median of 0.87. This suggests that recursive pyramid strategies with heavy attention layers offer superior organ registration in low-contrast MR-CT imaging.", "perturbed_explanation": "The claim is incorrect because in Fig.6 RDP’s median Dice for liver is approximately 0.83, not 0.90, and UTSRMorph’s median is about 0.86, not 0.87. Thus RDP does not outperform ConvexAdam, and the misreported scores contradict the plotted boxplot data.", "claim": "In Fig.6, ConvexAdam yields a median Dice score of 0.88 for liver segmentation, outperforming all other methods, while UTSRMorph produces the second-highest median of 0.86. This suggests that hand-crafted MIND and thin-plate spline registration provides more robust alignment for organs with low tissue contrast.", "label": true }, { "paperid": "2411.14516v1", "paper_path": "./SciVer/papers/2411.14516v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.3" ], "image_path": "./SciVer/images/2411.14516v1-Table3-1.png", "request_id": 93, "origin_statement": "Increasing trigger symbol size from 1×1 to 3×3 improved image fidelity (SSIM rising from 0.717 to 0.738 on CIFAR-ViT) but decreased attack accuracy (f ACC dropping 0.623→0.613). This trade‐off aligns with Nyquist sampling: higher resolution patches preserve detail yet inject spatial noise that hampers model classification.", "perturbed_statement": "Increasing trigger symbol size from 1×1 to 3×3 improved attack accuracy (f ACC rising from 0.623 to 0.754 on CIFAR-ViT) but decreased SSIM (dropping 0.754→0.717).", "perturbed_explanation": "The perturbed statement incorrectly swaps f ACC and SSIM values. In the table, 0.754 is the SSIM at symbol size 2×2, not an f ACC, and the f ACC values for CIFAR-ViT never exceed 0.631. Therefore claiming f ACC rises to 0.754 and SSIM drops to 0.717 contradicts the actual entries.", "claim": "Increasing trigger symbol size from 1×1 to 3×3 improved image fidelity (SSIM rising from 0.717 to 0.738 on CIFAR-ViT) but decreased attack accuracy (f ACC dropping 0.623→0.613). 
This trade‐off aligns with Nyquist sampling: higher resolution patches preserve detail yet inject spatial noise that hampers model classification.", "label": true }, { "paperid": "2411.05608v1", "paper_path": "./SciVer/papers/2411.05608v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.05608v1_figure_4.png", "request_id": 94, "origin_statement": "The Baikal-GVD cascade measurements above 200 TeV show a significantly harder neutrino spectrum than IceCube’s KRAγ templates, matching the slope of Tibet-AS gamma-ray–inferred flux, suggesting hadronic cosmic-ray interactions in Galactic PeVatrons are responsible for neutrino production.", "perturbed_statement": "The Baikal-GVD cascade measurements above 200 GeV show a significantly softer neutrino spectrum than IceCube’s KRAγ templates, matching the slope of LHAASO gamma-ray–inferred flux, suggesting leptonic inverse Compton interactions in Galactic PeVatrons are responsible for neutrino production.", "perturbed_explanation": "The perturbation is incorrect because Baikal-GVD results refer to energies above 200 TeV (not 200 GeV), the observed spectrum is harder (not softer) than the KRAγ templates, it aligns better with Tibet-AS (not LHAASO) data, and leptonic inverse Compton processes do not produce neutrinos in proton–proton interactions.", "claim": "The Baikal-GVD cascade measurements above 200 TeV show a significantly harder neutrino spectrum than IceCube’s KRAγ templates, matching the slope of Tibet-AS gamma-ray–inferred flux, suggesting hadronic cosmic-ray interactions in Galactic PeVatrons are responsible for neutrino production.", "label": true }, { "paperid": "2410.02052v3", "paper_path": "./SciVer/papers/2410.02052v3.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.02052v3-Table2-1.png", "request_id": 97, "origin_statement": "By integrating multi-agent debate, R-MCTS (MAD) achieves a consistent additional 1.6% success improvement over single-agent R-MCTS while incurring only 0.1× extra token usage, illustrating how adversarial peer critique can enhance search efficiency in resource-constrained planning.", "perturbed_statement": "By integrating multi-agent debate, R-MCTS (MAD) achieves a consistent additional 6% success improvement over single-agent R-MCTS while incurring 0.5× extra token usage, illustrating how adversarial peer critique can enhance search efficiency in resource-constrained planning.", "perturbed_explanation": "This statement is incorrect because Table 2 shows R-MCTS (MAD) only improves success by 1.6 percentage points on average (not 6%), and its token consumption increases from 7.3× to 7.4×—a 0.1× increase, not 0.5×.", "claim": "By integrating multi-agent debate, R-MCTS (MAD) achieves a consistent additional 1.6% success improvement over single-agent R-MCTS while incurring only 0.1× extra token usage, illustrating how adversarial peer critique can enhance search efficiency in resource-constrained planning.", "label": true }, { "paperid": "2411.16868v1", "paper_path": "./SciVer/papers/2411.16868v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "2.1", "5" ], "image_path": "./SciVer/images/2411.16868v1_figure_1.png", "request_id": 98, "origin_statement": "Despite the major X-ray flare peaking at MJD 60095, the non-detection of X-ray polarization (upper limit ∼ 6.8% at 99.73% confidence) indicates a moderately disordered magnetic field, favoring 
a leptonic SSC scenario where aligned electron and photon populations dilute net polarization.", "perturbed_statement": "Despite the minor X-ray flare peaking at MJD 60092, the non-detection of X-ray polarization (upper limit ∼ 3.2% at 95% confidence) indicates a highly ordered magnetic field, favoring a hadronic synchrotron scenario contrary to SSC dilution effects.", "perturbed_explanation": "This statement is wrong because the significant X-ray flare actually peaks at MJD 60095, not 60092. The reported polarization upper limit is 6.8% at 99.73% confidence, not 3.2% at 95%. Moreover, the context disfavors hadronic models and supports leptonic SSC, not hadronic synchrotron.", "claim": "Despite the major X-ray flare peaking at MJD 60095, the non-detection of X-ray polarization (upper limit ∼ 6.8% at 99.73% confidence) indicates a moderately disordered magnetic field, favoring a leptonic SSC scenario where aligned electron and photon populations dilute net polarization.", "label": true }, { "paperid": "2410.04088v1", "paper_path": "./SciVer/papers/2410.04088v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.04088v1-Table1-1.png", "request_id": 99, "origin_statement": "Integrating CRED into Conditional-DETR-R50 increases AP by 3.5 points (40.9→44.4) for only 11% extra FLOPs, thanks to cross-resolution feature aggregation that improves multi-scale object localization and speeds convergence by aligning representations across encoder resolutions.", "perturbed_statement": "Integrating CRED into Conditional-DETR-R50 increases AP by 5.0 points (40.9→45.9) for only 5% extra FLOPs, thanks to cross-resolution feature aggregation that reduces latency by aligning representations across encoder resolutions.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 shows AP rises by 3.5 points (from 40.9 to 44.4), not 5.0 points to 45.9. It also reports a FLOPs increase of roughly 11% (90G→100G), not the claimed 5%.", "claim": "Integrating CRED into Conditional-DETR-R50 increases AP by 3.5 points (40.9→44.4) for only 11% extra FLOPs, thanks to cross-resolution feature aggregation that improves multi-scale object localization and speeds convergence by aligning representations across encoder resolutions.", "label": true }, { "paperid": "2411.01299v1", "paper_path": "./SciVer/papers/2411.01299v1.json", "claim_type": "analytical", "type": "chart", "item": "14", "section": [ "3.5.3" ], "image_path": "./SciVer/images/2411.01299v1_figure_14.png", "request_id": 100, "origin_statement": "Angle_Left_3 exhibits outliers above 165°, likely due to measurement misalignment, so using bolt-specific historical averages for imputation mitigates this skew and preserves feature distribution symmetry essential for reliable tensile strength predictions.", "perturbed_statement": "Angle_Right_3 exhibits outliers above 165°, likely due to measurement misalignment, so using bolt-specific historical maxima for imputation mitigates this skew and preserves feature distribution symmetry essential for reliable tensile strength predictions.", "perturbed_explanation": "The box plot indicates that extreme outliers above 165° are observed in Angle_Left_3, not Angle_Right_3. 
Moreover, the methodology specifies imputing outliers with bolt-specific historical averages, not maxima.", "claim": "Angle_Left_3 exhibits outliers above 165°, likely due to measurement misalignment, so using bolt-specific historical averages for imputation mitigates this skew and preserves feature distribution symmetry essential for reliable tensile strength predictions.", "label": true }, { "paperid": "2409.17730v1", "paper_path": "./SciVer/papers/2409.17730v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.3.2" ], "image_path": "./SciVer/images/2409.17730v1_figure_3.png", "request_id": 104, "origin_statement": "At moderate temperatures (~1.5), Relevance Aggregation maximizes ensemble diversity and yields peak NDCG@10 by balancing the exploration–exploitation trade-off: lower temperatures under-explore, and higher temperatures over-randomize predictions, causing diminishing gains across all datasets except on Gowalla where Reciprocal Rank Aggregation slightly leads.", "perturbed_statement": "Temperatures near 0.5 optimize Relevance Aggregation diversity, peaking NDCG@10 at about 0.22 on MovieLens-20M, reflecting the exploration–exploitation balance where too low temperature under-explores and too high temperature over-randomizes predictions.", "perturbed_explanation": "The perturbed statement is incorrect because the peak NDCG@10 for Relevance Aggregation on MovieLens-20M occurs at temperature around 1–2 (approximately 1.5), not at 0.5, and the actual peak value is about 0.23 rather than 0.22, as shown in Figure 3a.", "claim": "At moderate temperatures (~1.5), Relevance Aggregation maximizes ensemble diversity and yields peak NDCG@10 by balancing the exploration–exploitation trade-off: lower temperatures under-explore, and higher temperatures over-randomize predictions, causing diminishing gains across all datasets except on Gowalla where Reciprocal Rank Aggregation slightly leads.", "label": true }, { "paperid": "2410.16845v1", "paper_path": "./SciVer/papers/2410.16845v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "5.7" ], "image_path": "./SciVer/images/2410.16845v1_figure_5.png", "request_id": 106, "origin_statement": "At ρ=5.0, FGSAM+ reduces training loss to below 0.1 within 30 epochs, whereas SAM’s loss remains above 0.5 even after 200 epochs; this performance gap reflects how decoupling perturbation and minimization in FGSAM+ mitigates adversarial gradient imbalance induced by large perturbation magnitudes, supporting tighter generalization bounds.", "perturbed_statement": "At ρ=2.5, SAM reduces training loss to near zero within 40 epochs, whereas FGSAM+ stalls above 0.5 even after 200 epochs; this suggests decoupling perturbation and minimization fails to stabilize adversarial gradient mismatch at moderate perturbation magnitudes.", "perturbed_explanation": "Figure 5 shows for ρ=2.5 that the brown curve (FGSAM+) converges to near-zero loss by around 20 epochs, while the purple dashed curve (SAM) plateaus above 0.2 even after 200 epochs. 
Thus the claim that SAM converges faster than FGSAM+ at ρ=2.5 contradicts the plotted results.", "claim": "At ρ=5.0, FGSAM+ reduces training loss to below 0.1 within 30 epochs, whereas SAM’s loss remains above 0.5 even after 200 epochs; this performance gap reflects how decoupling perturbation and minimization in FGSAM+ mitigates adversarial gradient imbalance induced by large perturbation magnitudes, supporting tighter generalization bounds.", "label": true }, { "paperid": "2411.00915v1", "paper_path": "./SciVer/papers/2411.00915v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.4" ], "image_path": "./SciVer/images/2411.00915v1-Table3-1.png", "request_id": 107, "origin_statement": "V-LoRA’s throughput scales near-linearly with GPU count: doubling from 1 to 2 GPUs increases throughput by 89%, and from 2 to 4 by 109%, reflecting its efficient pre-allocated contiguous memory and asynchronous swapping that minimize inter-GPU synchronization overhead.", "perturbed_statement": "V-LoRA’s throughput scales superlinearly with GPU count: doubling from 1 to 2 GPUs increases throughput by 126%, and from 2 to 4 by 145%, demonstrating Gustafson’s law effects from its inter-GPU scheduling.", "perturbed_explanation": "The throughput improvements are misquoted. The actual increases are 89% (1→2 GPUs) and 109% (2→4 GPUs), not 126% and 145%. Additionally, V-LoRA does not implement inter-GPU scheduling, making the invocation of Gustafson’s law inapplicable.", "claim": "V-LoRA’s throughput scales near-linearly with GPU count: doubling from 1 to 2 GPUs increases throughput by 89%, and from 2 to 4 by 109%, reflecting its efficient pre-allocated contiguous memory and asynchronous swapping that minimize inter-GPU synchronization overhead.", "label": true }, { "paperid": "2411.16535v1", "paper_path": "./SciVer/papers/2411.16535v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "3.2.2", "4.3.1" ], "image_path": "./SciVer/images/2411.16535v1-Table4-1.png", "request_id": 109, "origin_statement": "At 8× acceleration, forward model calibration improves SSIM by 0.013 and reduces LPIPS by 0.026 compared to the uncalibrated variant, while only increasing runtime by 0.7 s, demonstrating that joint CSM refinement via Tikhonov regularization bolsters perceptual quality with minimal computational cost.", "perturbed_statement": "At 8× acceleration, forward model calibration boosts SSIM by 0.03 and reduces LPIPS by 0.05 versus the non-calibrated variant, while decreasing runtime by 0.3 s, indicating that joint CSM tuning both accelerates inference and enhances perceptual quality.", "perturbed_explanation": "This statement is incorrect because Table 4 shows that with calibration at 8×: SSIM rises only from 0.929 to 0.942 (an increase of 0.013, not 0.03), LPIPS drops from 0.111 to 0.085 (a reduction of 0.026, not 0.05), and runtime increases from 1.92 s to 2.62 s (a 0.7 s increase, not a 0.3 s decrease).", "claim": "At 8× acceleration, forward model calibration improves SSIM by 0.013 and reduces LPIPS by 0.026 compared to the uncalibrated variant, while only increasing runtime by 0.7 s, demonstrating that joint CSM refinement via Tikhonov regularization bolsters perceptual quality with minimal computational cost.", "label": true }, { "paperid": "2410.04797v1", "paper_path": "./SciVer/papers/2410.04797v1.json", "claim_type": "analytical", "type": "chart", "item": "2(b)", "section": [ "4.6" ], "image_path": "./SciVer/images/2410.04797v1_figure_2(b).png", "request_id": 
110, "origin_statement": "The t-SNE plots show the proposed model produces tightly clustered embeddings with clear inter-class boundaries, reflecting its optimized feature extractor that reduces intra-class variance and emphasizes discriminative dimensions, aligning with the manifold hypothesis that preserving local structures enhances classification performance on limited speech datasets.", "perturbed_statement": "Contrary to the paper, the t-SNE visualization for Wav2vec 2.0 reveals the most compact intra-class clusters and nearly orthogonal inter-class boundaries, indicating it outperforms the proposed model in discriminative feature extraction for limited speech data.", "perturbed_explanation": "This statement is incorrect because Figure 2(b) shows Wav2vec 2.0 embeddings are visibly intermixed across labels and fail to form distinct clusters. In contrast, Figure 2(c) demonstrates the proposed model yields compact intra-class clusters and clear inter-class separation, so Wav2vec 2.0 does not outperform it.", "claim": "The t-SNE plots show the proposed model produces tightly clustered embeddings with clear inter-class boundaries, reflecting its optimized feature extractor that reduces intra-class variance and emphasizes discriminative dimensions, aligning with the manifold hypothesis that preserving local structures enhances classification performance on limited speech datasets.", "label": true }, { "paperid": "2410.06423v1", "paper_path": "./SciVer/papers/2410.06423v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4", "4.1", "5.3" ], "image_path": "./SciVer/images/2410.06423v1-Table3-1.png", "request_id": 113, "origin_statement": "The Adult dataset’s extreme race imbalance (White: 41,761 vs Black: 4,685 vs Asian-Pacific-Islander: 1,519) predisposes standard classifiers to higher accuracy on White individuals, necessitating fairness-aware reweighing or oversampling of minority races to achieve demographic parity across outcome distributions.", "perturbed_statement": "The Adult dataset’s extreme race imbalance (White: 41,761 vs Black: 468 vs Asian-Pacific-Islander: 1,519) predisposes standard classifiers to overfit minority groups, suggesting practitioners should oversample White samples to correct underrepresentation and balance accuracy across races.", "perturbed_explanation": "The perturbed statement misreports the Black count as 468 instead of the actual 4,685 in Table 3. 
Additionally, classifiers typically overfit the majority group (White) rather than minority groups and fairness interventions should oversample minority races (e.g., Black), not White samples.", "claim": "The Adult dataset’s extreme race imbalance (White: 41,761 vs Black: 4,685 vs Asian-Pacific-Islander: 1,519) predisposes standard classifiers to higher accuracy on White individuals, necessitating fairness-aware reweighing or oversampling of minority races to achieve demographic parity across outcome distributions.", "label": true }, { "paperid": "2411.12078v1", "paper_path": "./SciVer/papers/2411.12078v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.12078v1-Table1-1.png", "request_id": 115, "origin_statement": "By integrating hard and soft fragment retrieval with genetic fragment modification, f-RAG achieves a cumulative AUC of 16.928, approximately 4.4% higher than the next best (16.213), demonstrating more efficient exploration of chemical space under the 10 000 oracle-call constraint.", "perturbed_statement": "By integrating hard and soft fragment retrieval with genetic fragment modification, f-RAG achieves a cumulative AUC of 17.928, approximately 4.4% higher than the next best (16.213), demonstrating more efficient exploration of chemical space under the 10 000 oracle-call constraint.", "perturbed_explanation": "This is incorrect because Table 1 reports f-RAG’s cumulative AUC as 16.928, not 17.928; the perturbed value misrepresents the sum of AUC top-10 results from the context.", "claim": "By integrating hard and soft fragment retrieval with genetic fragment modification, f-RAG achieves a cumulative AUC of 16.928, approximately 4.4% higher than the next best (16.213), demonstrating more efficient exploration of chemical space under the 10 000 oracle-call constraint.", "label": true }, { "paperid": "2411.02542v1", "paper_path": "./SciVer/papers/2411.02542v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "3" ], "image_path": "./SciVer/images/2411.02542v1_figure_3.png", "request_id": 116, "origin_statement": "The state-level traffic graphs exhibit extreme class imbalance: crash nodes constitute less than 1% of nodes, while non-crash nodes average around 330,000 per state and edges about 880,000, necessitating specialized resampling or cost-sensitive learning to avoid bias in GNN-based predictions.", "perturbed_statement": "Crash nodes make up roughly 10% of all nodes in state-level traffic graphs, with negative nodes averaging 330,000 per state and edges around 500,000, indicating moderate imbalance manageable by standard GNN training without special sampling.", "perturbed_explanation": "The perturbation is incorrect because the figure shows positive (crash) nodes are under 1% of all nodes (approximately 10,000 versus over 340,000), not 10%. 
It also misstates the average edge count as 500,000, whereas the chart indicates about 880,000 edges per state.", "claim": "The state-level traffic graphs exhibit extreme class imbalance: crash nodes constitute less than 1% of nodes, while non-crash nodes average around 330,000 per state and edges about 880,000, necessitating specialized resampling or cost-sensitive learning to avoid bias in GNN-based predictions.", "label": true }, { "paperid": "2409.04384v1", "paper_path": "./SciVer/papers/2409.04384v1.json", "claim_type": "analytical", "type": "chart", "item": "9(b)", "section": [ "4.4.3" ], "image_path": "./SciVer/images/2409.04384v1_figure_9(b).png", "request_id": 118, "origin_statement": "The SAPG scheme converges to the MMLE of ρ in about 10 iterations, enabling latent-space PnP-ULA to reach optimal image-restoration PSNR by decoupling the noise scale from the step size, thus reducing the sampling space dimensionality and accelerating posterior mixing for faster, more accurate recovery.", "perturbed_statement": "The SAPG scheme converges to the MMLE of ρ in about 50 iterations, enabling latent-space PnP-ULA to reach optimal image-restoration PSNR by decoupling the noise scale from the regularization strength, thus reducing the sampling space dimensionality and accelerating posterior mixing for faster, more accurate recovery.", "perturbed_explanation": "This statement is incorrect because Figure 9(b) and the text state that the SAPG iterates converge to the MMLE in roughly 10 iterations, not 50. It also wrongly claims the decoupling is between noise scale and regularization strength, whereas the context specifies decoupling between the noise scale ρ and the step size τ.", "claim": "The SAPG scheme converges to the MMLE of ρ in about 10 iterations, enabling latent-space PnP-ULA to reach optimal image-restoration PSNR by decoupling the noise scale from the step size, thus reducing the sampling space dimensionality and accelerating posterior mixing for faster, more accurate recovery.", "label": true }, { "paperid": "2410.04422v5", "paper_path": "./SciVer/papers/2410.04422v5.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.2.3", "4.3.3" ], "image_path": "./SciVer/images/2410.04422v5-Table4-1.png", "request_id": 119, "origin_statement": "Using a one-by-one prompt increases time complexity to Θ(n^2) and consumes about 1,500 output tokens, achieving 100% accuracy on both logic-based KV retrieval and student resume tasks; this shows exhaustive item-by-item examination ensures perfect performance at significant computational expense.", "perturbed_statement": "Using a one-by-one prompt increases time complexity to Θ(n) and consumes about 1,200 output tokens, achieving 100% accuracy on both logic-based KV retrieval and student resume tasks; this shows exhaustive item-by-item examination ensures perfect performance at significant computational expense.", "perturbed_explanation": "The perturbed statement is incorrect because the actual time complexity for the one-by-one prompt is Θ(n^2), not Θ(n), and it requires about 1,500 output tokens, not 1,200, as shown in Table 4.", "claim": "Using a one-by-one prompt increases time complexity to Θ(n^2) and consumes about 1,500 output tokens, achieving 100% accuracy on both logic-based KV retrieval and student resume tasks; this shows exhaustive item-by-item examination ensures perfect performance at significant computational expense.", "label": true }, { "paperid": "2409.12479v1", "paper_path": 
"./SciVer/papers/2409.12479v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "4.5.1" ], "image_path": "./SciVer/images/2409.12479v1_figure_2(a).png", "request_id": 122, "origin_statement": "Enrolling three OOD samples as negative anchors yields a 16.68% FPR95 reduction via refined prototype anchoring, with improvements plateauing thereafter, demonstrating that minimal anchor augmentation suffices to calibrate the decision boundary more effectively than raw model scoring.", "perturbed_statement": "Enrolling two OOD samples as negative anchors yields a 26.68% FPR95 reduction via refined prototype anchoring, with improvements plateauing thereafter, demonstrating that minimal anchor augmentation suffices to calibrate the decision boundary more effectively than raw model scoring.", "perturbed_explanation": "The perturbed statement is incorrect because the actual FPR95 reduction from enrollment is 16.68%, not 26.68%, and the performance gains plateau only after enrolling three OOD samples, not two, as shown in Figure 2 and the reported results.", "claim": "Enrolling three OOD samples as negative anchors yields a 16.68% FPR95 reduction via refined prototype anchoring, with improvements plateauing thereafter, demonstrating that minimal anchor augmentation suffices to calibrate the decision boundary more effectively than raw model scoring.", "label": true }, { "paperid": "2409.05061v2", "paper_path": "./SciVer/papers/2409.05061v2.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.3.1", "5.3.2" ], "image_path": "./SciVer/images/2409.05061v2_figure_6.png", "request_id": 124, "origin_statement": "Under the pu distribution, the relative improvement in premium acceptance increases with parcel size from +19 pp (small) to +43 pp (large), demonstrating METC’s exploitation of faster pickup speeds to preferentially serve high-value customers with bulky shipments.", "perturbed_statement": "Under the pf distribution, the relative improvement in premium acceptance increases with parcel size from +20 pp (small) to +50 pp (large), demonstrating METC’s exploitation of faster pickup speeds to preferentially serve high-value customers with bulky shipments.", "perturbed_explanation": "This statement is wrong because it misidentifies the pf distribution as having the fastest pickups (that is pu) and overstates the acceptance gains. In fact, under pf the actual improvement is about +16 pp for small parcels (86.4 % vs. 70.6 %) and +28 pp for large parcels (61.6 % vs. 
34.0 %), not +20 pp and +50 pp.", "claim": "Under the pu distribution, the relative improvement in premium acceptance increases with parcel size from +19 pp (small) to +43 pp (large), demonstrating METC’s exploitation of faster pickup speeds to preferentially serve high-value customers with bulky shipments.", "label": true }, { "paperid": "2410.13762v1", "paper_path": "./SciVer/papers/2410.13762v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.13762v1_figure_5.png", "request_id": 128, "origin_statement": "Figure 5 shows that after min-max normalization, turbulence kinetic energy values previously spanning 0.00088–0.01902 uniformly occupy 0–1, which reduces weight bias in gradient-based learning, ensuring the DeepONet model treats pressure fluctuations and velocity gradients comparably, thus accelerating convergence and stabilizing the network during training.", "perturbed_statement": "Figure 5 shows that after z-score normalization, turbulence kinetic energy values previously spanning 0.00088–0.01902 were standardized to zero mean and unit variance, which reduces weight bias in batch gradient descent, ensuring the DeepONet model treats pressure fluctuations and velocity gradients comparably, thus accelerating convergence.", "perturbed_explanation": "The perturbation is incorrect because the study applied min-max scaling to normalize all parameters to the [0,1] range, not z-score normalization to achieve zero mean and unit variance. Additionally, the optimization referenced was generic gradient-based learning, not specifically batch gradient descent, contradicting the context.", "claim": "Figure 5 shows that after min-max normalization, turbulence kinetic energy values previously spanning 0.00088–0.01902 uniformly occupy 0–1, which reduces weight bias in gradient-based learning, ensuring the DeepONet model treats pressure fluctuations and velocity gradients comparably, thus accelerating convergence and stabilizing the network during training.", "label": true }, { "paperid": "2411.02640v1", "paper_path": "./SciVer/papers/2411.02640v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.02640v1-Table3-1.png", "request_id": 129, "origin_statement": "KinetiX's polynomial fitting via NumPy introduces up to 9.874×10⁻⁶% maximum error in mixture-averaged transport properties for C₂H₅OH, compared to sub-1×10⁻⁹% mean differences in reaction rates and sub-1×10⁻¹⁴% in thermodynamic properties, highlighting fitting library influence on transport accuracy.", "perturbed_statement": "KinetiX's polynomial fitting via NumPy introduces up to 9.874×10⁻⁴% maximum error in mixture-averaged transport properties for C₂H₅OH, compared to sub-1×10⁻⁹% mean differences in reaction rates and sub-1×10⁻¹⁴% in thermodynamic properties, highlighting fitting library influence on transport accuracy.", "perturbed_explanation": "The table reports a maximum relative error of 9.874×10⁻⁶% for C₂H₅OH mixture-averaged transport properties, not 9.874×10⁻⁴%. 
This two-order-of-magnitude discrepancy makes the perturbed statement incorrect.", "claim": "KinetiX's polynomial fitting via NumPy introduces up to 9.874×10⁻⁶% maximum error in mixture-averaged transport properties for C₂H₅OH, compared to sub-1×10⁻⁹% mean differences in reaction rates and sub-1×10⁻¹⁴% in thermodynamic properties, highlighting fitting library influence on transport accuracy.", "label": true }, { "paperid": "2410.09133v1", "paper_path": "./SciVer/papers/2410.09133v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.3.2" ], "image_path": "./SciVer/images/2410.09133v1_figure_4.png", "request_id": 130, "origin_statement": "In the hourly traffic dataset, the log-score-trained N-HiTS model exhibits pronounced covariance peaks around 6–7 AM and 6–7 PM due to rush-hour volatility and increasing lead-time uncertainty, whereas the MVG-CRPS-trained model attenuates tail-driven variance inflation, providing more stable hour-to-hour correlation estimates.", "perturbed_statement": "In the hourly traffic dataset, the log-score-trained N-HiTS model exhibits pronounced covariance peaks around 12 PM and 12 AM due to rush-hour volatility and increasing lead-time uncertainty, whereas the MVG-CRPS-trained model attenuates tail-driven variance inflation, providing more stable hour-to-hour correlation estimates.", "perturbed_explanation": "The perturbed statement wrongly locates the covariance peaks at 12 PM and 12 AM, whereas Fig. 4 and the context specify peaks around 6–7 AM and 6–7 PM. This misrepresents the temporal positions of rush-hour uncertainty.", "claim": "In the hourly traffic dataset, the log-score-trained N-HiTS model exhibits pronounced covariance peaks around 6–7 AM and 6–7 PM due to rush-hour volatility and increasing lead-time uncertainty, whereas the MVG-CRPS-trained model attenuates tail-driven variance inflation, providing more stable hour-to-hour correlation estimates.", "label": true }, { "paperid": "2410.00003v2", "paper_path": "./SciVer/papers/2410.00003v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.2", "5" ], "image_path": "./SciVer/images/2410.00003v2-Table2-1.png", "request_id": 131, "origin_statement": "LanHAR achieves an average accuracy of 77.3%, outclassing UDAHAR's 41.3% by 36 percentage points, demonstrating that semantic interpretations via LLMs significantly reduce cross-dataset heterogeneity and normalize sensor feature distributions for robust HAR generalization.", "perturbed_statement": "LanHAR achieves an average accuracy of 77.3%, outclassing UDAHAR's 61.3% by 16 percentage points, demonstrating that semantic interpretations via LLMs significantly reduce cross-dataset heterogeneity and normalize sensor feature distributions for robust HAR generalization.", "perturbed_explanation": "The perturbation is incorrect because Table 2 reports UDAHAR’s actual average accuracy as 41.3%, not 61.3%, and the resulting improvement is 36 percentage points, not 16, contradicting the context’s numeric values.", "claim": "LanHAR achieves an average accuracy of 77.3%, outclassing UDAHAR's 41.3% by 36 percentage points, demonstrating that semantic interpretations via LLMs significantly reduce cross-dataset heterogeneity and normalize sensor feature distributions for robust HAR generalization.", "label": true }, { "paperid": "2409.03904v1", "paper_path": "./SciVer/papers/2409.03904v1.json", "claim_type": "analytical", "type": "chart", "item": "7(a)", "section": [ "3.4.1" ], "image_path": 
"./SciVer/images/2409.03904v1_figure_7(a).png", "request_id": 134, "origin_statement": "Using 62 and 60 Chebyshev-filtered subspace vectors on 483^3×96 and 963^3×192 lattices respectively, mrhs-HDCG reduces fine-grid matrix multiplies by 17×, achieving similar outer convergence rates (≈0.93 reduction per iteration) and an inferred preconditioned condition number around 38, demonstrating consistent deflation fidelity across volumes.", "perturbed_statement": "Using 72 and 60 Chebyshev-filtered subspace vectors on 483^3×96 and 963^3×192 lattices respectively, mrhs-HDCG reduces fine-grid matrix multiplies by 20×, achieving similar outer convergence rates (≈0.93 reduction per iteration) and an inferred preconditioned condition number around 38, demonstrating consistent deflation fidelity across volumes.", "perturbed_explanation": "The statement is incorrect because the context specifies 62 Chebyshev-filtered subspace vectors (not 72) on the 483^3×96 volume, and reports a 17× reduction in fine-grid matrix multiplies (not 20×). These discrepancies contradict the original data.", "claim": "Using 62 and 60 Chebyshev-filtered subspace vectors on 483^3×96 and 963^3×192 lattices respectively, mrhs-HDCG reduces fine-grid matrix multiplies by 17×, achieving similar outer convergence rates (≈0.93 reduction per iteration) and an inferred preconditioned condition number around 38, demonstrating consistent deflation fidelity across volumes.", "label": true }, { "paperid": "2409.00718v1", "paper_path": "./SciVer/papers/2409.00718v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.3", "3.4" ], "image_path": "./SciVer/images/2409.00718v1-Table2-1.png", "request_id": 135, "origin_statement": "The MCGAEc model’s AUC of 0.994 and specificity of 0.972 indicate its exceptional ability to minimize false positives in AMD classification, reflecting the efficacy of multiscale color-space fusion via VGG16-based attention in capturing complementary surface and subsurface retinal features.", "perturbed_statement": "The MCGAEc model’s AUC of 0.994 and specificity of 0.862 indicate its exceptional ability to minimize false negatives in AMD classification, reflecting the efficacy of multiscale color-space fusion via VGG16-based attention in capturing complementary surface and subsurface retinal features.", "perturbed_explanation": "The perturbation misstates the specificity value: Table 2 reports specificity as 0.972, not 0.862. It also incorrectly links specificity (true negative rate) to reducing false negatives (which relates to sensitivity), so the claim contradicts the reported metric meaning and value.", "claim": "The MCGAEc model’s AUC of 0.994 and specificity of 0.972 indicate its exceptional ability to minimize false positives in AMD classification, reflecting the efficacy of multiscale color-space fusion via VGG16-based attention in capturing complementary surface and subsurface retinal features.", "label": true }, { "paperid": "2410.07196v1", "paper_path": "./SciVer/papers/2410.07196v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.07196v1_figure_4.png", "request_id": 136, "origin_statement": "In Fig. 
4, the aggregated gamma-band (0–4 Hz) curve exhibits a pronounced low-frequency peak around 0.2 Hz (~5000 amplitude), reflecting dominant slow cortical potentials, whereas the beta band shows a narrow synchronous sensorimotor rhythm at ~20 Hz (~1800 amplitude) typical in resting-state EEG.", "perturbed_statement": "In Fig. 4, the aggregated gamma-band (0–4 Hz) curve exhibits a pronounced low-frequency peak around 2.0 Hz (~20000 amplitude), reflecting dominant slow cortical potentials, whereas the beta band shows a narrow synchronous sensorimotor rhythm at ~30 Hz (~1800 amplitude) typical in resting-state EEG.", "perturbed_explanation": "The perturbation is incorrect because the gamma-band peak actually occurs near 0.2 Hz with amplitude around 5000, not at 2.0 Hz with 20000 amplitude. Likewise, the beta-band peak is at ~20 Hz, not ~30 Hz.", "claim": "In Fig. 4, the aggregated gamma-band (0–4 Hz) curve exhibits a pronounced low-frequency peak around 0.2 Hz (~5000 amplitude), reflecting dominant slow cortical potentials, whereas the beta band shows a narrow synchronous sensorimotor rhythm at ~20 Hz (~1800 amplitude) typical in resting-state EEG.", "label": true }, { "paperid": "2409.09622v1", "paper_path": "./SciVer/papers/2409.09622v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5" ], "image_path": "./SciVer/images/2409.09622v1-Table1-1.png", "request_id": 137, "origin_statement": "For n=3, k from 8 to 10, the observed region counts (46, 72, 108) are all below the Zaslavsky upper bound Σ_{i=0}^3 C(k,i)—84, 115, 176 respectively—illustrating that random cubic hypersurfaces realize roughly 60–70% of worst-case combinatorial complexity.", "perturbed_statement": "For n=3, k from 8 to 10, the observed region counts (46, 82, 108) are all below the Zaslavsky upper bound Σ_{i=0}^3 C(k,i)—84, 115, 126 respectively—illustrating that random cubic hypersurfaces realize roughly 60–70% of worst-case combinatorial complexity.", "perturbed_explanation": "The perturbed statement is incorrect because the actual region count for k=9 from Table 1 is 72, not 82. Moreover, the Zaslavsky bound Σ_{i=0}^3 C(10,i) equals 176, not 126. 
These two misquoted values contradict the context.", "claim": "For n=3, k from 8 to 10, the observed region counts (46, 72, 108) are all below the Zaslavsky upper bound Σ_{i=0}^3 C(k,i)—84, 115, 176 respectively—illustrating that random cubic hypersurfaces realize roughly 60–70% of worst-case combinatorial complexity.", "label": true }, { "paperid": "2411.07239v1", "paper_path": "./SciVer/papers/2411.07239v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1", "4.1.1" ], "image_path": "./SciVer/images/2411.07239v1-Table1-1.png", "request_id": 139, "origin_statement": "Pretraining on multiple operators via the D2NO algorithm reduces the test relative error for Burgers’-type equation extrapolation to 3.11% under PI-LoRA fine-tuning, outperforming single-operator pretraining (3.49%) and random initialization (21.14%), by capturing diverse dynamical regimes for robust generalization.", "perturbed_statement": "Pretraining on multiple operators via the D2NO algorithm reduces the test relative error for Burgers’-type equation extrapolation to 1.11% under PI-LoRA fine-tuning, outperforming single-operator pretraining (3.49%) and random initialization (21.14%), by capturing diverse dynamical regimes for robust generalization.", "perturbed_explanation": "The relative error for the D2NO pretraining with PI-LoRA fine-tuning is actually reported as 3.11% in Table 1, not 1.11%, so the stated 1.11% value contradicts the empirical results.", "claim": "Pretraining on multiple operators via the D2NO algorithm reduces the test relative error for Burgers’-type equation extrapolation to 3.11% under PI-LoRA fine-tuning, outperforming single-operator pretraining (3.49%) and random initialization (21.14%), by capturing diverse dynamical regimes for robust generalization.", "label": true }, { "paperid": "2411.10213v1", "paper_path": "./SciVer/papers/2411.10213v1.json", "claim_type": "analytical", "type": "chart", "item": "1(c)", "section": [ "4.1", "4.2" ], "image_path": "./SciVer/images/2411.10213v1_figure_1(c).png", "request_id": 140, "origin_statement": "MarsCode Agent resolves 118 of 300 cases (39.3%), with 9 cases uniquely solvable only by it. This superior performance derives from its agent-based multi-turn reasoning compensating for stricter SWE-bench Lite protocols that forbid using dynamic failing-test feedback during patch enumeration.", "perturbed_statement": "MarsCode Agent resolves 112 of 300 cases (37.3%), with 12 cases uniquely solvable only by it. This superior performance derives from its agent-based single-turn reasoning compensating for stricter SWE-bench Lite protocols that forbid using dynamic failing-test feedback during patch enumeration.", "perturbed_explanation": "The perturbed statement incorrectly reports 112 cases (37.3%) and 12 unique cases; Figure 1 shows MarsCode Agent actually resolves 118 cases (39.3%) with 9 unique cases. It also wrongly describes its reasoning as single-turn, whereas MarsCode Agent uses multi-turn dialogue for patch generation.", "claim": "MarsCode Agent resolves 118 of 300 cases (39.3%), with 9 cases uniquely solvable only by it. 
This superior performance derives from its agent-based multi-turn reasoning compensating for stricter SWE-bench Lite protocols that forbid using dynamic failing-test feedback during patch enumeration.", "label": true }, { "paperid": "2410.19523v1", "paper_path": "./SciVer/papers/2410.19523v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5" ], "image_path": "./SciVer/images/2410.19523v1-Table2-1.png", "request_id": 141, "origin_statement": "Table 2’s non-decreasing c_{k,u} across u implies that the sum of the m smallest cumulative counts is non-decreasing; thus once sum ≥ u for some u, higher u values automatically satisfy the inequality, enabling the branch-and-bound algorithm to stop at the minimal u where the upper bound \\bar{h} is determined.", "perturbed_statement": "Table 2’s non-increasing c_{k,u} across u implies that the sum of the m largest cumulative counts is non-increasing; thus once sum ≥ u for some u, lower u values automatically satisfy the inequality, enabling the branch-and-bound algorithm to stop at the maximal u where the upper bound \\bar{h} is determined.", "perturbed_explanation": "This is incorrect because in Table 2 each row’s cumulative count c_{k,u} is actually non-decreasing in u, not non-increasing. The algorithm sums the m smallest entries, not the m largest, and it identifies the minimal u (not maximal u) satisfying the inequality to set the upper bound.", "claim": "Table 2’s non-decreasing c_{k,u} across u implies that the sum of the m smallest cumulative counts is non-decreasing; thus once sum ≥ u for some u, higher u values automatically satisfy the inequality, enabling the branch-and-bound algorithm to stop at the minimal u where the upper bound \\bar{h} is determined.", "label": true }, { "paperid": "2410.18529v2", "paper_path": "./SciVer/papers/2410.18529v2.json", "claim_type": "analytical", "type": "chart", "item": "5(b)", "section": [ "3.5" ], "image_path": "./SciVer/images/2410.18529v2_figure_5(b).png", "request_id": 142, "origin_statement": "Between 2018 and 2023, the USA averaged eight annual publications in Grounded Tasks versus five in Summarization, underscoring a methodological pivot toward context-aware procedural NLP over abstraction-oriented summarization.", "perturbed_statement": "Between 2018 and 2023, the USA averaged five annual publications in Grounded Tasks versus eight in Summarization, underscoring a methodological pivot toward context-aware procedural NLP over abstraction-oriented summarization.", "perturbed_explanation": "The perturbed statement swaps the actual publication counts. According to Fig. 
5(b), the USA consistently published about eight Grounded Tasks papers per year versus only five in Summarization, not the other way around.", "claim": "Between 2018 and 2023, the USA averaged eight annual publications in Grounded Tasks versus five in Summarization, underscoring a methodological pivot toward context-aware procedural NLP over abstraction-oriented summarization.", "label": true }, { "paperid": "2411.14736v1", "paper_path": "./SciVer/papers/2411.14736v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.2", "5" ], "image_path": "./SciVer/images/2411.14736v1_figure_5.png", "request_id": 144, "origin_statement": "The log Bayes factor for GWB versus CURN increases from approximately 5.5 in DR2full to about 7.8 in DR2new, indicating progressively stronger evidence for a stochastic gravitational-wave background as observation span and timing precision improve, enhancing the cross-correlation signal detection across pulsars.", "perturbed_statement": "The log Bayes factor for GWB versus CURN jumps from approximately 6.5 in DR2full to about 10.2 in DR2new, exceeding the very-strong-evidence threshold of log(BF)=8, driven by the 0.7-year InPTA extension doubling the array sensitivity.", "perturbed_explanation": "Figure 5 and the text show that the BF actually rises from ≈5.5 in DR2full to ≈7.8 in DR2new, not to 10.2. Moreover, adding 0.7 years of InPTA data slightly decreases the evidence, rather than doubling the PTA sensitivity as claimed.", "claim": "The log Bayes factor for GWB versus CURN increases from approximately 5.5 in DR2full to about 7.8 in DR2new, indicating progressively stronger evidence for a stochastic gravitational-wave background as observation span and timing precision improve, enhancing the cross-correlation signal detection across pulsars.", "label": true }, { "paperid": "2411.01289v1", "paper_path": "./SciVer/papers/2411.01289v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.01289v1_figure_4.png", "request_id": 146, "origin_statement": "The Sobol analysis in Figure 4 reveals that mean halting duration exhibits both the highest first-order and total-order sensitivities, indicating that prolonged stop times not only directly drive congestion detection but also interact substantially with speed and occupancy, highlighting its pivotal role in the classification model’s predictive accuracy.", "perturbed_statement": "The Sobol analysis in Figure 4 reveals that mean vehicle flow exhibits both the highest first-order and total-order sensitivities, indicating that variations in traffic counts not only directly drive congestion detection but also interact substantially with speed and occupancy, highlighting its pivotal role in the classification model’s predictive accuracy.", "perturbed_explanation": "This is incorrect because Figure 4 shows mean halting duration has the highest first-order (S1 ≈ 0.10) and total-order (ST ≈ 0.93) indices. 
In contrast, mean vehicle flow has an S1 near zero and an ST of only about 0.28, so it is not the dominant sensitivity driver.", "claim": "The Sobol analysis in Figure 4 reveals that mean halting duration exhibits both the highest first-order and total-order sensitivities, indicating that prolonged stop times not only directly drive congestion detection but also interact substantially with speed and occupancy, highlighting its pivotal role in the classification model’s predictive accuracy.", "label": true }, { "paperid": "2411.00429v1", "paper_path": "./SciVer/papers/2411.00429v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.2", "7.2" ], "image_path": "./SciVer/images/2411.00429v1-Table1-1.png", "request_id": 147, "origin_statement": "Standard deviation scaling equalizes second-order moments, constraining mean distances to within 0.39 units across diverse distributions, while range scaling, which relies on sample minima and maxima, yields up to 0.41 units of inter-distribution disparity, highlighting its susceptibility to distributional extremes.", "perturbed_statement": "Standard deviation scaling equalizes second-order moments, constraining mean distances to within 0.25 units across diverse distributions, while range scaling, which relies on sample minima and maxima, yields up to 0.50 units of inter-distribution disparity, highlighting its susceptibility to distributional extremes.", "perturbed_explanation": "The perturbed statement understates the inter-distribution variability under SD scaling and misrepresents the range-scaling spread. Table 1 shows SD scaling distances range from 0.77 to 1.16 (a difference of 0.39, not 0.25), and range scaling distances span from 0.09 to 0.50 (a spread of 0.41, not 0.50).", "claim": "Standard deviation scaling equalizes second-order moments, constraining mean distances to within 0.39 units across diverse distributions, while range scaling, which relies on sample minima and maxima, yields up to 0.41 units of inter-distribution disparity, highlighting its susceptibility to distributional extremes.", "label": true }, { "paperid": "2411.15413v1", "paper_path": "./SciVer/papers/2411.15413v1.json", "claim_type": "analytical", "type": "chart", "item": "3(b)", "section": [ "3.4" ], "image_path": "./SciVer/images/2411.15413v1_figure_3(b).png", "request_id": 148, "origin_statement": "In FG-CXR, gaze heatmaps in lower lung fields often exceed their anatomical mask areas (ratios >1), driven by focused examination of the diaphragm; Gaussian smoothing further amplifies these clusters, artificially inflating attention coverage beyond mask boundaries.", "perturbed_statement": "In FG-CXR, gaze heatmaps in the upper left lung often exceed their anatomical mask areas (ratios >1), driven by focused examination of the clavicular region; Gaussian smoothing further amplifies these clusters, artificially inflating attention coverage beyond mask boundaries.", "perturbed_explanation": "The perturbation is incorrect because Figure 3(b) shows that heatmap-to-mask ratios exceed 1 primarily in the lower left and lower right lung regions due to diaphragm examination, not in the upper left lung or clavicular area.", "claim": "In FG-CXR, gaze heatmaps in lower lung fields often exceed their anatomical mask areas (ratios >1), driven by focused examination of the diaphragm; Gaussian smoothing further amplifies these clusters, artificially inflating attention coverage beyond mask boundaries.", "label": true }, { "paperid": "2410.15910v2", 
"paper_path": "./SciVer/papers/2410.15910v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.4" ], "image_path": "./SciVer/images/2410.15910v2-Table4-1.png", "request_id": 149, "origin_statement": "BC-PMI’s use of state-action mutual information constraints yields over 92% style calibration in all three Destination classes, mitigating policy mode collapse by explicitly encoding destination-specific trajectory patterns.", "perturbed_statement": "BC-PMI’s use of reward-shaped mutual information constraints yields over 95% style calibration in all three Destination classes, mitigating policy mode collapse by explicitly encoding destination-specific trajectory patterns.", "perturbed_explanation": "This statement is incorrect because Table 4 reports BC-PMI calibration for Destination classes as 93.3%, 92.7%, and 89.8%, none exceeding 95%. Additionally, BC-PMI constrains state-action mutual information rather than applying a “reward-shaped” mutual information mechanism.", "claim": "BC-PMI’s use of state-action mutual information constraints yields over 92% style calibration in all three Destination classes, mitigating policy mode collapse by explicitly encoding destination-specific trajectory patterns.", "label": true }, { "paperid": "2411.03025v1", "paper_path": "./SciVer/papers/2411.03025v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2411.03025v1_figure_1.png", "request_id": 150, "origin_statement": "Due to over-smoothing, layering more than three GNN layers on small graphs reduces feature discrimination, causing accuracy to drop from 80% at depth 3 to 75% at depth 4, whereas medium graphs achieve peak performance at four layers (95%), reflecting optimal receptive field before over-smoothing.", "perturbed_statement": "Due to over-smoothing, layering more than three GNN layers on small graphs reduces feature discrimination, causing accuracy to drop from 80% at depth 3 to 75% at depth 4, whereas medium graphs achieve peak performance at three layers (90%), reflecting optimal receptive field before over-smoothing.", "perturbed_explanation": "The perturbed statement wrongly claims medium graphs peak at three layers with 90% accuracy. According to Figure 1, medium graphs actually reach their highest accuracy of 95% at depth 4, not at depth 3 with 90%.", "claim": "Due to over-smoothing, layering more than three GNN layers on small graphs reduces feature discrimination, causing accuracy to drop from 80% at depth 3 to 75% at depth 4, whereas medium graphs achieve peak performance at four layers (95%), reflecting optimal receptive field before over-smoothing.", "label": true }, { "paperid": "2411.07393v1", "paper_path": "./SciVer/papers/2411.07393v1.json", "claim_type": "analytical", "type": "chart", "item": "6(a)", "section": [ "5.1", "5.2" ], "image_path": "./SciVer/images/2411.07393v1_figure_6(a).png", "request_id": 152, "origin_statement": "The 2010 peak in the delta-V lightcurve reaching ~0.9 mag marks the DDO68-V1 giant eruption, consistent with LBV behaviour in metal-poor galaxies where reduced line-driven winds yield more pronounced photometric surges. Subsequent 2015–2023 oscillations (~0.2–0.3 mag) reflect classical S Dor variations.", "perturbed_statement": "The 2010 peak in the delta-V lightcurve reaching ~1.2 mag marks the DDO68-V1 giant eruption, consistent with LBV behaviour in metal-poor galaxies where reduced line-driven winds yield more pronounced photometric surges. 
Subsequent 2015–2023 oscillations (~0.2–0.3 mag) reflect classical S Dor variations.", "perturbed_explanation": "The perturbation erroneously increases the 2010 eruption amplitude to ~1.2 mag, whereas Figure 6 and the text report a maximum delta-V of only ~0.9 mag in that epoch, making the stated value inconsistent with the observed lightcurve.", "claim": "The 2010 peak in the delta-V lightcurve reaching ~0.9 mag marks the DDO68-V1 giant eruption, consistent with LBV behaviour in metal-poor galaxies where reduced line-driven winds yield more pronounced photometric surges. Subsequent 2015–2023 oscillations (~0.2–0.3 mag) reflect classical S Dor variations.", "label": true }, { "paperid": "2410.23844v1", "paper_path": "./SciVer/papers/2410.23844v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "3.2.2" ], "image_path": "./SciVer/images/2410.23844v1_figure_3.png", "request_id": 154, "origin_statement": "Mean MLP layer activations for decoupled commonsense tasks cluster around 0.125, while attention layers exhibit distinct peaks at approximately 0.15 in early (layer 2) and late (layer 25) positions, indicating that transformer attentions intermittently amplify abstract commonsense signals at multiple depths.", "perturbed_statement": "Mean MLP layer activations for decoupled commonsense tasks cluster around 0.15, surpassing attention layer peaks of approximately 0.12, indicating that MLP subcomponents dominate abstract commonsense encoding over attention mechanisms.", "perturbed_explanation": "This statement is incorrect because Figure 3 shows mean MLP activations around 0.12–0.13 and attention layer peaks near 0.15, not the reverse. The perturbed values (MLP ≈ 0.15, attention ≈ 0.12) contradict the actual activation ranges depicted in the heatmaps.", "claim": "Mean MLP layer activations for decoupled commonsense tasks cluster around 0.125, while attention layers exhibit distinct peaks at approximately 0.15 in early (layer 2) and late (layer 25) positions, indicating that transformer attentions intermittently amplify abstract commonsense signals at multiple depths.", "label": true }, { "paperid": "2409.17791v1", "paper_path": "./SciVer/papers/2409.17791v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.17791v1_figure_3.png", "request_id": 156, "origin_statement": "Sequentially removing key content from LLM predictions reduces both preferred and dis-preferred response lengths, but shifts reward model calibration: preferred response scores monotonically decline by roughly 0.15 per removal on TL;DR, while dis-preferred scores rise by about 0.05, indicating systematic erosion of preference signal.", "perturbed_statement": "Sequentially removing key content from LLM predictions reduces both preferred and dis-preferred response lengths, but shifts reward model calibration: preferred response scores monotonically decline by roughly 0.3 per removal on TL;DR, while dis-preferred scores rise by about 0.5, indicating systematic erosion of preference signal.", "perturbed_explanation": "The perturbation misreports the magnitude of score changes: Figure 3 shows preferred scores drop by about 0.15 (e.g. 
from 0.9 to 0.6, then to 0.55 and 0.52) and dis-preferred scores increase by about 0.05 (from –0.05 to 0.0, then 0.1 and 0.2), not 0.3 and 0.5 per removal as stated.", "claim": "Sequentially removing key content from LLM predictions reduces both preferred and dis-preferred response lengths, but shifts reward model calibration: preferred response scores monotonically decline by roughly 0.15 per removal on TL;DR, while dis-preferred scores rise by about 0.05, indicating systematic erosion of preference signal.", "label": true }, { "paperid": "2410.08821v1", "paper_path": "./SciVer/papers/2410.08821v1.json", "claim_type": "analytical", "type": "chart", "item": "2(c)", "section": [ "5.1.4" ], "image_path": "./SciVer/images/2410.08821v1_figure_2(c).png", "request_id": 160, "origin_statement": "On the fair top-k benchmark, Vanilla RAG’s f1-score rises modestly from about 47% at top-5 to 49% at top-9, indicating diminishing returns from additional passages. In contrast, our adaptive retrieval consistently achieves near 60% f1, highlighting superior context integration.", "perturbed_statement": "On the fair top-k benchmark, Vanilla RAG’s f1-score surges from about 45% at top-5 to 52% at top-9, indicating strong retrieval scaling. In contrast, our adaptive retrieval achieves only around 55% f1, showing marginal gains over single-step methods.", "perturbed_explanation": "The perturbed statement incorrectly reports Vanilla RAG f1 scores (actually ~47% at top-5 and ~49% at top-9, not 45% to 52%) and understates our method’s performance (it achieves nearly 60% f1, not 55%). These values contradict Figure 2 and the context.", "claim": "On the fair top-k benchmark, Vanilla RAG’s f1-score rises modestly from about 47% at top-5 to 49% at top-9, indicating diminishing returns from additional passages. In contrast, our adaptive retrieval consistently achieves near 60% f1, highlighting superior context integration.", "label": true }, { "paperid": "2409.15552v1", "paper_path": "./SciVer/papers/2409.15552v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "6.1.2" ], "image_path": "./SciVer/images/2409.15552v1_figure_3.png", "request_id": 162, "origin_statement": "Reducing the PIROCK tolerance from 1e-02 to 1e-05 consistently lowers the median log10 density error by nearly one order across the domain, as dynamic stage adaptation expands the stability region for hyperdiffusion, thereby minimizing numerical dissipation around the shock front.", "perturbed_statement": "Reducing the PIROCK tolerance from 1e-02 to 1e-05 lowers the median log10 density error by two orders across the domain, as dynamic stage adaptation constricts the stability region for hyperdiffusion, thereby amplifying numerical dissipation around the shock front.", "perturbed_explanation": "The top-middle panel shows an average improvement of about one order of magnitude in the density error when decreasing tolerance, not two orders. 
Moreover, PIROCK’s dynamic stage adaptation actually broadens the stability domain for hyperdiffusion to reduce dissipation, contrary to the claim of constriction and increased dissipation.", "claim": "Reducing the PIROCK tolerance from 1e-02 to 1e-05 consistently lowers the median log10 density error by nearly one order across the domain, as dynamic stage adaptation expands the stability region for hyperdiffusion, thereby minimizing numerical dissipation around the shock front.", "label": true }, { "paperid": "2411.02725v1", "paper_path": "./SciVer/papers/2411.02725v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.2.2" ], "image_path": "./SciVer/images/2411.02725v1_figure_4.png", "request_id": 166, "origin_statement": "The occurrence of English programming terms in Korean and Chinese queries indicates code-switching driven by English-dominated technical jargon, while the minimal Arabic query implies users rely on contextual clues over detailed language, revealing varied non-English help-seeking strategies.", "perturbed_statement": "The Arabic query's use of key technical terms demonstrates code-switching, whereas the Chinese query's brevity implies a preference for contextual clues over jargon.", "perturbed_explanation": "This statement is incorrect because the Arabic query ('ما هى المشكلة') contains no technical terms, while the Chinese query ('private, public, static, instance') explicitly lists English programming keywords; thus the claimed roles of Arabic and Chinese queries are reversed.", "claim": "The occurrence of English programming terms in Korean and Chinese queries indicates code-switching driven by English-dominated technical jargon, while the minimal Arabic query implies users rely on contextual clues over detailed language, revealing varied non-English help-seeking strategies.", "label": true }, { "paperid": "2411.07050v1", "paper_path": "./SciVer/papers/2411.07050v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.07050v1-Table4-1.png", "request_id": 167, "origin_statement": "FedInit achieves a substantially lower global Hausdorff distance (9.19 mm) compared to FedAvg (38.49 mm), suggesting that personalized initialization in federated learning effectively alleviates intra-batch heterogeneity, leading to more accurate boundary delineation in cardiac echo segmentation.", "perturbed_statement": "FedInit achieves a lower global Hausdorff distance of 9.19 mm compared to FedAvg’s 49.38 mm, indicating that personalized initialization reduces label noise from partially annotated data, improving segmentation.", "perturbed_explanation": "The perturbed statement is incorrect because FedAvg’s global Hausdorff distance is 38.49 mm (not 49.38 mm) according to Table 4, and the primary mechanism for FedInit’s improvement is mitigation of intra-batch heterogeneity, not reduction of label noise from partially annotated data.", "claim": "FedInit achieves a substantially lower global Hausdorff distance (9.19 mm) compared to FedAvg (38.49 mm), suggesting that personalized initialization in federated learning effectively alleviates intra-batch heterogeneity, leading to more accurate boundary delineation in cardiac echo segmentation.", "label": true }, { "paperid": "2410.19218v1", "paper_path": "./SciVer/papers/2410.19218v1.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.19218v1_figure_4(a).png", "request_id": 
168, "origin_statement": "On CSFCube, Recall@100 climbs from 0.71 at 6.25% retention to 0.75 at 12.5%, but plateaus beyond 25% retention, indicating that indexing more than a quarter of documents yields diminishing recall returns as core topics already encompass most relevant documents, reducing the marginal benefit of additional candidates.", "perturbed_statement": "On CSFCube, Recall@100 climbs from 0.71 at 6.25% retention to 0.75 at 12.5%, but plateaus beyond 50% retention, indicating that indexing more than half of documents yields diminishing recall returns as core topics already encompass most relevant documents, reducing the marginal benefit of additional candidates.", "perturbed_explanation": "The statement incorrectly shifts the plateau threshold from 25% to 50% retention. Figure 4 shows Recall@100 stabilizes around 0.75 starting at 25% retention, not 50%, so claiming the plateau occurs beyond 50% contradicts the observed data.", "claim": "On CSFCube, Recall@100 climbs from 0.71 at 6.25% retention to 0.75 at 12.5%, but plateaus beyond 25% retention, indicating that indexing more than a quarter of documents yields diminishing recall returns as core topics already encompass most relevant documents, reducing the marginal benefit of additional candidates.", "label": true }, { "paperid": "2411.14711v1", "paper_path": "./SciVer/papers/2411.14711v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "5.2.4", "6", "8.2" ], "image_path": "./SciVer/images/2411.14711v1_figure_2.png", "request_id": 170, "origin_statement": "In ogbl-ddi (degree 500), GCN(X+NE) outperforms GCN(X) by ~83.3 points (95.49 vs 12.21 Hits@20), while on ogbl-collab (degree 8) the gain is only ~2.3 points (53.08 vs 50.76 Hits@50), reflecting that embedding integration yields far greater improvements on denser graphs.", "perturbed_statement": "On ogbl-ddi (degree 50), GCN(X+NE) outperforms GCN(X) by ~83.3 points (95.49 vs 12.21 Hits@20), while on ogbl-collab (degree 8) the gain is only ~2.3 points (53.08 vs 50.76 Hits@50), reflecting that embedding integration yields far greater improvements on sparser graphs.", "perturbed_explanation": "The perturbed statement misstates ogbl-ddi’s average degree as 50; the figure reports degree = 500. 
It also reverses the observed trend by claiming embedding integration benefits sparser graphs most, whereas the data show much larger gains on the dense ogbl-ddi (degree 500) compared to the sparse ogbl-collab (degree 8).", "claim": "In ogbl-ddi (degree 500), GCN(X+NE) outperforms GCN(X) by ~83.3 points (95.49 vs 12.21 Hits@20), while on ogbl-collab (degree 8) the gain is only ~2.3 points (53.08 vs 50.76 Hits@50), reflecting that embedding integration yields far greater improvements on denser graphs.", "label": true }, { "paperid": "2409.11887v1", "paper_path": "./SciVer/papers/2409.11887v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.11887v1_figure_5.png", "request_id": 174, "origin_statement": "DocMamba’s inference speed declines modestly from 4.4 FPS at 512 tokens to 3.1 FPS at 4096 tokens, reflecting its linear-time state-space model complexity, whereas LayoutLMv3’s FPS plummets from 4.2 FPS to 1.3 FPS due to quadratic self-attention overhead.", "perturbed_statement": "DocMamba’s inference speed declines modestly from 4.4 FPS at 512 tokens to 3.2 FPS at 4096 tokens, reflecting its linear-time self-attention mechanism, whereas LayoutLMv3’s FPS plummets from 4.2 FPS to 1.3 FPS due to quadratic self-attention overhead.", "perturbed_explanation": "The perturbation is incorrect because (1) DocMamba’s FPS at an input length of 4096 is 3.1 FPS, not 3.2 FPS, as shown in Figure 5. (2) DocMamba does not use a linear-time self-attention mechanism—it relies on a state-space model, which accounts for its linear complexity, rather than self-attention.", "claim": "DocMamba’s inference speed declines modestly from 4.4 FPS at 512 tokens to 3.1 FPS at 4096 tokens, reflecting its linear-time state-space model complexity, whereas LayoutLMv3’s FPS plummets from 4.2 FPS to 1.3 FPS due to quadratic self-attention overhead.", "label": true }, { "paperid": "2409.07107v1", "paper_path": "./SciVer/papers/2409.07107v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.07107v1-Table3-1.png", "request_id": 175, "origin_statement": "Our method's implicit gradient computation for the Atlas robot on Apple M3 CPU (95.2 μs) is nearly as fast as its simulation step (127.3 μs), reflecting that solving the NCP for gradients can approach real-time simulation speeds.", "perturbed_statement": "Our method's implicit gradient computation for the Atlas robot on Apple M3 CPU (9.52 μs) is over ten times faster than its simulation step (127.3 μs), reflecting that solving the NCP is trivial relative to forward dynamics.", "perturbed_explanation": "The table reports implicit gradient timing for Atlas as 95.2 μs (±37.6 μs), not 9.52 μs. 
Hence, the claim of being over ten times faster than the 127.3 μs simulation step is incorrect and contradicts the context.", "claim": "Our method's implicit gradient computation for the Atlas robot on Apple M3 CPU (95.2 μs) is nearly as fast as its simulation step (127.3 μs), reflecting that solving the NCP for gradients can approach real-time simulation speeds.", "label": true }, { "paperid": "2410.22543v1", "paper_path": "./SciVer/papers/2410.22543v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4" ], "image_path": "./SciVer/images/2410.22543v1_figure_4.png", "request_id": 176, "origin_statement": "Minimal DSH=0.17 and D=0.05 between 2003 RW11 and 2009 BE77 at 1094 CE implies their fragmentation ~1.0 kyr ago; this aligns with low-speed ejection models where minimal orbital dispersion at break-up yields coincident orbital similarity before secular perturbations increase DSH over time.", "perturbed_statement": "Minimal DSH=0.07 and D=0.15 between 2003 RW11 and 2009 BE77 at 1194 CE implies their fragmentation ~0.8 kyr ago; this aligns with low-speed ejection models where minimal orbital dispersion at break-up yields coincident orbital similarity before secular perturbations increase DSH over time.", "perturbed_explanation": "The perturbed statement incorrectly reports minimal DSH=0.07 and D=0.15 at 1194 CE. In the provided context, the actual minimum values for the 2003 RW11–2009 BE77 pair are DSH=0.17 and D=0.05 around 1094 CE, so both the criterion values and the timing contradict the study.", "claim": "Minimal DSH=0.17 and D=0.05 between 2003 RW11 and 2009 BE77 at 1094 CE implies their fragmentation ~1.0 kyr ago; this aligns with low-speed ejection models where minimal orbital dispersion at break-up yields coincident orbital similarity before secular perturbations increase DSH over time.", "label": true }, { "paperid": "2409.16057v2", "paper_path": "./SciVer/papers/2409.16057v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.16057v2-Table2-1.png", "request_id": 177, "origin_statement": "By systematically suppressing trigger-activated neuron weights, the proposed removal approach yields a 223% relative AP increase (from 0.088 to 0.285) on the poisoned Faster R-CNN model, while retaining 84.6% of its original clean-data AP, demonstrating effective backdoor neutralization with minimal clean performance loss.", "perturbed_statement": "By systematically suppressing trigger-activated neuron weights, the proposed removal approach yields a 300% relative AP increase (from 0.088 to 0.285) on the poisoned Faster R-CNN model, while retaining 92% of its original clean-data AP, demonstrating effective backdoor neutralization with minimal clean performance loss.", "perturbed_explanation": "The statement is incorrect because the actual relative AP increase is about 223% (not 300%) when going from 0.088 to 0.285, and the clean-data AP retention is approximately 84.6% (0.285/0.337), not 92%, as shown in Table 2.", "claim": "By systematically suppressing trigger-activated neuron weights, the proposed removal approach yields a 223% relative AP increase (from 0.088 to 0.285) on the poisoned Faster R-CNN model, while retaining 84.6% of its original clean-data AP, demonstrating effective backdoor neutralization with minimal clean performance loss.", "label": true }, { "paperid": "2410.02810v1", "paper_path": "./SciVer/papers/2410.02810v1.json", "claim_type": "analytical", "type": "chart", "item": "6", 
"section": [ "6.2" ], "image_path": "./SciVer/images/2410.02810v1_figure_6.png", "request_id": 178, "origin_statement": "At 30–40 steps, adding explicit state-tracking boosts success from 0.50 (ReAct) to 0.83 (StateAct), showing that maintaining an internal environment representation alleviates planning complexity in long-horizon tasks and sustains above 0.65 success beyond 40 steps.", "perturbed_statement": "At 30–40 steps, adding explicit state-tracking only raises success from 0.55 to 0.75, and it drops below 0.50 for tasks beyond 40 steps.", "perturbed_explanation": "The perturbed statement incorrectly reports the success rates: the context shows ReAct at 30–40 steps achieves 0.50 (not 0.55) and StateAct (state+thought+action) achieves 0.83 (not 0.75). Additionally, for tasks of 40–50 steps, the actual success is 0.67, which is above 0.50, not below.", "claim": "At 30–40 steps, adding explicit state-tracking boosts success from 0.50 (ReAct) to 0.83 (StateAct), showing that maintaining an internal environment representation alleviates planning complexity in long-horizon tasks and sustains above 0.65 success beyond 40 steps.", "label": true }, { "paperid": "2409.14878v1", "paper_path": "./SciVer/papers/2409.14878v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.14878v1-Table3-1.png", "request_id": 179, "origin_statement": "Instruction fine-tuning of Baichuan2-7B significantly boosts detection of severe depression, raising its F1 score from 0.000 to 0.667, demonstrating that specialized interpretative training enables LLMs to handle the data sparsity of rare classes like moderate and severe depression.", "perturbed_statement": "Instruction fine-tuning of Baichuan2-7B significantly boosts detection of severe depression, raising its F1 score from 0.000 to 0.400, demonstrating that specialized interpretative training enables LLMs to handle the data sparsity of rare classes like moderate and severe depression.", "perturbed_explanation": "The perturbed statement misreports the severe depression F1 score as 0.400, while Table 3 clearly shows the ‘Ours’ model achieves an F1 of 0.667 for the severe category, contradicting the explicit table value.", "claim": "Instruction fine-tuning of Baichuan2-7B significantly boosts detection of severe depression, raising its F1 score from 0.000 to 0.667, demonstrating that specialized interpretative training enables LLMs to handle the data sparsity of rare classes like moderate and severe depression.", "label": true }, { "paperid": "2410.11378v1", "paper_path": "./SciVer/papers/2410.11378v1.json", "claim_type": "analytical", "type": "chart", "item": "5(a)", "section": [ "4.8" ], "image_path": "./SciVer/images/2410.11378v1_figure_5(a).png", "request_id": 180, "origin_statement": "WPFed’s locality-sensitive hashing neighbor selection filters low-quality poisoned gradients, sustaining honest clients’ convergence above 90% accuracy despite 20% adversaries reinitializing models every three iterations; ProxyFL’s undifferentiated aggregation incurs ~7% transient performance drops.", "perturbed_statement": "WPFed’s K-means clustering neighbor selection filters malicious updates, sustaining convergence above 95% accuracy despite 20% adversaries resetting models every five iterations, whereas ProxyFL’s uniform aggregation leads to 15% performance drops.", "perturbed_explanation": "The perturb statement wrongly asserts that WPFed uses K-means clustering instead of LSH-based similarity and 
rank-based performance, and that adversaries reset models every five iterations instead of every three.", "claim": "WPFed’s locality-sensitive hashing neighbor selection filters low-quality poisoned gradients, sustaining honest clients’ convergence above 90% accuracy despite 20% adversaries reinitializing models every three iterations; ProxyFL’s undifferentiated aggregation incurs ~7% transient performance drops.", "label": true }, { "paperid": "2409.07124v1", "paper_path": "./SciVer/papers/2409.07124v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "2.3.4" ], "image_path": "./SciVer/images/2409.07124v1-Table2-1.png", "request_id": 183, "origin_statement": "Because the Local Arm’s fitted maximal azimuth θ_max=107° coincides with the Gum Nebula’s position, its tight pitch index (p_LA=2.77) effectively channels denser H II complexes into the model, enhancing N II line intensity predictions near the Sun by better matching observed ionization gradients.", "perturbed_statement": "Because the Local Arm’s fitted maximal azimuth θ_max=117° coincides with the Gum Nebula’s position, its loose pitch index (p_LA=3.77) effectively channels denser H II complexes into the model, enhancing N II line intensity predictions near the Sun by better matching observed ionization gradients.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 reports θ_max=107° (not 117°) and p_LA=2.77 (not 3.77). Moreover, a pitch index of 2.77 indicates a tighter spiral, contrary to the ‘loose’ winding described.", "claim": "Because the Local Arm’s fitted maximal azimuth θ_max=107° coincides with the Gum Nebula’s position, its tight pitch index (p_LA=2.77) effectively channels denser H II complexes into the model, enhancing N II line intensity predictions near the Sun by better matching observed ionization gradients.", "label": true }, { "paperid": "2411.09556v1", "paper_path": "./SciVer/papers/2411.09556v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.09556v1_figure_1.png", "request_id": 184, "origin_statement": "In 2TC, radiative losses lower electron temperatures from ≈2×10^11K to ≈1×10^11K within r≲5 r_g, indicating synchrotron cooling timescales become comparable to orbital timescales near the ISCO, thus shifting submillimeter emission to longer wavelengths compared to the noncooled 2T model.", "perturbed_statement": "In 2TC, radiative losses lower electron temperatures from ≈2×10^10K to ≈1×10^10K within r≲5 r_g, indicating synchrotron cooling timescales become shorter than orbital timescales near the ISCO, thus shifting submillimeter emission to longer wavelengths compared to the noncooled 2T model.", "perturbed_explanation": "This is incorrect because Figure 1 and the context report log T_e≈10.9–11.3 (i.e. ∼8×10^10–2×10^11 K), not ∼10^10 K. 
Moreover, the text states synchrotron cooling timescales approach (become comparable to) the dynamical timescale, rather than becoming shorter than the orbital timescale.", "claim": "In 2TC, radiative losses lower electron temperatures from ≈2×10^11K to ≈1×10^11K within r≲5 r_g, indicating synchrotron cooling timescales become comparable to orbital timescales near the ISCO, thus shifting submillimeter emission to longer wavelengths compared to the noncooled 2T model.", "label": true }, { "paperid": "2411.15583v1", "paper_path": "./SciVer/papers/2411.15583v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.15583v1_figure_3.png", "request_id": 186, "origin_statement": "The meta-analytic findings suggest that implicit diegetic guidance (Modality 2) trends towards small positive presence effects (g=0.13), potentially because integrating guidance into environmental cues reduces cognitive load and fosters immersion, whereas explicit diegetic cues (Modality 1) slightly diminish presence (g=-0.14) by introducing disruptive attentional shifts.", "perturbed_statement": "Implicit diegetic guidance (Modality 2) exhibits a moderate positive presence effect (g=0.50), suggesting that embedding cues in environmental textures significantly reduces cognitive load. Conversely, explicit diegetic guidance (Modality 1) yields g=-0.45, indicating that overt prompts substantially distract users and lower immersion.", "perturbed_explanation": "The reported effect sizes in the perturbation are incorrect: the actual meta-analysis shows Modality 2 has g=0.13 (not 0.50) and Modality 1 has g=-0.14 (not -0.45), contradicting the forest plots and summary statistics in Figure 3 and the context description.", "claim": "The meta-analytic findings suggest that implicit diegetic guidance (Modality 2) trends towards small positive presence effects (g=0.13), potentially because integrating guidance into environmental cues reduces cognitive load and fosters immersion, whereas explicit diegetic cues (Modality 1) slightly diminish presence (g=-0.14) by introducing disruptive attentional shifts.", "label": true }, { "paperid": "2409.15317v1", "paper_path": "./SciVer/papers/2409.15317v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.15317v1-Table1-1.png", "request_id": 187, "origin_statement": "In the Quadrant space, IDA increases the noisy surrogate pilot’s hit rate from 2.07 to 3.6 targets per minute, indicating that interventional diffusion assistance can augment noisy control policies by approximately 74% via improved goal inference under spatial constraints.", "perturbed_statement": "In the Linear space, IDA increases the noisy surrogate pilot’s hit rate from 2.07 to 4.6 targets per minute, indicating that interventional diffusion assistance can more than double noisy policy performance via goal inference under spatial constraints.", "perturbed_explanation": "This statement is incorrect because in the Linear goal space IDA only raises the noisy pilot’s hit rate from 2.07 to 3.1 targets/min (Table 1), not 4.6. 
The claimed >100% improvement is also wrong; the actual improvement (~49%) is far less than doubling.", "claim": "In the Quadrant space, IDA increases the noisy surrogate pilot’s hit rate from 2.07 to 3.6 targets per minute, indicating that interventional diffusion assistance can augment noisy control policies by approximately 74% via improved goal inference under spatial constraints.", "label": true }, { "paperid": "2410.02409v1", "paper_path": "./SciVer/papers/2410.02409v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1.2" ], "image_path": "./SciVer/images/2410.02409v1-Table1-1.png", "request_id": 189, "origin_statement": "Table 1 shows for factors ψ(y) (with |y|=n) in the ternary Thue–Morse word that the weight ℓ|y|₀ + m|y|ₘ takes seven distinct linear values (ℓn+mn, ℓ(n+1)+m(n−1), …), demonstrating additive complexity equals seven.", "perturbed_statement": "Table 1 shows for factors ψ(y) (with |y|=n) in the ternary Thue–Morse word that the weight ℓ|y|₀ + m|y|ₘ takes eight distinct linear values (ℓn+mn, ℓ(n+1)+m(n−1), …), demonstrating additive complexity equals eight.", "perturbed_explanation": "This is incorrect because Table 1 lists exactly seven distinct weight expressions, not eight, so the additive complexity is seven rather than eight.", "claim": "Table 1 shows for factors ψ(y) (with |y|=n) in the ternary Thue–Morse word that the weight ℓ|y|₀ + m|y|ₘ takes seven distinct linear values (ℓn+mn, ℓ(n+1)+m(n−1), …), demonstrating additive complexity equals seven.", "label": true }, { "paperid": "2410.09123v2", "paper_path": "./SciVer/papers/2410.09123v2.json", "claim_type": "analytical", "type": "chart", "item": "3(d)", "section": [ "5.5" ], "image_path": "./SciVer/images/2410.09123v2_figure_3(d).png", "request_id": 190, "origin_statement": "In Fig. 3(d), FB15K-237’s MRR peaks at ~25 adapter neurons, after which performance plateaus due to the bottleneck’s regularizing effect on capacity, while WIKI and UMLS optimally leverage around 50 neurons to balance expressive power and parameter efficiency.", "perturbed_statement": "MRR for FB15K-237 peaks at approximately 100 adapter neurons, indicating a need for wide bottleneck layers, whereas WIKI and UMLS achieve optimal performance around 10 neurons due to their simpler relational structures.", "perturbed_explanation": "The perturbed statement contradicts the context: Fig. 3(d) shows FB15K-237’s MRR peaks at ~25 neurons, not ~100, and WIKI and UMLS both reach optimal performance around 50 neurons, not 10. These misreported numbers are incompatible with the plotted results.", "claim": "In Fig. 
3(d), FB15K-237’s MRR peaks at ~25 adapter neurons, after which performance plateaus due to the bottleneck’s regularizing effect on capacity, while WIKI and UMLS optimally leverage around 50 neurons to balance expressive power and parameter efficiency.", "label": true }, { "paperid": "2409.19351v1", "paper_path": "./SciVer/papers/2409.19351v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "1", "4.3" ], "image_path": "./SciVer/images/2409.19351v1-Table1-1.png", "request_id": 193, "origin_statement": "By leveraging moving automotive sensors across 0.6 to 4 km² networks with 1–2 s simulation sampling, the CMAE method reduces cloud velocity estimation errors by capturing extended spatial cloud continuity, thus mitigating edge effects present in fixed local networks limited to below 1 km².", "perturbed_statement": "By leveraging moving automotive sensors across 0.06 to 4 km² networks with 0.5–1 s simulation sampling, the CMAE method reduces cloud velocity estimation errors by capturing extended spatial cloud continuity, thus mitigating edge effects present in fixed local networks limited to below 1 km².", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 indicates that this work covers network sizes from 0.6 km² to 4 km², not 0.06 km². Additionally, the sampling period is 1–2 s in the study, not 0.5–1 s as claimed.", "claim": "By leveraging moving automotive sensors across 0.6 to 4 km² networks with 1–2 s simulation sampling, the CMAE method reduces cloud velocity estimation errors by capturing extended spatial cloud continuity, thus mitigating edge effects present in fixed local networks limited to below 1 km².", "label": true }, { "paperid": "2409.08598v1", "paper_path": "./SciVer/papers/2409.08598v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.08598v1_figure_4.png", "request_id": 194, "origin_statement": "The self-contrast objective yields consistent accuracy gains (e.g., +0.97% on RAF-DB with Swin-T), implying that in-model neutral instances as negatives reinforce intra-class compactness and inter-class separation, thereby enhancing the discriminative quality of facial expression representations beyond external contrastive sampling.", "perturbed_statement": "The self-contrast objective yields consistent accuracy losses (e.g., -0.5% on RAF-DB with Swin-T), implying that in-model neutral instances as negatives weaken intra-class compactness and inter-class separation, thereby diminishing the discriminative quality of facial expression representations.", "perturbed_explanation": "This statement is incorrect because Figure 4(a) shows that the self-contrast (SC) objective achieves about 92.3% accuracy versus 91.3% for contrastive learning (CL) on RAF-DB with Swin-T, a +0.97% gain rather than a −0.5% loss; SC thus improves, not degrades, representation quality.", "claim": "The self-contrast objective yields consistent accuracy gains (e.g., +0.97% on RAF-DB with Swin-T), implying that in-model neutral instances as negatives reinforce intra-class compactness and inter-class separation, thereby enhancing the discriminative quality of facial expression representations beyond external contrastive sampling.", "label": true }, { "paperid": "2411.03697v1", "paper_path": "./SciVer/papers/2411.03697v1.json", "claim_type": "analytical", "type": "table", "item": "7", "section": [ "6.6" ], "image_path": "./SciVer/images/2411.03697v1-Table7-1.png", "request_id": 195, 
"origin_statement": "By reusing the same FPGA arithmetic units for both linear MatMul and non-linear functions, TATAA minimizes data movement and context-switch overhead, achieving 0.643 Inf./sec./DSP on DeiT-B—over 1.32× the DSP efficiency of Huang et al.’s ViT-T (0.486 Inf./sec./DSP), despite similar DSP counts.", "perturbed_statement": "By reusing the same FPGA arithmetic units for both linear MatMul and non-linear functions, TATAA minimizes data movement and context-switch overhead, achieving 0.865 Inf./sec./DSP on DeiT-B—over 4× the DSP efficiency of Huang et al.’s ViT-T (0.486 Inf./sec./DSP), despite similar DSP counts.", "perturbed_explanation": "The perturbed statement incorrectly lists TATAA’s DSP efficiency for DeiT-B as 0.865 Inf./sec./DSP, whereas Table 7 shows 0.643 Inf./sec./DSP. Consequently, the claim of over 4× speedup versus Huang et al.’s 0.486 Inf./sec./DSP is false; 0.643 is only about 1.32× higher, not four times.", "claim": "By reusing the same FPGA arithmetic units for both linear MatMul and non-linear functions, TATAA minimizes data movement and context-switch overhead, achieving 0.643 Inf./sec./DSP on DeiT-B—over 1.32× the DSP efficiency of Huang et al.’s ViT-T (0.486 Inf./sec./DSP), despite similar DSP counts.", "label": true }, { "paperid": "2411.07976v5", "paper_path": "./SciVer/papers/2411.07976v5.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.07976v5-Table5-1.png", "request_id": 197, "origin_statement": "By integrating a label-guided contrastive loss, DINO-LG halves false negatives (76→39), raising sensitivity from 0.79 to 0.89 and specificity from 0.77 to 0.90. This targeted reduction sharpens calcification detection by improving slice-level discrimination of non‐calcified versus calcified areas.", "perturbed_statement": "By integrating a label-guided contrastive loss, DINO-LG halves false negatives (76→39), raising sensitivity from 0.79 to 0.95 and specificity from 0.77 to 0.90. This targeted reduction sharpens calcification detection by improving slice-level discrimination of non‐calcified versus calcified areas.", "perturbed_explanation": "The perturbed statement incorrectly reports the DINO-LG sensitivity as 0.95. According to Table 5, DINO-LG’s true positive rate (sensitivity) is actually 0.89, not 0.95.", "claim": "By integrating a label-guided contrastive loss, DINO-LG halves false negatives (76→39), raising sensitivity from 0.79 to 0.89 and specificity from 0.77 to 0.90. 
This targeted reduction sharpens calcification detection by improving slice-level discrimination of non‐calcified versus calcified areas.", "label": true }, { "paperid": "2409.15621v1", "paper_path": "./SciVer/papers/2409.15621v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1.1", "4.1.3" ], "image_path": "./SciVer/images/2409.15621v1-Table1-1.png", "request_id": 201, "origin_statement": "By refining only the contact layer, variable-order (VO) NURBS resolves high-gradient stress fields at the indentor–slab interface, halving oscillation amplitudes of frictional forces relative to standard quadratic NURBS while using nearly four times fewer DOFs, thus optimizing computational efficiency in non-linear contact simulations.", "perturbed_statement": "By refining only the contact layer, VO NURBS resolves high-gradient stress fields at the indentor–slab interface, halving oscillation amplitudes of frictional forces relative to standard quadratic NURBS while using nearly eight times fewer DOFs, thus optimizing computational efficiency in non-linear contact simulations.", "perturbed_explanation": "This statement incorrectly claims VO NURBS uses nearly eight times fewer DOFs. The context specifies that at mesh level m2 VO NURBS requires approximately four times fewer DOFs compared to standard NURBS; eight-fold reduction applies only at the coarsest mesh (m1), not at m2.", "claim": "By refining only the contact layer, variable-order (VO) NURBS resolves high-gradient stress fields at the indentor–slab interface, halving oscillation amplitudes of frictional forces relative to standard quadratic NURBS while using nearly four times fewer DOFs, thus optimizing computational efficiency in non-linear contact simulations.", "label": true }, { "paperid": "2410.07484v2", "paper_path": "./SciVer/papers/2410.07484v2.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.07484v2_figure_5.png", "request_id": 202, "origin_statement": "Iterative rule induction accelerates world alignment by capturing deterministic subroutines: gather and fight actions achieve full (100%) and near-full (~91%) cover in a single iteration, indicating low epistemic uncertainty, whereas craft and mine require three to four iterations to reach 87% and 96% coverage, reflecting higher combinatorial complexity.", "perturbed_statement": "Iterative rule induction accelerates world alignment by capturing deterministic subroutines: gather and fight actions achieve full (100%) and near-full (~91%) cover after two iterations, indicating low epistemic uncertainty, whereas craft and mine require three to four iterations to reach 87% and 96% coverage, reflecting higher combinatorial complexity.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows that gather and fight actions reach their respective cover rates (100% and ~91%) at the first iteration, not after two iterations.", "claim": "Iterative rule induction accelerates world alignment by capturing deterministic subroutines: gather and fight actions achieve full (100%) and near-full (~91%) cover in a single iteration, indicating low epistemic uncertainty, whereas craft and mine require three to four iterations to reach 87% and 96% coverage, reflecting higher combinatorial complexity.", "label": true }, { "paperid": "2410.05053v1", "paper_path": "./SciVer/papers/2410.05053v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "3.2", 
"3.5" ], "image_path": "./SciVer/images/2410.05053v1_figure_7.png", "request_id": 204, "origin_statement": "In bi-, tri-, and quadrilayer slits, water density peaks near graphene interfaces concentrate due to van der Waals attractions, while rising temperature broadens these peaks inward, indicating reduced hydrogen-bond network rigidity that likely enhances confined water mobility under nanoconfinement.", "perturbed_statement": "In monolayer slits, water density peaks are closer to graphene interfaces due to stronger van der Waals attractions, and rising temperature symmetrically broadens these peaks both inward and outward, indicating increased hydrogen bond network rigidity and reduced mobility under nanoconfinement.", "perturbed_explanation": "The perturbed statement erroneously claims monolayer peaks are closer to the graphene sheets, whereas Fig. 7 shows monolayer peaks are significantly farther apart. It also reverses the thermal effect by stating temperature increases hydrogen-bond rigidity and reduces mobility, contradicting the observed peak broadening and network loosening at higher temperatures.", "claim": "In bi-, tri-, and quadrilayer slits, water density peaks near graphene interfaces concentrate due to van der Waals attractions, while rising temperature broadens these peaks inward, indicating reduced hydrogen-bond network rigidity that likely enhances confined water mobility under nanoconfinement.", "label": true }, { "paperid": "2410.18514v1", "paper_path": "./SciVer/papers/2410.18514v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5" ], "image_path": "./SciVer/images/2410.18514v1-Table2-1.png", "request_id": 207, "origin_statement": "The 52% relative increase in LAMBADA accuracy when using MDM over ARM (from 26.92% to 40.99%) indicates that MDM's denoising objective yields emergent sentence-level coherence capabilities absent in autoregressive models under equal compute.", "perturbed_statement": "The 52% relative increase in LAMBADA accuracy when using MDM over ARM (from 26.92% to 40.99%) indicates that MDM's autoregressive sampling objective yields emergent sentence-level coherence capabilities absent in denoising models under equal compute.", "perturbed_explanation": "This statement wrongly asserts that MDM uses an autoregressive sampling objective and that coherence emerges absent in denoising models. 
In fact, MDMs employ a denoising masked objective—not autoregressive sampling—and denoising models (MDMs) are exactly the ones demonstrating the coherence improvement.", "claim": "The 52% relative increase in LAMBADA accuracy when using MDM over ARM (from 26.92% to 40.99%) indicates that MDM's denoising objective yields emergent sentence-level coherence capabilities absent in autoregressive models under equal compute.", "label": true }, { "paperid": "2411.09850v1", "paper_path": "./SciVer/papers/2411.09850v1.json", "claim_type": "analytical", "type": "chart", "item": "1(c)", "section": [ "3.1", "3.2" ], "image_path": "./SciVer/images/2411.09850v1_figure_1(c).png", "request_id": 208, "origin_statement": "By incorporating crafted measurements, DPS-CM achieves rapid low-frequency convergence followed by high-frequency detail recovery, reducing reconstruction MSE below 0.01 by t≈300, while DPS plateaus around 0.1 MSE.", "perturbed_statement": "By integrating crafted measurements, DPS-CM accelerates high-frequency detail recovery, reducing reconstruction MSE below 0.001 by t≈400, whereas DPS plateaus around 0.01 MSE.", "perturbed_explanation": "This statement is incorrect: Fig.1(c) shows DPS-CM first focuses on low-frequency convergence (not high-frequency) and reaches about 0.005 MSE around t≈300 (not below 0.001 at t≈400). Also, DPS plateaus around 0.1 MSE, not 0.01.", "claim": "By incorporating crafted measurements, DPS-CM achieves rapid low-frequency convergence followed by high-frequency detail recovery, reducing reconstruction MSE below 0.01 by t≈300, while DPS plateaus around 0.1 MSE.", "label": true }, { "paperid": "2409.05305v1", "paper_path": "./SciVer/papers/2409.05305v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.05305v1_figure_3.png", "request_id": 210, "origin_statement": "In the 2D Central Potential experiment, the Pareto front exhibits a sharp decrease in MSE around the complexity score of the true angular momentum expression L = x1*y2 − x2*y1, indicating the symbolic search correctly identifies the cross-product invariant as the most parsimonious latent concept.", "perturbed_statement": "In the 2D Central Potential experiment, the Pareto front shows a pronounced MSE drop around the expression L = x1*y2 + x2*y1, suggesting the symbolic search recovered the dot-product invariant rather than the intended cross-product angular momentum.", "perturbed_explanation": "This is incorrect because the red bar in the 2D Central Potential plot corresponds to the cross-product form L = x1*y2 − x2*y1 (angular momentum), not the sum L = x1*y2 + x2*y1. 
The dot-product sum neither appears in the plot nor matches the ground truth concept.", "claim": "In the 2D Central Potential experiment, the Pareto front exhibits a sharp decrease in MSE around the complexity score of the true angular momentum expression L = x1*y2 − x2*y1, indicating the symbolic search correctly identifies the cross-product invariant as the most parsimonious latent concept.", "label": true }, { "paperid": "2410.20063v2", "paper_path": "./SciVer/papers/2410.20063v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5" ], "image_path": "./SciVer/images/2410.20063v2-Table3-1.png", "request_id": 211, "origin_statement": "The dominant systematic uncertainty arises from requirements on |cosθ_miss| and α, contributing 2.9%, due to the sensitivity of missing-neutrino angular distributions to detector acceptance and background modeling.", "perturbed_statement": "The dominant systematic uncertainty stems from the fixed branching fraction of D+→μ+νμ at 2.5%, due to its direct impact on the signal normalization.", "perturbed_explanation": "This is incorrect because Table 3 shows the largest systematic uncertainty (2.9%) comes from the |cosθ_miss| and α requirements, not the D+→μ+νμ branching fraction. Furthermore, the uncertainty on B(D+→μ+νμ) is 2.1%, not 2.5%.", "claim": "The dominant systematic uncertainty arises from requirements on |cosθ_miss| and α, contributing 2.9%, due to the sensitivity of missing-neutrino angular distributions to detector acceptance and background modeling.", "label": true }, { "paperid": "2411.15799v1", "paper_path": "./SciVer/papers/2411.15799v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "4.5.5" ], "image_path": "./SciVer/images/2411.15799v1_figure_8.png", "request_id": 214, "origin_statement": "With equal λ_general and λ_fine (0.5 each), the general severity loss curve converges by about the 200th epoch, while the fine-grained loss takes roughly 400 epochs to reach the same ~0.06 level, reflecting higher complexity and more decision boundaries in fine-grained estimation.", "perturbed_statement": "With λ_general=λ_fine=0.5, the fine-grained severity loss converges by about the 200th epoch, whereas the general severity loss requires roughly 400 epochs to reach ~0.06, implying that general estimation is more complex than fine-grained estimation.", "perturbed_explanation": "The perturbed statement incorrectly swaps the convergence epochs and task difficulties: the original figure shows the general severity loss (blue) converging around 200 epochs and the fine-grained loss (green) converging around 400 epochs.
It also mischaracterizes which task is more complex.", "claim": "With equal λ_general and λ_fine (0.5 each), the general severity loss curve converges by about the 200th epoch, while the fine-grained loss takes roughly 400 epochs to reach the same ~0.06 level, reflecting higher complexity and more decision boundaries in fine-grained estimation.", "label": true }, { "paperid": "2409.04043v1", "paper_path": "./SciVer/papers/2409.04043v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.04043v1_figure_4.png", "request_id": 216, "origin_statement": "The static-fixed-modeling-civility and static-fixed-support interventions yield higher median sentiment scores across ED Forum communities, suggesting that social support mechanisms more effectively stabilize user affect compared to rule-based or insight-reset strategies in high-stigma environments.", "perturbed_statement": "Reset-insight interventions yield the highest median sentiment scores across ED Forum communities, suggesting that reflective prompts outperform social support and civility mechanisms in reducing affective variability in high-stigma environments.", "perturbed_explanation": "This is incorrect because Figure 4 and the context explicitly report that reset-insight interventions are among the least effective, showing lower median sentiment scores than modeling-civility and support interventions in ED Forum communities.", "claim": "The static-fixed-modeling-civility and static-fixed-support interventions yield higher median sentiment scores across ED Forum communities, suggesting that social support mechanisms more effectively stabilize user affect compared to rule-based or insight-reset strategies in high-stigma environments.", "label": true }, { "paperid": "2409.15155v1", "paper_path": "./SciVer/papers/2409.15155v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.1.3", "4" ], "image_path": "./SciVer/images/2409.15155v1-Table2-1.png", "request_id": 217, "origin_statement": "Integrating LSSIM with pixel-wise L1 loss enhances both structural fidelity and intensity accuracy, leading MAR-DTN trained on the balanced D_All dataset to achieve its peak performance (30.02 dB PSNR, 0.73 SSIM) by jointly optimizing perceptual and reconstruction losses.", "perturbed_statement": "For the pix2pix network, employing only the L1 loss yielded the optimal performance, with a PSNR of 28.7 dB and an SSIM of 0.78 on the full dataset (D_All), surpassing all other loss function combinations.", "perturbed_explanation": "According to Table 2, pix2pix using only L1 loss actually achieves a PSNR of 26.36 dB on D_Art and a mean PSNR of 28.7 dB on D_All, but its SSIM is only 0.69, not 0.78. 
Moreover, this configuration is not the overall best pix2pix result, as combining L1 with multi-scale SSIM yields higher PSNR and SSIM values.", "claim": "Integrating LSSIM with pixel-wise L1 loss enhances both structural fidelity and intensity accuracy, leading MAR-DTN trained on the balanced D_All dataset to achieve its peak performance (30.02 dB PSNR, 0.73 SSIM) by jointly optimizing perceptual and reconstruction losses.", "label": true }, { "paperid": "2409.15440v1", "paper_path": "./SciVer/papers/2409.15440v1.json", "claim_type": "analytical", "type": "chart", "item": "6(a)", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.15440v1_figure_6(a).png", "request_id": 218, "origin_statement": "A spectral index gradient is observed: α150–330 MHz steepens from approximately –0.5 in the central inner lobes to –1.3 in the outer lobes, reflecting progressive synchrotron ageing via inverse Compton scattering off the CMB over ~10 Myr.", "perturbed_statement": "A spectral index gradient is observed: α150–330 MHz steepens from approximately –0.8 in the central inner lobes to –0.6 in the outer lobes, reflecting progressive synchrotron ageing via adiabatic heating over ~10 Myr.", "perturbed_explanation": "The perturbed statement misreports the spectral index values: the image shows a central peak at α≈–0.5 (not –0.8) and steeper outer lobes at α≈–1.3 (not –0.6). It also incorrectly attributes ageing to adiabatic heating rather than the dominant inverse Compton and synchrotron losses.", "claim": "A spectral index gradient is observed: α150–330 MHz steepens from approximately –0.5 in the central inner lobes to –1.3 in the outer lobes, reflecting progressive synchrotron ageing via inverse Compton scattering off the CMB over ~10 Myr.", "label": true }, { "paperid": "2411.16393v1", "paper_path": "./SciVer/papers/2411.16393v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.16393v1-Table2-1.png", "request_id": 219, "origin_statement": "In these single-edge notched tests under plane-stress, the negligible variation in dissipated energy for ℓ from 0.375 to 0.625 mm demonstrates that the phase-field length scale primarily affects the diffuse crack width—proportional to ℓ—without altering global energy release or fracture path.", "perturbed_statement": "In these single-edge notched tests under plane-stress, the dissipated energy rises by about 10% when ℓ increases from 0.375 to 0.75 mm, indicating that the phase-field length scale significantly influences the global energy release, broadening the crack path and raising fracture toughness.", "perturbed_explanation": "This statement is incorrect because the simulations only vary ℓ up to 0.625 mm, not 0.75 mm, and the context notes that results for regularization lengths smaller than 0.625 mm are almost indistinguishable. 
There is no 10% rise in dissipated energy over the tested range.", "claim": "In these single-edge notched tests under plane-stress, the negligible variation in dissipated energy for ℓ from 0.375 to 0.625 mm demonstrates that the phase-field length scale primarily affects the diffuse crack width—proportional to ℓ—without altering global energy release or fracture path.", "label": true }, { "paperid": "2411.15871v1", "paper_path": "./SciVer/papers/2411.15871v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.15871v1-Table2-1.png", "request_id": 221, "origin_statement": "By aligning backward GEMM and FlashAttention operations with forward AllGather and ReduceScatter communications across separate strands, DHelix hides network latencies behind GPU-intensive compute, unlocking up to 20% per-layer throughput improvement in large-scale transformer training.", "perturbed_statement": "By aligning backward GEMM and FlashAttention operations with forward AllGather and Permute communications across separate strands, DHelix hides network latencies behind GPU-intensive compute, unlocking up to 20% per-layer throughput improvement in large-scale transformer training.", "perturbed_explanation": "The perturbed statement incorrectly labels Permute as a communication operator. According to Table 2, Permute is classified under computation, not communication, so it cannot overlap network latency hiding as claimed.", "claim": "By aligning backward GEMM and FlashAttention operations with forward AllGather and ReduceScatter communications across separate strands, DHelix hides network latencies behind GPU-intensive compute, unlocking up to 20% per-layer throughput improvement in large-scale transformer training.", "label": true }, { "paperid": "2409.09641v2", "paper_path": "./SciVer/papers/2409.09641v2.json", "claim_type": "analytical", "type": "chart", "item": "9(a)", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.09641v2_figure_9(a).png", "request_id": 222, "origin_statement": "Over 14 days, EMMeans of overall satisfaction rose from about 3.2 to 3.6 on the 5-point scale, indicating that consistent AAC prompts likely reduced conversational cognitive load, enabling parents to scaffold more responsive exchanges and enhancing perceived communication quality.", "perturbed_statement": "Over 14 days, EMMeans of overall satisfaction rose from about 2.8 to 4.0 on the 7-point scale, suggesting that structured AAC interventions progressively heightened conversational anxiety, which paradoxically motivated parents to engage more intensely.", "perturbed_explanation": "The statement is incorrect for two reasons: the original study used a 5-point Likert scale with EMMeans rising roughly from 3.2 to 3.6, not a 7-point scale rising from 2.8 to 4.0. 
Moreover, AAC prompts reduced conversational cognitive load rather than heightening anxiety.", "claim": "Over 14 days, EMMeans of overall satisfaction rose from about 3.2 to 3.6 on the 5-point scale, indicating that consistent AAC prompts likely reduced conversational cognitive load, enabling parents to scaffold more responsive exchanges and enhancing perceived communication quality.", "label": true }, { "paperid": "2410.22517v1", "paper_path": "./SciVer/papers/2410.22517v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.3" ], "image_path": "./SciVer/images/2410.22517v1-Table3-1.png", "request_id": 223, "origin_statement": "Atlas’s scaling interventions scarcely affected GPT-J’s fluency—perplexity rose by under 0.1 across bias categories—while causing outputs to flip in over 50% of prompts, illustrating robust bias mitigation without notable degradation in response quality.", "perturbed_statement": "Atlas’s scaling interventions increased GPT-J’s fluency—perplexity rose by over 1.0 across categories—from 13.07 to 14.13—while causing outputs to flip in only 30% of prompts, indicating compromised fluency and limited bias mitigation.", "perturbed_explanation": "This contradicts Table 3, which shows perplexity for Race/Ethnicity rising only from 13.07 to 13.13 (a 0.06 increase, not over 1.0) and a 38.20% flip rate (not 30%), making the stated figures inaccurate.", "claim": "Atlas’s scaling interventions scarcely affected GPT-J’s fluency—perplexity rose by under 0.1 across bias categories—while causing outputs to flip in over 50% of prompts, illustrating robust bias mitigation without notable degradation in response quality.", "label": true }, { "paperid": "2410.02936v1", "paper_path": "./SciVer/papers/2410.02936v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2.2.1" ], "image_path": "./SciVer/images/2410.02936v1-Table1-1.png", "request_id": 227, "origin_statement": "Leveraging KiDS+VIKING 9-band GAaP photometry, the typical photo-z error per lens in Table 1 [(z_BPZ(MAX)−z_BPZ(MIN))/2≈0.07] combined with 0.2″ pixel scale of OmegaCam allows GGSL-UNet to distinguish arcs separated by ≳1″, reducing source-lens confusion in training data.", "perturbed_statement": "Leveraging KiDS+VIKING 8-band GAaP photometry, the typical photo-z error per lens in Table 1 [(z_BPZ(MAX)−z_BPZ(MIN))/2≈0.07] combined with 0.5″ pixel scale of OmegaCam allows GGSL-UNet to distinguish arcs separated by ≳1″, reducing source-lens confusion in training data.", "perturbed_explanation": "This statement is incorrect because KiDS+VIKING photometry used for BPZ estimates comprises 9 bands, not 8, and the OmegaCam final coadded images have a 0.2″ pixel scale, not 0.5″, as specified in the KiDS survey description.", "claim": "Leveraging KiDS+VIKING 9-band GAaP photometry, the typical photo-z error per lens in Table 1 [(z_BPZ(MAX)−z_BPZ(MIN))/2≈0.07] combined with 0.2″ pixel scale of OmegaCam allows GGSL-UNet to distinguish arcs separated by ≳1″, reducing source-lens confusion in training data.", "label": true }, { "paperid": "2411.07504v1", "paper_path": "./SciVer/papers/2411.07504v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.4.1" ], "image_path": "./SciVer/images/2411.07504v1_figure_4.png", "request_id": 228, "origin_statement": "By decoupling model training from embedding size search, AdaS&S reduces variance in gradient estimates across runs, yielding over 80% consistency in chosen sizes (e.g., Zip-Code at 90%), implying that 
this architectural de-coupling enhances stability by mitigating inter-run noise in embedding parameter selection.", "perturbed_statement": "By decoupling model training from embedding size search, AdaS&S reduces variance in gradient estimates across runs, yielding over 95% consistency in chosen sizes (e.g., Zip-Code at 90%), implying that this architectural de-coupling enhances stability by mitigating inter-run noise in embedding parameter selection.", "perturbed_explanation": "The claim of over 95% consistency is incorrect: Figure 4 shows that the most stable feature (Zip-Code) peaks at 90% consistency, and no feature achieves 95% consistency across the 100 searches, contradicting the perturbed threshold.", "claim": "By decoupling model training from embedding size search, AdaS&S reduces variance in gradient estimates across runs, yielding over 80% consistency in chosen sizes (e.g., Zip-Code at 90%), implying that this architectural de-coupling enhances stability by mitigating inter-run noise in embedding parameter selection.", "label": true }, { "paperid": "2410.16092v1", "paper_path": "./SciVer/papers/2410.16092v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.2.1", "3.3" ], "image_path": "./SciVer/images/2410.16092v1-Table3-1.png", "request_id": 229, "origin_statement": "By isolating external side-effects, ChangeGuard’s symbolic path analysis achieves 77% precision and 69% recall on 224 manually annotated code changes, outperforming regression testing’s 83% precision but only 8% recall, highlighting the efficacy of execution-path analysis over test-suite coverage in catching semantic alterations.", "perturbed_statement": "By isolating external side-effects, ChangeGuard’s symbolic path analysis achieves 77% precision and 96% recall on 224 manually annotated code changes, outperforming regression testing’s 83% precision but only 8% recall, highlighting the efficacy of execution-path analysis over test-suite coverage in catching semantic alterations.", "perturbed_explanation": "The recall figure for ChangeGuard is incorrect: according to Table 3, ChangeGuard correctly identifies 91 of 131 semantics-changing changes, yielding approximately 69% recall rather than the stated 96%.", "claim": "By isolating external side-effects, ChangeGuard’s symbolic path analysis achieves 77% precision and 69% recall on 224 manually annotated code changes, outperforming regression testing’s 83% precision but only 8% recall, highlighting the efficacy of execution-path analysis over test-suite coverage in catching semantic alterations.", "label": true }, { "paperid": "2411.12845v1", "paper_path": "./SciVer/papers/2411.12845v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.12845v1_figure_3.png", "request_id": 230, "origin_statement": "The inclusion of breaks in June 1991, April 1998, August 2008, October 2021, April 2022, and April 2023 allows CPI-Common-SC to recalibrate the latent inflation factor, smoothing exogenous volatility particularly evident in the 2021–2022 surge. By partitioning regimes, it reduces high-inflation bias in historical trend estimates.", "perturbed_statement": "The inclusion of breaks in July 1991, May 1998, September 2008, October 2021, April 2022, and April 2023 allows CPI-Common-SC to recalibrate the latent inflation factor, smoothing exogenous volatility particularly evident in the 2021–2022 surge. 
By partitioning regimes, it reduces high-inflation bias in historical trend estimates.", "perturbed_explanation": "The perturbed statement misstates three estimated break dates. According to Figure 3 and the text, the first break occurred in June 1991 (not July 1991), the second in April 1998 (not May 1998), and the third in August 2008 (not September 2008).", "claim": "The inclusion of breaks in June 1991, April 1998, August 2008, October 2021, April 2022, and April 2023 allows CPI-Common-SC to recalibrate the latent inflation factor, smoothing exogenous volatility particularly evident in the 2021–2022 surge. By partitioning regimes, it reduces high-inflation bias in historical trend estimates.", "label": true }, { "paperid": "2410.04784v1", "paper_path": "./SciVer/papers/2410.04784v1.json", "claim_type": "analytical", "type": "table", "item": "7", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.04784v1-Table7-1.png", "request_id": 231, "origin_statement": "By minimizing contextual noise, scientific report and newspaper styles facilitate earlier plateau in validation accuracy, suggesting that formal linguistic regularity enhances the model's ability to internalize factual relations.", "perturbed_statement": "By minimizing contextual noise, poor spelling and colloquial styles facilitate earlier plateau in validation accuracy, suggesting that informal linguistic irregularity enhances the model's ability to internalize factual relations.", "perturbed_explanation": "This is incorrect because the context reports that the model showed significantly lower preference scores and slower learning for poor spelling texts, not faster convergence, contradicting the claim about informal styles accelerating learning.", "claim": "By minimizing contextual noise, scientific report and newspaper styles facilitate earlier plateau in validation accuracy, suggesting that formal linguistic regularity enhances the model's ability to internalize factual relations.", "label": true }, { "paperid": "2409.07135v1", "paper_path": "./SciVer/papers/2409.07135v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.5" ], "image_path": "./SciVer/images/2409.07135v1_figure_5.png", "request_id": 232, "origin_statement": "Figure 5 reveals that increasing feature dimensionality via AEA or PCA generally reduces reactivity, as higher-dimensional spaces dilute anomaly signal strength, reducing model sensitivity; nuSVM uniquely increases reactivity due to its margin-based decision boundary amplifying subtle deviations.", "perturbed_statement": "Figure 5 reveals that increasing feature dimensionality via AEA or PCA generally reduces reactivity, as higher-dimensional spaces dilute anomaly signal strength, reducing model sensitivity; LOF uniquely increases reactivity due to its local density estimation amplifying subtle deviations.", "perturbed_explanation": "The perturbation incorrectly assigns the inverse reactivity trend to LOF. 
In both Figure 5 and the accompanying text, nuSVM is identified as the only model whose reactivity increases with feature dimensionality; LOF’s reactivity actually decreases when using higher-dimensional transformations like AEA or PCA.", "claim": "Figure 5 reveals that increasing feature dimensionality via AEA or PCA generally reduces reactivity, as higher-dimensional spaces dilute anomaly signal strength, reducing model sensitivity; nuSVM uniquely increases reactivity due to its margin-based decision boundary amplifying subtle deviations.", "label": true }, { "paperid": "2410.01485v1", "paper_path": "./SciVer/papers/2410.01485v1.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.01485v1-Table6-1.png", "request_id": 235, "origin_statement": "Strategically embedding 12 full attention layers (one-third of the network) in the middle retains 93% of peak long-context accuracy (0.27 vs. 0.29) while reducing full-layer proportion by two-thirds, balancing computational efficiency and performance.", "perturbed_statement": "Using just six full attention layers (one-fifth of the network) in the middle retains 96% of peak long-context accuracy (0.28 vs. 0.29) while cutting full-layer proportion by 81%, maximizing efficiency with minimal accuracy drop.", "perturbed_explanation": "The perturbed statement misreports the average long-context accuracy for the one-fifth configuration. Table 6 shows a 1/5 full-layer average of 0.26 (not 0.28), yielding roughly 90% retention of 0.29 peak, not 96% as claimed.", "claim": "Strategically embedding 12 full attention layers (one-third of the network) in the middle retains 93% of peak long-context accuracy (0.27 vs. 0.29) while reducing full-layer proportion by two-thirds, balancing computational efficiency and performance.", "label": true }, { "paperid": "2410.01964v1", "paper_path": "./SciVer/papers/2410.01964v1.json", "claim_type": "analytical", "type": "chart", "item": "12", "section": [ "4.2.2" ], "image_path": "./SciVer/images/2410.01964v1_figure_12.png", "request_id": 236, "origin_statement": "The measured Ta EDS profile across the Ta/W interface follows a logistic distribution matching the 100 nm beam simulation, implying that electron‐beam convolution broadens the apparent diffusion zone by an order of magnitude beyond the nominal 10 nm probe diameter.", "perturbed_statement": "The measured Ta EDS profile across the Ta/W interface follows a logistic distribution matching the 50 nm beam simulation, implying that electron‐beam convolution broadens the apparent diffusion zone by a fivefold increase over the nominal 10 nm probe diameter.", "perturbed_explanation": "This is incorrect because Figure 12 shows the measured Ta signal most closely aligns with the 100 nm simulation curve, not the 50 nm one. 
The actual beam diameter is ten times the nominal 10 nm, whereas the perturbed claim misidentifies a fivefold increase and the wrong simulation curve.", "claim": "The measured Ta EDS profile across the Ta/W interface follows a logistic distribution matching the 100 nm beam simulation, implying that electron‐beam convolution broadens the apparent diffusion zone by an order of magnitude beyond the nominal 10 nm probe diameter.", "label": true }, { "paperid": "2409.05769v1", "paper_path": "./SciVer/papers/2409.05769v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2409.05769v1-Table3-1.png", "request_id": 237, "origin_statement": "During the upward-flow phase, convective collapse rebound generates 2–3 km/s upflows in the photosphere that steepen into 10–12 km/s quasi-acoustic shocks in the low chromosphere, illustrating that vertical magnetic fields channel and amplify wave speeds with height.", "perturbed_statement": "During the upward-flow phase, convective collapse rebound generates 2–3 km/s upflows in the photosphere that steepen into 5–6 km/s quasi-acoustic shocks in the low chromosphere, illustrating that vertical magnetic fields channel and amplify wave speeds with height.", "perturbed_explanation": "Table 3 reports low chromospheric upflows of 10–12 km/s during the upward-flow phase, not 5–6 km/s. The incorrect velocity range contradicts the observed 10–12 km/s values.", "claim": "During the upward-flow phase, convective collapse rebound generates 2–3 km/s upflows in the photosphere that steepen into 10–12 km/s quasi-acoustic shocks in the low chromosphere, illustrating that vertical magnetic fields channel and amplify wave speeds with height.", "label": true }, { "paperid": "2411.07517v1", "paper_path": "./SciVer/papers/2411.07517v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.07517v1_figure_7.png", "request_id": 238, "origin_statement": "Recording at 20,000 fps with PPSI, our method denoises castanet-generated broadband waves while preserving the concave wavefront edges at 0.10 and 0.15 ms, unlike KBNet which rounds peaks, thus ensuring accurate phase-based calculation of acoustic speed from the bottom-left source.", "perturbed_statement": "Recording at 10,000 fps with PPSI, our method denoises castanet-generated broadband waves while preserving the concave wavefront edges at 0.10 and 0.15 ms, unlike KBNet which rounds peaks, thus ensuring accurate phase-based calculation of acoustic speed from the bottom-left source.", "perturbed_explanation": "The statement incorrectly cites a frame rate of 10,000 fps. 
The context specifies that the PPSI system recorded at 20,000 frames per second, so the experiment cannot support a 10,000 fps claim.", "claim": "Recording at 20,000 fps with PPSI, our method denoises castanet-generated broadband waves while preserving the concave wavefront edges at 0.10 and 0.15 ms, unlike KBNet which rounds peaks, thus ensuring accurate phase-based calculation of acoustic speed from the bottom-left source.", "label": true }, { "paperid": "2410.17276v2", "paper_path": "./SciVer/papers/2410.17276v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.3", "5.2" ], "image_path": "./SciVer/images/2410.17276v2-Table2-1.png", "request_id": 239, "origin_statement": "In ML-10M, tail items constitute 90.8% of the catalog but account for only 0.13% MPU, causing traditional hit-rate optimization to reinforce popularity bias toward head items; incorporating a Gini-based balance metric redistributes negative sampling toward tail cohorts, improving long-tail recommendation coverage.", "perturbed_statement": "In ML-10M, head items constitute 90.8% of the catalog but account for only 0.13% MPU, causing traditional hit-rate optimization to underrepresent mid-tier items; incorporating a Gini-based balance metric redistributes sampling toward head cohorts, improving overall recommendation coverage.", "perturbed_explanation": "This is incorrect because Table 2 shows head items in ML-10M are only 2.2% of items with 15.9% MPU, while tail items represent 90.8% of items with 0.13% MPU. The perturbed statement misattributes the 90.8% catalog share and 0.13% MPU to head items and reverses the intended reweighting direction.", "claim": "In ML-10M, tail items constitute 90.8% of the catalog but account for only 0.13% MPU, causing traditional hit-rate optimization to reinforce popularity bias toward head items; incorporating a Gini-based balance metric redistributes negative sampling toward tail cohorts, improving long-tail recommendation coverage.", "label": true }, { "paperid": "2411.16342v1", "paper_path": "./SciVer/papers/2411.16342v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.16342v1_figure_7.png", "request_id": 240, "origin_statement": "In Figure 7's optimal tiling setting, our SJF-Predicted method matches SJF-Truth in mean completion time (~0.97) and yields ~30% lower mean turnaround time than SJF-Nodes, illustrating that precise execution-time predictions can significantly decrease queue waiting and expedite job throughput.", "perturbed_statement": "In Figure 7's optimal tiling setting, SJF-Predicted outperforms SJF-Truth by reducing mean execution time by 20% and achieves an 80% lower mean turnaround time compared to FCFS, demonstrating that latency predictions can exceed theoretical baselines.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 7 shows that SJF-Predicted's mean execution time (~0.97) is actually higher than SJF-Truth's (~0.93), not 20% lower. 
Moreover, mean turnaround time for FCFS is normalized to ~1.0 and for SJF-Predicted ~0.43, a ~57% reduction rather than 80%.", "claim": "In Figure 7's optimal tiling setting, our SJF-Predicted method matches SJF-Truth in mean completion time (~0.97) and yields ~30% lower mean turnaround time than SJF-Nodes, illustrating that precise execution-time predictions can significantly decrease queue waiting and expedite job throughput.", "label": true }, { "paperid": "2409.05048v1", "paper_path": "./SciVer/papers/2409.05048v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "5" ], "image_path": "./SciVer/images/2409.05048v1-Table5-1.png", "request_id": 241, "origin_statement": "Across all tested matrices, the total number of matrix-vector products (MV) scales as approximately twice the iteration count (MV 2·IT), confirming that Algorithm 3’s deflation step consistently incurs two MVs per iteration, independent of the number s of eigenpairs computed.", "perturbed_statement": "Across all tested matrices, the total number of matrix-vector products (MV) scales as approximately s times the iteration count (MV s·IT), confirming that Algorithm 3’s deflation step incurs s MVs per iteration, proportional to the number of eigenpairs computed.", "perturbed_explanation": "The perturbed claim is wrong because Table 5 shows MV/IT≈2 for both s=1 and s=5 (e.g., for s=5 and epb3, MV/IT=15725/7860≈2, not 5), so the algorithm always uses two MVs per iteration, not s.", "claim": "Across all tested matrices, the total number of matrix-vector products (MV) scales as approximately twice the iteration count (MV 2·IT), confirming that Algorithm 3’s deflation step consistently incurs two MVs per iteration, independent of the number s of eigenpairs computed.", "label": true }, { "paperid": "2409.19611v1", "paper_path": "./SciVer/papers/2409.19611v1.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "4.6" ], "image_path": "./SciVer/images/2409.19611v1_figure_4(a).png", "request_id": 242, "origin_statement": "In LLaMA2-7B, AM-LoRA bypassing the query matrix assigns over 60% of attention to LoRA1 and LoRA3 in middle layers, suggesting cross-task feature integration via query vectors; conversely, the value-bypass predominantly weights LoRA4 above 70% in deep layers, highlighting task-specific adaptation emphasis.", "perturbed_statement": "In LLaMA2-7B, AM-LoRA bypassing the query matrix assigns over 90% of attention to LoRA1 and LoRA3 in middle layers, suggesting cross-task feature integration via query vectors; conversely, the value-bypass predominantly weights LoRA4 above 30% in deep layers, highlighting task-specific adaptation emphasis.", "perturbed_explanation": "The perturbation is incorrect because Figure 4 shows that the query-bypass attention on LoRA1 and LoRA3 peaks around 60–80%, never reaching 90%. 
Similarly, the value-bypass for LoRA4 in deep layers exceeds 70%, not just 30%, contradicting the actual weight proportions.", "claim": "In LLaMA2-7B, AM-LoRA bypassing the query matrix assigns over 60% of attention to LoRA1 and LoRA3 in middle layers, suggesting cross-task feature integration via query vectors; conversely, the value-bypass predominantly weights LoRA4 above 70% in deep layers, highlighting task-specific adaptation emphasis.", "label": true }, { "paperid": "2410.17236v1", "paper_path": "./SciVer/papers/2410.17236v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2410.17236v1-Table1-1.png", "request_id": 243, "origin_statement": "By leveraging 1,000 diverse Amazon-derived user profiles and corresponding behavioral histories, PersonalWAB uniquely enables both single-turn and multi-turn personalized Web agent evaluation via Web function APIs, thus bridging the gap in existing benchmarks that lack personalization and multi-turn interactive capabilities.", "perturbed_statement": "By leveraging 500 diverse Amazon-derived user profiles and corresponding behavioral histories, PersonalWAB uniquely enables only single-turn personalized Web agent evaluation via mobile Web UI APIs, thus bridging the gap in existing benchmarks that lack personalization.", "perturbed_explanation": "The perturbed statement is incorrect because PersonalWAB uses 1,000 users (not 500), supports both single-turn and multi-turn evaluation (not only single-turn), and interacts through Web function APIs (not a mobile Web UI). These details contradict the cited context and Table 1.", "claim": "By leveraging 1,000 diverse Amazon-derived user profiles and corresponding behavioral histories, PersonalWAB uniquely enables both single-turn and multi-turn personalized Web agent evaluation via Web function APIs, thus bridging the gap in existing benchmarks that lack personalization and multi-turn interactive capabilities.", "label": true }, { "paperid": "2409.10046v1", "paper_path": "./SciVer/papers/2409.10046v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "2.2", "4.3" ], "image_path": "./SciVer/images/2409.10046v1_figure_3.png", "request_id": 244, "origin_statement": "XGBoost achieves 91.6% accuracy on the full feature set, outperforming logistic regression by 9.5%, reflecting its ability to capture complex non-linear interactions among meteorological variables, historical burned areas, and spatiotemporal patterns for robust global lightning-ignition risk prediction.", "perturbed_statement": "XGBoost achieves 95.6% accuracy on the full feature set, outperforming logistic regression by 12.3%, reflecting its ability to capture complex non-linear interactions among meteorological variables, historical burned areas, and spatiotemporal patterns for robust global lightning-ignition risk prediction.", "perturbed_explanation": "The perturbed statement is incorrect because the actual XGBoost accuracy reported is 91.6%, not 95.6%, and the performance gain over logistic regression is 9.5%, not 12.3%, as stated in Table 2 and the accompanying description.", "claim": "XGBoost achieves 91.6% accuracy on the full feature set, outperforming logistic regression by 9.5%, reflecting its ability to capture complex non-linear interactions among meteorological variables, historical burned areas, and spatiotemporal patterns for robust global lightning-ignition risk prediction.", "label": true }, { "paperid": "2409.14085v1", "paper_path": 
"./SciVer/papers/2409.14085v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2409.14085v1-Table2-1.png", "request_id": 245, "origin_statement": "Despite maintaining roughly 73 M parameters, AFACodec’s D3 configuration increases sampling rate to 48 kHz at 7.5 kbps, enabling spectral reconstruction up to 24 kHz, which should substantially improve high-frequency audio fidelity compared to D1’s 16 kHz/2 kbps setting that clips frequencies above 8 kHz.", "perturbed_statement": "Despite maintaining roughly 73 M parameters, AFACodec’s D3 configuration increases sampling rate to 44 kHz at 7.5 kbps, enabling spectral reconstruction up to 22 kHz, which should substantially improve high-frequency audio fidelity compared to D1’s 16 kHz/2 kbps setting that clips frequencies above 8 kHz.", "perturbed_explanation": "The sampling rate for D3 is incorrectly stated as 44 kHz; Table 2 shows D3 actually uses 48 kHz. Consequently, the Nyquist limit is 24 kHz, not 22 kHz, so both the sampling rate and the maximum reconstructible frequency are wrong.", "claim": "Despite maintaining roughly 73 M parameters, AFACodec’s D3 configuration increases sampling rate to 48 kHz at 7.5 kbps, enabling spectral reconstruction up to 24 kHz, which should substantially improve high-frequency audio fidelity compared to D1’s 16 kHz/2 kbps setting that clips frequencies above 8 kHz.", "label": true }, { "paperid": "2409.03074v1", "paper_path": "./SciVer/papers/2409.03074v1.json", "claim_type": "analytical", "type": "chart", "item": "1(b)", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.03074v1_figure_1(b).png", "request_id": 246, "origin_statement": "Above a cutoff Λ_nn of ~500 MeV, the n–n 1S0 phase shifts below 20 MeV display negligible Λ dependence, confirming renormalization improves cutoff independence near threshold.", "perturbed_statement": "For Λ_nn above 200 MeV, the n–n 1S0 phase shifts below 30 MeV become nearly identical, indicating absolute cutoff independence and compliance with renormalization group invariance.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 1 shows that only cutoffs of ~500 MeV or higher yield nearly converged phase shifts; at 200 MeV the phase shifts still deviate significantly. Moreover, convergence is observed below 20 MeV, not 30 MeV, and a slight cutoff dependence remains.", "claim": "Above a cutoff Λ_nn of ~500 MeV, the n–n 1S0 phase shifts below 20 MeV display negligible Λ dependence, confirming renormalization improves cutoff independence near threshold.", "label": true }, { "paperid": "2411.05266v1", "paper_path": "./SciVer/papers/2411.05266v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3" ], "image_path": "./SciVer/images/2411.05266v1-Table3-1.png", "request_id": 247, "origin_statement": "Minor 3 bp variations in MT-NADH fragment lengths (893–896 bp) among primates likely result from lineage-specific indels under purifying selection, preserving complex I function while allowing subtle metabolic adaptations across primate clades.", "perturbed_statement": "Major 10 bp variations in MT-NADH fragment lengths (886–896 bp) among primates likely result from lineage-specific indels under purifying selection, preserving complex I function while allowing subtle metabolic adaptations across primate clades.", "perturbed_explanation": "The perturbed claim misstates the observed fragment length range and variation magnitude. 
Table 3 indicates MT-NADH lengths span only 893–896 bp (a 3 bp range), not 886–896 bp or 10 bp variation as claimed.", "claim": "Minor 3 bp variations in MT-NADH fragment lengths (893–896 bp) among primates likely result from lineage-specific indels under purifying selection, preserving complex I function while allowing subtle metabolic adaptations across primate clades.", "label": true }, { "paperid": "2409.00179v1", "paper_path": "./SciVer/papers/2409.00179v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.00179v1_figure_4.png", "request_id": 248, "origin_statement": "At ~100 Hz, the Livingston detector’s noise curve dips to ≈3×10^(-23) 1/√Hz, roughly 10% below Hanford’s, reflecting more effective seismic isolation, thereby raising the expected SNR of equal-mass 30 M⊙ + 30 M⊙ black hole mergers by about 5%.", "perturbed_statement": "The Livingston noise curve reaches its minimum spectral density of ≈3×10^(-23) 1/√Hz at ~300 Hz, about 20% lower than Hanford’s, indicating superior mid-frequency thermal noise suppression, thus boosting the SNR for 30 M⊙ + 30 M⊙ binary mergers by ~15%.", "perturbed_explanation": "This statement is incorrect because Fig. 4 shows the noise minimum near 100 Hz, not 300 Hz, and the actual Livingston–Hanford difference at that point is about 10%, not 20%. Additionally, the context attributes the improvement to seismic isolation, not thermal noise suppression.", "claim": "At ~100 Hz, the Livingston detector’s noise curve dips to ≈3×10^(-23) 1/√Hz, roughly 10% below Hanford’s, reflecting more effective seismic isolation, thereby raising the expected SNR of equal-mass 30 M⊙ + 30 M⊙ black hole mergers by about 5%.", "label": true }, { "paperid": "2410.12057v2", "paper_path": "./SciVer/papers/2410.12057v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "7" ], "image_path": "./SciVer/images/2410.12057v2-Table3-1.png", "request_id": 249, "origin_statement": "Table 3 shows that the mixture model trained on shared human–LM embeddings attains the highest clustering agreement with human-only responses (CMI=0.600), outperforming its agreement with LM-only responses (CMI=0.596), implying that shared semantic patterns better reflect human distributional structures.", "perturbed_statement": "Table 3 shows that the mixture model trained solely on LM-only embeddings attains the highest clustering agreement with human-only responses (CMI=0.600), slightly above the shared-response model (CMI=0.596), suggesting that language models intrinsically capture human semantic structures without shared data bias.", "perturbed_explanation": "This statement is incorrect because Table 3 reports a clustering agreement of only 0.505 for the LM-only model with human-only responses, not 0.600. 
In reality, the highest agreement with human-only responses (CMI=0.600) is achieved by the shared-response model, not the LM-only model.", "claim": "Table 3 shows that the mixture model trained on shared human–LM embeddings attains the highest clustering agreement with human-only responses (CMI=0.600), outperforming its agreement with LM-only responses (CMI=0.596), implying that shared semantic patterns better reflect human distributional structures.", "label": true }, { "paperid": "2409.07088v1", "paper_path": "./SciVer/papers/2409.07088v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.07088v1_figure_6.png", "request_id": 250, "origin_statement": "Despite GPT-4o’s stricter evaluations yielding higher unused triplet ratios overall, WikiOFGraph maintains a notably low average ratio, implying that its semi-automated generation process aligns structured knowledge with textual output more effectively than purely automated datasets like GenWiki and TekGen.", "perturbed_statement": "Despite GPT-4o’s more lenient evaluations yielding lower unused triplet ratios, WikiOFGraph maintains a notably low average ratio, implying that its semi-automated generation process aligns structured knowledge with textual output more effectively than purely automated datasets like GenWiki and TekGen.", "perturbed_explanation": "This is incorrect: Figure 6 shows that GPT-4o (hatched bars) reports higher unused-triplet ratios than human evaluators (plain bars), not lower as stated.", "claim": "Despite GPT-4o’s stricter evaluations yielding higher unused triplet ratios overall, WikiOFGraph maintains a notably low average ratio, implying that its semi-automated generation process aligns structured knowledge with textual output more effectively than purely automated datasets like GenWiki and TekGen.", "label": true }, { "paperid": "2410.14875v1", "paper_path": "./SciVer/papers/2410.14875v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2.1" ], "image_path": "./SciVer/images/2410.14875v1-Table1-1.png", "request_id": 251, "origin_statement": "Table 1 documents six LLM families encompassing 27 models; notably, the OPT lineup spans from 125 million to 30 billion parameters, facilitating a controlled analysis of how parameter scale correlates with text coherence and detection difficulty, since larger models generally produce more semantically consistent outputs.", "perturbed_statement": "Table 1 documents seven LLM families encompassing 27 models; notably, the OPT lineup spans from 1.3 billion to 65 billion parameters, facilitating a controlled analysis of how parameter scale correlates with text coherence and detection difficulty, since larger models generally produce more semantically consistent outputs.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 actually lists six LLM families (not seven), and the OPT models range from 125 million to 30 billion parameters (not from 1.3 billion to 65 billion).", "claim": "Table 1 documents six LLM families encompassing 27 models; notably, the OPT lineup spans from 125 million to 30 billion parameters, facilitating a controlled analysis of how parameter scale correlates with text coherence and detection difficulty, since larger models generally produce more semantically consistent outputs.", "label": true }, { "paperid": "2409.10951v1", "paper_path": "./SciVer/papers/2409.10951v1.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", 
"section": [ "5.4" ], "image_path": "./SciVer/images/2409.10951v1_figure_3(a).png", "request_id": 252, "origin_statement": "By embedding a re-balancing autoencoder and a dedicated fair contrastive loss, FairAD achieves approximately 70% recall@1200 while constraining the recall disparity between protected groups to under 2%, illustrating that coupled class-balancing in latent representations and fairness regularization sustains both high detection accuracy and group parity.", "perturbed_statement": "By embedding a re-balancing autoencoder and a dedicated fair contrastive loss, FairAD achieves approximately 80% recall@1200 while constraining the recall disparity between protected groups to under 2%, illustrating that coupled class-balancing in latent representations and fairness regularization sustains both high detection accuracy and group parity.", "perturbed_explanation": "The perturbed statement incorrectly claims an 80% recall@1200 for FairAD. According to Figure 3(a), FairAD’s recall@1200 is around 70%, not 80%. This mismatch in the recall value makes the statement factually incorrect.", "claim": "By embedding a re-balancing autoencoder and a dedicated fair contrastive loss, FairAD achieves approximately 70% recall@1200 while constraining the recall disparity between protected groups to under 2%, illustrating that coupled class-balancing in latent representations and fairness regularization sustains both high detection accuracy and group parity.", "label": true }, { "paperid": "2411.13677v1", "paper_path": "./SciVer/papers/2411.13677v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "8" ], "image_path": "./SciVer/images/2411.13677v1-Table5-1.png", "request_id": 253, "origin_statement": "With the Manus Meta gloves accounting for about 94% of the $6395 accessory budget, cost optimizations targeting glove sensor technologies can yield greater savings than incremental reductions in actuator or cabling expenses.", "perturbed_statement": "With the Manus Meta gloves accounting for about 80% of the $6395 accessory budget, cost optimizations targeting glove sensor technologies can yield greater savings than incremental reductions in actuator or cabling expenses.", "perturbed_explanation": "The perturbed statement incorrectly states that the gloves account for 80% of the budget. 
Table 5 shows the gloves cost $6000 out of a total $6395, which is approximately 94%, not 80%.", "claim": "With the Manus Meta gloves accounting for about 94% of the $6395 accessory budget, cost optimizations targeting glove sensor technologies can yield greater savings than incremental reductions in actuator or cabling expenses.", "label": true }, { "paperid": "2411.00254v1", "paper_path": "./SciVer/papers/2411.00254v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.00254v1_figure_6.png", "request_id": 254, "origin_statement": "The sharp decline of both training and validation loss to under 10 by epoch 8, alongside stable validation accuracy above 95%, indicates that the proposed XAI-guided augmentation reduces overfitting through enriched feature diversity, thereby accelerating ResNet50 convergence and improving generalization in distinguishing benign from malignant ultrasound images.", "perturbed_statement": "The sharp decline of both training and validation loss to under 5 by epoch 8, alongside stable validation accuracy above 98%, indicates that the LRP heatmap regularization alone mitigates overfitting by enriching feature representations, thus driving ResNet50’s rapid convergence in benign vs malignant ultrasound image classification.", "perturbed_explanation": "This statement is incorrect because the losses do not fall under 5 by epoch 8 (they drop to around 10), and the validation accuracy, while high, does not consistently exceed 98%. Moreover, overfitting reduction is primarily due to the XAI-based augmentation and dropout, not solely LRP heatmap regularization.", "claim": "The sharp decline of both training and validation loss to under 10 by epoch 8, alongside stable validation accuracy above 95%, indicates that the proposed XAI-guided augmentation reduces overfitting through enriched feature diversity, thereby accelerating ResNet50 convergence and improving generalization in distinguishing benign from malignant ultrasound images.", "label": true }, { "paperid": "2409.14381v1", "paper_path": "./SciVer/papers/2409.14381v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "6.3" ], "image_path": "./SciVer/images/2409.14381v1-Table4-1.png", "request_id": 255, "origin_statement": "The consistent minimal (<3%) performance losses after ablating non-cornerstone layers in Llama3 and Mixtral models indicate redundancy in learned representations, supporting the overparameterization hypothesis: deep networks rely disproportionately on a small subset of cornerstone layers for core functionality, while auxiliary layers reinforce feature diversity without critical necessity.", "perturbed_statement": "The substantial performance losses (>5%) observed upon ablating non-cornerstone layers in Llama3 and Mixtral models demonstrate that these layers are crucial, contradicting any overparameterization hypothesis because each layer uniquely contributes to the model's core representations.", "perturbed_explanation": "Table 4 shows that performance drops for all non-cornerstone layer ablations remain below 3%, not above 5% as claimed. 
Therefore, stating ‘substantial performance losses (>5%)’ and describing these layers as ‘crucial’ contradicts the reported minimal degradation (<3%).", "claim": "The consistent minimal (<3%) performance losses after ablating non-cornerstone layers in Llama3 and Mixtral models indicate redundancy in learned representations, supporting the overparameterization hypothesis: deep networks rely disproportionately on a small subset of cornerstone layers for core functionality, while auxiliary layers reinforce feature diversity without critical necessity.", "label": true }, { "paperid": "2409.13441v1", "paper_path": "./SciVer/papers/2409.13441v1.json", "claim_type": "analytical", "type": "chart", "item": "3(b)", "section": [ "4", "5" ], "image_path": "./SciVer/images/2409.13441v1_figure_3(b).png", "request_id": 256, "origin_statement": "The distribution shows a skew towards high AGN fractions: over half of modelled LRDs exhibit AGN fractions above 0.8, suggesting that radiative feedback from central SMBHs likely dominates their UV-to-NIR emission, potentially biasing age and mass estimates in delayed SFH models if AGN templates are underrepresented.", "perturbed_statement": "The distribution shows a skew towards very high AGN fractions: about 80% of modelled LRDs exhibit AGN fractions above 0.9, suggesting that mechanical feedback from central SMBHs dominates their UV-to-NIR emission, potentially biasing mass estimates in delayed SFH models when AGN templates are underrepresented.", "perturbed_explanation": "The perturbation is incorrect because only 7 out of 11 LRDs (≈64%) have AGN fractions above 0.8—not 80% above 0.9—and the study attributes the AGN contribution to radiative feedback rather than mechanical feedback.", "claim": "The distribution shows a skew towards high AGN fractions: over half of modelled LRDs exhibit AGN fractions above 0.8, suggesting that radiative feedback from central SMBHs likely dominates their UV-to-NIR emission, potentially biasing age and mass estimates in delayed SFH models if AGN templates are underrepresented.", "label": true }, { "paperid": "2411.01711v1", "paper_path": "./SciVer/papers/2411.01711v1.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.01711v1-Table6-1.png", "request_id": 257, "origin_statement": "The four-strategy quantum extension of the Prisoner’s Dilemma supports eight pure Nash equilibria, with the maximum payoff equal to the temptation parameter T=2.5 achieved at profiles (2,3) and (3,2) when the entanglement parameter γ attains its upper bound of 1, highlighting restored symmetry under maximal entanglement.", "perturbed_statement": "The four-strategy quantum extension of the Prisoner’s Dilemma supports six pure Nash equilibria, with the maximum payoff equal to the temptation parameter T=3 achieved at profiles (2,3) and (3,2) when the entanglement parameter γ attains its upper bound of 1, highlighting restored symmetry under maximal entanglement.", "perturbed_explanation": "This is incorrect because Table 6 and the text specify eight pure Nash equilibria, not six. 
Moreover, the maximal payoff at profiles (2,3) and (3,2) is T=2.5 (payoff 2½), not T=3, as shown by the entries (2½,2½) when γ=1.", "claim": "The four-strategy quantum extension of the Prisoner’s Dilemma supports eight pure Nash equilibria, with the maximum payoff equal to the temptation parameter T=2.5 achieved at profiles (2,3) and (3,2) when the entanglement parameter γ attains its upper bound of 1, highlighting restored symmetry under maximal entanglement.", "label": true }, { "paperid": "2411.06208v2", "paper_path": "./SciVer/papers/2411.06208v2.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "1", "4.2" ], "image_path": "./SciVer/images/2411.06208v2_figure_3.png", "request_id": 258, "origin_statement": "With nearly half (46%) of evaluation instructions imposing content constraints—predominantly Inclusion at 18.7%—Trace emphasizes semantic control, indicating that enhancing LLMs’ fine-grained semantic understanding (e.g., inclusion/exclusion) is critical for obeying complex instructions.", "perturbed_statement": "With over half (56%) of evaluation instructions imposing content constraints—predominantly Inclusion at 28.7%—Trace emphasizes semantic control, indicating that enhancing LLMs’ fine-grained semantic understanding (e.g., inclusion/exclusion) is critical for obeying complex instructions.", "perturbed_explanation": "The perturbation is incorrect because the chart shows content constraints constitute 46.0% (not 56%) of the evaluation set, and Inclusion constraints specifically account for 18.7% (not 28.7%). These mismatched percentages contradict the figure’s reported distribution.", "claim": "With nearly half (46%) of evaluation instructions imposing content constraints—predominantly Inclusion at 18.7%—Trace emphasizes semantic control, indicating that enhancing LLMs’ fine-grained semantic understanding (e.g., inclusion/exclusion) is critical for obeying complex instructions.", "label": true }, { "paperid": "2411.15835v1", "paper_path": "./SciVer/papers/2411.15835v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.15835v1-Table1-1.png", "request_id": 259, "origin_statement": "The Flink Standalone cluster configures 8 TaskManagers with 4 slots each across 96 cores and 503GB memory, allowing each of the 32 parallel tasks to run on a dedicated core with ≈15.7GB RAM, minimizing scheduling overhead and enhancing streaming throughput for the TPC-DS workload.", "perturbed_statement": "The Flink Standalone cluster configures 8 TaskManagers with 8 slots each across 96 cores and 512GB memory, allowing each of the 32 parallel tasks to run on a dedicated core with ≈16GB RAM, minimizing scheduling overhead and enhancing streaming throughput for the TPC-DS workload.", "perturbed_explanation": "The perturbed statement contradicts Table 1: TaskManagers have 4 slots each, not 8, and the system memory is 503GB, not 512GB.", "claim": "The Flink Standalone cluster configures 8 TaskManagers with 4 slots each across 96 cores and 503GB memory, allowing each of the 32 parallel tasks to run on a dedicated core with ≈15.7GB RAM, minimizing scheduling overhead and enhancing streaming throughput for the TPC-DS workload.", "label": true }, { "paperid": "2411.01494v1", "paper_path": "./SciVer/papers/2411.01494v1.json", "claim_type": "analytical", "type": "chart", "item": "10", "section": [ "4.5" ], "image_path": "./SciVer/images/2411.01494v1_figure_10.png", "request_id": 260, "origin_statement": "On G-Ref, 
a negative image pool size of 400 achieves the highest mIoU (61.36), suggesting moderate negative diversity enhances context discrimination without introducing noise from overly heterogeneous samples.", "perturbed_statement": "On G-Ref, a negative image pool size of 800 achieves the highest mIoU (61.36), suggesting moderate negative diversity enhances context discrimination without introducing noise from overly heterogeneous samples.", "perturbed_explanation": "The statement is incorrect because Fig. 10 shows the peak mIoU of 61.36 occurs at pool size K = 400, not K = 800. At K = 800, the mIoU actually drops to around 60.8, so attributing the highest score to 800 contradicts the data.", "claim": "On G-Ref, a negative image pool size of 400 achieves the highest mIoU (61.36), suggesting moderate negative diversity enhances context discrimination without introducing noise from overly heterogeneous samples.", "label": true }, { "paperid": "2410.13000v1", "paper_path": "./SciVer/papers/2410.13000v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.13000v1-Table1-1.png", "request_id": 263, "origin_statement": "In large-dimensional settings (m >> α), the proposed method's prediction cost O(n m^3 α^3) grows cubically with both m and α, so doubling both parameters multiplies runtime by 64, whereas Fourier's O(n m^2) only increases by factor 4 with m and is independent of α.", "perturbed_statement": "In large-dimensional settings (m >> α), the proposed method's prediction cost O(n m^2 α^3) grows quadratically with m and cubically with α, so doubling both multiplies runtime by 16, whereas Fourier's O(n m^2) only increases by factor 4 with m and is independent of α.", "perturbed_explanation": "The perturbed statement incorrectly replaces the m^3 term with m^2 in the proposed method’s prediction complexity. 
Table 1 shows prediction cost is O(n m^3 α^3), not O(n m^2 α^3), so doubling m and α scales runtime by 64× rather than 16×.", "claim": "In large-dimensional settings (m >> α), the proposed method's prediction cost O(n m^3 α^3) grows cubically with both m and α, so doubling both parameters multiplies runtime by 64, whereas Fourier's O(n m^2) only increases by factor 4 with m and is independent of α.", "label": true },
{ "paperid": "2410.01839v1", "paper_path": "./SciVer/papers/2410.01839v1.json", "claim_type": "analytical", "type": "chart", "item": "6(a)", "section": [ "5" ], "image_path": "./SciVer/images/2410.01839v1_figure_6(a).png", "request_id": 266, "origin_statement": "The persistent homology analysis identifies over 2,000 H1 features for Wisconsin lakes, with many mid-lifetime persistent features corresponding to glacial meltwater channels that create annular lake clusters, indicating that Pleistocene glaciation shaped enduring topological loops.", "perturbed_statement": "The persistent homology analysis identifies over 3,000 H1 features for Wisconsin lakes, with many mid-lifetime persistent features corresponding to Holocene meltwater channels that create annular lake clusters, indicating that Holocene glaciation shaped enduring topological loops.", "perturbed_explanation": "This statement is incorrect because the DaC persistence diagram detected 2,346 H1 features, not over 3,000, and the glacial sculpting responsible for Wisconsin’s lake clusters occurred during the late Pleistocene (around 10,000 years ago), not the Holocene.", "claim": "The persistent homology analysis identifies over 2,000 H1 features for Wisconsin lakes, with many mid-lifetime persistent features corresponding to glacial meltwater channels that create annular lake clusters, indicating that Pleistocene glaciation shaped enduring topological loops.", "label": true },
{ "paperid": "2410.17196v2", "paper_path": "./SciVer/papers/2410.17196v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.17196v2-Table4-1.png", "request_id": 267, "origin_statement": "Synthetic CommonEval yields approximately 9.8% higher average performance than real data, driven by lower ambient noise improving ASR accuracy and subsequent LLM instruction following.", "perturbed_statement": "Synthetic CommonEval yields approximately 28% higher average performance than real data, driven by reduced room reverberation improving ASR accuracy and subsequent LLM instruction following.", "perturbed_explanation": "This statement is wrong because Table 4 shows an average improvement of about 9.7% (from 3.11 to 3.40), not 28%, and the context attributes the gap to higher ambient noise in real recordings rather than reduced room reverberation.", "claim": "Synthetic CommonEval yields approximately 9.8% higher average performance than real data, driven by lower ambient noise improving ASR accuracy and subsequent LLM instruction following.", "label": true },
{ "paperid": "2411.12509v1", "paper_path": "./SciVer/papers/2411.12509v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.1", "4.2" ], "image_path": "./SciVer/images/2411.12509v1-Table3-1.png", "request_id": 269, "origin_statement": "During the first COVID-19 spectator ban (matchdays 26–34 in season 2019/20), the absence of crowds reduced home goals by 0.45 goals per match – with only a marginal 0.06 increase in away goals – supporting social facilitation theory that crowd presence amplifies home team performance over away teams.", "perturbed_statement": "During the first COVID-19 spectator ban (matchdays 26–34 in season 2019/20), the absence of crowds reduced home goals by 1.0 goals per match, while away goals decreased by 0.06, suggesting that home team performance was more adversely affected.", "perturbed_explanation": "Table 3 column (1) reports a home goal reduction of 0.452, not 1.0. Column (2) shows away goals increased by 0.063, not decreased by 0.06. These misstate both the magnitude of the home goal effect and the direction of the away goal change.", "claim": "During the first COVID-19 spectator ban (matchdays 26–34 in season 2019/20), the absence of crowds reduced home goals by 0.45 goals per match – with only a marginal 0.06 increase in away goals – supporting social facilitation theory that crowd presence amplifies home team performance over away teams.", "label": true },
{ "paperid": "2411.01030v3", "paper_path": "./SciVer/papers/2411.01030v3.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.01030v3_figure_1.png", "request_id": 270, "origin_statement": "On 16,384‐token sequences with phone books of up to 800 entries, SSMs trained via Birdie reach 100% accuracy retrieving a single number from 1,024 unique phone books and sustain over 40% accuracy retrieving 32 numbers, indicating Birdie’s dynamic mixtures of bidirectional contexts significantly close the SSM-Transformer gap.", "perturbed_statement": "On 16,384‐token sequences with phone books of up to 800 entries, SSMs trained via Birdie reach 100% accuracy retrieving a single number from 2,048 unique phone books and sustain over 60% accuracy retrieving 32 numbers, indicating Birdie’s dynamic mixtures of bidirectional contexts completely eliminate the SSM-Transformer gap.", "perturbed_explanation": "The perturbation is incorrect because the context specifies 1,024 unique phone books for single-number retrieval, not 2,048, and Figure 1A shows Birdie SSM accuracy at 32 retrievals is approximately 45%, not over 60%.", "claim": "On 16,384‐token sequences with phone books of up to 800 entries, SSMs trained via Birdie reach 100% accuracy retrieving a single number from 1,024 unique phone books and sustain over 40% accuracy retrieving 32 numbers, indicating Birdie’s dynamic mixtures of bidirectional contexts significantly close the SSM-Transformer gap.", "label": true },
{ "paperid": "2409.14704v2", "paper_path": "./SciVer/papers/2409.14704v2.json", "claim_type": "analytical", "type": "chart", "item": "3(c)", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.14704v2_figure_3(c).png", "request_id": 272, "origin_statement": "During finetuning SD 1.5 on specific teddy bear images, average CLIP similarity drops from approximately 0.335 at initialization to around 0.276 by 3000 steps, indicating semantic specialization where model-generated images increasingly capture training-set biases at the cost of prompt adherence.", "perturbed_statement": "During finetuning SD 1.5 on specific teddy bear images, average CLIP similarity drops from approximately 0.345 at initialization to around 0.286 by 3000 steps, indicating semantic specialization where model-generated images increasingly capture training-set biases at the cost of prompt adherence.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 3 shows the initial CLIP similarity is about 0.335 (not 0.345) and the final CLIP similarity at 3000 steps is about 0.276 (not 0.286).
These altered values contradict the plotted data.", "claim": "During finetuning SD 1.5 on specific teddy bear images, average CLIP similarity drops from approximately 0.335 at initialization to around 0.276 by 3000 steps, indicating semantic specialization where model-generated images increasingly capture training-set biases at the cost of prompt adherence.", "label": true },
{ "paperid": "2409.01971v1", "paper_path": "./SciVer/papers/2409.01971v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2.3" ], "image_path": "./SciVer/images/2409.01971v1-Table1-1.png", "request_id": 275, "origin_statement": "The Waymo Open Motion dataset’s 1 s observation horizon at 10 Hz yields 10 trajectory points, providing high temporal resolution that captures abrupt pedestrian maneuvers and improves sub-second trajectory forecasting accuracy despite limited historical context.", "perturbed_statement": "The Waymo Open Motion dataset’s 2 s observation horizon at 10 Hz yields 10 trajectory points, providing high temporal resolution that captures abrupt pedestrian maneuvers and improves sub-second trajectory forecasting accuracy despite limited historical context.", "perturbed_explanation": "This statement is incorrect because Table 1 specifies the Waymo Open Motion dataset uses a 1 s observation horizon, not 2 s; moreover, a 2 s horizon at 10 Hz would generate 20 points, not 10, contradicting the reported values.", "claim": "The Waymo Open Motion dataset’s 1 s observation horizon at 10 Hz yields 10 trajectory points, providing high temporal resolution that captures abrupt pedestrian maneuvers and improves sub-second trajectory forecasting accuracy despite limited historical context.", "label": true },
{ "paperid": "2409.04453v2", "paper_path": "./SciVer/papers/2409.04453v2.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.04453v2_figure_3(a).png", "request_id": 278, "origin_statement": "Including simulated ET and two CE GW data tightens the 1σ error on Neff from ≈0.35 in CBS to ≈0.06, an ≈83% improvement, by exploiting GW standard sirens’ direct luminosity-distance calibration to break the Neff–H0 degeneracy inherent in CMB+BAO data.", "perturbed_statement": "Combining CBS with simulated ET and CE GW data reduces the 1σ uncertainty on Neff from ≈0.35 to ≈0.15, a 57% improvement, by breaking the Neff–Ωm degeneracy via mapping of dark matter filaments by GW sirens.", "perturbed_explanation": "The perturbation is wrong because the actual 1σ error on Neff tightens to ≈0.06 (not 0.15), and GW standard sirens break the Neff–H0 degeneracy, not the Neff–Ωm degeneracy; GW sirens calibrate luminosity distances rather than map dark matter filaments.", "claim": "Including simulated ET and two CE GW data tightens the 1σ error on Neff from ≈0.35 in CBS to ≈0.06, an ≈83% improvement, by exploiting GW standard sirens’ direct luminosity-distance calibration to break the Neff–H0 degeneracy inherent in CMB+BAO data.", "label": true },
{ "paperid": "2409.11531v1", "paper_path": "./SciVer/papers/2409.11531v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.2.2", "5.2.3" ], "image_path": "./SciVer/images/2409.11531v1-Table2-1.png", "request_id": 279, "origin_statement": "The ESV condition's emphasis on internal shortcomings, reflected by its lowest positive VADER score (0.011) for failure feelings, suggests a negative appraisal bias that may counteract self-compassion–driven resilience gains (estimate=0.62), underscoring the interplay between sentiment and adaptive coping.", "perturbed_statement": "The ESV condition's emphasis on internal shortcomings, reflected by its lowest positive VADER score (0.11) for failure feelings, suggests a negative appraisal bias that may counteract self-compassion–driven resilience gains (estimate=0.26), underscoring the interplay between sentiment and adaptive coping.", "perturbed_explanation": "The perturbed statement misquotes two key values: the ESV positive VADER score is actually 0.011 (not 0.11), and the self-compassion effect estimate from the LME model is 0.62 (not 0.26), contradicting the published Table 2 and model results.", "claim": "The ESV condition's emphasis on internal shortcomings, reflected by its lowest positive VADER score (0.011) for failure feelings, suggests a negative appraisal bias that may counteract self-compassion–driven resilience gains (estimate=0.62), underscoring the interplay between sentiment and adaptive coping.", "label": true },
{ "paperid": "2409.19136v1", "paper_path": "./SciVer/papers/2409.19136v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.19136v1-Table3-1.png", "request_id": 281, "origin_statement": "The LOF algorithm achieves a mean PR-AUC of 0.087, nearly double the random classifier, indicating LOF’s local density-based scoring can detect subtle kinematic anomalies despite the curse of dimensionality degrading performance in high-dimensional feature spaces.", "perturbed_statement": "The LOF algorithm achieves a mean PR-AUC of 0.87, nearly double the random classifier, indicating LOF’s local density-based scoring can detect subtle kinematic anomalies despite the curse of dimensionality degrading performance in high-dimensional feature spaces.", "perturbed_explanation": "The perturbed statement incorrectly reports LOF’s mean PR-AUC as 0.87 instead of 0.087. Table 3 shows the mean PR-AUC for LOF is 0.087, not 87%, so the perturbed value is off by an order of magnitude and contradicts the context.", "claim": "The LOF algorithm achieves a mean PR-AUC of 0.087, nearly double the random classifier, indicating LOF’s local density-based scoring can detect subtle kinematic anomalies despite the curse of dimensionality degrading performance in high-dimensional feature spaces.", "label": true },
{ "paperid": "2411.08087v1", "paper_path": "./SciVer/papers/2411.08087v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.1", "5.2" ], "image_path": "./SciVer/images/2411.08087v1-Table2-1.png", "request_id": 283, "origin_statement": "The chi-squared p-values in Table 2 decrease with increasing multipole range across all Planck maps, indicating that small-scale (arcminute) anisotropies from residual foregrounds become increasingly dominant above ℓ∼50, thus leading to the rejection of isotropy beyond Planck scales.", "perturbed_statement": "The chi-squared p-values in Table 2 decrease with increasing multipole range across all Planck maps, indicating that small-scale (arcminute) anisotropies from residual foregrounds become increasingly dominant above ℓ∼20, thus leading to the rejection of isotropy beyond Planck scales.", "perturbed_explanation": "This statement is incorrect because Table 2 and the accompanying discussion show that the chi-squared p-values only begin to fall significantly and reject isotropy above ℓ∼50 (the Planck-scale cutoff), not at ℓ∼20.
The threshold ℓ∼20 contradicts the context’s specified scale of ℓ∼50.", "claim": "The chi-squared p-values in Table 2 decrease with increasing multipole range across all Planck maps, indicating that small-scale (arcminute) anisotropies from residual foregrounds become increasingly dominant above ℓ∼50, thus leading to the rejection of isotropy beyond Planck scales.", "label": true }, { "paperid": "2410.14059v2", "paper_path": "./SciVer/papers/2410.14059v2.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.14059v2_figure_5.png", "request_id": 284, "origin_statement": "Models continued pre-trained on financial corpora, such as Tongyi-Finance-14B-Chat and CFGPT2-7B, outperform their vanilla backbones by approximately 3 points across Claude-3.5-Sonnet, Gemini-1.5-pro, and Deepseek-chat evaluators, illustrating the efficacy of domain-adaptive pretraining for financial tasks.", "perturbed_statement": "Models continued pre-trained on financial corpora, such as Tongyi-Finance-14B-Chat and CFGPT2-7B, outperform their vanilla backbones by approximately 15 points across Claude-3.5-Sonnet, Gemini-1.5-pro, and Deepseek-chat evaluators, illustrating the efficacy of domain-adaptive pretraining for financial tasks.", "perturbed_explanation": "Figure 5 shows that continued pre-trained models only improve by about 2–4 points over their original backbones on each evaluator. Claiming a 15-point gain contradicts these observed small improvements.", "claim": "Models continued pre-trained on financial corpora, such as Tongyi-Finance-14B-Chat and CFGPT2-7B, outperform their vanilla backbones by approximately 3 points across Claude-3.5-Sonnet, Gemini-1.5-pro, and Deepseek-chat evaluators, illustrating the efficacy of domain-adaptive pretraining for financial tasks.", "label": true }, { "paperid": "2411.02712v1", "paper_path": "./SciVer/papers/2411.02712v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.02712v1-Table4-1.png", "request_id": 285, "origin_statement": "Integrating visual guidance into DPO reduces hallucination rates by roughly 15% (from 0.62 to 0.53 on synthetic data) and increases overall scores by about 20% (from 1.97 to 2.36), indicating that vision-conditioned regularization effectively calibrates language models to prevent unsupported text generation.", "perturbed_statement": "Integrating visual guidance into DPO reduces hallucination rates by over 30% (from 0.62 to 0.42 on synthetic data) and increases overall scores by about 30% (from 1.97 to 2.57), indicating that vision-conditioned regularization effectively calibrates language models to prevent unsupported text generation.", "perturbed_explanation": "The hallucination rate for V-DPO on synthetic data is actually 0.53 (not 0.42), and the overall score is 2.36 (not 2.57). 
These values contradict the reported results in the table.", "claim": "Integrating visual guidance into DPO reduces hallucination rates by roughly 15% (from 0.62 to 0.53 on synthetic data) and increases overall scores by about 20% (from 1.97 to 2.36), indicating that vision-conditioned regularization effectively calibrates language models to prevent unsupported text generation.", "label": true },
{ "paperid": "2410.21329v1", "paper_path": "./SciVer/papers/2410.21329v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "2.1", "2.3" ], "image_path": "./SciVer/images/2410.21329v1_figure_2.png", "request_id": 286, "origin_statement": "The strong bimodal distribution, with ~60% of samples at cloud fraction 0 or 1, reflects dominant clear-sky and overcast conditions in high latitudes, driven by the emissivity-based sensitivity of the 10.8 µm channel and frequent stratiform cloud regimes in Scandinavian climatology.", "perturbed_statement": "The pronounced bimodal distribution, with over 80% of samples at cloud fraction 0 or 1, reflects dominant clear-sky and overcast conditions in high latitudes, driven by the emissivity-based sensitivity of the 12 µm channel and frequent convective cloud regimes in Scandinavian climatology.", "perturbed_explanation": "The perturbed statement is incorrect because the histogram and text report about 60% of values at extremes, not over 80%. It also mislabels the effective cloudiness sensor channel as 12 µm instead of the 10.8 µm window and incorrectly substitutes stratiform-dominated regimes with convective clouds.", "claim": "The strong bimodal distribution, with ~60% of samples at cloud fraction 0 or 1, reflects dominant clear-sky and overcast conditions in high latitudes, driven by the emissivity-based sensitivity of the 10.8 µm channel and frequent stratiform cloud regimes in Scandinavian climatology.", "label": true },
{ "paperid": "2411.03401v1", "paper_path": "./SciVer/papers/2411.03401v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.03401v1_figure_8.png", "request_id": 288, "origin_statement": "The exclusive presence of spherical gas pores in 4PB specimens at 1300 mm/s, with sphericities above 0.65 and aspect ratios near 0.5 for pores under 25 µm, suggests that the four-point bending geometry reduces melt pool discontinuities that cause elongated LoF defects in axial specimens.", "perturbed_statement": "The exclusive presence of spherical gas pores in 4PB specimens at 1300 mm/s, with sphericities above 0.85 and aspect ratios near 0.5 for pores under 15 µm, suggests that the four-point bending geometry reduces melt pool discontinuities that cause elongated LoF defects in axial specimens.", "perturbed_explanation": "This statement is incorrect because 4PB pores in Fig. 8 exhibit maximum sphericities around 0.72, not above 0.85, and their sizes span approximately 12–25 µm rather than being exclusively under 15 µm, contradicting the stated thresholds.", "claim": "The exclusive presence of spherical gas pores in 4PB specimens at 1300 mm/s, with sphericities above 0.65 and aspect ratios near 0.5 for pores under 25 µm, suggests that the four-point bending geometry reduces melt pool discontinuities that cause elongated LoF defects in axial specimens.", "label": true },
{ "paperid": "2411.16474v1", "paper_path": "./SciVer/papers/2411.16474v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "3" ], "image_path": "./SciVer/images/2411.16474v1_figure_3.png", "request_id": 290, "origin_statement": "The bottom panel’s linear c_s(t) profile with slope ≈0.35 reveals a constant first-order adsorption flux, confirming that surface kinetics, via the subsurface-to-surface transition frequency factor, dominate the temporal evolution of adsorbed hydrogen in the MMS test.", "perturbed_statement": "Figure 3’s bottom plot shows c_s rising linearly at slope 0.50, demonstrating a constant second-order adsorption flux; this suggests that bulk diffusion, rather than first-order surface kinetics, governs adsorbed hydrogen evolution in the MMS simulation.", "perturbed_explanation": "The perturbed statement is incorrect because the actual slope of c_s(t) is about 0.35 (1.75/5), not 0.50, and the adsorption process is first-order—driven by the specified subsurface-to-surface frequency factor—not a second-order flux. Moreover, surface kinetics, not bulk diffusion, control the adsorbed hydrogen evolution in the MMS test.", "claim": "The bottom panel’s linear c_s(t) profile with slope ≈0.35 reveals a constant first-order adsorption flux, confirming that surface kinetics, via the subsurface-to-surface transition frequency factor, dominate the temporal evolution of adsorbed hydrogen in the MMS test.", "label": true },
{ "paperid": "2409.19572v1", "paper_path": "./SciVer/papers/2409.19572v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.19572v1-Table2-1.png", "request_id": 293, "origin_statement": "Model-wholeseq’s superior performance (130.86 vs. 124.94 Sum on WoI) suggests that feedback at whole-sequence granularity better mitigates over-association errors than token-level adjustments, aligning with sequence-level reinforcement learning principles that global reward signals improve generation coherence.", "perturbed_statement": "Model-wholeseq’s superior performance (128.86 vs. 124.94 Sum on WoI) suggests that feedback at whole-sequence granularity better mitigates over-association errors than token-level adjustments, aligning with sequence-level reinforcement learning principles that global reward signals improve generation coherence.", "perturbed_explanation": "The perturbed statement incorrectly reports the WoI Sum score for Model-wholeseq as 128.86. According to Table 2, the correct WoI Sum for Model-wholeseq is actually 130.86, not 128.86.", "claim": "Model-wholeseq’s superior performance (130.86 vs. 124.94 Sum on WoI) suggests that feedback at whole-sequence granularity better mitigates over-association errors than token-level adjustments, aligning with sequence-level reinforcement learning principles that global reward signals improve generation coherence.", "label": true },
{ "paperid": "2411.16506v1", "paper_path": "./SciVer/papers/2411.16506v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.16506v1_figure_4.png", "request_id": 294, "origin_statement": "At around 500 agents on warehouse-s and 400 on warehouse-d, on+GPIBT+LNS achieves peak throughput of approximately 8.8 and 5.2 respectively, outperforming off-policy variants by over 30%, demonstrating that integrating LNS with on-policy guidance effectively mitigates congestion at scale.", "perturbed_statement": "At around 700 agents on warehouse-s and 600 on warehouse-d, on+GPIBT+LNS achieves peak throughput of approximately 6.5 and 4.0 respectively, outperforming off-policy variants by over 20%, demonstrating that integrating LNS with on-policy guidance effectively mitigates congestion at scale.", "perturbed_explanation": "The perturbed statement misplaces both the peak agent counts and throughput values. In the figure, on+GPIBT+LNS actually peaks near 500 agents on warehouse-s (~8.8 throughput) and 400 on warehouse-d (~5.2 throughput). Throughputs at 700 and 600 agents are lower (~6.0 and ~3.8) and do not yield a >20% advantage over off-policy methods.", "claim": "At around 500 agents on warehouse-s and 400 on warehouse-d, on+GPIBT+LNS achieves peak throughput of approximately 8.8 and 5.2 respectively, outperforming off-policy variants by over 30%, demonstrating that integrating LNS with on-policy guidance effectively mitigates congestion at scale.", "label": true },
{ "paperid": "2409.20058v1", "paper_path": "./SciVer/papers/2409.20058v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1", "4.3" ], "image_path": "./SciVer/images/2409.20058v1-Table1-1.png", "request_id": 295, "origin_statement": "The Galactic Bulge shows a 25–60 keV surface brightness of 155 mCrab, over twice the ~68 mCrab measured in the L±20 spiral arms, mirroring the steep stellar density gradient in the inner few kpc when convolved with IBIS/ISGRI’s collimator response to NIR light.", "perturbed_statement": "The 25–60 keV flux in the Galactic Bulge (155 mCrab) is only 1.2 times that of the spiral arms (68 mCrab), implying a marginal bulge enhancement over the arms inconsistent with a steep radial stellar density gradient.", "perturbed_explanation": "The perturbation is incorrect because 155 mCrab is actually about 2.3 times 68 mCrab, not 1.2.
Thus the stated ratio contradicts the measured flux values in Table 1.", "claim": "The Galactic Bulge shows a 25–60 keV surface brightness of 155 mCrab, over twice the ~68 mCrab measured in the L±20 spiral arms, mirroring the steep stellar density gradient in the inner few kpc when convolved with IBIS/ISGRI’s collimator response to NIR light.", "label": true },
{ "paperid": "2410.18069v1", "paper_path": "./SciVer/papers/2410.18069v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "5.1.1" ], "image_path": "./SciVer/images/2410.18069v1_figure_2.png", "request_id": 296, "origin_statement": "At large scaling length r, the splitting between the first two-particle energy levels decays exponentially as ∼e⁻ᵐʳ, reflecting finite-volume interaction effects and causing the two-particle thresholds to approach the asymptotic 2M continuum limit.", "perturbed_statement": "At large scaling length r, the splitting between the first two-particle energy levels decays polynomially as 1/r², reflecting finite-volume interaction effects and causing the two-particle thresholds to approach the asymptotic 2M continuum limit.", "perturbed_explanation": "This statement is incorrect because finite-volume interaction corrections in the truncated conformal space approach are exponentially suppressed ∼e⁻ᵐʳ, not polynomially as 1/r², which contradicts both the plotted exponential decay in Figure 5.2 and standard finite-volume theory.", "claim": "At large scaling length r, the splitting between the first two-particle energy levels decays exponentially as ∼e⁻ᵐʳ, reflecting finite-volume interaction effects and causing the two-particle thresholds to approach the asymptotic 2M continuum limit.", "label": true },
{ "paperid": "2410.04068v1", "paper_path": "./SciVer/papers/2410.04068v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.1.1", "3.2", "3.2.2" ], "image_path": "./SciVer/images/2410.04068v1_figure_2.png", "request_id": 298, "origin_statement": "Figure 2 shows that answer conflicts are highly skewed towards entity contradictions (72.31%), whereas factoid conflicts exhibit a more balanced composition—entity (31.82%), number (25%) and definition (15.91%)—implying that factoid perturbations introduce varied conflict modalities beyond mere entity mismatches, which can improve robustness testing of conflict detection models.", "perturbed_statement": "Figure 2 shows that answer conflicts are highly skewed towards number contradictions (72.31%), whereas factoid conflicts exhibit a more balanced composition—entity (31.82%), negation (15.91%) and temporal (11.36%)—implying that factoid perturbations introduce varied conflict modalities beyond mere number mismatches, which can improve robustness testing of conflict detection models.", "perturbed_explanation": "The perturbed statement wrongly identifies number contradictions as making up 72.31% of answer conflicts, but the context shows this slice corresponds to entity conflicts (number conflicts are only 0.77%). It also mislabels the 15.91% portion of factoid conflicts as negation, whereas that slice actually represents definition conflicts.", "claim": "Figure 2 shows that answer conflicts are highly skewed towards entity contradictions (72.31%), whereas factoid conflicts exhibit a more balanced composition—entity (31.82%), number (25%) and definition (15.91%)—implying that factoid perturbations introduce varied conflict modalities beyond mere entity mismatches, which can improve robustness testing of conflict detection models.", "label": true },
{ "paperid": "2410.07679v2", "paper_path": "./SciVer/papers/2410.07679v2.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.07679v2_figure_6.png", "request_id": 302, "origin_statement": "Increasing batch size from 64 to 512 reduces FID by 0.08 (3.80→3.72), while memory usage increases exponentially from ~3,000 MB to ~135,000 MB. This implies that larger batches enhance relational feature propagation during distillation, improving generative quality but dramatically raising GPU memory demands.", "perturbed_statement": "Scaling the batch size from 64 to 512 increases FID from 3.80 to 3.82, while memory cost grows quadratically from ~3,000 MB to ~135,000 MB, indicating that larger batches slightly degrade generation quality despite increased computational resources.", "perturbed_explanation": "Both details conflict with Figure 6: FID actually decreases from 3.80 to 3.72, not increases to 3.82. Moreover, memory cost growth is exponential (cost more than triples between 256 and 512), not quadratic. These errors contradict the reported performance and cost trends.", "claim": "Increasing batch size from 64 to 512 reduces FID by 0.08 (3.80→3.72), while memory usage increases exponentially from ~3,000 MB to ~135,000 MB. This implies that larger batches enhance relational feature propagation during distillation, improving generative quality but dramatically raising GPU memory demands.", "label": true },
{ "paperid": "2410.24169v1", "paper_path": "./SciVer/papers/2410.24169v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.24169v1-Table1-1.png", "request_id": 303, "origin_statement": "The EScAIP-Large model reduces test energy MAE to 215 meV on OC20 All+MD, demonstrating that scaling model parameters enhances predictive accuracy by better capturing many-body interactions.", "perturbed_statement": "The EScAIP-Large model reduces test energy MAE to 195 meV on OC20 All+MD, demonstrating that scaling model parameters enhances predictive accuracy by better capturing many-body interactions.", "perturbed_explanation": "The perturbed statement incorrectly reports the test energy MAE for EScAIP-Large as 195 meV.
According to Table 1, the actual test Energy MAE on OC20 All+MD for EScAIP-Large is 215 meV, not 195 meV.", "claim": "The EScAIP-Large model reduces test energy MAE to 215 meV on OC20 All+MD, demonstrating that scaling model parameters enhances predictive accuracy by better capturing many-body interactions.", "label": true }, { "paperid": "2410.02320v2", "paper_path": "./SciVer/papers/2410.02320v2.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "6.1" ], "image_path": "./SciVer/images/2410.02320v2_figure_3.png", "request_id": 304, "origin_statement": "In the EnRu setting, SFT raises MT probabilities more than PE’s due to zero-shot learning, whereas dCPO drives MT log-probabilities downward while modestly increasing PE’s, thus widening their log-probability gap—a selection pressure that aligns with higher XCOMET-XL evaluation gains.", "perturbed_statement": "In the EnRu setting, SFT raises PE probabilities more than MT’s due to zero-shot learning, whereas dCPO drives PE log-probabilities downward while modestly increasing MT’s, thus narrowing their log-probability gap—a selection pressure that aligns with higher XCOMET-XL gains.", "perturbed_explanation": "This statement is incorrect because Figure 3 shows that under EnRu, SFT actually produces a larger upward shift in MT log-probabilities than in PE log-probabilities. Additionally, dCPO pushes MT log-probabilities down and slightly raises PE log-probabilities, which widens rather than narrows the gap.", "claim": "In the EnRu setting, SFT raises MT probabilities more than PE’s due to zero-shot learning, whereas dCPO drives MT log-probabilities downward while modestly increasing PE’s, thus widening their log-probability gap—a selection pressure that aligns with higher XCOMET-XL evaluation gains.", "label": true }, { "paperid": "2409.02554v1", "paper_path": "./SciVer/papers/2409.02554v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "2.2" ], "image_path": "./SciVer/images/2409.02554v1-Table3-1.png", "request_id": 305, "origin_statement": "Cycle 23 exhibited 37% of DH type II bursts reaching 0.5 AU, whereas cycle 24 showed only 12%, implying that temporal variations in solar wind density and magnetic field geometry significantly modulate shock longevity in interplanetary space.", "perturbed_statement": "Cycle 23 exhibited 45% of DH type II bursts reaching 1 AU, whereas cycle 24 showed only 20%, implying that variations in solar wind temperature primarily govern shock longevity in interplanetary space.", "perturbed_explanation": "Table 3 reports 37% of type II bursts extending to roughly 0.5 AU in cycle 23 (not 45% to 1 AU) and only 12% in cycle 24 (not 20%). 
Moreover, the context attributes shock survival to ambient density and magnetic field structure, not solar wind temperature.", "claim": "Cycle 23 exhibited 37% of DH type II bursts reaching 0.5 AU, whereas cycle 24 showed only 12%, implying that temporal variations in solar wind density and magnetic field geometry significantly modulate shock longevity in interplanetary space.", "label": true }, { "paperid": "2409.19942v2", "paper_path": "./SciVer/papers/2409.19942v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5" ], "image_path": "./SciVer/images/2409.19942v2-Table4-1.png", "request_id": 307, "origin_statement": "The +21.5% F1 gain in linear evaluation on CycleCrash when pretrained on CCD reveals that R(2+1)D learns highly transferable collision anticipation representations, since linear probing isolates the quality of learned features without updating backbone parameters.", "perturbed_statement": "The +21.5% F1 gain in linear evaluation on CycleCrash when pretrained on DoTA reveals that R(2+1)D learns highly transferable collision anticipation representations, since linear probing isolates feature quality without updating backbone parameters.", "perturbed_explanation": "This is incorrect because Table 4 shows the +21.5% F1 improvement on CycleCrash linear evaluation comes from the model pretrained on CCD, whereas DoTA-pretrained linear evaluation yields only a +1.4% F1 gain.", "claim": "The +21.5% F1 gain in linear evaluation on CycleCrash when pretrained on CCD reveals that R(2+1)D learns highly transferable collision anticipation representations, since linear probing isolates the quality of learned features without updating backbone parameters.", "label": true }, { "paperid": "2409.04846v1", "paper_path": "./SciVer/papers/2409.04846v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "3.1.1" ], "image_path": "./SciVer/images/2409.04846v1_figure_3.png", "request_id": 308, "origin_statement": "The seeing distribution’s sharp 0.8″ lower bound and 2.0″ tail suggest intermittent Kolmogorov turbulence: high Fried parameter episodes limit minima, while low r0 periods produce the extended seeing tail, reflecting a -11/3 power-law spatial spectrum.", "perturbed_statement": "The seeing distribution’s sharp 1.0″ lower bound and 2.0″ tail suggest intermittent Kolmogorov turbulence: high Fried parameter episodes limit minima, while low r0 periods produce the extended seeing tail, reflecting a -11/3 power-law spatial spectrum.", "perturbed_explanation": "The perturbed statement incorrectly sets the lower bound at 1.0″, whereas the context explicitly reports a sharp cutoff at 0.8″ in the seeing histogram. 
This detail contradicts the measured distribution.", "claim": "The seeing distribution’s sharp 0.8″ lower bound and 2.0″ tail suggest intermittent Kolmogorov turbulence: high Fried parameter episodes limit minima, while low r0 periods produce the extended seeing tail, reflecting a -11/3 power-law spatial spectrum.", "label": true },
{ "paperid": "2411.03445v1", "paper_path": "./SciVer/papers/2411.03445v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.03445v1_figure_3.png", "request_id": 310, "origin_statement": "CIFAR100 models yield near-perfect AUC (~0.99) with just 10 training models under the base (unsorted) configuration, indicating that random weight initialization alone produces highly separable feature representations, thereby reducing the marginal benefit of permutation-invariant sorting compared to CIFAR10 (<0.8 AUC).", "perturbed_statement": "CIFAR100 models yield only moderate AUC (~0.75) with just 10 training models under the base (unsorted) configuration, indicating that random weight initialization alone produces highly separable feature representations, thereby reducing the marginal benefit of permutation-invariant sorting compared to CIFAR10 (<0.8 AUC).", "perturbed_explanation": "This is incorrect because Figure 3 shows CIFAR100 under the base (unsorted) configuration achieves near-perfect AUC (~0.98–0.99) with 10 training models, not only ~0.75 as claimed.", "claim": "CIFAR100 models yield near-perfect AUC (~0.99) with just 10 training models under the base (unsorted) configuration, indicating that random weight initialization alone produces highly separable feature representations, thereby reducing the marginal benefit of permutation-invariant sorting compared to CIFAR10 (<0.8 AUC).", "label": true },
{ "paperid": "2411.06171v1", "paper_path": "./SciVer/papers/2411.06171v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.5" ], "image_path": "./SciVer/images/2411.06171v1-Table4-1.png", "request_id": 311, "origin_statement": "Jointly selecting attention heads by sensitivity and forgettability yields 54.99% accuracy with only –2.61 BWT on Order1, demonstrating how coupling gradient sensitivity and output variance criteria effectively curbs catastrophic forgetting in LLaMA-2-7B.", "perturbed_statement": "Jointly selecting attention heads by sensitivity and forgettability yields 54.99% accuracy with only –3.61 BWT on Order1, demonstrating how coupling gradient sensitivity and output variance criteria effectively curbs catastrophic forgetting in LLaMA-2-7B.", "perturbed_explanation": "The perturbed statement misreports the BWT (backward transfer) value: Table 4 shows a BWT of –2.61 for the ‘both’ head importance measure on Order1, not –3.61 as claimed.", "claim": "Jointly selecting attention heads by sensitivity and forgettability yields 54.99% accuracy with only –2.61 BWT on Order1, demonstrating how coupling gradient sensitivity and output variance criteria effectively curbs catastrophic forgetting in LLaMA-2-7B.", "label": true },
{ "paperid": "2411.15553v1", "paper_path": "./SciVer/papers/2411.15553v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.15553v1_figure_6.png", "request_id": 312, "origin_statement": "FTM’s transfer attack peaks at 61.4% when p = 0.1 and α_max = 0.75, indicating that an intermediate mixing probability and moderate clean-feature retention optimally balance perturbation magnitude and feature diversity for robust cross-model adversarial transfer.", "perturbed_statement": "FTM’s transfer attack peaks at 60.9% when p = 0.15 and α_max = 0.5, suggesting that lower mixing ratios and reduced feature retention maximize adversarial transfer by minimizing perturbation noise.", "perturbed_explanation": "The heatmap shows the highest success rate of 61.4% at p = 0.1 with α_max = 0.75. At p = 0.15 and α_max = 0.5 the success rate is only 60.9%, so claiming 60.9% is the peak misreports both the rate and the corresponding parameter values.", "claim": "FTM’s transfer attack peaks at 61.4% when p = 0.1 and α_max = 0.75, indicating that an intermediate mixing probability and moderate clean-feature retention optimally balance perturbation magnitude and feature diversity for robust cross-model adversarial transfer.", "label": true },
{ "paperid": "2411.16198v1", "paper_path": "./SciVer/papers/2411.16198v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.4.2" ], "image_path": "./SciVer/images/2411.16198v1-Table4-1.png", "request_id": 313, "origin_statement": "The Visual Precision Search confines attributions to target negative response regions, improving MS-COCO’s explaining success rate by 19.47% and average highest score by 27.4%, thereby enhancing localization fidelity and reducing background interference through refined contextual reasoning.", "perturbed_statement": "The Visual Precision Search confines attributions to target negative response regions, improving MS-COCO’s explaining success rate by 29.47% and average highest score by 37.4%, thereby enhancing localization fidelity and reducing background interference through refined contextual reasoning.", "perturbed_explanation": "The perturbed statement misreports the metrics: Table 4 shows an ESR improvement of 19.47%, not 29.47%, and an average highest score improvement of 27.4%, not 37.4%.", "claim": "The Visual Precision Search confines attributions to target negative response regions, improving MS-COCO’s explaining success rate by 19.47% and average highest score by 27.4%, thereby enhancing localization fidelity and reducing background interference through refined contextual reasoning.", "label": true },
{ "paperid": "2409.06801v1", "paper_path": "./SciVer/papers/2409.06801v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "6.2.2" ], "image_path": "./SciVer/images/2409.06801v1_figure_4.png", "request_id": 314, "origin_statement": "Georgia State House districts with BVAP margins between 5% and 15% show MMD discrepancy rates exceeding 60%, reflecting threshold-induced volatility where minor demographic shifts around the 50% Black VAP boundary amplify methodological classification differences between DEMO and SWAP.", "perturbed_statement": "Georgia State House districts with BVAP margins within ±10% of the 60% threshold exhibit MMD discrepancy rates above 80%, peaking at 90% around the threshold, indicating extreme classification volatility due to threshold sensitivity.", "perturbed_explanation": "This statement is wrong because the classification threshold for majority Black districts is 50% BVAP, not 60%.
The actual peak MMD discrepancy rate is approximately 62% at about a 10% BVAP margin, not 90% within ±10% of a 60% threshold, as shown in the context.", "claim": "Georgia State House districts with BVAP margins between 5% and 15% show MMD discrepancy rates exceeding 60%, reflecting threshold-induced volatility where minor demographic shifts around the 50% Black VAP boundary amplify methodological classification differences between DEMO and SWAP.", "label": true },
{ "paperid": "2409.00671v1", "paper_path": "./SciVer/papers/2409.00671v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.7" ], "image_path": "./SciVer/images/2409.00671v1-Table3-1.png", "request_id": 315, "origin_statement": "InvariantStock's superior RankIC of 0.0021 and RankICIR of 0.01605 imply its invariant feature extraction isolates stable causal price signals, enhancing cross-sectional stock ranking reliability in the sparse six-dimensional US price feature space.", "perturbed_statement": "InvariantStock achieves the lowest maximum drawdown of -0.2194 among all methods, demonstrating robust risk control despite limited US stock features.", "perturbed_explanation": "The table shows InvariantStock’s MDD is –0.3107, not –0.2194. In fact, the DJIA has the lowest drawdown at –0.2194. Thus, attributing a –0.2194 MDD to InvariantStock contradicts the reported values.", "claim": "InvariantStock's superior RankIC of 0.0021 and RankICIR of 0.01605 imply its invariant feature extraction isolates stable causal price signals, enhancing cross-sectional stock ranking reliability in the sparse six-dimensional US price feature space.", "label": true },
{ "paperid": "2410.05468v2", "paper_path": "./SciVer/papers/2410.05468v2.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.05468v2_figure_6.png", "request_id": 316, "origin_statement": "PH-Dropout delivers sub-second inference (0.21s), in contrast to minute-scale methods (e.g., 90s for Bayes Rays, 7200s for CF-NeRF, 18000s for random init), enabling real-time uncertainty-aware view synthesis.", "perturbed_statement": "PH-Dropout delivers sub-second inference (2.1s), in contrast to minute-scale methods (e.g., 900s for Bayes Rays, 7200s for CF-NeRF, 18000s for random init), enabling real-time uncertainty-aware view synthesis.", "perturbed_explanation": "The perturbed statement incorrectly reports PH-Dropout inference time as 2.1 s (actual is 0.21 s) and Bayes Rays processing time as 900 s (actual ~90 s), contradicting the values shown in Figure 6.", "claim": "PH-Dropout delivers sub-second inference (0.21s), in contrast to minute-scale methods (e.g., 90s for Bayes Rays, 7200s for CF-NeRF, 18000s for random init), enabling real-time uncertainty-aware view synthesis.", "label": true },
{ "paperid": "2410.23910v1", "paper_path": "./SciVer/papers/2410.23910v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.23910v1-Table3-1.png", "request_id": 317, "origin_statement": "Leveraging predicted uncertainties to re-examine BEV cells with <5% initial detection probability increases average recall from 0.0375 (entropy) to 0.0740, effectively doubling false-negative discovery and improving average F1-score to 0.1002, demonstrating the utility of uncertainty-aware missed-object localization in autonomous 3D detection.", "perturbed_statement": "Leveraging predicted uncertainties to re-examine BEV cells with <10% initial detection probability increases average recall from 0.0225 (MC-DP) to 0.1135, effectively identifying 20 additional missed objects and improving average F1-score to 0.0950, demonstrating the utility of uncertainty-aware missed-object localization.", "perturbed_explanation": "The perturbation is incorrect because the method actually uses a 5% probability threshold (not 10%), predicts only 15 new locations (not 20), and the average recall for our approach is 0.0740 (not 0.1135), with average F1-score 0.1002 (not 0.0950).", "claim": "Leveraging predicted uncertainties to re-examine BEV cells with <5% initial detection probability increases average recall from 0.0375 (entropy) to 0.0740, effectively doubling false-negative discovery and improving average F1-score to 0.1002, demonstrating the utility of uncertainty-aware missed-object localization in autonomous 3D detection.", "label": true },
{ "paperid": "2409.04766v1", "paper_path": "./SciVer/papers/2409.04766v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.7" ], "image_path": "./SciVer/images/2409.04766v1_figure_5.png", "request_id": 318, "origin_statement": "Figure 5 reveals that EyeDiap exhibits the highest epistemic uncertainty (~3.29×10−4), whereas MPIIFaceGaze shows the lowest (~1.52×10−4). This suggests that our EIF model encounters novel gaze distributions in EyeDiap due to its per-scene variability, reflecting increased model uncertainty in underrepresented visual contexts.", "perturbed_statement": "Figure 5 indicates that EyeDiap exhibits the lowest epistemic uncertainty (~1.52×10−4), whereas MPIIFaceGaze shows the highest (~3.29×10−4). This implies that per-scene variability in EyeDiap reduces model uncertainty, demonstrating improved generalization in dynamic environmental contexts.", "perturbed_explanation": "The perturbation is incorrect because Figure 5 shows EyeDiap has the highest epistemic uncertainty (approximately 3.29×10⁻⁴) and MPIIFaceGaze the lowest (approximately 1.52×10⁻⁴), not the other way around. Additionally, per-scene variability in EyeDiap actually increases epistemic uncertainty due to unfamiliar gaze distributions.", "claim": "Figure 5 reveals that EyeDiap exhibits the highest epistemic uncertainty (~3.29×10−4), whereas MPIIFaceGaze shows the lowest (~1.52×10−4). This suggests that our EIF model encounters novel gaze distributions in EyeDiap due to its per-scene variability, reflecting increased model uncertainty in underrepresented visual contexts.", "label": true },
{ "paperid": "2411.09726v2", "paper_path": "./SciVer/papers/2411.09726v2.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.09726v2_figure_7.png", "request_id": 320, "origin_statement": "On April 20th, neutral regime prevalence across stations aligns with a rainfall spike, illustrating how increased RF elevates humidity and lowers diurnal temperature variability, buffering UTCI fluctuations and preventing hot-state transitions typically driven by peak solar heating.", "perturbed_statement": "On April 20th, the neutral regime prevalence across stations aligns with a wind speed peak, illustrating how increased WS elevates humidity and lowers diurnal temperature variability, buffering UTCI fluctuations and preventing hot-state transitions typically driven by peak solar heating.", "perturbed_explanation": "This is incorrect because the neutral regime on April 20th is linked to a rainfall spike, not a wind speed peak.
Moreover, increased wind speed does not elevate humidity but usually enhances evaporative cooling, so attributing higher humidity and neutral conditions to WS contradicts the context and atmospheric principles.", "claim": "On April 20th, neutral regime prevalence across stations aligns with a rainfall spike, illustrating how increased RF elevates humidity and lowers diurnal temperature variability, buffering UTCI fluctuations and preventing hot-state transitions typically driven by peak solar heating.", "label": true }, { "paperid": "2411.04649v1", "paper_path": "./SciVer/papers/2411.04649v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.04649v1-Table4-1.png", "request_id": 321, "origin_statement": "Injecting hurricane-themed decoys labeled POS across SST-2 and MultiRC training sets while keeping the test set untouched simulates spurious token-label shortcuts, revealing that models can memorize patterns like 'ike hurricane october precipitation' as sentiment cues instead of genuine semantic understanding.", "perturbed_statement": "Injecting hurricane-themed decoys labeled NEG into SST-2 and MultiRC test sets creates spurious token-label shortcuts, showing that models memorize phrases like 'ike hurricane october precipitation' as negative sentiment markers instead of learning true semantic relations.", "perturbed_explanation": "The perturbed statement is incorrect because Table 4 shows the hurricane-themed decoys are labeled POS, not NEG, and the context specifies they are injected into the training and validation sets only, not the test set.", "claim": "Injecting hurricane-themed decoys labeled POS across SST-2 and MultiRC training sets while keeping the test set untouched simulates spurious token-label shortcuts, revealing that models can memorize patterns like 'ike hurricane october precipitation' as sentiment cues instead of genuine semantic understanding.", "label": true }, { "paperid": "2409.02184v1", "paper_path": "./SciVer/papers/2409.02184v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "5.2", "6.1" ], "image_path": "./SciVer/images/2409.02184v1_figure_4.png", "request_id": 322, "origin_statement": "When excluding the BCG core, the β slope peaks at ∼0.55 in the 30–80 kpc range, implying ICL mass grows super-linearly with halo mass as predicted by hierarchical merger models that redistribute accreted stars to intermediate radii.", "perturbed_statement": "When including the BCG core, the β slope peaks at ∼0.40 in the 50–100 kpc range, implying ICL mass grows sub-linearly with halo mass contrary to hierarchical merger models, suggesting the core dominates mass assembly at intermediate radii.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 4 shows the maximum β of about 0.55 occurs in the 30–80 kpc bin when the core is excluded, not included, and not at 50–100 kpc. 
Moreover, β in that region is super-linear (>0.5), not sub-linear (~0.40).", "claim": "When excluding the BCG core, the β slope peaks at ∼0.55 in the 30–80 kpc range, implying ICL mass grows super-linearly with halo mass as predicted by hierarchical merger models that redistribute accreted stars to intermediate radii.", "label": true },
{ "paperid": "2409.05249v1", "paper_path": "./SciVer/papers/2409.05249v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.05249v1-Table2-1.png", "request_id": 329, "origin_statement": "NetDPSyn synthetically-generated packet data maintains the relative performance ordering of anomaly detection modes better than NetShare or PGM, with a positive Spearman coefficient on DC (0.26 vs. -0.65 and -0.26) and a less negative correlation on CAIDA (−0.48 vs. −0.82), indicating improved fidelity under differential privacy.", "perturbed_statement": "NetDPSyn synthetically-generated packet data maintains the relative performance ordering of anomaly detection modes better than NetShare or PGM, with a positive Spearman coefficient on CAIDA (0.48) and DC (0.26), outperforming NetShare (-0.82, -0.65) and PGM (-0.26), indicating enhanced model fidelity under differential privacy.", "perturbed_explanation": "The perturbation misstates the Spearman correlation for NetDPSyn on CAIDA: the actual value is −0.48 (negative), not +0.48. This incorrect sign contradicts the context and reverses the true ranking relationship.", "claim": "NetDPSyn synthetically-generated packet data maintains the relative performance ordering of anomaly detection modes better than NetShare or PGM, with a positive Spearman coefficient on DC (0.26 vs. -0.65 and -0.26) and a less negative correlation on CAIDA (−0.48 vs. −0.82), indicating improved fidelity under differential privacy.", "label": true },
{ "paperid": "2411.13584v1", "paper_path": "./SciVer/papers/2411.13584v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "1" ], "image_path": "./SciVer/images/2411.13584v1-Table4-1.png", "request_id": 333, "origin_statement": "The 14-tier Chinese address hierarchy—from province (level 1) to room number (6C)—underpins geocoding by progressively narrowing spatial scope; omitting mid-tier components (e.g., township at level 4A) can escalate misrouting due to loss of hierarchical resolution necessary for LBS algorithms.", "perturbed_statement": "The Chinese address system comprises 12 hierarchical tiers, going from provinces (level 1) to room numbers (level 6C), which allows geocoders to progressively refine locations; omitting level 5B (village) leads to a threefold increase in parcel misrouting by erasing essential micro-level context.", "perturbed_explanation": "This statement is incorrect because the Chinese address hierarchy actually includes more than 12 tiers (it spans levels 1–6 with multiple sub-levels, totaling 17 distinct components), and level 5B denotes Points of Interest, not villages (villages are at level 4B).", "claim": "The 14-tier Chinese address hierarchy—from province (level 1) to room number (6C)—underpins geocoding by progressively narrowing spatial scope; omitting mid-tier components (e.g., township at level 4A) can escalate misrouting due to loss of hierarchical resolution necessary for LBS algorithms.", "label": true },
{ "paperid": "2410.10289v1", "paper_path": "./SciVer/papers/2410.10289v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2410.10289v1_figure_1.png", "request_id": 334, "origin_statement": "FAPrompt achieves a consistent 3–5% AUROC improvement over AnomalyCLIP across diverse image AD benchmarks, due to its fine-grained and adaptive abnormality prompt modules (CAP and DAP) enhancing visual-textual alignment at anomaly details.", "perturbed_statement": "FAPrompt yields over 90% AUROC on all 12 benchmarks, outperforming WinCLIP by an average of 10–15%, due to its coarse-grained abnormality prompting that captures global semantic shifts.", "perturbed_explanation": "The perturbation is incorrect because Figure 1 (Right) shows FAPrompt’s AUROC varies across datasets and does not exceed 90% on all benchmarks. Its average gain over WinCLIP is 3–5%, not 10–15%.
Moreover, FAPrompt uses fine-grained, not coarse-grained, abnormality prompting.", "claim": "FAPrompt achieves a consistent 3–5% AUROC improvement over AnomalyCLIP across diverse image AD benchmarks, due to its fine-grained and adaptive abnormality prompt modules (CAP and DAP) enhancing visual-textual alignment at anomaly details.", "label": true }, { "paperid": "2409.17137v3", "paper_path": "./SciVer/papers/2409.17137v3.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.17137v3_figure_5.png", "request_id": 336, "origin_statement": "On CIFAR-100, increasing the regularization strength (λ) from 1e-3 to 5e4 causes PACE to systematically reduce the average gradient norm, indicating controlled weight update dampening and improved training stability, while FPA exhibits erratic gradient spikes and potential explosions, highlighting its inability to robustly regularize deep Transformer parameters.", "perturbed_statement": "On CIFAR-100, increasing the regularization strength (λ) from 1e-4 to 1e5 causes PACE to systematically reduce the average gradient norm, indicating controlled weight update dampening and improved training stability, while FPA exhibits erratic gradient spikes and potential explosions, highlighting its inability to robustly regularize deep Transformer parameters.", "perturbed_explanation": "The specified λ range is incorrect: Figure 5 varies λ from 1e-3 to 5×10⁴, not from 1e-4 to 1e5, so the perturbed λ values contradict the context.", "claim": "On CIFAR-100, increasing the regularization strength (λ) from 1e-3 to 5e4 causes PACE to systematically reduce the average gradient norm, indicating controlled weight update dampening and improved training stability, while FPA exhibits erratic gradient spikes and potential explosions, highlighting its inability to robustly regularize deep Transformer parameters.", "label": true }, { "paperid": "2409.11357v1", "paper_path": "./SciVer/papers/2409.11357v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.11357v1-Table3-1.png", "request_id": 337, "origin_statement": "Audio notifications significantly improved noticeability (M=6.865 vs. 4.10, p<.001), whereas visual notifications showed no significant effect (5.35 vs. 5.62, p=.78), suggesting auditory cues more effectively capture attention under high workload.", "perturbed_statement": "Visual notifications significantly increased noticeability (p<.05), with display conditions averaging 6.58 calls versus 4.08 calls in non-display conditions, indicating on-screen alerts strongly enhance user noticeability.", "perturbed_explanation": "This statement is incorrect because visual notifications actually had a non-significant effect (p=.78, not <.05). It also misrepresents the means by comparing the DS-only mean (6.58) to the O-only mean (4.08) instead of the correct overall display versus non-display averages (5.35 vs. 5.62).", "claim": "Audio notifications significantly improved noticeability (M=6.865 vs. 4.10, p<.001), whereas visual notifications showed no significant effect (5.35 vs. 
5.62, p=.78), suggesting auditory cues more effectively capture attention under high workload.", "label": true }, { "paperid": "2410.10177v1", "paper_path": "./SciVer/papers/2410.10177v1.json", "claim_type": "analytical", "type": "chart", "item": "4(c)", "section": [ "6.3" ], "image_path": "./SciVer/images/2410.10177v1_figure_4(c).png", "request_id": 338, "origin_statement": "In latent diffusion models, AUC-ROC increases from approximately 0.62 at 80 sampling steps to a maximum of about 0.87 at 150 steps, suggesting that intermediate denoising optimally exposes identifiable features; beyond 150 steps, further denoising yields diminishing returns due to reduced latent variability.", "perturbed_statement": "In latent diffusion models, AUC-ROC increases from approximately 0.62 at 80 sampling steps to a maximum of about 0.90 at 200 steps, suggesting that late-stage denoising near completion best exposes identifiable features.", "perturbed_explanation": "This perturbation is incorrect because Figure 4 shows the AUC-ROC peaking at about 0.87 at 150 steps and then decreasing to approximately 0.83 at 200 steps; it never reaches 0.90 at 200 sampling steps.", "claim": "In latent diffusion models, AUC-ROC increases from approximately 0.62 at 80 sampling steps to a maximum of about 0.87 at 150 steps, suggesting that intermediate denoising optimally exposes identifiable features; beyond 150 steps, further denoising yields diminishing returns due to reduced latent variability.", "label": true }, { "paperid": "2409.00700v1", "paper_path": "./SciVer/papers/2409.00700v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.3.2" ], "image_path": "./SciVer/images/2409.00700v1-Table3-1.png", "request_id": 339, "origin_statement": "Integrating IAQ-CL boosts UTMOS by 0.282 (from 2.945 to 3.227) by leveraging contrastive learning to align facial embeddings with acoustic prosodic patterns, thereby substantially improving perceptual naturalness in voice generation.", "perturbed_statement": "Integrating MIDD boosts UTMOS by 0.282 (from 2.945 to 3.227) by leveraging contrastive learning to align facial embeddings with acoustic prosodic patterns, thereby substantially improving perceptual naturalness in voice generation.", "perturbed_explanation": "This is incorrect because the 0.282 UTMOS increase (from 2.945 to 3.227) is achieved by adding the IAQ-CL module, not MIDD. 
Additionally, MIDD extracts speaker embeddings directly and does not employ contrastive learning to align facial embeddings with prosody.", "claim": "Integrating IAQ-CL boosts UTMOS by 0.282 (from 2.945 to 3.227) by leveraging contrastive learning to align facial embeddings with acoustic prosodic patterns, thereby substantially improving perceptual naturalness in voice generation.", "label": true }, { "paperid": "2409.13175v1", "paper_path": "./SciVer/papers/2409.13175v1.json", "claim_type": "analytical", "type": "chart", "item": "9", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.13175v1_figure_9.png", "request_id": 340, "origin_statement": "During peak evening hours (20-22), PoolRank sustains approximately 4,500 real-time recommendations per hour, outperforming batch-based methods that underutilize up to 10% of computational budget, illustrating PoolRank’s capacity to adaptively allocate streaming resources in the absence of future demand aggregation.", "perturbed_statement": "During peak evening hours (20-22), PoolRank sustains approximately 6,000 real-time recommendations per hour, outperforming batch-based methods that underutilize up to 30% of computational budget, illustrating PoolRank’s capacity to adaptively allocate streaming resources in the absence of future demand aggregation.", "perturbed_explanation": "The perturbed statement incorrectly reports PoolRank’s throughput as ~6,000 per hour, whereas Figure 9 shows it remains around 4,500. It also exaggerates batch methods’ underutilization at 30%, while the actual underuse is closer to 5–10% relative to the ideal real-time capacity.", "claim": "During peak evening hours (20-22), PoolRank sustains approximately 4,500 real-time recommendations per hour, outperforming batch-based methods that underutilize up to 10% of computational budget, illustrating PoolRank’s capacity to adaptively allocate streaming resources in the absence of future demand aggregation.", "label": true }, { "paperid": "2409.04257v1", "paper_path": "./SciVer/papers/2409.04257v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.04257v1_figure_2.png", "request_id": 342, "origin_statement": "The gap between CAPᵈ and DCAP for the “workab” target exceeds 20 percentage points, indicating that synthesis particularly mitigates attribute disclosure risk for highly imbalanced binary variables by inflating denominators and diluting the dominant “no” category.", "perturbed_statement": "The gap between CAPᵈ and DCAP for the “workab” target is only 5 percentage points, indicating that synthesis has minimal effect even for highly imbalanced binary variables by slightly adjusting denominators.", "perturbed_explanation": "Figure 2 shows CAPᵈ for “workab” near 100% and DCAP near 80%, a gap of about 20 points, not 5. 
Thus the claim of only a 5-point difference and minimal effect contradicts the actual observed gap in disclosure measures.", "claim": "The gap between CAPᵈ and DCAP for the “workab” target exceeds 20 percentage points, indicating that synthesis particularly mitigates attribute disclosure risk for highly imbalanced binary variables by inflating denominators and diluting the dominant “no” category.", "label": true }, { "paperid": "2410.21769v1", "paper_path": "./SciVer/papers/2410.21769v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "2.3" ], "image_path": "./SciVer/images/2410.21769v1-Table3-1.png", "request_id": 345, "origin_statement": "In the 2H phase, the in-plane E mode frequencies (bands 4-5) drop from 25.4 meV in TiSH to 19.4 meV in TiSeH, closely following the harmonic approximation’s ω∝m^(−1/2) scaling, with deviations (~0.76 ratio vs sqrt ratio ~0.64) arising from TM–chalcogen–H coupling.", "perturbed_statement": "In the 2H phase, the in-plane E mode frequencies (bands 4-5) drop from 25.4 cm⁻¹ in TiSH to 19.4 cm⁻¹ in TiSeH, following ω∝m scaling, indicating that heavier Se reduces phonon frequencies linearly with atomic mass.", "perturbed_explanation": "The perturbed statement is wrong because the table reports phonon frequencies in meV, not cm⁻¹, and the harmonic approximation predicts ω∝m^(−1/2), not a direct linear ω∝m dependence.", "claim": "In the 2H phase, the in-plane E mode frequencies (bands 4-5) drop from 25.4 meV in TiSH to 19.4 meV in TiSeH, closely following the harmonic approximation’s ω∝m^(−1/2) scaling, with deviations (~0.76 ratio vs sqrt ratio ~0.64) arising from TM–chalcogen–H coupling.", "label": true }, { "paperid": "2410.17020v2", "paper_path": "./SciVer/papers/2410.17020v2.json", "claim_type": "analytical", "type": "chart", "item": "2(b)", "section": [ "4.1", "4.2", "6.6" ], "image_path": "./SciVer/images/2410.17020v2_figure_2(b).png", "request_id": 346, "origin_statement": "In LFME, the logit regularization term constrains the ground-truth probability to decrease from roughly 0.85 to about 0.6 within 500 steps and then stabilize near 0.63 by 2000 iterations, exemplifying a balanced entropy-accuracy trade-off that prevents overconfident predictions.", "perturbed_statement": "In LFME, the logit regularization term constrains the ground-truth probability to decrease from roughly 0.85 to about 0.4 within 500 steps and then stabilize near 0.75 by 2000 iterations, exemplifying a balanced entropy-accuracy trade-off that prevents overconfident predictions.", "perturbed_explanation": "This claim is incorrect because Figure 2 shows the ground-truth probability decreases to around 0.6 within the first 500 iterations and then converges near 0.63 by 2000 iterations, not to 0.4 then 0.75 as stated.", "claim": "In LFME, the logit regularization term constrains the ground-truth probability to decrease from roughly 0.85 to about 0.6 within 500 steps and then stabilize near 0.63 by 2000 iterations, exemplifying a balanced entropy-accuracy trade-off that prevents overconfident predictions.", "label": true }, { "paperid": "2409.04050v1", "paper_path": "./SciVer/papers/2409.04050v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2409.04050v1-Table4-1.png", "request_id": 347, "origin_statement": "EigenSR-α, which fine-tunes the pre-trained IPT model in the eigenimage domain, achieves a 0.32 dB PSNR gain and reduces SAM by 0.045 compared to the IPT baseline, demonstrating that 
eigenimage-based learning effectively captures spectral-spatial features while enabling faster inference.", "perturbed_statement": "EigenSR-α, which fine-tunes the pre-trained IPT model in the eigenimage domain, achieves a 0.52 dB PSNR gain and reduces SAM by 0.085 compared to the IPT baseline, demonstrating that eigenimage-based learning effectively captures spectral-spatial features while enabling faster inference.", "perturbed_explanation": "The actual PSNR improvement reported in Table 4 is 40.28 − 39.96 = 0.32 dB, not 0.52 dB. Likewise, SAM decreases from 1.268 to 1.223, a reduction of 0.045, not 0.085, so both the PSNR and SAM differences are overstated.", "claim": "EigenSR-α, which fine-tunes the pre-trained IPT model in the eigenimage domain, achieves a 0.32 dB PSNR gain and reduces SAM by 0.045 compared to the IPT baseline, demonstrating that eigenimage-based learning effectively captures spectral-spatial features while enabling faster inference.", "label": true }, { "paperid": "2410.22015v1", "paper_path": "./SciVer/papers/2410.22015v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "2.1.1", "2.1.3", "3.1.2", "3.1.3", "3.2.1", "3.2.2" ], "image_path": "./SciVer/images/2410.22015v1_figure_2(a).png", "request_id": 348, "origin_statement": "Using DBSCAN with a 500 m search radius and minimum cluster size of three, sensors are partitioned into nine spatial clusters that closely correspond to Barcelona’s administrative districts, indicating that pedestrian flow hotspots spatially align with urban morphological divisions and the underlying sidewalk network connectivity.", "perturbed_statement": "Using DBSCAN with a 300 m search radius and minimum cluster size of five, sensors are partitioned into eight spatial clusters that correspond to Barcelona’s districts, indicating pedestrian flow hotspots align with administrative boundaries and sidewalk connectivity.", "perturbed_explanation": "The perturbed statement is incorrect because the study applies DBSCAN using a 500 m bandwidth and a minimum cluster size of three, resulting in nine clusters. 
The claim of a 300 m radius, a minimum size of five, and eight clusters contradicts the documented methodology and the reported cluster count.", "claim": "Using DBSCAN with a 500 m search radius and minimum cluster size of three, sensors are partitioned into nine spatial clusters that closely correspond to Barcelona’s administrative districts, indicating that pedestrian flow hotspots spatially align with urban morphological divisions and the underlying sidewalk network connectivity.", "label": true }, { "paperid": "2411.02860v1", "paper_path": "./SciVer/papers/2411.02860v1.json", "claim_type": "analytical", "type": "chart", "item": "4(c)", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.02860v1_figure_4(c).png", "request_id": 352, "origin_statement": "Allocating larger episodic memory per class progressively reduces interference from new tasks, boosting SDR by over 25% from 1 to 30 samples, and similarly raising SIR and SAR, as richer replay buffers better preserve source fidelity and suppress residual artifacts.", "perturbed_statement": "Allocating larger episodic memory per class progressively reduces interference from new tasks, boosting SDR by over 40% from 1 to 30 samples, and similarly raising SIR and SAR, as richer replay buffers better preserve source fidelity and suppress residual artifacts.", "perturbed_explanation": "The perturbed statement incorrectly claims a 40% SDR improvement from 1 to 30 samples. According to Figure 4(a), SDR rises from about 8.5 dB to 11.3 dB, an increase of roughly 33%, not 40%.", "claim": "Allocating larger episodic memory per class progressively reduces interference from new tasks, boosting SDR by over 25% from 1 to 30 samples, and similarly raising SIR and SAR, as richer replay buffers better preserve source fidelity and suppress residual artifacts.", "label": true }, { "paperid": "2411.02091v1", "paper_path": "./SciVer/papers/2411.02091v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.2", "3.3" ], "image_path": "./SciVer/images/2411.02091v1-Table1-1.png", "request_id": 357, "origin_statement": "Higher LOCADD values indicate greater addition of code lines, which increases the search space for regressions, leading negative binomial regression models to exhibit significantly positive LOCADD coefficients, thus causally linking code churn magnitude to longer bug fixing times.", "perturbed_statement": "Higher LOCADD values indicate greater addition of code lines, which increases the search space for regressions, leading Poisson regression models to exhibit significantly negative LOCADD coefficients, thus causally linking code churn magnitude to shorter bug fixing times.", "perturbed_explanation": "The perturbation is incorrect because the study explicitly uses negative binomial regression (not Poisson regression) to model count data, and the LOCADD coefficient is positive in those models, indicating longer—not shorter—bug fixing times.", "claim": "Higher LOCADD values indicate greater addition of code lines, which increases the search space for regressions, leading negative binomial regression models to exhibit significantly positive LOCADD coefficients, thus causally linking code churn magnitude to longer bug fixing times.", "label": true }, { "paperid": "2411.02175v1", "paper_path": "./SciVer/papers/2411.02175v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.02175v1_figure_5.png", "request_id": 358, "origin_statement": "The 
entropy-based aggregation progressively shifts weight towards the fast learner as class index increases, reflecting lower prediction entropy on novel categories; this dynamic arbitration mirrors hippocampal facilitation for recent classes while maintaining neocortical stability on older classes.", "perturbed_statement": "The entropy-based aggregation progressively shifts weight towards the fast learner as class index increases, reflecting lower prediction variance on novel categories; this dynamic arbitration mirrors hippocampal facilitation for classes above 80, while defaulting to neocortical stability on older classes.", "perturbed_explanation": "This statement is incorrect because the aggregation mechanism uses entropy, not variance, as the weighting metric, and the fast learner’s weights overtake the slow learner’s around class indices 100–119, not above 80, as shown in Figure 5.", "claim": "The entropy-based aggregation progressively shifts weight towards the fast learner as class index increases, reflecting lower prediction entropy on novel categories; this dynamic arbitration mirrors hippocampal facilitation for recent classes while maintaining neocortical stability on older classes.", "label": true }, { "paperid": "2411.10703v1", "paper_path": "./SciVer/papers/2411.10703v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2411.10703v1-Table1-1.png", "request_id": 359, "origin_statement": "By decomposing blood glucose into high- and low-frequency intrinsic mode functions via VMD, GlucoNet’s 1D CNN isolates meal-induced spikes while its LSTM-Transformer sequence modeling captures basal glycemic trends, enhancing multi-horizon forecast accuracy and robustness against sensor noise.", "perturbed_statement": "GlucoNet decomposes blood glucose signals into three IMFs using VMD, enabling its 1D CNN to isolate basal glycemic trends and the LSTM-Transformer to capture high-frequency meal spikes, thereby improving narrow-horizon forecasts under noisy conditions.", "perturbed_explanation": "This statement is incorrect because Table 1 specifies that GlucoNet’s VMD step splits signals into only two components (high- and low-frequency bands), not three IMFs. 
Moreover, the 1D CNN is designed to capture high-frequency meal-induced glucose spikes, not basal low-frequency trends as claimed.", "claim": "By decomposing blood glucose into high- and low-frequency intrinsic mode functions via VMD, GlucoNet’s 1D CNN isolates meal-induced spikes while its LSTM-Transformer sequence modeling captures basal glycemic trends, enhancing multi-horizon forecast accuracy and robustness against sensor noise.", "label": true }, { "paperid": "2409.13171v1", "paper_path": "./SciVer/papers/2409.13171v1.json", "claim_type": "analytical", "type": "chart", "item": "13", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.13171v1_figure_13.png", "request_id": 360, "origin_statement": "Across Gaussian filter sizes (σ=3–31), the latent diffusion model holds normalized covariance distance nearly constant (~0.27–0.38), reflecting preserved high-frequency textures, while synthetic low-resolution MAE rises sharply (0.009→0.03) due to blurring, compared to the model’s gentler MAE increase (0.011→0.021).", "perturbed_statement": "Across Gaussian filter sizes (σ=3–31), the latent diffusion model reduces normalized covariance distance dramatically (from ~0.27 to ~0.20), indicating improved texture fidelity at higher blurring, while synthetic low-resolution MAE rises sharply (0.009→0.03) due to blurring, compared to the model’s gentler MAE increase (0.011→0.021).", "perturbed_explanation": "The perturbation incorrectly states that the SR covariance distance decreases from ~0.27 to ~0.20. In Figure 13a, the SR curve actually increases from about 0.27 to 0.38 as σ grows, so there is no dramatic drop in covariance distance.", "claim": "Across Gaussian filter sizes (σ=3–31), the latent diffusion model holds normalized covariance distance nearly constant (~0.27–0.38), reflecting preserved high-frequency textures, while synthetic low-resolution MAE rises sharply (0.009→0.03) due to blurring, compared to the model’s gentler MAE increase (0.011→0.021).", "label": true }, { "paperid": "2411.12785v1", "paper_path": "./SciVer/papers/2411.12785v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "6.3" ], "image_path": "./SciVer/images/2411.12785v1-Table5-1.png", "request_id": 361, "origin_statement": "Integrating counterfactual debiasing and bias alignment yields a 5.83% absolute improvement in overall ABLE score, concurrently reducing demographic bias divergence (NDKL) by approximately 0.068 on UTKFace and compressing inter-group embedding overlaps, indicating more distinct, unbiased visual-language representations.", "perturbed_statement": "Combining the two debiasing losses produces a 4.5% absolute increase in ABLE, reduces NDKL by merely 0.017 on UTKFace, and eliminates inter-group embedding overlaps, achieving fully orthogonal demographic representations.", "perturbed_explanation": "The ABLE score actually rises from 63.31% to 69.14%, a 5.83% increase—not 4.5%. UTKFace NDKL drops from 0.137 to 0.069 (a reduction of 0.068), not 0.017. 
Furthermore, the ‘After’ scatter plots still show some male/female and age-group overlap, so embeddings are not fully orthogonal.", "claim": "Integrating counterfactual debiasing and bias alignment yields a 5.83% absolute improvement in overall ABLE score, concurrently reducing demographic bias divergence (NDKL) by approximately 0.068 on UTKFace and compressing inter-group embedding overlaps, indicating more distinct, unbiased visual-language representations.", "label": true }, { "paperid": "2409.14940v1", "paper_path": "./SciVer/papers/2409.14940v1.json", "claim_type": "analytical", "type": "chart", "item": "5(b)", "section": [ "4.5" ], "image_path": "./SciVer/images/2409.14940v1_figure_5(b).png", "request_id": 362, "origin_statement": "The global confidence threshold increases from approximately 53% to 74% across streaming indices, reflecting escalating model confidence through pseudo-label self-training and highlighting the need for adaptive regularization to prevent reinforcement of incorrect high-confidence predictions.", "perturbed_statement": "The global confidence threshold steadily decreases from approximately 53% to 48% as more samples are processed, indicating growing model uncertainty and implying that a fixed threshold suffices for self-training stability.", "perturbed_explanation": "Incorrect because Fig. 5 and the context state that the confidence threshold rises from about 53% to 75% over the data stream. Claiming it decreases to 48% directly contradicts the observed monotonic increase in the global confidence threshold.", "claim": "The global confidence threshold increases from approximately 53% to 74% across streaming indices, reflecting escalating model confidence through pseudo-label self-training and highlighting the need for adaptive regularization to prevent reinforcement of incorrect high-confidence predictions.", "label": true }, { "paperid": "2409.08361v1", "paper_path": "./SciVer/papers/2409.08361v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.2", "4.1", "4.2" ], "image_path": "./SciVer/images/2409.08361v1-Table1-1.png", "request_id": 363, "origin_statement": "The equivalent stellar CMF ranges from 0.21 (TOI-561) to 0.35 (Kepler-21 and Kepler-78), closely matching the Solar CMF of 0.30, indicating that stars’ Fe/Mg mass ratios in protoplanetary disks likely govern iron core sizes in rocky exoplanets.", "perturbed_statement": "The equivalent stellar CMF ranges from 0.25 (TOI-561) to 0.45 (Kepler-21), matching the Solar CMF of 0.25, suggesting that stellar Fe/Si ratios dictate terrestrial planet core sizes.", "perturbed_explanation": "This statement is incorrect because Table 1 lists TOI-561 and Kepler-21 equivalent CMFs as 0.21 and 0.35, not 0.25 and 0.45. The Solar CMF reference is 0.30, not 0.25. 
Additionally, the mechanism involves stellar Fe/Mg ratios rather than Fe/Si ratios.", "claim": "The equivalent stellar CMF ranges from 0.21 (TOI-561) to 0.35 (Kepler-21 and Kepler-78), closely matching the Solar CMF of 0.30, indicating that stars’ Fe/Mg mass ratios in protoplanetary disks likely govern iron core sizes in rocky exoplanets.", "label": true }, { "paperid": "2411.15060v1", "paper_path": "./SciVer/papers/2411.15060v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.15060v1_figure_6.png", "request_id": 364, "origin_statement": "Figure 6 shows that optimal NHP selections concentrate at the final layer (l≈1.0), reflecting deeper convolutional features’ superior discriminative power for detecting hallucinations, and that aggressive truncation thresholds (q≥0.75) consistently boost HRP by filtering out noise in validation samples.", "perturbed_statement": "Figure 6 shows that optimal NHP selections concentrate at the bottleneck layer (l≈0.5), reflecting mid-level semantic features’ superior discriminative power for detecting hallucinations, and that conservative truncation thresholds (q≤0.25) consistently boost HRP by preserving noise in validation samples.", "perturbed_explanation": "The perturbed statement misidentifies the optimal layer: the histogram for l is peaked near 1.0 (final layer), not 0.5. It also wrongly claims q≤0.25 is best, whereas Fig. 6 shows aggressive truncation (q≥0.75) yields higher HRP.", "claim": "Figure 6 shows that optimal NHP selections concentrate at the final layer (l≈1.0), reflecting deeper convolutional features’ superior discriminative power for detecting hallucinations, and that aggressive truncation thresholds (q≥0.75) consistently boost HRP by filtering out noise in validation samples.", "label": true }, { "paperid": "2410.09865v2", "paper_path": "./SciVer/papers/2410.09865v2.json", "claim_type": "analytical", "type": "chart", "item": "7(c)", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.09865v2_figure_7(c).png", "request_id": 366, "origin_statement": "Linear probe accuracy on AffectNet and RAF-DB improves from 67.0% to 69.8% and 91.2% to 92.7%, respectively, as synthetic data scales from 0.2M to 1.0M images, showing diminishing incremental gains consistent with power-law behavior in self-supervised learning.", "perturbed_statement": "Linear probe accuracy on AffectNet and RAF-DB improves from 67.0% to 69.8% and from 88.9% to 94.0%, respectively, as synthetic data scales from 0.2M to 1.0M images, demonstrating accelerating gains inconsistent with typical power-law behavior.", "perturbed_explanation": "The statement is incorrect because RAF-DB performance actually starts at 91.2%, not 88.9%, and peaks at 92.7%, not 94.0%. 
Furthermore, the observed gains diminish at larger data scales rather than accelerate, contradicting the claim of accelerating gains.", "claim": "Linear probe accuracy on AffectNet and RAF-DB improves from 67.0% to 69.8% and 91.2% to 92.7%, respectively, as synthetic data scales from 0.2M to 1.0M images, showing diminishing incremental gains consistent with power-law behavior in self-supervised learning.", "label": true }, { "paperid": "2409.12993v1", "paper_path": "./SciVer/papers/2409.12993v1.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.12993v1-Table6-1.png", "request_id": 367, "origin_statement": "Integrating CC data increases VerilogEval-Human accuracy from 37.7% to 62.0% (+24.3%), reflecting that structured non-textual representation training substantially improves module comprehension by encoding schematic principles inherent in hardware design.", "perturbed_statement": "Integrating CC data increases VerilogEval-Human accuracy from 37.7% to 62.0% (+15.7%), reflecting that structured non-textual representation training substantially improves module comprehension by encoding schematic principles inherent in hardware design.", "perturbed_explanation": "The perturbed statement incorrectly cites a +15.7% increase; according to Table 6, the absolute improvement from 37.7% to 62.0% is +24.3%, not +15.7%.", "claim": "Integrating CC data increases VerilogEval-Human accuracy from 37.7% to 62.0% (+24.3%), reflecting that structured non-textual representation training substantially improves module comprehension by encoding schematic principles inherent in hardware design.", "label": true }, { "paperid": "2411.09534v1", "paper_path": "./SciVer/papers/2411.09534v1.json", "claim_type": "analytical", "type": "chart", "item": "6(a)", "section": [ "5.5" ], "image_path": "./SciVer/images/2411.09534v1_figure_6(a).png", "request_id": 368, "origin_statement": "GAP9EVK’s multicore mode slashes the SeizureDetCNN processing-phase energy by 57%, from 17.5 mJ to 7.5 mJ, demonstrating that halving inference runtime through parallelization offsets its higher instantaneous power for net energy savings.", "perturbed_statement": "GAP9EVK’s multicore mode slashes the SeizureDetCNN processing-phase energy by 75%, from 20.5 mJ to 5.2 mJ, demonstrating that halving inference runtime through parallelization offsets its higher instantaneous power for net energy savings.", "perturbed_explanation": "The perturbed claim misreports the processing-phase energies and percentage reduction: Figure 6 shows GAP9EVK-S consumes 17.528 mJ and GAP9EVK-M 7.528 mJ on SeizureDetCNN, a 57% drop. 
The stated values (20.5 mJ, 5.2 mJ) and 75% reduction contradict the chart.", "claim": "GAP9EVK’s multicore mode slashes the SeizureDetCNN processing-phase energy by 57%, from 17.5 mJ to 7.5 mJ, demonstrating that halving inference runtime through parallelization offsets its higher instantaneous power for net energy savings.", "label": true }, { "paperid": "2411.09458v1", "paper_path": "./SciVer/papers/2411.09458v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.1.2" ], "image_path": "./SciVer/images/2411.09458v1-Table1-1.png", "request_id": 369, "origin_statement": "Approximating the full likelihood with a multivariate Gaussian reduces eight experimental nuisance parameters to four effective observations, which accelerates MCMC sampling while preserving tree-level, leading-power HQET accuracy in B(s)→D(s)(*) branching ratio analysis.", "perturbed_statement": "Approximating the full likelihood with a multivariate Gaussian reduces six experimental nuisance parameters to five effective observations, which accelerates MCMC sampling while preserving tree-level, leading-power HQET accuracy in B(s)→D(s)(*) branching ratio analysis.", "perturbed_explanation": "This statement is incorrect because the context specifies that the Gaussian approximation avoids eight experimental nuisance parameters and contributes four observations, not six nuisance parameters and five observations.", "claim": "Approximating the full likelihood with a multivariate Gaussian reduces eight experimental nuisance parameters to four effective observations, which accelerates MCMC sampling while preserving tree-level, leading-power HQET accuracy in B(s)→D(s)(*) branching ratio analysis.", "label": true }, { "paperid": "2411.15743v1", "paper_path": "./SciVer/papers/2411.15743v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.15743v1_figure_2.png", "request_id": 370, "origin_statement": "In zero-shot transfer, datasets sharing high spectral similarity (PCC>0.9) yield substantially lower scaled MSE, demonstrating that Fourier-based alignment significantly enhances generalization compared to same-domain training, highlighting the importance of frequency-domain features over sector homogeneity.", "perturbed_statement": "In zero-shot transfer, datasets sharing moderate spectral similarity (PCC 0.5–0.9) yield the lowest scaled MSE, even outperforming high similarity (PCC>0.9), indicating that mid-range frequency alignment is most effective.", "perturbed_explanation": "This is incorrect because the figure shows that datasets with high spectral similarity (PCC>0.9) produce the lowest scaled MSE (~0.05), whereas the PCC 0.5–0.9 group has a higher MSE (~0.28). 
Thus, moderate similarity does not yield the best performance.", "claim": "In zero-shot transfer, datasets sharing high spectral similarity (PCC>0.9) yield substantially lower scaled MSE, demonstrating that Fourier-based alignment significantly enhances generalization compared to same-domain training, highlighting the importance of frequency-domain features over sector homogeneity.", "label": true }, { "paperid": "2410.02099v1", "paper_path": "./SciVer/papers/2410.02099v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.5", "5.5.3" ], "image_path": "./SciVer/images/2410.02099v1-Table1-1.png", "request_id": 371, "origin_statement": "In the Flat watermark (k=1) scheme, pAUC increases from 68.8% to 90.0% as sample count n grows from 2 to 32, demonstrating that larger test sets stabilize the empirical z-score through CLT effects, substantially boosting low-FPR detection.", "perturbed_statement": "In the Flat watermark (k=1) scheme, pAUC increases from 58.8% to 90.0% as sample count n grows from 2 to 32, demonstrating that larger test sets stabilize the empirical z-score through CLT effects, substantially boosting low-FPR detection.", "perturbed_explanation": "Table 1 shows pAUC is 68.8% for n=2 in the Flat (k=1) scheme, not 58.8% as stated, so the perturbed claim misreports the initial pAUC value.", "claim": "In the Flat watermark (k=1) scheme, pAUC increases from 68.8% to 90.0% as sample count n grows from 2 to 32, demonstrating that larger test sets stabilize the empirical z-score through CLT effects, substantially boosting low-FPR detection.", "label": true }, { "paperid": "2409.02120v1", "paper_path": "./SciVer/papers/2409.02120v1.json", "claim_type": "analytical", "type": "chart", "item": "15", "section": [ "7.1" ], "image_path": "./SciVer/images/2409.02120v1_figure_15.png", "request_id": 372, "origin_statement": "On July 19, the downscaled dataset displays a larger IQR than UrbClim, reflecting microscale heating heterogeneity captured by 5 m impervious surface features under strong summer convection.", "perturbed_statement": "On July 19, the downscaled dataset displays a 2 K larger IQR than UrbClim, reflecting microscale heating heterogeneity captured by 50 m impervious surface features under strong summer convection.", "perturbed_explanation": "The downscaling resolution is 5 m, not 50 m, and the observed IQR difference on July 19 is under 0.5 K, contradicting the claim of a 2 K IQR increase.", "claim": "On July 19, the downscaled dataset displays a larger IQR than UrbClim, reflecting microscale heating heterogeneity captured by 5 m impervious surface features under strong summer convection.", "label": true }, { "paperid": "2409.02246v1", "paper_path": "./SciVer/papers/2409.02246v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.02246v1-Table1-1.png", "request_id": 373, "origin_statement": "Joint optimization reduces average response time by ~20% versus the heuristic under high call volume by jointly tuning dispatch and patrol policies, mitigating queue buildup and lowering overflows through improved resource allocation that aligns service rates with bursty arrival patterns.", "perturbed_statement": "Joint optimization reduces average response time by ~30% versus the heuristic under high call volume by jointly tuning dispatch and patrol policies, mitigating queue buildup and lowering overflows through improved resource allocation that aligns service rates with bursty arrival 
patterns.", "perturbed_explanation": "The claimed ~30% reduction is incorrect: Table 1 shows average response dropping from 10.0 to 8.09 under high call volume, which equals only a ~19% reduction relative to the heuristic, not 30%.", "claim": "Joint optimization reduces average response time by ~20% versus the heuristic under high call volume by jointly tuning dispatch and patrol policies, mitigating queue buildup and lowering overflows through improved resource allocation that aligns service rates with bursty arrival patterns.", "label": true }, { "paperid": "2409.11704v1", "paper_path": "./SciVer/papers/2409.11704v1.json", "claim_type": "analytical", "type": "chart", "item": "1(a)", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.11704v1_figure_1(a).png", "request_id": 374, "origin_statement": "By best-of-n sampling, reward models' formatting preferences are amplified through order-statistics: e.g., the bold-pattern ratio under the attacked model rises from 42.8% at n=1 to 51.9% at n=128, reflecting exponential amplification of minor reward biases when selecting maxima among candidates.", "perturbed_statement": "By best-of-n sampling, reward models' formatting preferences are amplified through order-statistics: e.g., the bold-pattern ratio under the attacked model rises from 44.2% at n=2 to 52.4% at n=128, reflecting exponential amplification of minor reward biases when selecting maxima among candidates.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 1 shows the bold-pattern ratio is about 45.0%—not 44.2%—at n=2, and 51.9%—not 52.4%—at n=128 for the attacked model, so both reported percentages contradict the actual data.", "claim": "By best-of-n sampling, reward models' formatting preferences are amplified through order-statistics: e.g., the bold-pattern ratio under the attacked model rises from 42.8% at n=1 to 51.9% at n=128, reflecting exponential amplification of minor reward biases when selecting maxima among candidates.", "label": true }, { "paperid": "2410.02346v2", "paper_path": "./SciVer/papers/2410.02346v2.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.02346v2_figure_4.png", "request_id": 376, "origin_statement": "Model M-1/6COM’s H2O see-saw arises from successive evaporation and re-freeze across four grain size bins at their sublimation temperatures, driving three-order-of-magnitude gas‐phase H2O fluctuations while total H2O ice remains stable until the hottest small grains reach 108 K, initiating permanent desorption.", "perturbed_statement": "Model M-1/6COM’s H2O see-saw arises from successive evaporation and re-freeze across four grain size bins at their sublimation temperatures, driving two-order-of-magnitude gas-phase H2O fluctuations while total H2O ice remains stable until the hottest small grains reach 120 K, initiating permanent desorption.", "perturbed_explanation": "The perturbed statement understates the fluctuation range and misnames the desorption threshold: the context reports a three-order-of-magnitude variation, not two, and H2O evaporation from the smallest grains begins at 108 K, not 120 K.", "claim": "Model M-1/6COM’s H2O see-saw arises from successive evaporation and re-freeze across four grain size bins at their sublimation temperatures, driving three-order-of-magnitude gas‐phase H2O fluctuations while total H2O ice remains stable until the hottest small grains reach 108 K, initiating permanent desorption.", "label": true }, { "paperid": 
"2411.02359v1", "paper_path": "./SciVer/papers/2411.02359v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.02359v1-Table5-1.png", "request_id": 377, "origin_statement": "On the Nvidia V100 GPU, DeeR’s adaptive early-exit mechanism reduces actual inference time by 68.1%, paralleling an 80.7% theoretical FLOPs reduction, demonstrating that dynamic layer skipping translates to nearly proportional real-world acceleration despite GPU memory and scheduling overheads.", "perturbed_statement": "On the Nvidia V100 GPU, DeeR’s adaptive early-exit mechanism reduces actual inference time by 75%, paralleling a 90% theoretical FLOPs reduction, demonstrating that dynamic layer skipping translates to nearly proportional real-world acceleration despite GPU memory and scheduling overheads.", "perturbed_explanation": "The perturbed statement is incorrect because Table 5 reports a real inference time reduction from 55 ms to 17.5 ms, which equals a 68.1% reduction—not 75%. Additionally, the theoretical FLOPs drop is from 31.2 GFLOPs to 6.0 GFLOPs, an 80.7% reduction—not 90%.", "claim": "On the Nvidia V100 GPU, DeeR’s adaptive early-exit mechanism reduces actual inference time by 68.1%, paralleling an 80.7% theoretical FLOPs reduction, demonstrating that dynamic layer skipping translates to nearly proportional real-world acceleration despite GPU memory and scheduling overheads.", "label": true }, { "paperid": "2410.22782v1", "paper_path": "./SciVer/papers/2410.22782v1.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.22782v1_figure_4(a).png", "request_id": 378, "origin_statement": "MALoRA's asymmetric low-rank projection stabilizes training, yielding 56.61% inter-domain performance at MoLoRA rank 16 (MALoRA rank 24), outperforming MoLoRA's 56.19% score while using 48% fewer trainable parameters. This suggests the shared subspace effectively regularizes expert-specific adaptations to reduce overfitting at higher ranks.", "perturbed_statement": "MALoRA's asymmetric low-rank projection stabilizes training, yielding 55.61% inter-domain performance at MoLoRA rank 16 (MALoRA rank 24), outperforming MoLoRA's 56.19% score while using 60% fewer trainable parameters. This suggests the shared subspace effectively regularizes expert-specific adaptations to reduce overfitting at higher ranks.", "perturbed_explanation": "The perturbed statement is incorrect because MALoRA actually achieves 56.61% performance (not 55.61%) at MoLoRA rank 16 (MALoRA rank 24), and it reduces trainable parameters by 48% rather than 60%, as shown in Figure 4(a) and the accompanying context.", "claim": "MALoRA's asymmetric low-rank projection stabilizes training, yielding 56.61% inter-domain performance at MoLoRA rank 16 (MALoRA rank 24), outperforming MoLoRA's 56.19% score while using 48% fewer trainable parameters. 
This suggests the shared subspace effectively regularizes expert-specific adaptations to reduce overfitting at higher ranks.", "label": true }, { "paperid": "2411.17820v1", "paper_path": "./SciVer/papers/2411.17820v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.2", "4.3" ], "image_path": "./SciVer/images/2411.17820v1-Table1-1.png", "request_id": 379, "origin_statement": "In the detour scenario (12% of samples), fine-tuning reduces CityWalker’s 5° Angular Orientation Error from 10.68° to 8.71°, illustrating limited gains due to sparse detour instances in the training set’s distribution.", "perturbed_statement": "In the detour scenario (6% of samples), fine-tuning reduces CityWalker’s 5° Angular Orientation Error from 10.68° to 7.83°, illustrating limited gains due to sparse detour instances in the training set’s distribution.", "perturbed_explanation": "The perturbed statement misstates two facts: Table 1 shows the detour scenario comprises 12% of samples (not 6%), and fine-tuning reduces the error to 8.71° (not 7.83°, which is actually ViNT’s best zero-shot error in detours).", "claim": "In the detour scenario (12% of samples), fine-tuning reduces CityWalker’s 5° Angular Orientation Error from 10.68° to 8.71°, illustrating limited gains due to sparse detour instances in the training set’s distribution.", "label": true }, { "paperid": "2411.06529v1", "paper_path": "./SciVer/papers/2411.06529v1.json", "claim_type": "analytical", "type": "chart", "item": "9", "section": [ "3.4" ], "image_path": "./SciVer/images/2411.06529v1_figure_9.png", "request_id": 380, "origin_statement": "In Figure 9, the DEQ-based TherINO method achieves sub-0.05 L2 strain error within 6 ms, outperforming Modified FNO-DEQ due to its latent-space encoding enhancing contractivity, which accelerates convergence compared to the non-iterative FNO models.", "perturbed_statement": "In Figure 9, the DEQ-based TherINO method achieves sub-0.01 L2 strain error within 6 s, outperforming Modified FNO-DEQ due to its latent-space encoding enhancing contractivity, which accelerates convergence compared to the non-iterative FNO models.", "perturbed_explanation": "This is incorrect because in Figure 9 TherINO only reaches around 0.03 L2 strain error at ~6 ms (never below 0.01), and the inference-time axis is in milliseconds (ms), not seconds (s).", "claim": "In Figure 9, the DEQ-based TherINO method achieves sub-0.05 L2 strain error within 6 ms, outperforming Modified FNO-DEQ due to its latent-space encoding enhancing contractivity, which accelerates convergence compared to the non-iterative FNO models.", "label": true }, { "paperid": "2410.09432v1", "paper_path": "./SciVer/papers/2410.09432v1.json", "claim_type": "analytical", "type": "table", "item": "8", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.09432v1-Table8-1.png", "request_id": 381, "origin_statement": "Exact-FedIT at rank 4 achieves a BLEU score of 68.49, nearly matching the centralized LoRA's 68.91, suggesting that exact weight aggregation in federated learning preserves high lexical fidelity and semantic coherence comparable to centralized fine-tuning.", "perturbed_statement": "Exact-FedIT at rank 4 achieves a BLEU of 69.49, matching the centralized LoRA's 68.91, indicating that exact weight aggregation in federated learning outperforms centralized fine-tuning in preserving semantic coherence.", "perturbed_explanation": "The BLEU score is incorrectly stated as 69.49; Table 8 reports it as 68.49. 
Moreover, Exact-FedIT does not outperform centralized LoRA—it slightly underperforms (68.49 vs 68.91)—so it cannot be said to surpass centralized fine-tuning.", "claim": "Exact-FedIT at rank 4 achieves a BLEU score of 68.49, nearly matching the centralized LoRA's 68.91, suggesting that exact weight aggregation in federated learning preserves high lexical fidelity and semantic coherence comparable to centralized fine-tuning.", "label": true }, { "paperid": "2410.03090v1", "paper_path": "./SciVer/papers/2410.03090v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.03090v1_figure_4.png", "request_id": 382, "origin_statement": "In Figure 4, truncation matrix entropy consistently peaks at intermediate layers (around 8–12) across all datasets, indicating that mid-level attention heads encode the most complex token interactions; thus, uncertainty-aware compression should preserve greater capacity in these heads to maintain semantic richness.", "perturbed_statement": "In Figure 4, truncation matrix entropy consistently peaks at final layers (around 26–30) across all datasets with values reaching ~0.7, implying late-stage attention heads hold the richest token representations and should receive the highest compression capacity.", "perturbed_explanation": "This is wrong because Figure 4’s heatmaps clearly show the highest truncation matrix entropy around layers 8–12, not 26–30, and the colorbar scale tops at approximately 0.65, not 0.7.", "claim": "In Figure 4, truncation matrix entropy consistently peaks at intermediate layers (around 8–12) across all datasets, indicating that mid-level attention heads encode the most complex token interactions; thus, uncertainty-aware compression should preserve greater capacity in these heads to maintain semantic richness.", "label": true }, { "paperid": "2410.14731v1", "paper_path": "./SciVer/papers/2410.14731v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.2", "5.1" ], "image_path": "./SciVer/images/2410.14731v1-Table1-1.png", "request_id": 383, "origin_statement": "At a 50% cache budget, MKV sustains 59.11% average accuracy on LLaMA2-7B, outperforming PCA’s 43.07%, illustrating that hierarchical orthogonal projection subspaces better preserve non-linear attention interactions under compression.", "perturbed_statement": "At a 50% cache budget, MKV sustains 59.11% average accuracy on LLaMA2-7B, outperforming PCA’s 53.07%, illustrating that hierarchical orthogonal projection subspaces better preserve linear attention interactions under compression.", "perturbed_explanation": "The perturbation is incorrect because the table shows PCA’s average accuracy at 50% budget is 43.07%, not 53.07%. 
Additionally, attention mechanisms rely on non-linear softmax operations, so claiming preservation of “linear attention interactions” misrepresents the non-linear nature of attention.", "claim": "At a 50% cache budget, MKV sustains 59.11% average accuracy on LLaMA2-7B, outperforming PCA’s 43.07%, illustrating that hierarchical orthogonal projection subspaces better preserve non-linear attention interactions under compression.", "label": true }, { "paperid": "2410.20807v2", "paper_path": "./SciVer/papers/2410.20807v2.json", "claim_type": "analytical", "type": "chart", "item": "1(a)", "section": [ "1" ], "image_path": "./SciVer/images/2410.20807v2_figure_1(a).png", "request_id": 384, "origin_statement": "In Figure 1(a), the Texture OOD distribution uniquely peaks in the tail region around classes 40–50 of CIFAR100-LT, implying that low-level texture patterns resemble underrepresented ID classes, thereby producing higher energy scores than other OOD datasets.", "perturbed_statement": "In Figure 1(a), the SVHN OOD distribution uniquely peaks in the tail region around classes 60–70 of CIFAR100-LT, implying that handwritten digit features resemble underrepresented ID classes, thereby producing higher energy scores than other OOD datasets.", "perturbed_explanation": "This statement is incorrect: Figure 1(a) shows the SVHN energy curve first peaking near class indices 10–20, not at 60–70 in the tail region.", "claim": "In Figure 1(a), the Texture OOD distribution uniquely peaks in the tail region around classes 40–50 of CIFAR100-LT, implying that low-level texture patterns resemble underrepresented ID classes, thereby producing higher energy scores than other OOD datasets.", "label": true }, { "paperid": "2410.21205v1", "paper_path": "./SciVer/papers/2410.21205v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.21205v1-Table2-1.png", "request_id": 385, "origin_statement": "The introduction of intermediate F in iteration 3 causes the AIC to plummet by 1,428.58 points versus iteration 2, demonstrating that the added intermediate enhances model fit so significantly that the complexity penalty in AIC is overwhelmed.", "perturbed_statement": "The AIC difference between iterations 2 and 3 is only 10.5 units — below the standard 12-unit threshold for strong evidence — indicating that adding intermediate F yielded negligible improvement in model parsimony.", "perturbed_explanation": "The actual AIC reduction is 1,428.58 units (848.18 − (−580.40)), not 10.5 units, and the well‐accepted cutoff for strong AIC improvement is about 10 units rather than 12. 
Thus both the magnitude of the change and the significance threshold are misreported.", "claim": "The introduction of intermediate F in iteration 3 causes the AIC to plummet by 1,428.58 points versus iteration 2, demonstrating that the added intermediate enhances model fit so significantly that the complexity penalty in AIC is overwhelmed.", "label": true }, { "paperid": "2410.13371v2", "paper_path": "./SciVer/papers/2410.13371v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.13371v2-Table2-1.png", "request_id": 387, "origin_statement": "The proposed calibration approach cuts the mean reprojection error by 39%, reduces KL divergence marginally, and accelerates optimization over fivefold to under half a second, combining intensity-difference cost functions with a 10-pixel window to improve both precision and convergence efficiency.", "perturbed_statement": "The proposed calibration approach cuts the mean reprojection error by 60%, reduces KL divergence to 0.35, and accelerates optimization tenfold to just 0.14 seconds, combining absolute-intensity cost functions with a 20-pixel window to improve both precision and convergence efficiency.", "perturbed_explanation": "This statement is wrong because the actual MRE drops from 0.0532 to 0.0326 (≈39%), not 60%; KL divergence only decreases from 0.50 to 0.49, not to 0.35; the average time is 0.41 seconds, not 0.14; and the method uses a 10-pixel window and intensity-difference cost, not a 20-pixel window or absolute-intensity.", "claim": "The proposed calibration approach cuts the mean reprojection error by 39%, reduces KL divergence marginally, and accelerates optimization over fivefold to under half a second, combining intensity-difference cost functions with a 10-pixel window to improve both precision and convergence efficiency.", "label": true }, { "paperid": "2410.06313v1", "paper_path": "./SciVer/papers/2410.06313v1.json", "claim_type": "analytical", "type": "chart", "item": "6(a)", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.06313v1_figure_6(a).png", "request_id": 388, "origin_statement": "Health economics ’novelty’ scores exhibit two significant peaks around 2008 and 2021, reflecting successive methodological diffusion waves: initially propelled by high-impact fetal origins research (2007–08), and later by innovative empirical methods (2014–16), consistent with citation-driven quality metrics where impact surges precede novelty gains.", "perturbed_statement": "Health economics ’novelty’ scores exhibit two significant peaks around 2010 and 2022, reflecting successive methodological diffusion waves: initially propelled by high-impact health care financing studies (2007–08), and later by innovative empirical methods (2014–16), consistent with citation-driven quality metrics where impact surges precede novelty gains.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows peaks in health economics novelty around 2008 and 2021, not 2010 and 2022. 
It also misattributes the initial methodological wave to health care financing research, whereas the context identifies fetal origins studies as driving the 2007–08 impact surge.", "claim": "Health economics ’novelty’ scores exhibit two significant peaks around 2008 and 2021, reflecting successive methodological diffusion waves: initially propelled by high-impact fetal origins research (2007–08), and later by innovative empirical methods (2014–16), consistent with citation-driven quality metrics where impact surges precede novelty gains.", "label": true }, { "paperid": "2409.19974v2", "paper_path": "./SciVer/papers/2409.19974v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.3", "4.3.1", "4.3.3" ], "image_path": "./SciVer/images/2409.19974v2-Table2-1.png", "request_id": 389, "origin_statement": "The Newtonian network exhibits a viscosity ratio of 100:1 with the solvent, causing steep shear layers at the network-solvent interface under four-roll mill forcing. These high shear gradients, predicted by Stokes flow scaling, trigger AMR refinement when |∇φ| > 0.75, concentrating resolution along evolving blob boundaries.", "perturbed_statement": "The Newtonian network exhibits a viscosity ratio of 10:1 with the solvent, causing steep shear layers at the network-solvent interface under four-roll mill forcing. These high shear gradients trigger AMR refinement when |∇φ| > 1.0, concentrating resolution along evolving blob boundaries.", "perturbed_explanation": "The viscosity ratio of 10:1 is incorrect: Table 2a shows µ_n^v=4.0 and µ_s=0.04, giving a 100:1 ratio. Likewise, the AMR refinement criterion is |∇φ| > 0.75, not > 1.0, so both stated values contradict the documented parameters.", "claim": "The Newtonian network exhibits a viscosity ratio of 100:1 with the solvent, causing steep shear layers at the network-solvent interface under four-roll mill forcing. 
These high shear gradients, predicted by Stokes flow scaling, trigger AMR refinement when |∇φ| > 0.75, concentrating resolution along evolving blob boundaries.", "label": true }, { "paperid": "2411.00154v1", "paper_path": "./SciVer/papers/2411.00154v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.4.2" ], "image_path": "./SciVer/images/2411.00154v1-Table4-1.png", "request_id": 391, "origin_statement": "Chain-of-thought fine-tuned Phi-2 achieves a dataset-level MIA AUROC of 0.993 ± 0.012 on collections of 20 questions, versus a sentence-level AUROC of 0.793 ± 0.024, demonstrating that aggregating inference signals across small batches substantially amplifies membership detection accuracy.", "perturbed_statement": "Chain-of-thought fine-tuned Phi-2 achieves a dataset-level MIA AUROC of 0.893 ± 0.012 on collections of 20 questions, versus a sentence-level AUROC of 0.793 ± 0.024, demonstrating that aggregating inference signals across small batches substantially amplifies membership detection accuracy.", "perturbed_explanation": "This statement is incorrect because Table 4 reports a dataset-level AUROC of 0.993 ± 0.012 for collections of 20 questions, not 0.893 ± 0.012.", "claim": "Chain-of-thought fine-tuned Phi-2 achieves a dataset-level MIA AUROC of 0.993 ± 0.012 on collections of 20 questions, versus a sentence-level AUROC of 0.793 ± 0.024, demonstrating that aggregating inference signals across small batches substantially amplifies membership detection accuracy.", "label": true }, { "paperid": "2409.06224v1", "paper_path": "./SciVer/papers/2409.06224v1.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "3" ], "image_path": "./SciVer/images/2409.06224v1_figure_4(a).png", "request_id": 392, "origin_statement": "Human agreement falls to 48% on image 2 and 54% on image 13, each below the 80% average. This indicates when visual contextual cues for identifying the Most Important Person are subtle, participants face greater cognitive uncertainty, lowering consensus in MIP selection.", "perturbed_statement": "Human agreement dips to 54% for image 2 and 58% for image 13, both below the 75% mean. This implies that reduced color contrast in these images elevates cognitive load, thereby lowering consensus in MIP detection among participants.", "perturbed_explanation": "The perturbed statement is wrong because Figure 4 shows a mean accuracy of 80%, not 75%. Image 2’s accuracy is 48% (not 54%) and image 13’s is 54% (not 58%). The study does not attribute lower agreement to color contrast or specifically link it to cognitive load.", "claim": "Human agreement falls to 48% on image 2 and 54% on image 13, each below the 80% average. 
This indicates when visual contextual cues for identifying the Most Important Person are subtle, participants face greater cognitive uncertainty, lowering consensus in MIP selection.", "label": true }, { "paperid": "2409.03054v1", "paper_path": "./SciVer/papers/2409.03054v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.03054v1-Table1-1.png", "request_id": 393, "origin_statement": "Webpage context cues like the 'hidden' CSS attribute and thumbnail dimensions (<100px) systematically influence alt-text richness: decorative images flagged hidden and rendered under 100px typically omit detailed descriptions, whereas prominently positioned full-width images adjacent to main article headings necessitate comprehensive descriptions under accessibility guidelines.", "perturbed_statement": "Webpage context cues like the 'hidden' CSS attribute and thumbnail dimensions (<100px) systematically influence alt-text richness: decorative images flagged hidden and rendered under 100px are required to include comprehensive descriptions, whereas prominently positioned full-width images adjacent to main article headings may omit alt text under accessibility guidelines.", "perturbed_explanation": "The perturbation is incorrect because decorative images with the 'hidden' CSS attribute and small thumbnail size should omit detailed alt-text (often receiving empty alt attributes) under WCAG guidance, not require comprehensive descriptions. Likewise, main article images should always have meaningful alt text, not be allowed to omit it.", "claim": "Webpage context cues like the 'hidden' CSS attribute and thumbnail dimensions (<100px) systematically influence alt-text richness: decorative images flagged hidden and rendered under 100px typically omit detailed descriptions, whereas prominently positioned full-width images adjacent to main article headings necessitate comprehensive descriptions under accessibility guidelines.", "label": true }, { "paperid": "2409.10995v1", "paper_path": "./SciVer/papers/2409.10995v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.10995v1-Table5-1.png", "request_id": 397, "origin_statement": "Incorporating musically-motivated random annotations reduces overfitting by diversifying performance: the Proposed-trained model achieves a 2.20 dB SDR gain on Original-annotated evaluation relative to the Plain model (5.22 vs 3.02), reflecting improved generalization to unseen articulation patterns.", "perturbed_statement": "Incorporating musically-motivated random annotations reduces overfitting by diversifying performance: the Proposed-trained model achieves a 2.20 dB SDR gain on URMP evaluation compared to the Plain model (2.20 vs 1.55), reflecting robust generalization to real recording conditions.", "perturbed_explanation": "The perturbation incorrectly reports the Proposed model’s URMP SDR as 2.20 dB, but Table 5 shows it actually achieves only 1.92 dB on URMP. 
Thus the claimed URMP gain and generalization strength are overstated and contradict the context.", "claim": "Incorporating musically-motivated random annotations reduces overfitting by diversifying performance: the Proposed-trained model achieves a 2.20 dB SDR gain on Original-annotated evaluation relative to the Plain model (5.22 vs 3.02), reflecting improved generalization to unseen articulation patterns.", "label": true }, { "paperid": "2411.00566v1", "paper_path": "./SciVer/papers/2411.00566v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.1.3" ], "image_path": "./SciVer/images/2411.00566v1-Table3-1.png", "request_id": 399, "origin_statement": "The number of local searches required by PatternBoost scales super-linearly with n, reflecting combinatorial explosion; as n increases from 20 to 30, search effort increases from 0.4 to 78.3 million, indicating that doubling vertices roughly squares search efforts due to exponential configuration space.", "perturbed_statement": "The number of local searches required by PatternBoost scales super-linearly with n, reflecting combinatorial explosion; as n increases from 20 to 30, search effort increases from 0.4 to 23.8 million, indicating that doubling vertices roughly squares search efforts due to exponential configuration space.", "perturbed_explanation": "This statement is incorrect because Table 3 shows 78.3 million searches for n=30, not 23.8 million (23.8 corresponds to n=28). The misreported search count contradicts the context’s data.", "claim": "The number of local searches required by PatternBoost scales super-linearly with n, reflecting combinatorial explosion; as n increases from 20 to 30, search effort increases from 0.4 to 78.3 million, indicating that doubling vertices roughly squares search efforts due to exponential configuration space.", "label": true }, { "paperid": "2410.02220v2", "paper_path": "./SciVer/papers/2410.02220v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "4.1", "4.2", "6.1" ], "image_path": "./SciVer/images/2410.02220v2_figure_2.png", "request_id": 400, "origin_statement": "Robust LLMs exhibit systematically lower perplexity on safe security-relevant responses than harmful ones, reflecting fine-tuning on normative content, while jailbroken variants trained on toxic prompts invert this pattern, showing reduced surprisal for harmful sequences, indicating adversarial fine-tuning reshapes their internal probability distributions.", "perturbed_statement": "Robust LLMs exhibit systematically higher perplexity on safe security-relevant responses than harmful ones, reflecting fine-tuning on normative content, while jailbroken variants trained on toxic prompts invert this pattern, showing reduced surprisal for harmful sequences, indicating adversarial fine-tuning reshapes their internal probability distributions.", "perturbed_explanation": "The perturbation flips the robust LLM trend: it incorrectly states that robust models have higher perplexity on safe texts. 
Figure 2(a)(c) actually shows robust LLMs have lower perplexity (greater confidence) on safe security-relevant responses compared to harmful ones.", "claim": "Robust LLMs exhibit systematically lower perplexity on safe security-relevant responses than harmful ones, reflecting fine-tuning on normative content, while jailbroken variants trained on toxic prompts invert this pattern, showing reduced surprisal for harmful sequences, indicating adversarial fine-tuning reshapes their internal probability distributions.", "label": true }, { "paperid": "2411.01423v1", "paper_path": "./SciVer/papers/2411.01423v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.2.1" ], "image_path": "./SciVer/images/2411.01423v1-Table2-1.png", "request_id": 401, "origin_statement": "Despite training on only 18,706 substructures with a maximum QED of 0.53, CLaSMO’s substructure-focused CVAE achieves a top QED of 0.948 by effectively recombining high-scoring chemical scaffolds, demonstrating that emphasizing localized structural motifs can yield state-of-the-art drug-likeness without massive molecular datasets.", "perturbed_statement": "Despite training on only 1,870 substructures with a maximum QED of 0.73, CLaSMO’s substructure-focused CVAE achieves a top QED of 0.948 by effectively recombining high-scoring chemical scaffolds, demonstrating that emphasizing localized structural motifs can yield state-of-the-art drug-likeness without massive molecular datasets.", "perturbed_explanation": "The perturbed statement is incorrect because CLaSMO’s CVAE is trained on 18,706 substructures (not 1,870), and the maximum QED of those substructures is 0.53 (not 0.73), as reported in Table 2 and the accompanying text.", "claim": "Despite training on only 18,706 substructures with a maximum QED of 0.53, CLaSMO’s substructure-focused CVAE achieves a top QED of 0.948 by effectively recombining high-scoring chemical scaffolds, demonstrating that emphasizing localized structural motifs can yield state-of-the-art drug-likeness without massive molecular datasets.", "label": true }, { "paperid": "2411.13151v1", "paper_path": "./SciVer/papers/2411.13151v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.4" ], "image_path": "./SciVer/images/2411.13151v1-Table4-1.png", "request_id": 403, "origin_statement": "As the truck travel cost is triple that of drones per minute, FL effectively filters fragments with minimal truck legs, and CERE consistently absorbs over 88% of nodes across instances up to 85 requests, yielding solve times under 400 seconds with optimality gaps below 0.5%.", "perturbed_statement": "With the truck travel cost only double the drone's per minute, FL filters long truck legs, and CERE absorbs over 90% of nodes, yielding solve times under 250 seconds for instances up to 85 requests.", "perturbed_explanation": "The perturbation incorrectly states a 2× cost ratio instead of the actual 3× ratio. 
It also falsely claims CERE absorbs over 90% of nodes for n≤85 (actual removal is 88% at n=85) and misreports solve times under 250s (the solve time for 85 requests is 391s).", "claim": "As the truck travel cost is triple that of drones per minute, FL effectively filters fragments with minimal truck legs, and CERE consistently absorbs over 88% of nodes across instances up to 85 requests, yielding solve times under 400 seconds with optimality gaps below 0.5%.", "label": true }, { "paperid": "2411.00387v1", "paper_path": "./SciVer/papers/2411.00387v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.00387v1-Table4-1.png", "request_id": 405, "origin_statement": "Despite GPT-4o attaining 58.6% accuracy on matrices, discipline-specific constants remain challenging (<56.2%), implying that encoding disciplinary jargon across full manuscripts demands deeper semantic integration than structural symbol recognition, which benefits more directly from longer context windows.", "perturbed_statement": "Despite GPT-4o attaining 65.2% accuracy on matrices, discipline-specific constants remain challenging (<56.2%), implying that encoding disciplinary jargon across full manuscripts demands deeper semantic integration than structural symbol recognition, which benefits more directly from longer context windows.", "perturbed_explanation": "The perturbed statement is incorrect because Table 4 shows GPT-4o achieves 58.6% accuracy for matrices, not 65.2%.", "claim": "Despite GPT-4o attaining 58.6% accuracy on matrices, discipline-specific constants remain challenging (<56.2%), implying that encoding disciplinary jargon across full manuscripts demands deeper semantic integration than structural symbol recognition, which benefits more directly from longer context windows.", "label": true }, { "paperid": "2409.08388v1", "paper_path": "./SciVer/papers/2409.08388v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.08388v1-Table2-1.png", "request_id": 407, "origin_statement": "On ShapeNet, our fusion of input, local, and global point features halves exemplar memory usage (530 vs. 1000) and reduces catastrophic forgetting from 9.6% to 7.6%, illustrating how multi-scale spectral embeddings prevent representation drift across nine incremental stages.", "perturbed_statement": "On ShapeNet, our fusion of input, local, and global point features reduces exemplar memory to only 260 points (vs. 1000) and lowers catastrophic forgetting from 9.6% to 5.6%, suggesting multi-scale embeddings halved memory and sharply curtail representation drift over nine stages.", "perturbed_explanation": "This is incorrect because Table 2 reports exemplar memory usage of 530 points for the fusion method (not 260) and a forgetting rate reduction from 9.6% to 7.6% (not 5.6%).", "claim": "On ShapeNet, our fusion of input, local, and global point features halves exemplar memory usage (530 vs. 
1000) and reduces catastrophic forgetting from 9.6% to 7.6%, illustrating how multi-scale spectral embeddings prevent representation drift across nine incremental stages.", "label": true }, { "paperid": "2409.06123v1", "paper_path": "./SciVer/papers/2409.06123v1.json", "claim_type": "analytical", "type": "chart", "item": "9", "section": [ "6.1.2" ], "image_path": "./SciVer/images/2409.06123v1_figure_9.png", "request_id": 412, "origin_statement": "With 25% fewer clients, the CFL method maintains delta accuracies below 0.04 across all tasks and rounds, outperforming SubTab FL by up to 0.14, likely due to its consensus-driven parameter aggregation that mitigates the increased heterogeneity from data-size imbalance.", "perturbed_statement": "In the imbalanced setting, the LL approach yields delta accuracies below 0.02 on the MNIST task across all four evaluation rounds, indicating near-perfect alignment with the global model due to its local fine-tuning mechanism.", "perturbed_explanation": "This claim is incorrect because Figure 9 shows LL’s delta accuracies for MNIST are approximately 0.08, 0.075, 0.08, and 0.078 across rounds 0–3, well above 0.02, contradicting the stated threshold.", "claim": "In the imbalanced setting, the LL approach yields delta accuracies below 0.02 on the MNIST task across all four evaluation rounds, indicating near-perfect alignment with the global model due to its local fine-tuning mechanism.", "label": false }, { "paperid": "2411.00997v1", "paper_path": "./SciVer/papers/2411.00997v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.00997v1-Table1-1.png", "request_id": 413, "origin_statement": "OpenCLIP’s embedding co-locates Middle Eastern images with a high similarity to “terrorist” (1.05), likely reflecting an overrepresentation of conflict-themed training data in LAION, which illustrates how skewed visual statistics can encode harmful stereotypes in vision-language models.", "perturbed_statement": "OpenCLIP’s embedding co-locates Latino/Hispanic images with a high similarity to “terrorist” (1.05), suggesting conflict imagery overexposure drives its learned biases.", "perturbed_explanation": "This is incorrect because Table 1 shows that Middle Eastern images, not Latino/Hispanic, have the highest similarity (1.05) to “terrorist.” Latino/Hispanic’s highest crime-related term is “gang-related” with similarity 0.20, and it has no association with “terrorist.”", "claim": "OpenCLIP’s embedding co-locates Latino/Hispanic images with a high similarity to “terrorist” (1.05), suggesting conflict imagery overexposure drives its learned biases.", "label": false }, { "paperid": "2409.15568v1", "paper_path": "./SciVer/papers/2409.15568v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.15568v1-Table1-1.png", "request_id": 415, "origin_statement": "In the warm-start scenario, Sport and Cloth both have 9,928 train users, yet Sport’s item catalog (30,796) is 21% smaller than Cloth’s (39,008), indicating Sport users focus interactions on fewer items, which likely increases item-level rating density and boosts collaborative-filtering accuracy.", "perturbed_statement": "In the warm-start scenario, Sport’s item space (30,796) is 31% smaller than Cloth’s (39,008), implying that Sport users have a lower rating density per item, which likely degrades neighborhood-based recommendations due to sparse interactions.", "perturbed_explanation": 
"The perturbation is incorrect because the item space difference between Sport (30,796) and Cloth (39,008) is about 21%, not 31%, and fewer items actually increase rating density per item, which typically improves rather than degrades collaborative-filtering performance.", "claim": "In the warm-start scenario, Sport’s item space (30,796) is 31% smaller than Cloth’s (39,008), implying that Sport users have a lower rating density per item, which likely degrades neighborhood-based recommendations due to sparse interactions.", "label": false }, { "paperid": "2409.17476v1", "paper_path": "./SciVer/papers/2409.17476v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.2.2" ], "image_path": "./SciVer/images/2409.17476v1-Table3-1.png", "request_id": 419, "origin_statement": "Across the three datasets, VAT's user-adaptive perturbations consistently improve HR@20 and NDCG@20 by 2–6%, suggesting that fine-tuned adversarial magnitudes smooth the recommendation loss landscape and enhance model generalization against both random and targeted attacks.", "perturbed_statement": "VAT’s application of global fixed-magnitude perturbations yields over a 15% absolute increase in HR@20 on the Gowalla dataset under clean and attack scenarios, indicating that uniform adversarial noise regularizes the recommendation model by amplifying gradient variance.", "perturbed_explanation": "This statement is incorrect because VAT employs user-adaptive, not global fixed-magnitude perturbations, and the observed absolute HR@20 improvement on Gowalla is only 1.68%, not over 15% as claimed.", "claim": "VAT’s application of global fixed-magnitude perturbations yields over a 15% absolute increase in HR@20 on the Gowalla dataset under clean and attack scenarios, indicating that uniform adversarial noise regularizes the recommendation model by amplifying gradient variance.", "label": false }, { "paperid": "2409.13828v1", "paper_path": "./SciVer/papers/2409.13828v1.json", "claim_type": "analytical", "type": "chart", "item": "11(a)", "section": [ "5.7" ], "image_path": "./SciVer/images/2409.13828v1_figure_11(a).png", "request_id": 420, "origin_statement": "For CIFAR-100, raising β_attn in ViTGuard-I from 10⁻³ to 1 sharply reduces adaptive attack fooling rates from nearly 98% to 14%, illustrating that stronger attention-distance regularization in the MAE-based reconstruction increasingly constrains adversarial perturbations, though gains plateau beyond β_attn≈1 due to masking-induced randomness.", "perturbed_statement": "For CIFAR-100, raising β_attn in ViTGuard-I from 10⁻³ to 1 sharply reduces adaptive attack fooling rates from nearly 98% to 28%, illustrating that stronger attention-distance regularization in the MAE-based reconstruction increasingly constrains adversarial perturbations, though gains plateau beyond β_attn≈1 due to masking-induced randomness.", "perturbed_explanation": "The perturbation is incorrect because, as shown in Figure 11 and described in the context, at β_attn=1 the fooling rate with ViTGuard-I under FPR=0.01 is approximately 14%, not 28%. 
The stated 28% contradicts the actual fooling rate data.", "claim": "For CIFAR-100, raising β_attn in ViTGuard-I from 10⁻³ to 1 sharply reduces adaptive attack fooling rates from nearly 98% to 28%, illustrating that stronger attention-distance regularization in the MAE-based reconstruction increasingly constrains adversarial perturbations, though gains plateau beyond β_attn≈1 due to masking-induced randomness.", "label": false }, { "paperid": "2411.17674v1", "paper_path": "./SciVer/papers/2411.17674v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.17674v1-Table1-1.png", "request_id": 423, "origin_statement": "In the 6-way IEMOCAP setting, applying multi-tasking loss to CORECT yields a 0.25% accuracy and 0.26% F1 improvement by encouraging cross-modal alignment and shared representation learning, particularly enhancing detection of emotions with distinct dimensional patterns like ‘excited’ and ‘sad.’", "perturbed_statement": "Applying multi-tasking loss to CORECT yields a 2% accuracy and 2.6% F1 improvement by encouraging cross-modal alignment and shared representation learning, particularly enhancing detection of emotions with distinct dimensional patterns like ‘neutral’ and ‘angry.’", "perturbed_explanation": "The perturbed statement misstates the performance gains—Table 1 reports only a 0.25% accuracy and 0.26% F1 improvement for CORECT (multi-task), not 2% and 2.6%. It also incorrectly identifies ‘neutral’ and ‘angry’ as the most improved classes, whereas the actual biggest gains were for ‘excited’ (+6.42%) and ‘sad’ (+5.58%).", "claim": "Applying multi-tasking loss to CORECT yields a 2% accuracy and 2.6% F1 improvement by encouraging cross-modal alignment and shared representation learning, particularly enhancing detection of emotions with distinct dimensional patterns like ‘neutral’ and ‘angry.’", "label": false }, { "paperid": "2410.01157v1", "paper_path": "./SciVer/papers/2410.01157v1.json", "claim_type": "analytical", "type": "chart", "item": "2(b)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.01157v1_figure_2(b).png", "request_id": 424, "origin_statement": "With increasing sample-to-audience ratio beyond 2, Company B’s precision declines from 72% to 34% due to over-sampling negative class, while recall remains around 85%, illustrating that inverse-frequency weighting sustains sensitivity but at the cost of greater false positives.", "perturbed_statement": "With increasing sample-to-audience ratio beyond 2, Company B’s precision declines from 78% to 42% due to over-sampling negative class, while recall remains around 92%, illustrating that inverse-frequency weighting sustains sensitivity but at the cost of greater false positives.", "perturbed_explanation": "The perturbed statement misreports precision values and recall. 
The figure shows precision drops from 72% at ratio 2 to 34% at ratio 10 (not 78% to 42%), and recall stays near 85% (not 92%), so the cited percentages contradict the chart.", "claim": "With increasing sample-to-audience ratio beyond 2, Company B’s precision declines from 78% to 42% due to over-sampling negative class, while recall remains around 92%, illustrating that inverse-frequency weighting sustains sensitivity but at the cost of greater false positives.", "label": false }, { "paperid": "2409.10756v1", "paper_path": "./SciVer/papers/2409.10756v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.10756v1_figure_3.png", "request_id": 426, "origin_statement": "Codellama-7b zero-shot patches reduce cyclomatic complexity from 13.8 to 6.05, over a 56% drop in decision paths. By eliminating nested conditionals, this streamlining enhances readability but risks removing critical input validation checks essential for robust vulnerability mitigation.", "perturbed_statement": "Codellama-7b few-shot patches reduce cyclomatic complexity from 13.8 to 8.5, a 38% drop in decision paths. By trimming nested conditionals, this simplification preserves all security checks while improving code clarity.", "perturbed_explanation": "Figure 3 shows that Codellama-7b few-shot complexity is 5.7, not 8.5, yielding a ∼59% reduction rather than 38%. Thus both the reported post-patch complexity and reduction percentage are incorrect and contradict the context data.", "claim": "Codellama-7b few-shot patches reduce cyclomatic complexity from 13.8 to 8.5, a 38% drop in decision paths. By trimming nested conditionals, this simplification preserves all security checks while improving code clarity.", "label": false }, { "paperid": "2409.12680v2", "paper_path": "./SciVer/papers/2409.12680v2.json", "claim_type": "analytical", "type": "chart", "item": "5(a)", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.12680v2_figure_5(a).png", "request_id": 428, "origin_statement": "In the T-SNE projection of 256 samples per class, STPG produces tighter intra-class clusters and wider inter-class gaps than the supervised baseline, reflecting its capacity to align semantic features and sharpen decision boundaries, which correlates with its measured improvements in mIoU on Cityscapes.", "perturbed_statement": "In the T-SNE projection of 512 samples per class, STPG yields tighter clusters with 40% reduced intra-class variance but paradoxically decreases inter-class separation, suggesting a trade-off where compactness comes at the cost of class distinctiveness.", "perturbed_explanation": "The statement is incorrect because the visualization sampled 256 points per class, not 512, and STPG actually increases inter-class separation rather than decreasing it, as shown by the clearer cluster boundaries compared to the baseline.", "claim": "In the T-SNE projection of 512 samples per class, STPG yields tighter clusters with 40% reduced intra-class variance but paradoxically decreases inter-class separation, suggesting a trade-off where compactness comes at the cost of class distinctiveness.", "label": false }, { "paperid": "2411.00653v1", "paper_path": "./SciVer/papers/2411.00653v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "1", "5", "5.2" ], "image_path": "./SciVer/images/2411.00653v1-Table2-1.png", "request_id": 429, "origin_statement": "NCI uses weak relation coherence to evaluate node embeddings, resulting in a Mean Reciprocal 
Rank (MRR) of 0.67 on the Brazil dataset—outperforming Kendall’s τ (0.61) and classification-based methods (0.45), which demonstrates its robustness to lower embedding expressiveness.", "perturbed_statement": "By leveraging weak relation coherence, classification-based methods attain an MRR of 0.67 on the Brazil dataset—surpassing NCI (0.45) and Kendall’s τ (0.61), indicating classification approaches are more robust to embedding expressiveness variation.", "perturbed_explanation": "The perturbation is incorrect because, according to Table 2, classification-based methods (Class) achieve an MRR of 0.45, not 0.67, on the Brazil dataset, while NCI actually scores 0.67. The swapped MRR values contradict the table data.", "claim": "By leveraging weak relation coherence, classification-based methods attain an MRR of 0.67 on the Brazil dataset—surpassing NCI (0.45) and Kendall’s τ (0.61), indicating classification approaches are more robust to embedding expressiveness variation.", "label": false }, { "paperid": "2410.06645v4", "paper_path": "./SciVer/papers/2410.06645v4.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.5" ], "image_path": "./SciVer/images/2410.06645v4_figure_5.png", "request_id": 432, "origin_statement": "The normalized activation counts for S-CIFAR-10 show semantically related classes (e.g., cat–dog and auto–truck) consistently select overlapping frequency-domain features, suggesting CFFS leverages frequency correlations to align class-specific representations and reduce inter-class interference.", "perturbed_statement": "The heatmap shows that semantically unrelated classes like plane and frog have over 0.08 normalized activation correlation in high-frequency components, indicating CFFS actually increases interference across distinct categories.", "perturbed_explanation": "This is incorrect because the color scale in Figure 5 tops out at approximately 0.07, so no class pair—including plane and frog—reaches an activation correlation above 0.08. Additionally, plane and frog exhibit low similarity in feature selection, contradicting the claim of high interference.", "claim": "The heatmap shows that semantically unrelated classes like plane and frog have over 0.08 normalized activation correlation in high-frequency components, indicating CFFS actually increases interference across distinct categories.", "label": false }, { "paperid": "2410.16928v2", "paper_path": "./SciVer/papers/2410.16928v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.16928v2_figure_2.png", "request_id": 434, "origin_statement": "Figure 2 shows that xLSTM-Mixer produces smoother forecasts that adhere to the true seasonal peaks in both Weather and ETTm1, implying its spatio-temporal mixing layers adeptly model periodic components while attenuating high-frequency fluctuations more effectively than benchmark models.", "perturbed_statement": "Figure 2 shows that xLSTM-Mixer produces smoother forecasts that adhere to the true seasonal peaks in both Weather and ETTm1, implying its spatio-temporal mixing layers adeptly model periodic components while amplifying high-frequency fluctuations more effectively than benchmark models.", "perturbed_explanation": "The perturbed statement incorrectly claims xLSTM-Mixer amplifies high-frequency fluctuations. 
In Figure 2, the orange prediction curves for xLSTM-Mixer are visibly smoother than the blue targets and baseline forecasts, demonstrating noise attenuation rather than amplification.", "claim": "Figure 2 shows that xLSTM-Mixer produces smoother forecasts that adhere to the true seasonal peaks in both Weather and ETTm1, implying its spatio-temporal mixing layers adeptly model periodic components while amplifying high-frequency fluctuations more effectively than benchmark models.", "label": false }, { "paperid": "2409.13844v1", "paper_path": "./SciVer/papers/2409.13844v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.13844v1-Table3-1.png", "request_id": 435, "origin_statement": "The convex loss landscapes for SW and EDIP, coupled with analytic Hessian computation via automatic differentiation, enable BFGS and CG to converge globally within 4–6 iterations, significantly reducing computational overhead compared to finite-difference-based force constant evaluation.", "perturbed_statement": "The convex loss landscapes for SW and EDIP, coupled with finite-difference Hessian estimation, enable NM to converge globally within 2–3 iterations, significantly reducing computational overhead compared to automatic-differentiation-based evaluation.", "perturbed_explanation": "This statement is incorrect because the context specifies that Hessians are computed analytically using automatic differentiation, not finite differences, and that gradient-free Nelder–Mead (NM) converges slower than gradient-based BFGS and CG, which actually reach the global minimum in 4 (EDIP) to 6 (SW) iterations.", "claim": "The convex loss landscapes for SW and EDIP, coupled with finite-difference Hessian estimation, enable NM to converge globally within 2–3 iterations, significantly reducing computational overhead compared to automatic-differentiation-based evaluation.", "label": false }, { "paperid": "2410.08695v2", "paper_path": "./SciVer/papers/2410.08695v2.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.08695v2_figure_6.png", "request_id": 438, "origin_statement": "Across SEEDBench, applying three hard strategies reduces GPT-4o's accuracy from 80% to 35%—a 56% relative drop—while a single strategy only causes a 10% decrease. This non-linear decline suggests synergistic complexity effects in visual-linguistic tasks exacerbate multimodal integration challenges.", "perturbed_statement": "Across SEEDBench, applying three hard strategies reduces GPT-4o's accuracy from 78% to 42%—a 36% drop—while a single strategy only causes a 20% decrease, suggesting a linear performance degradation unaffected by strategy interactions.", "perturbed_explanation": "The perturbed statement misreports GPT-4o’s accuracy decline: Figure 6 shows a drop from approximately 80% to 35%, not from 78% to 42%. 
It also overstates the single-strategy decrease as 20% (actual is about 9%) and incorrectly describes the decline as linear rather than non-linear.", "claim": "Across SEEDBench, applying three hard strategies reduces GPT-4o's accuracy from 78% to 42%—a 36% drop—while a single strategy only causes a 20% decrease, suggesting a linear performance degradation unaffected by strategy interactions.", "label": false }, { "paperid": "2411.07565v1", "paper_path": "./SciVer/papers/2411.07565v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.07565v1-Table2-1.png", "request_id": 439, "origin_statement": "For N2, the maximum number of nonzero Hamiltonian entries per column (NH) scales from <4×10^3 for 56 spin-orbitals to <8×10^4 for 220 spin-orbitals, reflecting roughly quadratic growth in matrix density as orbital count increases, consistent with two-electron integral scaling in correlated systems.", "perturbed_statement": "For N2, the maximum number of nonzero Hamiltonian entries per column (NH) scales linearly from <4×10^3 for 56 spin-orbitals to <8×10^4 for 220 spin-orbitals, reflecting linear growth in matrix density as orbital count increases, consistent with one-electron integral scaling.", "perturbed_explanation": "The perturbed statement is incorrect because the observed NH values (<4×10^3, <3×10^4, <8×10^4) grow faster than linearly with orbital count (56→120→220), indicating quadratic behavior. Additionally, Hamiltonian density is driven by two-electron integrals, not one-electron integrals.", "claim": "For N2, the maximum number of nonzero Hamiltonian entries per column (NH) scales linearly from <4×10^3 for 56 spin-orbitals to <8×10^4 for 220 spin-orbitals, reflecting linear growth in matrix density as orbital count increases, consistent with one-electron integral scaling.", "label": false }, { "paperid": "2410.18462v1", "paper_path": "./SciVer/papers/2410.18462v1.json", "claim_type": "analytical", "type": "chart", "item": "7(a)", "section": [ "2.2.4" ], "image_path": "./SciVer/images/2410.18462v1_figure_7(a).png", "request_id": 440, "origin_statement": "On Thruxton, agents using abstracted latents achieved higher episodic returns and traversed greater distances by promoting smoother policy updates, albeit at reduced average speeds due to conservative acceleration, suggesting a trade-off between reward maximization and dynamic maneuverability.", "perturbed_statement": "On Thruxton, agents using abstracted latents achieved lower episodic returns and traversed shorter distances by promoting erratic policy updates, at increased average speeds due to aggressive acceleration, suggesting a trade-off favoring maneuverability over reward maximization.", "perturbed_explanation": "This claim contradicts Figure 7: abstract-latent agents actually exhibit higher episodic returns (Fig. 7a) and greater total distance traveled (Fig. 7b) compared to the baseline, and their average speed is lower, not higher, as shown in Fig. 
7c.", "claim": "On Thruxton, agents using abstracted latents achieved lower episodic returns and traversed shorter distances by promoting erratic policy updates, at increased average speeds due to aggressive acceleration, suggesting a trade-off favoring maneuverability over reward maximization.", "label": false }, { "paperid": "2411.04200v1", "paper_path": "./SciVer/papers/2411.04200v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5" ], "image_path": "./SciVer/images/2411.04200v1-Table3-1.png", "request_id": 441, "origin_statement": "In the cost-minimizing solution (x_B=1), CDP B captures 20% of zone A's demand and 5% of zone B’s due to CDP A’s capacity constraints, demonstrating that smaller facility capacities can drive the LP to favor a higher-cost location to achieve overall cost reduction.", "perturbed_statement": "In the cost-minimizing solution (x_B=1), CDP B captures 30% of zone A's demand and 5% of zone B’s due to CDP A’s capacity constraints, demonstrating that smaller facility capacities can drive the LP to favor a higher-cost location to achieve overall cost reduction.", "perturbed_explanation": "Table 3 shows that under x_B=1, CDP B captures 20% (not 30%) of zone A’s demand. The stated 30% capture rate contradicts the reported effective fraction in the context.", "claim": "In the cost-minimizing solution (x_B=1), CDP B captures 30% of zone A's demand and 5% of zone B’s due to CDP A’s capacity constraints, demonstrating that smaller facility capacities can drive the LP to favor a higher-cost location to achieve overall cost reduction.", "label": false }, { "paperid": "2410.06971v2", "paper_path": "./SciVer/papers/2410.06971v2.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2410.06971v2_figure_1.png", "request_id": 442, "origin_statement": "The formal employment rate's elasticity with respect to city size is highest in Brazil (0.0845), moderate in Colombia (0.0505), low in Mexico (0.0432), and minimal in the USA (0.0194), illustrating more pronounced agglomeration effects and skill complementarities in developing urban labour markets.", "perturbed_statement": "The formal employment rate's elasticity with respect to city size is highest in Mexico (0.432), moderate in Colombia (0.0505), low in Brazil (0.0845), and minimal in the USA (0.0194), illustrating more pronounced agglomeration effects and skill complementarities in developing urban labour markets.", "perturbed_explanation": "The perturbation misstates Mexico’s elasticity by an order of magnitude: the actual coefficient for Mexico is 0.0432, not 0.432. 
This error also misorders Mexico as having the highest elasticity when Brazil’s coefficient (0.0845) is actually larger.", "claim": "The formal employment rate's elasticity with respect to city size is highest in Mexico (0.432), moderate in Colombia (0.0505), low in Brazil (0.0845), and minimal in the USA (0.0194), illustrating more pronounced agglomeration effects and skill complementarities in developing urban labour markets.", "label": false }, { "paperid": "2409.17732v1", "paper_path": "./SciVer/papers/2409.17732v1.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "2.2" ], "image_path": "./SciVer/images/2409.17732v1-Table6-1.png", "request_id": 443, "origin_statement": "Regularized DTW hierarchical clustering yields that within the Italian Highland group, Valpelline (VP) forms Cluster I distinct from other high-altitude sites grouped in Cluster II, reflecting its lower elevation which modulates monthly temperature trend temporal alignment.", "perturbed_statement": "Regularized DTW hierarchical clustering yields that within the Italian Highland group, Valpelline (VP) forms Cluster II distinct from other high-altitude sites grouped in Cluster I, reflecting its higher elevation which modulates monthly temperature trend temporal alignment.", "perturbed_explanation": "This statement is incorrect because Table 5 shows Valpelline (VP) is assigned to Cluster I, not Cluster II, and the other Italian highland sites are in Cluster II. Additionally, Valpelline is the lowest station in Valle d’Aosta, not the highest, so it cannot reflect a ‘higher elevation’ effect.", "claim": "Regularized DTW hierarchical clustering yields that within the Italian Highland group, Valpelline (VP) forms Cluster II distinct from other high-altitude sites grouped in Cluster I, reflecting its higher elevation which modulates monthly temperature trend temporal alignment.", "label": false }, { "paperid": "2410.23494v1", "paper_path": "./SciVer/papers/2410.23494v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.23494v1_figure_5.png", "request_id": 444, "origin_statement": "The root cause analysis reveals that ResNet50’s accuracy drop under a shift in the G (Gaussian noise) mechanism is most negative, with an estimated contribution of −0.007, confirming that larger perturbations to the noise generation edge critically impair model performance.", "perturbed_statement": "The root cause analysis reveals that ResNet50’s accuracy increases by +0.007 under a shift in the G (Gaussian noise) mechanism, indicating that larger perturbations to the noise generation edge unexpectedly enhance model performance.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows that ResNet50’s contribution for the G shift is −0.007 (a decrease in accuracy), not +0.007. 
The sign and magnitude of the contribution are reversed compared to the actual result.", "claim": "The root cause analysis reveals that ResNet50’s accuracy increases by +0.007 under a shift in the G (Gaussian noise) mechanism, indicating that larger perturbations to the noise generation edge unexpectedly enhance model performance.", "label": false }, { "paperid": "2411.02142v1", "paper_path": "./SciVer/papers/2411.02142v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "5" ], "image_path": "./SciVer/images/2411.02142v1-Table5-1.png", "request_id": 445, "origin_statement": "By applying the scaling law, our 7.2B CLM model doubles the attention heads (32 vs. 16) and adds four layers over PROGEN2-xlarge, achieving 1.14×10^22 FLOPs—about 15% less than PROGEN2’s 1.34×10^22—improving computational efficiency per parameter.", "perturbed_statement": "Our 7.2B CLM model doubles attention heads (32 vs. 16) and adds six layers over PROGEN2-xlarge, achieving 1.04×10^22 FLOPs—about 25% less than PROGEN2’s 1.34×10^22—improving computational efficiency per parameter.", "perturbed_explanation": "This is incorrect because the 7.2B model uses 36 layers versus PROGEN2-xlarge’s 32 (a four-layer increase, not six), its FLOPs are 1.14×10^22 rather than 1.04×10^22, and the reduction in FLOPs is ~15%, not 25%.", "claim": "Our 7.2B CLM model doubles attention heads (32 vs. 16) and adds six layers over PROGEN2-xlarge, achieving 1.04×10^22 FLOPs—about 25% less than PROGEN2’s 1.34×10^22—improving computational efficiency per parameter.", "label": false }, { "paperid": "2410.19779v1", "paper_path": "./SciVer/papers/2410.19779v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.19779v1_figure_5.png", "request_id": 446, "origin_statement": "As pre-training data volume increases from 0 to 1B tokens, EEGPT-Large exhibits diminishing marginal performance gains, with less than 1.2% improvement beyond 0.75B tokens. This log-linear trend aligns with established power-law scaling, suggesting further data yields progressively smaller returns.", "perturbed_statement": "As pre-training data volume increases from 0 to 1B tokens, EEGPT-Large exhibits constant marginal performance gains, with approximately 2% improvement beyond 0.75B tokens. This exponential trend aligns with established power-law scaling, suggesting further data yields robust returns.", "perturbed_explanation": "The perturbed statement is incorrect because gains actually taper off rather than remain constant. From 0.75B to 1B tokens, ER improves by only 0.3%, MI by 0.4%, MW by 0.4%, SS by 1.1%, and CM by 0.5%, not ~2%. Moreover, the observed trend is log-linear (power-law), not exponential.", "claim": "As pre-training data volume increases from 0 to 1B tokens, EEGPT-Large exhibits constant marginal performance gains, with approximately 2% improvement beyond 0.75B tokens. This exponential trend aligns with established power-law scaling, suggesting further data yields robust returns.", "label": false }, { "paperid": "2410.21100v1", "paper_path": "./SciVer/papers/2410.21100v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.21100v1-Table2-1.png", "request_id": 447, "origin_statement": "mSSRM-PGA (m=10) leverages state-space shrinkage to filter estimation noise, achieving about 6% and 8% higher cumulative wealth than PLCT on FF25 (615.34 vs. 581.41) and FF32 (991.89 vs. 
918.62) at T=60, indicating improved robustness to sample volatility.", "perturbed_statement": "mSSRM-PGA (m=10) leverages state-space shrinkage to filter estimation noise, achieving about 12% and 16% higher cumulative wealth than PLCT on FF25 (615.34 vs. 581.41) and FF32 (991.89 vs. 918.62) at T=60, indicating improved robustness to sample volatility.", "perturbed_explanation": "The stated performance gaps are incorrect: on FF25 the actual increase is (615.34−581.41)/581.41≈5.8%, not 12%, and on FF32 it is (991.89−918.62)/918.62≈8.0%, not 16%, contradicting the context’s reported values.", "claim": "mSSRM-PGA (m=10) leverages state-space shrinkage to filter estimation noise, achieving about 12% and 16% higher cumulative wealth than PLCT on FF25 (615.34 vs. 581.41) and FF32 (991.89 vs. 918.62) at T=60, indicating improved robustness to sample volatility.", "label": false }, { "paperid": "2409.17561v1", "paper_path": "./SciVer/papers/2409.17561v1.json", "claim_type": "analytical", "type": "chart", "item": "11", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.17561v1_figure_11.png", "request_id": 448, "origin_statement": "Applying the repair strategy to GPT-3.5’s generated tests cuts syntax errors by 95.46% (from 97.84% to 4.38%), boosting its compilation pass rate to 44.94%, which illustrates that targeted syntactic fixes dramatically increase the yield of valid test cases available for execution-driven validation.", "perturbed_statement": "Applying the repair strategy to GPT-3.5’s generated tests cuts syntax errors by 92.12% (from 97.84% to 7.72%), boosting its compilation pass rate to 60.23%, which illustrates that targeted syntactic fixes dramatically increase the yield of valid test cases available for execution-driven validation.", "perturbed_explanation": "The perturbed statement misreports the post-repair syntax error rate as 7.72% instead of the correct 4.38%, so the error reduction is overstated. 
It also incorrectly claims a 60.23% compilation pass rate, whereas the actual rate after repair is 44.94%.", "claim": "Applying the repair strategy to GPT-3.5’s generated tests cuts syntax errors by 92.12% (from 97.84% to 7.72%), boosting its compilation pass rate to 60.23%, which illustrates that targeted syntactic fixes dramatically increase the yield of valid test cases available for execution-driven validation.", "label": false }, { "paperid": "2411.07954v2", "paper_path": "./SciVer/papers/2411.07954v2.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.2", "6" ], "image_path": "./SciVer/images/2411.07954v2_figure_4.png", "request_id": 450, "origin_statement": "In Mortar Mayhem, AttentionTuner’s memory loss term helps the optimizer overcome two pronounced local optima by epoch 50 with low seed variance, whereas the vanilla Transformer escapes only one optimum after over 500 epochs, illustrating that memory dependency pairs effectively reshape the POMDP loss landscape to avoid flat minima.", "perturbed_statement": "In Mortar Mayhem, AttentionTuner’s memory loss term helps the optimizer overcome a single local optimum by epoch 20 with low seed variance, whereas the vanilla Transformer escapes only one optimum after over 500 epochs, illustrating that memory dependency pairs effectively reshape the POMDP loss landscape to avoid flat minima.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 4 and the text report two distinct local optima in Mortar Mayhem (not one), and AttentionTuner consistently escapes both by around epoch 50 (not epoch 20). These two details contradict the original results.", "claim": "In Mortar Mayhem, AttentionTuner’s memory loss term helps the optimizer overcome a single local optimum by epoch 20 with low seed variance, whereas the vanilla Transformer escapes only one optimum after over 500 epochs, illustrating that memory dependency pairs effectively reshape the POMDP loss landscape to avoid flat minima.", "label": false }, { "paperid": "2410.08387v1", "paper_path": "./SciVer/papers/2410.08387v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.08387v1-Table2-1.png", "request_id": 453, "origin_statement": "Under the optically thin, single-temperature modified blackbody assumption with κν from Li & Draine (2001), the observed 1.2–1.5 mJy S₈₇₀µm fluxes correspond to cold dust masses of ~10⁸.6–10⁸.9 M⊙, sustaining star formation rates of ~400 M⊙ yr⁻¹ through efficient ISM cooling.", "perturbed_statement": "Under the optically thin, single-temperature modified blackbody assumption with κν from Li & Draine (2001), the observed 1.2–1.5 µJy S₈₇₀µm fluxes correspond to cold dust masses of ~10⁷.6–10⁷.9 M⊙, sustaining star formation rates of ~400 M⊙ yr⁻¹ through efficient ISM cooling.", "perturbed_explanation": "This is incorrect because Table 2 reports S₈₇₀µm flux densities of 1.2–1.5 mJy, not µJy, and log10(M_dust) values of 8.56–8.88 (≈10⁸.6–10⁸.9 M⊙), not 10⁷.6–10⁷.9 M⊙.", "claim": "Under the optically thin, single-temperature modified blackbody assumption with κν from Li & Draine (2001), the observed 1.2–1.5 µJy S₈₇₀µm fluxes correspond to cold dust masses of ~10⁷.6–10⁷.9 M⊙, sustaining star formation rates of ~400 M⊙ yr⁻¹ through efficient ISM cooling.", "label": false }, { "paperid": "2409.08732v1", "paper_path": "./SciVer/papers/2409.08732v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.4" ], 
"image_path": "./SciVer/images/2409.08732v1-Table5-1.png", "request_id": 455, "origin_statement": "By leveraging NCDE\u000269s continuous dynamic modeling, NCDENow achieves a 78.6% reduction in MSE and a 45.9% reduction in MAPE compared to DFM during South Korea\u000269s Q1–Q3 2020 GDP slump, indicating superior adaptation to abrupt structural breaks in growth.", "perturbed_statement": "By leveraging NCDE\u000269s continuous dynamic modeling, NCDENow achieves over a 95% reduction in MSE and a 65% reduction in MAPE compared to DFM during South Korea\u000269s Q1–Q3 2020 GDP slump, demonstrating its ability to capture linear trends amid structural breaks.", "perturbed_explanation": "The perturbed statement is incorrect because the actual MSE reduction from 4.6265 to 0.9900 is approximately 78.6%, not over 95%, and the actual MAPE reduction from 0.9954 to 0.5387 is about 45.9%, not 65%. Additionally, the context emphasizes NCDENow\u000269s capacity to model nonlinear patterns, so claiming it captures only linear trends contradicts its described advantage.", "claim": "By leveraging NCDE\u000269s continuous dynamic modeling, NCDENow achieves over a 95% reduction in MSE and a 65% reduction in MAPE compared to DFM during South Korea\u000269s Q1–Q3 2020 GDP slump, demonstrating its ability to capture linear trends amid structural breaks.", "label": false }, { "paperid": "2410.22023v3", "paper_path": "./SciVer/papers/2410.22023v3.json", "claim_type": "analytical", "type": "chart", "item": "5(a)", "section": [ "3.6" ], "image_path": "./SciVer/images/2410.22023v3_figure_5(a).png", "request_id": 456, "origin_statement": "t-SNE visualization of FDAN features on SAVEE reveals seven well-separated clusters for visual and acoustic modalities, reflecting effective alignment of high-dimensional embeddings and enhanced local cohesion within emotional categories, thereby minimizing intra-emotion variance and maximizing inter-emotion separation for robust multimodal emotion discrimination.", "perturbed_statement": "t-SNE visualization of FDAN features on SAVEE reveals five well-separated clusters for visual and acoustic modalities, reflecting effective alignment of high-dimensional embeddings and enhanced local cohesion within emotional categories, thereby minimizing intra-emotion variance and maximizing inter-emotion separation for robust multimodal emotion discrimination.", "perturbed_explanation": "The perturbed statement incorrectly asserts five clusters, whereas Fig. 
5 and its legend display seven distinct emotion categories (AN, DI, FE, HA, NE, SA, SU), demonstrating seven clusters rather than five.", "claim": "t-SNE visualization of FDAN features on SAVEE reveals five well-separated clusters for visual and acoustic modalities, reflecting effective alignment of high-dimensional embeddings and enhanced local cohesion within emotional categories, thereby minimizing intra-emotion variance and maximizing inter-emotion separation for robust multimodal emotion discrimination.", "label": false }, { "paperid": "2410.05341v2", "paper_path": "./SciVer/papers/2410.05341v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.3", "3.4" ], "image_path": "./SciVer/images/2410.05341v2-Table1-1.png", "request_id": 457, "origin_statement": "By integrating the spectral learning module, NeuroBOLT achieved a 0.285 increase in average correlation, reflecting its ability to extract multi-scale EEG spectral features that temporally align with slow (~0.01–0.1 Hz) BOLD oscillations and thus enhance fMRI reconstruction across cortical and subcortical ROIs.", "perturbed_statement": "By integrating the spectral learning module, NeuroBOLT achieved a 0.528 increase in average correlation, reflecting its ability to extract sub-second EEG spectral features that temporally align with rapid (1–10 Hz) BOLD oscillations and thus enhance fMRI reconstruction across cortical and subcortical ROIs.", "perturbed_explanation": "The perturbation is incorrect because the actual improvement in average correlation reported in Table 2 and the text is 0.285, not 0.528. Additionally, BOLD fluctuations targeted in resting-state EEG–fMRI translation occur in the slow 0.01–0.1 Hz range, not at rapid 1–10 Hz frequencies.", "claim": "By integrating the spectral learning module, NeuroBOLT achieved a 0.528 increase in average correlation, reflecting its ability to extract sub-second EEG spectral features that temporally align with rapid (1–10 Hz) BOLD oscillations and thus enhance fMRI reconstruction across cortical and subcortical ROIs.", "label": false }, { "paperid": "2410.07416v1", "paper_path": "./SciVer/papers/2410.07416v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "5" ], "image_path": "./SciVer/images/2410.07416v1_figure_5.png", "request_id": 458, "origin_statement": "Fig. 5 shows that both μ^7:11_1 and μ^25:40_2 are moderately anticorrelated with H₀ (r≈−0.65, −0.72), implying that overestimating the characteristic mass scale in either low- or high-mass features leads to underestimating the Hubble constant via the siren distance–mass degeneracy.", "perturbed_statement": "Fig. 5 shows that both μ^7:11_1 and μ^25:40_2 are moderately positively correlated with H₀ (r≈+0.65, +0.72), implying that overestimating the characteristic mass scale in either low- or high-mass features leads to overestimating the Hubble constant via the siren distance–mass degeneracy.", "perturbed_explanation": "The perturbation erroneously flips the sign of the correlation coefficients. In Fig. 5, the labeled values are r≈−0.65 (purple) and r≈−0.72 (green), indicating anticorrelations with H₀, not positive correlations.", "claim": "Fig. 
5 shows that both μ^7:11_1 and μ^25:40_2 are moderately positively correlated with H₀ (r≈+0.65, +0.72), implying that overestimating the characteristic mass scale in either low- or high-mass features leads to overestimating the Hubble constant via the siren distance–mass degeneracy.", "label": false }, { "paperid": "2410.08642v1", "paper_path": "./SciVer/papers/2410.08642v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.08642v1_figure_3.png", "request_id": 460, "origin_statement": "Figure 3 shows that Protests messages peak modestly on October 3 at about 150 total, with image posts comprising roughly 60% and multimodal posts about 40%, reflecting the visual demand for documenting demonstrations. In contrast, Ukraine War discussions average over 50% multimodal content, combining narrative text and imagery.", "perturbed_statement": "Figure 3 shows that Protests messages peak on October 1 at about 150 total, with image posts comprising roughly 80% and multimodal posts about 40%, reflecting the visual demand for documenting demonstrations. In contrast, Ukraine War discussions average over 50% multimodal content, combining narrative text and imagery.", "perturbed_explanation": "This statement incorrectly shifts the protests peak from October 3 to October 1 and overestimates image posts as 80% of the Protests topic, whereas Figure 3 shows the actual peak occurs on October 3 with images constituting approximately 60% of daily messages.", "claim": "Figure 3 shows that Protests messages peak on October 1 at about 150 total, with image posts comprising roughly 80% and multimodal posts about 40%, reflecting the visual demand for documenting demonstrations. In contrast, Ukraine War discussions average over 50% multimodal content, combining narrative text and imagery.", "label": false }, { "paperid": "2410.23426v1", "paper_path": "./SciVer/papers/2410.23426v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.23426v1-Table1-1.png", "request_id": 461, "origin_statement": "By integrating human expert brainstorming with LLMs in Step 1 and manual character scenario composition in Step 2 anchored in social science literature, the pipeline isolates single-turn dialogues. Step 3’s GPT-4o refinement coupled with a four-expert panel review ensures coherence and mitigates simulation bias.", "perturbed_statement": "By integrating human expert brainstorming with LLMs in Step 1 and manual character scenario composition in Step 2 anchored in social science literature, the pipeline analyzes multi-turn dialogues. Step 3’s GPT-3 refinement coupled with a ten-expert panel review ensures coherence and mitigates simulation bias.", "perturbed_explanation": "The perturbed statement contradicts the context in three ways: 1) the dataset focuses on single-turn dialogues, not multi-turn; 2) it specifies GPT-3 for refinement, whereas the text states GPT-4o is used; and 3) the human review panel consists of four experts, not ten.", "claim": "By integrating human expert brainstorming with LLMs in Step 1 and manual character scenario composition in Step 2 anchored in social science literature, the pipeline analyzes multi-turn dialogues. 
Step 3’s GPT-3 refinement coupled with a ten-expert panel review ensures coherence and mitigates simulation bias.", "label": false }, { "paperid": "2411.09127v1", "paper_path": "./SciVer/papers/2411.09127v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "6.3" ], "image_path": "./SciVer/images/2411.09127v1-Table5-1.png", "request_id": 463, "origin_statement": "Applying a larger sparsity regularizer (γ increased from 1e-5 to 4e-3 with β=1) on ResNet110 yields a 36% increase in FLOPS pruning (from 41 to 77%), illustrating that enhanced regularization exploits network overcapacity to reduce computations at the expense of a ≈1.1% accuracy decline due to reduced representational capacity.", "perturbed_statement": "Applying a larger sparsity regularizer (γ increased from 1e-5 to 4e-3 with β=1) on ResNet110 yields a 46% increase in FLOPS pruning (from 41 to 87%), illustrating that enhanced regularization exploits network overcapacity to reduce computations at the expense of a ≈1.1% accuracy decline due to reduced representational capacity.", "perturbed_explanation": "The perturbed statement incorrectly claims a 46% improvement in FLOPS pruning (from 41% to 87%) instead of the actual 36% increase (41% to 77%) reported in Table 5, misrepresenting the pruning effectiveness.", "claim": "Applying a larger sparsity regularizer (γ increased from 1e-5 to 4e-3 with β=1) on ResNet110 yields a 46% increase in FLOPS pruning (from 41 to 87%), illustrating that enhanced regularization exploits network overcapacity to reduce computations at the expense of a ≈1.1% accuracy decline due to reduced representational capacity.", "label": false }, { "paperid": "2411.01603v1", "paper_path": "./SciVer/papers/2411.01603v1.json", "claim_type": "analytical", "type": "chart", "item": "13", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.01603v1_figure_13.png", "request_id": 464, "origin_statement": "The UWB-based position estimation exhibits increased lateral deviation from RTK benchmarks as the UAV moves beyond the convex hull of anchor nodes, reflecting elevated geometric dilution of precision; this effect parallels GNSS altitude errors when satellite GDOP worsens at low elevation angles.", "perturbed_statement": "The UWB-based position estimation exhibits increased vertical deviation from RTK benchmarks as the UAV moves outside the convex hull of anchor nodes, reflecting elevated time dilution of precision; this effect parallels GNSS timing errors when satellite TDOP worsens at high elevation angles.", "perturbed_explanation": "It is incorrect to claim a vertical deviation because the context and Figure 13 show larger horizontal (lateral) errors along the y-axis when the UAV is outside the anchor envelope. 
Time Dilution of Precision (TDOP) and GNSS timing errors at high elevation angles are not relevant here; UWB errors stem from geometric dilution of precision (GDOP), which worsens at low, not high, satellite elevation angles.", "claim": "The UWB-based position estimation exhibits increased vertical deviation from RTK benchmarks as the UAV moves outside the convex hull of anchor nodes, reflecting elevated time dilution of precision; this effect parallels GNSS timing errors when satellite TDOP worsens at high elevation angles.", "label": false }, { "paperid": "2411.02790v1", "paper_path": "./SciVer/papers/2411.02790v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "6.1.2" ], "image_path": "./SciVer/images/2411.02790v1-Table2-1.png", "request_id": 467, "origin_statement": "Across all domains, CtrlCE_lt’s NDCG@10 drops by only 6.6% when disabling personalization, compared to CrossEnc_RA’s 30.2% drop, illustrating that calibrated mixing enables efficient, controlled personalization within a single model while leveraging cached cross-encodings for faster inference.", "perturbed_statement": "Across all domains, CtrlCE_cv’s NDCG@10 declines by only 2% when disabling personalization, compared to CrossEnc_RA’s 40% drop, demonstrating that calibrated mixing not only controls personalization but also halves inference latency relative to CrossEnc.", "perturbed_explanation": "This statement is incorrect because Table 2 shows CtrlCE_cv’s NDCG@10 actually falls from 0.3118 to 0.2936, a drop of about 5.8%, not 2%. Additionally, CrossEnc_RA’s NDCG@10 drops from 0.3244 to 0.2264, approximately 30.2%, not 40%. The claim of halving inference latency is unsupported by the context, which only notes improved speed without quantifying a 50% reduction.", "claim": "Across all domains, CtrlCE_cv’s NDCG@10 declines by only 2% when disabling personalization, compared to CrossEnc_RA’s 40% drop, demonstrating that calibrated mixing not only controls personalization but also halves inference latency relative to CrossEnc.", "label": false }, { "paperid": "2411.14254v1", "paper_path": "./SciVer/papers/2411.14254v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2411.14254v1-Table3-1.png", "request_id": 469, "origin_statement": "Applying a 5e-5 learning rate with batch size 64 over 10 epochs on 512-token sequences leverages PyTorch’s AdamW optimizer to ensure stable gradient updates, enabling the NVIDIA RTX A2000 GPU to process 8,949 CO-PO/PSO pairs efficiently while reducing overfitting in transformer models.", "perturbed_statement": "Applying a 5e-4 learning rate with batch size 64 over 20 epochs on 512-token sequences leverages PyTorch’s AdamW optimizer to ensure stable gradient updates, enabling the NVIDIA RTX A2000 GPU to process 8,949 CO-PO/PSO pairs efficiently while reducing overfitting in transformer models.", "perturbed_explanation": "The perturbed statement incorrectly reports the learning rate as 5e-4 (actual: 5e-5) and the number of epochs as 20 (actual: 10), contradicting the values specified in Table 3.", "claim": "Applying a 5e-4 learning rate with batch size 64 over 20 epochs on 512-token sequences leverages PyTorch’s AdamW optimizer to ensure stable gradient updates, enabling the NVIDIA RTX A2000 GPU to process 8,949 CO-PO/PSO pairs efficiently while reducing overfitting in transformer models.", "label": false }, { "paperid": "2409.17531v2", "paper_path":
"./SciVer/papers/2409.17531v2.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.4.1" ], "image_path": "./SciVer/images/2409.17531v2_figure_5.png", "request_id": 470, "origin_statement": "On testA, precision@0.5 increases only from 88.5% at two layers to 88.7% at six layers, indicating diminishing returns: adding depth beyond two transformer layers yields minimal performance improvement in visual grounding tasks.", "perturbed_statement": "On testA, precision@0.5 decreases from 88.5% at two layers to 87.7% at six layers, indicating that adding transformer layers beyond two leads to overfitting and degraded grounding performance.", "perturbed_explanation": "This statement is incorrect because, according to Figure 7, testA precision@0.5 actually increases from 88.5% at two layers to 88.7% at six layers. There is no observed drop to 87.7% in the provided data.", "claim": "On testA, precision@0.5 decreases from 88.5% at two layers to 87.7% at six layers, indicating that adding transformer layers beyond two leads to overfitting and degraded grounding performance.", "label": false }, { "paperid": "2409.11143v1", "paper_path": "./SciVer/papers/2409.11143v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.11143v1-Table1-1.png", "request_id": 471, "origin_statement": "By predicting complete semantic routes rather than local tokens, Semformer maintains above 99% accuracy across all GPT2-Large benchmarks, including the challenging G(5,30) where autoregressive models fall below 20%.", "perturbed_statement": "By predicting complete semantic routes rather than local tokens, Semformer achieves only 95% accuracy on the challenging G(5,30) benchmark, while autoregressive baselines fall below 10% accuracy.", "perturbed_explanation": "This statement is incorrect because Semformer actually attains 99.2% accuracy on G(5,30), and standard autoregressive baselines (e.g. GPT2-Large Standard) reach about 19.8% accuracy, not below 10%.", "claim": "By predicting complete semantic routes rather than local tokens, Semformer achieves only 95% accuracy on the challenging G(5,30) benchmark, while autoregressive baselines fall below 10% accuracy.", "label": false }, { "paperid": "2411.10060v1", "paper_path": "./SciVer/papers/2411.10060v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.10060v1_figure_7.png", "request_id": 474, "origin_statement": "On IEMOCAP, the model mislabels 42 out of 48 'Angry' utterances as 'Frustrated', reflecting representation confusion between these adjacent high-arousal negative emotions. On MELD, over 1,080 'Neutral' utterances attract misclassifications from other classes, demonstrating bias from disproportionate class distribution.", "perturbed_statement": "On IEMOCAP, the model mislabels 42 out of 48 'Angry' utterances as 'Excited', reflecting representation confusion between these adjacent high-arousal negative emotions. On MELD, over 1,200 'Neutral' utterances attract misclassifications from other classes, demonstrating bias from disproportionate class distribution.", "perturbed_explanation": "The perturbed statement incorrectly states that 'Angry' utterances are misclassified as 'Excited'; the IEMOCAP confusion matrix shows they are misclassified as 'Frustrated'. 
It also misstates the neutral count on MELD, which is 1,080 rather than 1,200.", "claim": "On IEMOCAP, the model mislabels 42 out of 48 'Angry' utterances as 'Excited', reflecting representation confusion between these adjacent high-arousal negative emotions. On MELD, over 1,200 'Neutral' utterances attract misclassifications from other classes, demonstrating bias from disproportionate class distribution.", "label": false }, { "paperid": "2409.14780v2", "paper_path": "./SciVer/papers/2409.14780v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.3", "5" ], "image_path": "./SciVer/images/2409.14780v2-Table3-1.png", "request_id": 477, "origin_statement": "Increasing supernova feedback from 5% to 10% in the CDM simulation boosts total dust mass from 1.44×10^7 M⊙ to 4.56×10^7 M⊙ (over 3× increase) but only raises the central 15 kpc dust fraction from 85% to 87%, indicating stronger feedback drives outflows yet the deep CDM potential retains dust in the inner galaxy.", "perturbed_statement": "Increasing supernova feedback from 5% to 10% in the CDM simulation doubles total dust mass from 1.44×10^7 M⊙ to 2.88×10^7 M⊙ and reduces the central 15 kpc dust fraction from 85% to 82%, implying stronger feedback expels dust into the outer halo where the shallower WDM potential fails to recapture it.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 shows the total dust mass actually increases from 1.44×10^7 M⊙ to 4.56×10^7 M⊙ (not 2.88×10^7 M⊙), and the central 15 kpc dust fraction rises from 85% to 87% (not decreases to 82%). Additionally, invoking a WDM potential in a CDM run is irrelevant to the data.", "claim": "Increasing supernova feedback from 5% to 10% in the CDM simulation doubles total dust mass from 1.44×10^7 M⊙ to 2.88×10^7 M⊙ and reduces the central 15 kpc dust fraction from 85% to 82%, implying stronger feedback expels dust into the outer halo where the shallower WDM potential fails to recapture it.", "label": false }, { "paperid": "2411.15524v1", "paper_path": "./SciVer/papers/2411.15524v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2.1", "2.2", "3", "4.1" ], "image_path": "./SciVer/images/2411.15524v1-Table1-1.png", "request_id": 479, "origin_statement": "In our survey, only 0.36% of galaxies meet WISE color cuts (W1–W2>0.5, W1–W4>7), isolating rare dust-obscured AGNs whose torus temperatures (>300 K) produce red mid-IR slopes and provide sufficient X-ray–heated molecular gas to invert the 22 GHz H2O transition via collisional excitation.", "perturbed_statement": "In our survey, only 3.6% of galaxies meet WISE color cuts (W1–W3>0.5, W2–W4>7), isolating dust-obscured AGNs whose torus temperatures (>1000 K) produce red mid-IR slopes and provide sufficient UV–heated molecular gas to invert the 22 GHz H2O transition via radiative pumping.", "perturbed_explanation": "The perturbed statement is incorrect because the actual survey applied cuts of W1–W2>0.5 and W1–W4>7, yielding 0.36% of galaxies, not 3.6%. Torus temperatures required to redden WISE mid-IR colors are of order a few hundred kelvin (>300 K), not >1000 K.
Additionally, 22 GHz H2O maser inversion is driven by collisional excitation in X-ray–heated environments, not UV–radiative pumping.", "claim": "In our survey, only 3.6% of galaxies meet WISE color cuts (W1–W3>0.5, W2–W4>7), isolating dust-obscured AGNs whose torus temperatures (>1000 K) produce red mid-IR slopes and provide sufficient UV–heated molecular gas to invert the 22 GHz H2O transition via radiative pumping.", "label": false }, { "paperid": "2409.16217v1", "paper_path": "./SciVer/papers/2409.16217v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "5" ], "image_path": "./SciVer/images/2409.16217v1_figure_5.png", "request_id": 480, "origin_statement": "The MAC-layer downlink throughput exhibits near-linear scaling with PRB allocation, increasing from about 35 Mbps at low PRB counts to around 72 Mbps at the highest allocation, consistently exceeding App-layer throughput by roughly 5 Mbps due to MAC-level retransmissions reducing packet loss and overhead.", "perturbed_statement": "The App-layer downlink throughput consistently exceeds the MAC-layer throughput by approximately 5 Mbps across all slicing configurations, attributed to higher-layer buffering mechanisms smoothing out transmission bursts.", "perturbed_explanation": "Figure 5 shows that, for every slicing configuration, the solid MAC-layer bars are taller than the hatched App-layer bars. This indicates that MAC-layer throughput always exceeds App-layer throughput, contradicting the statement that App-layer outperforms MAC by ~5 Mbps.", "claim": "The App-layer downlink throughput consistently exceeds the MAC-layer throughput by approximately 5 Mbps across all slicing configurations, attributed to higher-layer buffering mechanisms smoothing out transmission bursts.", "label": false }, { "paperid": "2411.15668v1", "paper_path": "./SciVer/papers/2411.15668v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "3.3.2" ], "image_path": "./SciVer/images/2411.15668v1_figure_5.png", "request_id": 482, "origin_statement": "Figure 5 shows that for He II, the critical heat flux q* in the Kapitza transition increases from ~28 kW/m² at 10 cm to ~47 kW/m² at 50 cm immersion depth, consistent with Zuber’s hydrostatic-pressure CHF scaling q* ∝ (σ g (ρ_l–ρ_v))^1/4.", "perturbed_statement": "Figure 5 indicates that for He II, the critical heat flux q* in the Kapitza transition decreases from ~47 kW/m² at 10 cm to ~28 kW/m² at 50 cm immersion depth, following an inverse hydrostatic scaling q* ∝ (σ g (ρ_l–ρ_v))^-1/4.", "perturbed_explanation": "The perturbed statement reverses the observed trend: Fig. 5 shows q* actually increases from ~28 to ~47 kW/m² with depth, not decreases. 
It also misuses Zuber’s CHF scaling by applying a negative 1/4 exponent, whereas the correct exponent is +1/4.", "claim": "Figure 5 indicates that for He II, the critical heat flux q* in the Kapitza transition decreases from ~47 kW/m² at 10 cm to ~28 kW/m² at 50 cm immersion depth, following an inverse hydrostatic scaling q* ∝ (σ g (ρ_l–ρ_v))^-1/4.", "label": false }, { "paperid": "2411.03743v1", "paper_path": "./SciVer/papers/2411.03743v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "2.2.1" ], "image_path": "./SciVer/images/2411.03743v1-Table3-1.png", "request_id": 487, "origin_statement": "Llama-3.1-70B-UltraMed’s domain-tailored finetuning closes the domain shift gap, boosting its professional medicine accuracy to 94.9%, a 35.7-point increase over Llama-2-70B-Chat, illustrating that specialized instruction promotes retention of clinical reasoning compared to general-purpose LLMs.", "perturbed_statement": "Llama-3.1-70B-UltraMed’s domain-tailored finetuning closes the domain shift gap, boosting its anatomy accuracy to 97.2%, a 30.5-point increase over Llama-2-70B-Chat, illustrating that specialized instruction promotes retention of clinical reasoning compared to general-purpose LLMs.", "perturbed_explanation": "This statement is incorrect because Table 3 shows Llama-3.1-70B-UltraMed’s anatomy accuracy is 83.7%, not 97.2%, and Llama-2-70B-Chat’s anatomy is 54.1%, so the actual improvement is 29.6 points, not 30.5.", "claim": "Llama-3.1-70B-UltraMed’s domain-tailored finetuning closes the domain shift gap, boosting its anatomy accuracy to 97.2%, a 30.5-point increase over Llama-2-70B-Chat, illustrating that specialized instruction promotes retention of clinical reasoning compared to general-purpose LLMs.", "label": false }, { "paperid": "2411.10746v1", "paper_path": "./SciVer/papers/2411.10746v1.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.10746v1_figure_3(a).png", "request_id": 488, "origin_statement": "The ROC AUC for Support Devices (0.94) indicates the model can distinguish high-contrast artifacts more reliably than subtle pathologies like Pneumonia (0.66), likely because device appearances are more uniform and distinctive across chest X-rays.", "perturbed_statement": "The ROC AUC for Support Devices (0.84) indicates the model can distinguish high-contrast artifacts more reliably than subtle pathologies like Pneumonia (0.76), likely because device appearances are more uniform and distinctive across chest X-rays.", "perturbed_explanation": "The perturbed statement is incorrect because the actual AUC for Support Devices is 0.94 (not 0.84) and the AUC for Pneumonia is 0.66 (not 0.76), as reported in Fig. 
3 of the context.", "claim": "The ROC AUC for Support Devices (0.84) indicates the model can distinguish high-contrast artifacts more reliably than subtle pathologies like Pneumonia (0.76), likely because device appearances are more uniform and distinctive across chest X-rays.", "label": false }, { "paperid": "2409.14836v2", "paper_path": "./SciVer/papers/2409.14836v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.2.1" ], "image_path": "./SciVer/images/2409.14836v2-Table1-1.png", "request_id": 489, "origin_statement": "On MT-Bench with Mistral-Base, RoPO raises WWR by 8.06 points over DPO(β=0.1) (14.78%→22.84%) while cutting token length by ~39%, suggesting its regularization curbs overfitting and repetitive verbosity while improving alignment.", "perturbed_statement": "On MT-Bench with Mistral-Base, RoPO raises WWR by 12.5 points over DPO(β=0.1) (14.78%→27.28%) while cutting token length by ~60%, suggesting its regularization curbs overfitting and repetitive verbosity while improving alignment.", "perturbed_explanation": "The perturbed statement incorrectly reports the WWR increase and resulting WWR. The actual WWR rises by only 8.06 points (from 14.78% to 22.84%), not 12.5 points to 27.28%. Likewise, the token length reduction is ~39% (1591→968), not ~60%.", "claim": "On MT-Bench with Mistral-Base, RoPO raises WWR by 12.5 points over DPO(β=0.1) (14.78%→27.28%) while cutting token length by ~60%, suggesting its regularization curbs overfitting and repetitive verbosity while improving alignment.", "label": false }, { "paperid": "2410.20597v1", "paper_path": "./SciVer/papers/2410.20597v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.20597v1-Table5-1.png", "request_id": 491, "origin_statement": "The analyst network’s average diameter of 11.29 indicates firms connect through extended chains, mitigating GAT over-smoothing and enhancing multi-hop information aggregation. Combined with its high transitivity (0.67), this structure likely enables the model to capture latent inter-firm dependencies better than the correlation network (diameter 5.4).", "perturbed_statement": "The analyst network’s average diameter of 6.7 indicates firms connect through extended chains, mitigating GAT over-smoothing and enhancing multi-hop information aggregation. Combined with its high transitivity (0.67), this structure likely enables the model to capture latent inter-firm dependencies better than the correlation network (diameter 11.29).", "perturbed_explanation": "Table 5 reports the analyst network diameter as 11.29 (not 6.7) and the correlation network diameter as 5.4 (not 11.29). The perturbed statement swaps these values, contradicting the context.", "claim": "The analyst network’s average diameter of 6.7 indicates firms connect through extended chains, mitigating GAT over-smoothing and enhancing multi-hop information aggregation. 
Combined with its high transitivity (0.67), this structure likely enables the model to capture latent inter-firm dependencies better than the correlation network (diameter 11.29).", "label": false }, { "paperid": "2409.11074v2", "paper_path": "./SciVer/papers/2409.11074v2.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.11074v2_figure_3.png", "request_id": 492, "origin_statement": "In the Competitions domain, Mathstral-7b-v0.1 achieves around 70% accuracy on Calculus problems, outperforming Romanian-specialized RoMistral-7b (~55%), indicating mathematically fine-tuned models significantly improve performance on advanced calculus tasks under zero-shot prompting.", "perturbed_statement": "In the Competitions domain, Mathstral-7b-v0.1 achieves around 85% accuracy on Calculus problems, outperforming Romanian-specialized RoMistral-7b (~55%), indicating mathematically fine-tuned models significantly improve performance on advanced calculus tasks under zero-shot prompting.", "perturbed_explanation": "The perturbed statement incorrectly reports Mathstral-7b-v0.1’s calculus accuracy as 85%. According to Figure 3, its true accuracy is approximately 70%, so the 85% figure contradicts the benchmark results.", "claim": "In the Competitions domain, Mathstral-7b-v0.1 achieves around 85% accuracy on Calculus problems, outperforming Romanian-specialized RoMistral-7b (~55%), indicating mathematically fine-tuned models significantly improve performance on advanced calculus tasks under zero-shot prompting.", "label": false }, { "paperid": "2409.03466v1", "paper_path": "./SciVer/papers/2409.03466v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2409.03466v1-Table2-1.png", "request_id": 493, "origin_statement": "Table 2 shows that ∼75% of simulated planetary transits (2677 of 3593) survive truncation, compared to over 99% retention of eclipsing binaries (1984/2005) and background binaries (736/741). This reflects depth and stellar radius filters disproportionately excluding low SNR, Earth-sized transit signals.", "perturbed_statement": "Table 2 indicates that only 10% of planetary transits (2677 of 3593) remain post-truncation, whereas eclipsing binaries and background binaries retain over 99%. This reflects period-based filtering that removes signals with periods longer than 30 days, disproportionately eliminating Earth-like transits.", "perturbed_explanation": "The perturbed statement is incorrect because the actual retention rate of planetary transits is about 75% (2677/3593), not 10%. Additionally, truncation is based on transit depth (<50 ppm) and stellar radius (>2.5 R⊙), not on a period-based filter for signals longer than 30 days.", "claim": "Table 2 indicates that only 10% of planetary transits (2677 of 3593) remain post-truncation, whereas eclipsing binaries and background binaries retain over 99%. 
This reflects period-based filtering that removes signals with periods longer than 30 days, disproportionately eliminating Earth-like transits.", "label": false }, { "paperid": "2409.04941v1", "paper_path": "./SciVer/papers/2409.04941v1.json", "claim_type": "analytical", "type": "chart", "item": "1(b)", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.04941v1_figure_1(b).png", "request_id": 494, "origin_statement": "The copy_mem workload exhibits ~85% scalar memory instructions (Fig.1b), suggesting lower dynamic power since scalar memory operations have reduced switching capacitance compared to vector operations; conversely, peakflops_avx_fma’s ~90% vector arithmetic usage amplifies dynamic power due to increased transistor switching in SIMD units.", "perturbed_statement": "The copy_mem workload exhibits ~85% scalar arithmetic instructions, implying lower dynamic power since scalar arithmetic threads switch fewer transistors than vector units; in contrast, peakflops_avx_fma’s ~90% vector logic usage results in greater power consumption due to intensified gate switching in SIMD pipelines.", "perturbed_explanation": "The perturbed statement misidentifies the instruction types: copy_mem is dominated by scalar memory (not scalar arithmetic), and peakflops_avx_fma is dominated by vector arithmetic (not vector logic), directly contradicting the histogram in Fig.1b.", "claim": "The copy_mem workload exhibits ~85% scalar arithmetic instructions, implying lower dynamic power since scalar arithmetic threads switch fewer transistors than vector units; in contrast, peakflops_avx_fma’s ~90% vector logic usage results in greater power consumption due to intensified gate switching in SIMD pipelines.", "label": false }, { "paperid": "2409.17090v1", "paper_path": "./SciVer/papers/2409.17090v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.17090v1-Table3-1.png", "request_id": 495, "origin_statement": "On the Ionosphere dataset, SRSG improves AC from 0.7095 to 0.7635 (a 7.6% absolute gain) and NMI from 0.1285 to 0.2355, indicating that incorporating spectral regularization enhances cluster separability and robustness against noise.", "perturbed_statement": "On the Heart dataset, SRSG improves AC from 0.5889 to 0.6481, representing a 15% absolute gain, demonstrating subspace regularization’s effectiveness in mitigating feature noise.", "perturbed_explanation": "The statement is incorrect because SRSG’s AC improvement on the Heart dataset is only 0.6481 − 0.5889 = 0.0592 (5.92% absolute), not 15%.", "claim": "On the Heart dataset, SRSG improves AC from 0.5889 to 0.6481, representing a 15% absolute gain, demonstrating subspace regularization’s effectiveness in mitigating feature noise.", "label": false }, { "paperid": "2409.02076v6", "paper_path": "./SciVer/papers/2409.02076v6.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.02076v6_figure_2.png", "request_id": 496, "origin_statement": "At 16K sequence length, GPT_4o_mini's accuracy peaks at roughly 0.39 around 3K tokens but declines steeply by over 20% beyond the 4K-token mark, illustrating that the quadratic scaling of self-attention hampers instruction adherence over extended context windows.", "perturbed_statement": "At 16K sequence length, GPT_4o_mini's accuracy peaks at roughly 0.39 around 3K tokens but declines steeply by over 20% beyond the 8K-token mark, illustrating that the quadratic scaling of 
self-attention hampers instruction adherence over extended context windows.", "perturbed_explanation": "The statement incorrectly shifts the degradation threshold to 8K tokens. Both the figure and the context specify that accuracy begins to degrade significantly after 4,000 tokens, not 8,000.", "claim": "At 16K sequence length, GPT_4o_mini's accuracy peaks at roughly 0.39 around 3K tokens but declines steeply by over 20% beyond the 8K-token mark, illustrating that the quadratic scaling of self-attention hampers instruction adherence over extended context windows.", "label": false }, { "paperid": "2410.04380v1", "paper_path": "./SciVer/papers/2410.04380v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "3", "7.1" ], "image_path": "./SciVer/images/2410.04380v1_figure_1.png", "request_id": 498, "origin_statement": "Experiments reveal that reducing Encodec’s frame rate below 48 Hz causes PESQ to drop from ~4.3 at 48 Hz to ~3.5 at 32 Hz and triggers a WER collapse to 100% at 16 Hz, demonstrating that low token density from coarse temporal quantization critically undermines speech intelligibility.", "perturbed_statement": "Experiments reveal that reducing Encodec’s frame rate below 48 Hz causes PESQ to drop from ~4.3 at 48 Hz to ~3.5 at 32 Hz and triggers a WER collapse to 100% at 32 Hz, demonstrating that low token density from coarse temporal quantization critically undermines speech intelligibility.", "perturbed_explanation": "The perturbed statement incorrectly claims that WER collapses to 100% at 32 Hz. According to Figure 1, WER remains low (~10%) at 32 Hz and only rises to 100% when the frame rate is reduced to 16 Hz.", "claim": "Experiments reveal that reducing Encodec’s frame rate below 48 Hz causes PESQ to drop from ~4.3 at 48 Hz to ~3.5 at 32 Hz and triggers a WER collapse to 100% at 32 Hz, demonstrating that low token density from coarse temporal quantization critically undermines speech intelligibility.", "label": false }, { "paperid": "2409.01295v2", "paper_path": "./SciVer/papers/2409.01295v2.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.1.2" ], "image_path": "./SciVer/images/2409.01295v2-Table2-1.png", "request_id": 499, "origin_statement": "Despite displacement showing a stronger Pearson correlation with mpg, the horsepower-based linear model achieves a 14% MAPE and 2.75-mile MAE, indicating that it provides more precise fuel economy estimates, potentially due to lower leverage from extreme displacement values affecting residual variance.", "perturbed_statement": "Despite displacement showing a stronger Pearson correlation with mpg, the horsepower-based linear model achieves a 19% MAPE and 3.68-mile MAE, indicating that it provides more precise fuel economy estimates due to reduced variance in horsepower measurements.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 reports the horsepower model’s MAPE as 14% and MAE as 2.75 miles, not 19% and 3.68 miles. 
Those larger error values belong to the displacement-based model, not the horsepower-based one.", "claim": "Despite displacement showing a stronger Pearson correlation with mpg, the horsepower-based linear model achieves a 19% MAPE and 3.68-mile MAE, indicating that it provides more precise fuel economy estimates due to reduced variance in horsepower measurements.", "label": false }, { "paperid": "2411.02278v1", "paper_path": "./SciVer/papers/2411.02278v1.json", "claim_type": "analytical", "type": "chart", "item": "3(c)", "section": [ "5.2", "5.3" ], "image_path": "./SciVer/images/2411.02278v1_figure_3(c).png", "request_id": 500, "origin_statement": "Figure 3 shows that increasing optimization levels yield higher median structural similarity for WASM decompilers (w2c2 and wasm2c) due to reduced code size improving tree alignment, whereas native binary decompilers (Ghidra and RetDec) see decreasing median similarity as optimization introduces complex assembly idioms that hinder C semantic recovery.", "perturbed_statement": "Figure 3 shows that increasing optimization levels yield lower median structural similarity for WASM decompilers (w2c2 and wasm2c), while native binary decompilers (Ghidra and RetDec) show increasing median similarity as optimization introduces complex assembly idioms that hinder C semantic recovery.", "perturbed_explanation": "The perturbation is incorrect because the context and Figure 3 indicate that WASM decompilers actually exhibit higher median structural similarity at higher optimization levels, while native binary decompilers (Ghidra and RetDec) display decreasing similarity with optimization.", "claim": "Figure 3 shows that increasing optimization levels yield lower median structural similarity for WASM decompilers (w2c2 and wasm2c), while native binary decompilers (Ghidra and RetDec) show increasing median similarity as optimization introduces complex assembly idioms that hinder C semantic recovery.", "label": false }, { "paperid": "2410.09247v1", "paper_path": "./SciVer/papers/2410.09247v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.09247v1_figure_2.png", "request_id": 504, "origin_statement": "The strict alignment of all pre-2024 model accuracies within the 95% confidence band in Figure 2 indicates that performance on Retro-Misconceptions mirrors original TruthfulQA misconceptions, implying that these retro-holdout models exhibit no systematic evaluation bias from dataset-specific artifacts.", "perturbed_statement": "Figure 2 shows that Davinci-002 and Amplified models fall outside the 80% confidence band, revealing a significant performance gap between Retro-Misconceptions and TruthfulQA for pre-2024 models.", "perturbed_explanation": "The statement is incorrect because the figure depicts a 95% confidence band, not an 80% one, and no model points (including Davinci-002 and Amplified) lie outside the shown band. 
Thus both the band level and exceedance claim contradict the context.", "claim": "Figure 2 shows that Davinci-002 and Amplified models fall outside the 80% confidence band, revealing a significant performance gap between Retro-Misconceptions and TruthfulQA for pre-2024 models.", "label": false }, { "paperid": "2410.19093v1", "paper_path": "./SciVer/papers/2410.19093v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "4.2.6" ], "image_path": "./SciVer/images/2410.19093v1_figure_8.png", "request_id": 506, "origin_statement": "The equidistant ∼0.506 d^-1 spacing between f1, f2, and f3 indicates rotational splitting of an l=1 non-radial pulsation mode in ZGP-BLAP-08, implying a rotation period of ∼1.98 days, consistent with modest rotational velocities in BLAP-type stars.", "perturbed_statement": "The equidistant ∼5.06 d^-1 spacing between f1, f2, and f3 indicates rotational splitting of an l=0 radial pulsation mode in ZGP-BLAP-08, implying a rotation period of ∼0.198 days, suggesting ZGP-BLAP-08 is an ultrafast rotator.", "perturbed_explanation": "This is incorrect because the observed mode spacing is ∼0.506 d^-1 (not 5.06 d^-1), and rotational splitting produces multiplets only in non-radial (l≥1) modes—radial (l=0) modes do not split.", "claim": "The equidistant ∼5.06 d^-1 spacing between f1, f2, and f3 indicates rotational splitting of an l=0 radial pulsation mode in ZGP-BLAP-08, implying a rotation period of ∼0.198 days, suggesting ZGP-BLAP-08 is an ultrafast rotator.", "label": false }, { "paperid": "2409.17455v1", "paper_path": "./SciVer/papers/2409.17455v1.json", "claim_type": "analytical", "type": "table", "item": "7", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.17455v1-Table7-1.png", "request_id": 507, "origin_statement": "The observed parameter growth from 110M in BERT to 13B in Llama2-13B suggests that larger LLMs leverage their higher capacity to capture complex linguistic patterns, thereby enhancing resistance to spurious correlations through improved feature abstraction.", "perturbed_statement": "However, BERT’s 110B parameters drastically exceed Llama2-13B’s 130B capacity, and models below 100M parameters typically achieve superior generalization by avoiding excessive abstraction.", "perturbed_explanation": "The perturbed statement misreports BERT’s size as 110 billion parameters (it is 110 million) and Llama2-13B’s size as 130 billion (it is 13 billion). 
Additionally, it falsely asserts that smaller models generalize better, contradicting evidence that larger parameter counts usually improve abstraction and robustness against spurious correlations.", "claim": "However, BERT’s 110B parameters drastically exceed Llama2-13B’s 130B capacity, and models below 100M parameters typically achieve superior generalization by avoiding excessive abstraction.", "label": false }, { "paperid": "2409.06290v1", "paper_path": "./SciVer/papers/2409.06290v1.json", "claim_type": "analytical", "type": "chart", "item": "3(b)", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.06290v1_figure_3(b).png", "request_id": 510, "origin_statement": "The t-SNE visualization for EntAugment reveals well-separated, color-coded CIFAR-10 class clusters with a Dunn index of 7.93×10⁻⁵, reflecting enhanced inter-class distance and intra-class compactness, which by reducing feature overlap, likely improves ResNet-50’s classification generalization.", "perturbed_statement": "The t-SNE visualization for EntAugment reveals well-separated clusters with a Dunn index of 7.93×10⁻³, exceeding the 10⁻⁴ threshold for clear class separation, which guarantees zero feature overlap and optimal ResNet-50 generalization.", "perturbed_explanation": "The perturbed statement misreports the Dunn index as 7.93×10⁻³ instead of the actual 7.93×10⁻⁵ and falsely claims a 10⁻⁴ separability threshold; in reality, 7.93×10⁻⁵ falls below that made-up threshold, so the metrics do not guarantee perfect class separation.", "claim": "The t-SNE visualization for EntAugment reveals well-separated clusters with a Dunn index of 7.93×10⁻³, exceeding the 10⁻⁴ threshold for clear class separation, which guarantees zero feature overlap and optimal ResNet-50 generalization.", "label": false }, { "paperid": "2409.19764v2", "paper_path": "./SciVer/papers/2409.19764v2.json", "claim_type": "analytical", "type": "chart", "item": "5(b)", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.19764v2_figure_5(b).png", "request_id": 514, "origin_statement": "STAtten outperforms spatial-only baselines by approximately 3–5 percentage points across all model sizes on Sequential CIFAR100, with the greatest relative gain at the smallest 2.56M configuration, indicating that temporal attention modules enhance efficiency under constrained parameter budgets, consistent with transformer scaling principles.", "perturbed_statement": "STAtten uniformly outperforms spatial-only baselines by approximately 5–7 percentage points across all model sizes on Sequential CIFAR100, with the greatest relative gain at the medium-sized 7.07M configuration, indicating that temporal attention modules enhance efficiency under constrained parameter budgets, consistent with transformer scaling principles.", "perturbed_explanation": "Fig. 
5(b) shows STAtten’s accuracy gain over spatial-only ranges from about 1.4 to 4.9 percentage points, not 5–7, and the highest relative improvement (~4.9 points) occurs at the 2.56M parameter configuration, not at 7.07M.", "claim": "STAtten uniformly outperforms spatial-only baselines by approximately 5–7 percentage points across all model sizes on Sequential CIFAR100, with the greatest relative gain at the medium-sized 7.07M configuration, indicating that temporal attention modules enhance efficiency under constrained parameter budgets, consistent with transformer scaling principles.", "label": false }, { "paperid": "2411.02614v1", "paper_path": "./SciVer/papers/2411.02614v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.02614v1-Table3-1.png", "request_id": 515, "origin_statement": "Incorporating SSL pretraining in early training of fundus image DG models likely improves class separability by providing initial robust representations, leading to a 6.6% accuracy gain and 3.0% F1 improvement, thus mitigating label noise effects inherent in diabetic retinopathy datasets.", "perturbed_statement": "Incorporating SSL pretraining in early training of fundus image DG models likely improves inter-domain invariance by providing initial robust representations, leading to an 8.2% accuracy gain and 1.2% F1 improvement, thus counteracting domain shifts inherent in diabetic retinopathy datasets.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 reports a 6.6% accuracy gain (not 8.2%) and a 3.0% F1 improvement (not 1.2%) for SSL pretraining. Moreover, SSL pretraining primarily mitigates label noise, not domain shifts, as described in the context.", "claim": "Incorporating SSL pretraining in early training of fundus image DG models likely improves inter-domain invariance by providing initial robust representations, leading to an 8.2% accuracy gain and 1.2% F1 improvement, thus counteracting domain shifts inherent in diabetic retinopathy datasets.", "label": false }, { "paperid": "2410.20335v1", "paper_path": "./SciVer/papers/2410.20335v1.json", "claim_type": "analytical", "type": "chart", "item": "2(c)", "section": [ "5.4" ], "image_path": "./SciVer/images/2410.20335v1_figure_2(c).png", "request_id": 516, "origin_statement": "The proposed IFUTSVM-ID model achieves 59% specificity versus 52% sensitivity, implying a 7-point advantage in negative case detection; this reflects its effective utilization of intensity-level features to reduce false-positive Alzheimer’s diagnoses under imbalanced CN-dominant datasets.", "perturbed_statement": "The proposed IFUTSVM-ID model achieves 52% specificity versus 59% sensitivity, implying a 7-point advantage in positive case detection; this reflects its effective utilization of intensity-level features to reduce false-positive Alzheimer’s diagnoses under imbalanced CN-dominant datasets.", "perturbed_explanation": "The perturbed statement swaps sensitivity and specificity values. According to Figure 2, the actual specificity of IFUTSVM-ID is ~59% and sensitivity is ~52%, not the other way around. 
This inversion contradicts the context data.", "claim": "The proposed IFUTSVM-ID model achieves 52% specificity versus 59% sensitivity, implying a 7-point advantage in positive case detection; this reflects its effective utilization of intensity-level features to reduce false-positive Alzheimer’s diagnoses under imbalanced CN-dominant datasets.", "label": false }, { "paperid": "2409.05429v2", "paper_path": "./SciVer/papers/2409.05429v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.05429v2-Table1-1.png", "request_id": 517, "origin_statement": "By combining aircraft type, age, and wingspan—which influence aerodynamic drag and engine efficiency—with ADS-B trajectory-derived speed and altitude data and ACARS fuel metrics, the model achieves interval fuel-consumption estimates with a mean absolute error below 5% across over 6 million flights.", "perturbed_statement": "By combining aircraft type, age, and wingspan—which influence aerodynamic drag and engine efficiency—with ADS-B trajectory-derived speed and altitude data and ACARS fuel metrics, the model achieves interval fuel-consumption estimates with a mean absolute error below 5% across over 10 million flights.", "perturbed_explanation": "The perturbed statement erroneously states that the model was trained on over 10 million flights, while the context clearly specifies a dataset of 6.147 million flights.", "claim": "By combining aircraft type, age, and wingspan—which influence aerodynamic drag and engine efficiency—with ADS-B trajectory-derived speed and altitude data and ACARS fuel metrics, the model achieves interval fuel-consumption estimates with a mean absolute error below 5% across over 10 million flights.", "label": false }, { "paperid": "2411.00848v1", "paper_path": "./SciVer/papers/2411.00848v1.json", "claim_type": "analytical", "type": "chart", "item": "3(b)", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2411.00848v1_figure_3(b).png", "request_id": 518, "origin_statement": "The triangular fuzzy membership function for Iris setosa sepal width peaks at 3.4 cm with a span of 2.3–4.4 cm, indicating its sepal width has the narrowest distribution, facilitating high-confidence separation from Versicolor and Virginica based on fuzzy classifiers.", "perturbed_statement": "The triangular fuzzy membership function for Iris setosa sepal width peaks at 3.0 cm with a span of 2.0–4.0 cm, indicating its sepal width has the broadest distribution, hindering separation from Versicolor and Virginica based on fuzzy classifiers.", "perturbed_explanation": "This is incorrect because the figure shows Setosa’s sepal width membership actually peaks at 3.4 cm (not 3.0 cm) with support from 2.3 to 4.4 cm (not 2.0–4.0 cm), and its distribution is the narrowest, not the broadest.", "claim": "The triangular fuzzy membership function for Iris setosa sepal width peaks at 3.0 cm with a span of 2.0–4.0 cm, indicating its sepal width has the broadest distribution, hindering separation from Versicolor and Virginica based on fuzzy classifiers.", "label": false }, { "paperid": "2410.21256v1", "paper_path": "./SciVer/papers/2410.21256v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "1", "4.1" ], "image_path": "./SciVer/images/2410.21256v1-Table1-1.png", "request_id": 519, "origin_statement": "The evaluation cohort’s higher T1 proportion (53% vs 40%) and N0 rate (65% vs 55%), coupled with shorter follow-up (4.5 vs 5.1 years), likely underrepresent late 
recurrences, thus biasing prognostic model performance toward early-stage disease.", "perturbed_statement": "The evaluation cohort’s higher T2 proportion (48% vs 38%) and N1 rate (25% vs 23%), alongside longer follow-up (6.5 vs 5.1 years), likely underrepresent late recurrences, biasing prognostic model performance toward early-stage disease.", "perturbed_explanation": "This statement is incorrect because in the actual data the evaluation cohort’s T2 proportion is 36.75% (not 48%), N1 is 23.96% (not 25%), and follow-up is shorter at 4.51 years (not longer at 6.5 years) compared with training.", "claim": "The evaluation cohort’s higher T2 proportion (48% vs 38%) and N1 rate (25% vs 23%), alongside longer follow-up (6.5 vs 5.1 years), likely underrepresent late recurrences, biasing prognostic model performance toward early-stage disease.", "label": false }, { "paperid": "2410.21562v1", "paper_path": "./SciVer/papers/2410.21562v1.json", "claim_type": "analytical", "type": "chart", "item": "2(h)", "section": [ "2.3.2" ], "image_path": "./SciVer/images/2410.21562v1_figure_2(h).png", "request_id": 520, "origin_statement": "Figure 2 shows vertical red lines indicating detected angular boundaries Ω_θ^i. A higher number of angular sectors leads to narrower wedges in the Fourier domain, increasing directional sensitivity but raising computational complexity almost linearly with the sector count.", "perturbed_statement": "Figure 2 shows vertical red lines indicating detected angular boundaries Ω_θ^i. Increasing the number of angular sectors produces wider wedges in the Fourier domain, which decreases directional sensitivity yet increases computational complexity roughly linearly with the sector count.", "perturbed_explanation": "The perturbation wrongly claims that more angular sectors yield wider Fourier-domain wedges and thus lower directional sensitivity. In reality, subdividing into more sectors produces narrower wedges and enhances directional sensitivity, as indicated by the vertical red lines in Figure 2 and standard curvelet theory.", "claim": "Figure 2 shows vertical red lines indicating detected angular boundaries Ω_θ^i. Increasing the number of angular sectors produces wider wedges in the Fourier domain, which decreases directional sensitivity yet increases computational complexity roughly linearly with the sector count.", "label": false }, { "paperid": "2410.18766v2", "paper_path": "./SciVer/papers/2410.18766v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.18766v2-Table3-1.png", "request_id": 521, "origin_statement": "Incorporating the adjacency graph fusion reduces RMSE by approximately 17.5%, demonstrating that exploiting spatial correlations via graph-based modules markedly improves mid-range EV charging demand forecasts by capturing area-to-area contagion effects that traditional local models miss.", "perturbed_statement": "Incorporating the adjacency graph fusion reduces MAE by approximately 25%, demonstrating that exploiting spatial correlations via graph-based modules markedly improves long-term EV charging demand forecasts by capturing temporal autocorrelation effects that traditional local models miss.", "perturbed_explanation": "Table 3 shows removing module (b) increases average RMSE from 4.51×10⁻² to 5.46×10⁻², a ~17.5% improvement, not a 25% reduction in MAE. 
The gain stems from spatial, not temporal autocorrelation, and is strongest in mid-range (30–45 min), not long-term forecasts.", "claim": "Incorporating the adjacency graph fusion reduces MAE by approximately 25%, demonstrating that exploiting spatial correlations via graph-based modules markedly improves long-term EV charging demand forecasts by capturing temporal autocorrelation effects that traditional local models miss.", "label": false }, { "paperid": "2409.09946v2", "paper_path": "./SciVer/papers/2409.09946v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2", "3" ], "image_path": "./SciVer/images/2409.09946v2-Table1-1.png", "request_id": 525, "origin_statement": "The substantial number of BOSS CMASS galaxies (620,292 at 0.40.70 bpp) OLVQ yields minimal SSIM gains (under 0.001) and no more than ~8% bitrate savings, while the autoregressive context model still achieves higher SSIM, contradicting the claimed 0.010 gain and 30% savings.", "claim": "At bitrates above 0.70 bpp, OLVQ exhibits a larger SSIM gain than the autoregressive model, providing a 0.010 SSIM improvement over USQ and nearly 30% bitrate savings, indicating its dominance in high‐rate scenarios.", "label": false }, { "paperid": "2409.07770v1", "paper_path": "./SciVer/papers/2409.07770v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.07770v1-Table5-1.png", "request_id": 537, "origin_statement": "Combining MCA layer pooling with the AttnVAD+D2Block L/F-network consistently yields the lowest EERs, reducing error by up to 46% on VoxCeleb1, which underscores that synergistic multi-scale aggregation and attentive frame-level modeling best leverage pretrained features to generalize under acoustic variability.", "perturbed_statement": "When using WavLM features, applying only the MCA pooling without the L/F-network achieves a 35% relative EER reduction on VoxCeleb2, surpassing the 32% reduction obtained by combining MCA with AttnVAD+D2Block.", "perturbed_explanation": "The table shows that WavLM with MCA alone yields a 34% relative EER reduction on VoxCeleb2 (not 35%), and the combined MCA+AttnVAD+D2Block method actually achieves a 42% reduction (not 32%), so both the reported percentages and the claimed ranking are incorrect.", "claim": "When using WavLM features, applying only the MCA pooling without the L/F-network achieves a 35% relative EER reduction on VoxCeleb2, surpassing the 32% reduction obtained by combining MCA with AttnVAD+D2Block.", "label": false }, { "paperid": "2410.19346v2", "paper_path": "./SciVer/papers/2410.19346v2.json", "claim_type": "analytical", "type": "table", "item": "6", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.19346v2-Table6-1.png", "request_id": 539, "origin_statement": "The roughly 10-point accuracy difference between GPT-4o (0.82) and Llama-3-8b (0.72) underscores how parameter scaling—from 8 billion to 70 billion parameters—and instruction fine-tuning combine to significantly improve goal assessment fidelity under deterministic judge settings (temperature=0).", "perturbed_statement": "The roughly 20-point accuracy difference between GPT-4o (0.82) and Llama-3-8b (0.72) underscores how parameter scaling—from 8 billion to 70 billion parameters—and instruction fine-tuning combine to significantly improve goal assessment fidelity under deterministic judge settings (temperature=0).", "perturbed_explanation": "The perturbed statement incorrectly claims a 20-point difference: the 
actual accuracy gap is 0.82−0.72=0.10, i.e. a 10-point difference, not 20.", "claim": "The roughly 20-point accuracy difference between GPT-4o (0.82) and Llama-3-8b (0.72) underscores how parameter scaling—from 8 billion to 70 billion parameters—and instruction fine-tuning combine to significantly improve goal assessment fidelity under deterministic judge settings (temperature=0).", "label": false }, { "paperid": "2410.18555v1", "paper_path": "./SciVer/papers/2410.18555v1.json", "claim_type": "analytical", "type": "chart", "item": "7(b)", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2410.18555v1_figure_7(b).png", "request_id": 540, "origin_statement": "The heatmap in Fig.7(d) reveals that strokes within the same symbol cluster (e.g., successive “4”s or “F”s) exhibit higher mutual attention, indicating EGAT’s edge weights effectively capture local structural dependencies. Concurrently, the uniformly light first column confirms the master node aggregates global context from all strokes.", "perturbed_statement": "The heatmap in Fig.7(d) shows that strokes within the same symbol cluster (e.g., successive “4”s or “F”s) exhibit higher mutual attention, and the uniformly light first row indicates the master node distributes global context equally to all strokes.", "perturbed_explanation": "This statement is incorrect because the uniformly light region appears in the first column, not the first row. In the heatmap, the first row is actually darker, indicating that the master node does not distribute information equally across all strokes via the first row.", "claim": "The heatmap in Fig.7(d) shows that strokes within the same symbol cluster (e.g., successive “4”s or “F”s) exhibit higher mutual attention, and the uniformly light first row indicates the master node distributes global context equally to all strokes.", "label": false }, { "paperid": "2410.06992v2", "paper_path": "./SciVer/papers/2410.06992v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5" ], "image_path": "./SciVer/images/2410.06992v2-Table4-1.png", "request_id": 541, "origin_statement": "Despite RAG+GPT-3.5’s minimal average cost ($0.05 per instance), its low resolution rate inflates its effectiveness-aware cost to $10 per fix, exemplifying how per-unit affordability can be offset by increased iteration and manual validation overhead in software patch generation.", "perturbed_statement": "Despite RAG+GPT-3.5’s minimal average cost ($0.50 per instance), its low resolution rate inflates its effectiveness-aware cost to $20 per fix, exemplifying how per-unit affordability can be offset by increased iteration and manual validation overhead in software patch generation.", "perturbed_explanation": "The perturbed statement misstates RAG+GPT-3.5’s cost metrics. 
Table 4 reports an average cost of $0.05 per instance (not $0.50) and an effectiveness-aware cost of $10 per fix (not $20), so both figures contradict the provided data.", "claim": "Despite RAG+GPT-3.5’s minimal average cost ($0.50 per instance), its low resolution rate inflates its effectiveness-aware cost to $20 per fix, exemplifying how per-unit affordability can be offset by increased iteration and manual validation overhead in software patch generation.", "label": false }, { "paperid": "2409.01722v2", "paper_path": "./SciVer/papers/2409.01722v2.json", "claim_type": "analytical", "type": "chart", "item": "6(a)", "section": [ "6.1" ], "image_path": "./SciVer/images/2409.01722v2_figure_6(a).png", "request_id": 542, "origin_statement": "Because ACCESS-FL limits each client to only two neighbor masks per round, its cumulative message count remains at 100 messages after 100 rounds, demonstrating O(1) scaling. In contrast, SecAgg's full pairing yields O(n) messages per client, reaching ~10,000 messages with n=100.", "perturbed_statement": "Because ACCESS-FL limits each client to only two neighbor masks per round, its cumulative message count remains at 200 messages after 100 rounds, demonstrating O(1) scaling. In contrast, SecAgg's full pairing yields O(n) messages per client, reaching ~10,000 messages with n=100.", "perturbed_explanation": "This statement is incorrect because ACCESS-FL actually accumulates about 100 messages per client over 100 rounds (one masked model update each round), not 200, as specified in Figure 6 and the accompanying text describing constant messaging at roughly 100 messages.", "claim": "Because ACCESS-FL limits each client to only two neighbor masks per round, its cumulative message count remains at 200 messages after 100 rounds, demonstrating O(1) scaling. 
In contrast, SecAgg's full pairing yields O(n) messages per client, reaching ~10,000 messages with n=100.", "label": false }, { "paperid": "2409.16708v1", "paper_path": "./SciVer/papers/2409.16708v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.4", "5.1" ], "image_path": "./SciVer/images/2409.16708v1-Table2-1.png", "request_id": 543, "origin_statement": "Participants with higher SNAIL Technical Understanding scores showed significantly greater AI-driven performance gains (ΔEP: r = 0.21, p < 0.01), suggesting that deeper technical literacy enhances the effective utilization of AI assistance.", "perturbed_statement": "Participants with higher SNAIL Critical Awareness scores showed significantly greater AI-driven performance gains (ΔEP: r = 0.35, p < 0.01), indicating that critical AI literacy enhances AI utilization.", "perturbed_explanation": "The perturbed statement is incorrect because the table reports only a small, non-significant correlation between ΔEP and SNAIL Critical Awareness (r = 0.10, not 0.35), whereas the significant correlation (r = 0.21, p < 0.01) was observed with SNAIL Technical Understanding.", "claim": "Participants with higher SNAIL Critical Awareness scores showed significantly greater AI-driven performance gains (ΔEP: r = 0.35, p < 0.01), indicating that critical AI literacy enhances AI utilization.", "label": false }, { "paperid": "2410.06169v2", "paper_path": "./SciVer/papers/2410.06169v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.5" ], "image_path": "./SciVer/images/2410.06169v2-Table4-1.png", "request_id": 545, "origin_statement": "Removing up to 10 non-active attention heads per layer cuts visual multi-head attention FLOPs by roughly 16% while only reducing the LLaVA-1.5-13B model’s averaged benchmark performance from 68.5 to 68.4 (0.1), highlighting substantial redundancy in visual attention and potential proportional GPU memory savings.", "perturbed_statement": "Removing 25 non-active attention heads per layer cuts visual multi-head attention FLOPs by roughly 40% while only reducing the LLaVA-1.5-13B model’s averaged benchmark performance from 68.5 to 68.4 (0.1), highlighting substantial redundancy in visual attention and potential proportional GPU memory savings.", "perturbed_explanation": "Tab. 4 shows that dropping 25 heads actually yields an average accuracy of 67.4— a 1.1-point drop from the baseline—not 68.4. 
Therefore, the claimed 0.1-point performance reduction is incorrect, and the FLOPs reduction percentage is also misaligned with the actual head count dropped.", "claim": "Removing 25 non-active attention heads per layer cuts visual multi-head attention FLOPs by roughly 40% while only reducing the LLaVA-1.5-13B model’s averaged benchmark performance from 68.5 to 68.4 (0.1), highlighting substantial redundancy in visual attention and potential proportional GPU memory savings.", "label": false }, { "paperid": "2409.11267v1", "paper_path": "./SciVer/papers/2409.11267v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.11267v1_figure_6.png", "request_id": 546, "origin_statement": "As Np increases from 4 to 48, the RL method maintains an infeasibility rate under 2.5% (max 23/1000), illustrating that its reward-driven policy effectively captures system constraints and generalizes across longer horizons, unlike SL whose error-driven training leads to rapidly rising infeasibility.", "perturbed_statement": "As Np increases from 4 to 48, the RL method maintains an infeasibility rate under 0.5% (max 5/1000), illustrating that its reward-driven policy effectively captures system constraints and generalizes across longer horizons, unlike SL whose error-driven training leads to rapidly rising infeasibility.", "perturbed_explanation": "The perturbed statement incorrectly claims a maximum infeasibility rate of 5/1000 (0.5%), but Figure 6 shows the RL approach reaches 23/1000 (2.3%) at Np=48, so the under-0.5% threshold is verifiably false.", "claim": "As Np increases from 4 to 48, the RL method maintains an infeasibility rate under 0.5% (max 5/1000), illustrating that its reward-driven policy effectively captures system constraints and generalizes across longer horizons, unlike SL whose error-driven training leads to rapidly rising infeasibility.", "label": false }, { "paperid": "2409.14838v1", "paper_path": "./SciVer/papers/2409.14838v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1.1" ], "image_path": "./SciVer/images/2409.14838v1-Table1-1.png", "request_id": 549, "origin_statement": "By substituting detailed activation traces with statistical averages, the average mode smooths current peaks, marginally reducing dynamic energy consumption variability and yielding ≤8.8% lower energy efficiency across VGG8, ResNet18, and DenseNet40, while preserving identical peak throughput due to unchanged data parallelism.", "perturbed_statement": "By substituting detailed activation traces with statistical averages, the average mode smooths current peaks, marginally reducing dynamic energy consumption variability and yielding ≤1.5% lower energy efficiency across VGG8, ResNet18, and DenseNet40, while boosting peak throughput by 0.02 TOPS due to optimized SRAM access.", "perturbed_explanation": "The claimed efficiency drop of ≤1.5% contradicts the table, which shows energy efficiency decreases of 7.4% (VGG8), 7.7% (ResNet18), and 8.8% (DenseNet40). 
Additionally, throughput remains identical (no change of 0.02 TOPS) for all networks in both modes.", "claim": "By substituting detailed activation traces with statistical averages, the average mode smooths current peaks, marginally reducing dynamic energy consumption variability and yielding ≤1.5% lower energy efficiency across VGG8, ResNet18, and DenseNet40, while boosting peak throughput by 0.02 TOPS due to optimized SRAM access.", "label": false }, { "paperid": "2410.21259v2", "paper_path": "./SciVer/papers/2410.21259v2.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4", "4.3" ], "image_path": "./SciVer/images/2410.21259v2_figure_6.png", "request_id": 550, "origin_statement": "Spatial tasks exhibit an average 20-point performance decline from easy to hard, while atmospheric understanding drops by only 12 points, indicating LVLMs' particular sensitivity to spatial reasoning demands and potential need for specialized spatial inductive biases to capture 3D geometry.", "perturbed_statement": "Spatial tasks exhibit an average 30-point performance decline from easy to hard, while atmospheric understanding drops by only 5 points, indicating LVLMs' particular sensitivity to spatial reasoning demands and potential need for specialized spatial inductive biases to capture 3D geometry.", "perturbed_explanation": "The perturbation is incorrect because Figure 6 shows spatial tasks decline by about 20 percentage points from easy to hard (e.g., ~0.85 to ~0.65), not 30 points. Similarly, atmospheric understanding drops by approximately 12 points (e.g., ~0.93 to ~0.81), not 5 points.", "claim": "Spatial tasks exhibit an average 30-point performance decline from easy to hard, while atmospheric understanding drops by only 5 points, indicating LVLMs' particular sensitivity to spatial reasoning demands and potential need for specialized spatial inductive biases to capture 3D geometry.", "label": false }, { "paperid": "2409.01696v1", "paper_path": "./SciVer/papers/2409.01696v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "5.1", "5.2", "5.3" ], "image_path": "./SciVer/images/2409.01696v1-Table5-1.png", "request_id": 553, "origin_statement": "By initially training ResNet-34 with full skip connections and then removing its last-stage skip-connections, the two-stage training scheme (TTS) leverages improved gradient propagation for parameter initialization, yielding a 31.48% ratio of attack accuracy drop to natural accuracy drop (Δ), thus optimizing the privacy–utility trade-off.", "perturbed_statement": "For ResNet-50, the TTS method first trains with full skip connections then removes the last-stage skip-connections, resulting in a 31.48% ratio of attack accuracy drop to natural accuracy drop (Δ), thereby maximizing the privacy–utility trade-off.", "perturbed_explanation": "The perturbed statement is incorrect because Table 5 shows that for ResNet-50, TTS achieves a Δ of 5.44%, not 31.48%. 
The 31.48% Δ corresponds to ResNet-34, so attributing it to ResNet-50 contradicts the context data.", "claim": "For ResNet-50, the TTS method first trains with full skip connections then removes the last-stage skip-connections, resulting in a 31.48% ratio of attack accuracy drop to natural accuracy drop (Δ), thereby maximizing the privacy–utility trade-off.", "label": false }, { "paperid": "2409.15683v1", "paper_path": "./SciVer/papers/2409.15683v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.3", "3.4" ], "image_path": "./SciVer/images/2409.15683v1_figure_4.png", "request_id": 554, "origin_statement": "By sampling the initial condition u0(x) from a Gaussian random field with an Exp-Sine-Squared kernel of periodicity 1 and Lévy exponent 0.5, the analytic solution u(x,t)=u0(x−t) can be accurately learned by both classical and quantum DeepONet architectures, achieving a relative test error of ≈2.3×10−4.", "perturbed_statement": "By sampling the initial condition u0(x) from a Gaussian random field with an Exp-Sine-Squared kernel of periodicity 2 and Lévy exponent 0.5, the analytic solution u(x,t)=u0(x−t) can be accurately learned by both classical and quantum DeepONet architectures, achieving a relative test error of ≈1.2×10−3.", "perturbed_explanation": "The perturbation is incorrect because the kernel’s periodicity is specified as 1 (not 2) in the context, and the reported relative test error for both classical and quantum DeepONet is ≈2.3×10⁻⁴ (not ≈1.2×10⁻³).", "claim": "By sampling the initial condition u0(x) from a Gaussian random field with an Exp-Sine-Squared kernel of periodicity 2 and Lévy exponent 0.5, the analytic solution u(x,t)=u0(x−t) can be accurately learned by both classical and quantum DeepONet architectures, achieving a relative test error of ≈1.2×10−3.", "label": false }, { "paperid": "2409.05790v1", "paper_path": "./SciVer/papers/2409.05790v1.json", "claim_type": "analytical", "type": "chart", "item": "6(a)", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.05790v1_figure_6(a).png", "request_id": 558, "origin_statement": "By interpolating within its latent manifold, the CVAE yields similar error distributions for in-hull and out-of-hull points, with only a slight rise in mean relative error (<1.7%) and variance during extrapolation, demonstrating that generative sampling effectively regularizes predictions beyond the training domain.", "perturbed_statement": "By interpolating within its latent manifold, the CVAE yields similar error distributions for in-hull and out-of-hull points, with only a slight rise in mean relative error (<0.7%) and a modest decrease in variance during extrapolation, demonstrating that generative sampling effectively regularizes predictions beyond the training domain.", "perturbed_explanation": "The statement is incorrect because the actual mean relative error remains under 1.7%, not under 0.7%, and the variance modestly increases—not decreases—when predicting outside the convex hull, contrary to the perturbed claim.", "claim": "By interpolating within its latent manifold, the CVAE yields similar error distributions for in-hull and out-of-hull points, with only a slight rise in mean relative error (<0.7%) and a modest decrease in variance during extrapolation, demonstrating that generative sampling effectively regularizes predictions beyond the training domain.", "label": false }, { "paperid": "2411.08284v1", "paper_path": "./SciVer/papers/2411.08284v1.json", "claim_type": "analytical", "type": "table", 
"item": "2", "section": [ "2" ], "image_path": "./SciVer/images/2411.08284v1-Table2-1.png", "request_id": 559, "origin_statement": "Hard thresholding H_k(u) retains u’s k largest magnitude entries, yielding the ℓ2-optimal k-sparse approximation of u by minimizing the reconstruction error ‖u - H_k(u)‖2^2 among all k-sparse vectors.", "perturbed_statement": "Hard thresholding H_k(u) retains u’s k largest magnitude entries, yielding the ℓ1-optimal k-sparse approximation of u by minimizing the reconstruction error ‖u - H_k(u)‖1 among all k-sparse vectors.", "perturbed_explanation": "The perturbed statement is incorrect because H_k(u) is defined to produce the best k-term approximation in the ℓ2 sense by minimizing the squared ℓ2-norm error ‖u - H_k(u)‖2^2. It does not guarantee ℓ1-optimality nor minimize the ℓ1-norm ‖u - H_k(u)‖1.", "claim": "Hard thresholding H_k(u) retains u’s k largest magnitude entries, yielding the ℓ1-optimal k-sparse approximation of u by minimizing the reconstruction error ‖u - H_k(u)‖1 among all k-sparse vectors.", "label": false }, { "paperid": "2409.11839v1", "paper_path": "./SciVer/papers/2409.11839v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.11839v1_figure_1.png", "request_id": 560, "origin_statement": "By 1973, operating SCAs in County Boroughs (~1,050) represented roughly a quarter of total operating SCAs in England (~4,200), underscoring the disproportionate urban focus of the 1956 Clean Air Act implementation.", "perturbed_statement": "By 1973, operating SCAs in County Boroughs (~1,050) represented roughly half of total operating SCAs in England (~4,200), underscoring the disproportionate urban focus of the 1956 Clean Air Act implementation.", "perturbed_explanation": "This statement is incorrect because, according to Figure 1 and the context, County Boroughs accounted for about 1,050 of 4,200 operating SCAs in 1973, which is approximately 25%, not 50%.", "claim": "By 1973, operating SCAs in County Boroughs (~1,050) represented roughly half of total operating SCAs in England (~4,200), underscoring the disproportionate urban focus of the 1956 Clean Air Act implementation.", "label": false }, { "paperid": "2409.06748v1", "paper_path": "./SciVer/papers/2409.06748v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.06748v1-Table1-1.png", "request_id": 561, "origin_statement": "On PEMS-04, EasyST achieves an RMSE of 30.46, 1.54 lower than MTGNN, showing that the IB-constrained distillation filters spurious correlations, improving the student MLP’s generalization of dynamic spatio-temporal transitions under diverse traffic patterns.", "perturbed_statement": "On PEMS-04, EasyST achieves an RMSE of 28.46, 2.54 lower than MTGNN, showing that the IB-constrained distillation filters spurious correlations, improving the student MLP’s generalization of dynamic spatio-temporal transitions under diverse traffic patterns.", "perturbed_explanation": "Table 1 reports EasyST’s RMSE on PEMS-04 as 30.46, not 28.46, and the actual reduction relative to MTGNN (32.00) is 1.54, not 2.54. 
The perturbed values therefore contradict the context.", "claim": "On PEMS-04, EasyST achieves an RMSE of 28.46, 2.54 lower than MTGNN, showing that the IB-constrained distillation filters spurious correlations, improving the student MLP’s generalization of dynamic spatio-temporal transitions under diverse traffic patterns.", "label": false }, { "paperid": "2410.22364v2", "paper_path": "./SciVer/papers/2410.22364v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.2" ], "image_path": "./SciVer/images/2410.22364v2-Table3-1.png", "request_id": 563, "origin_statement": "While asymmetric token dropout yields maximal NN accuracy at 100M units, its performance declines at higher budgets, whereas symmetric patch scaling continually benefits from increased budgets, reaching peak LP accuracy of 83.2% at 200M units due to uniform compression preserving feature distribution and reducing gradient bias.", "perturbed_statement": "Asymmetric token dropout peaks LP accuracy at 150M units with 82.1%, but then declines, whereas symmetric patch scaling achieves its highest NN accuracy of 81.2% at just 100M units, due to symmetric compression optimizing gradient diversity.", "perturbed_explanation": "The perturbation is incorrect because asymmetric token dropout LP accuracy at 150M units is actually 80.8%, not 82.1% (82.1% corresponds to symmetric patch scaling LP at 150M). Additionally, symmetric patch scaling NN accuracy at 100M is 66.7%, not 81.2%.", "claim": "Asymmetric token dropout peaks LP accuracy at 150M units with 82.1%, but then declines, whereas symmetric patch scaling achieves its highest NN accuracy of 81.2% at just 100M units, due to symmetric compression optimizing gradient diversity.", "label": false }, { "paperid": "2409.11395v1", "paper_path": "./SciVer/papers/2409.11395v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.3", "3.4" ], "image_path": "./SciVer/images/2409.11395v1_figure_4.png", "request_id": 564, "origin_statement": "The divergence in linear ramp coefficients around 10–11 μm, where the slope flips sign, indicates detector illumination memory effects can bias mid-infrared eclipse depths, requiring decorrelation of PSF width and temporal slope parameters to accurately retrieve exoplanetary brightness temperatures.", "perturbed_statement": "The divergence in linear ramp coefficients around 5–6 μm, where the slope flips sign, indicates detector illumination memory effects can bias near-infrared eclipse depths, requiring decorrelation of PSF width and temporal slope parameters to accurately retrieve exoplanetary brightness temperatures.", "perturbed_explanation": "The ramp slope reversal actually occurs around 10–11 μm—not 5–6 μm—so the stated wavelength range is incorrect. 
Moreover, the MIRI LRS observations span the mid-infrared (5–12 μm), making the reference to “near-infrared eclipse depths” factually wrong.", "claim": "The divergence in linear ramp coefficients around 5–6 μm, where the slope flips sign, indicates detector illumination memory effects can bias near-infrared eclipse depths, requiring decorrelation of PSF width and temporal slope parameters to accurately retrieve exoplanetary brightness temperatures.", "label": false }, { "paperid": "2411.07795v2", "paper_path": "./SciVer/papers/2411.07795v2.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.07795v2_figure_3(a).png", "request_id": 566, "origin_statement": "InvisMark’s significantly weaker residual amplitudes reduce pixel-level distortions, producing a PSNR distribution tightly clustered around 51 dB and an SSIM mode exceeding 0.9985 on both DALL·E 3 and DIV2K, thus maintaining imperceptible watermarking with minimal perceptual artifacts compared to TrustMark and StegaStamp.", "perturbed_statement": "InvisMark’s significantly weaker residual amplitudes reduce pixel-level distortions, producing a PSNR distribution tightly clustered around 45 dB and an SSIM mode exceeding 0.987 on both DALL·E 3 and DIV2K, thus maintaining imperceptible watermarking with minimal perceptual artifacts compared to TrustMark and StegaStamp.", "perturbed_explanation": "The perturbed PSNR value of 45 dB and SSIM mode of 0.987 contradict the context and Figure 3, which report PSNR values concentrated around 51 dB and SSIM values above 0.998, not as low as 45 dB or 0.987.", "claim": "InvisMark’s significantly weaker residual amplitudes reduce pixel-level distortions, producing a PSNR distribution tightly clustered around 45 dB and an SSIM mode exceeding 0.987 on both DALL·E 3 and DIV2K, thus maintaining imperceptible watermarking with minimal perceptual artifacts compared to TrustMark and StegaStamp.", "label": false }, { "paperid": "2411.13919v1", "paper_path": "./SciVer/papers/2411.13919v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.5" ], "image_path": "./SciVer/images/2411.13919v1-Table2-1.png", "request_id": 567, "origin_statement": "The SVC’s test accuracy climbs from 92.34% to 97.86% with pre-clustering, while training time drops from 732.22s to 112.28s, illustrating that dimensionality reduction via pre-clustering enhances both test performance and computational efficiency by mitigating overfitting and matrix inversion costs.", "perturbed_statement": "The KNN’s test accuracy declines from 98.19% to 85.85% with pre-clustering, while training time decreases from 1.17s to 0.86s, suggesting that pre-clustering reduces both accuracy and computation time.", "perturbed_explanation": "This statement is incorrect because Table 2 shows that KNN’s test accuracy actually increases from 85.85% (without pre-clustering) to 98.19% (with pre-clustering), and training time increases from 0.86s to 1.17s when pre-clustering is applied.", "claim": "The KNN’s test accuracy declines from 98.19% to 85.85% with pre-clustering, while training time decreases from 1.17s to 0.86s, suggesting that pre-clustering reduces both accuracy and computation time.", "label": false }, { "paperid": "2410.23317v1", "paper_path": "./SciVer/papers/2410.23317v1.json", "claim_type": "analytical", "type": "chart", "item": "1(a)", "section": [ "1", "3.2", "4.2" ], "image_path": "./SciVer/images/2410.23317v1_figure_1(a).png", "request_id": 570, 
"origin_statement": "Given that VLM attention sparsity ratios range from 70% in early layers to 99% in later layers, VL-Cache dynamically allocates KV cache budgets per layer based on measured sparsity, retaining 98% of task accuracy while using only 10% of the original cache capacity.", "perturbed_statement": "Given that VLM attention sparsity ratios range from 50% in early layers to 90% in later layers, VL-Cache dynamically allocates KV cache budgets per layer based on measured sparsity, retaining 98% of task accuracy while using only 5% of the original cache capacity.", "perturbed_explanation": "This statement is incorrect because the paper reports VLM attention sparsity ratios between 70% and 99%, not 50% to 90%, and experimental results (Section 5.1) indicate VL-Cache uses 10% of the KV cache, not 5%.", "claim": "Given that VLM attention sparsity ratios range from 50% in early layers to 90% in later layers, VL-Cache dynamically allocates KV cache budgets per layer based on measured sparsity, retaining 98% of task accuracy while using only 5% of the original cache capacity.", "label": false }, { "paperid": "2409.15053v1", "paper_path": "./SciVer/papers/2409.15053v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.15053v1-Table3-1.png", "request_id": 573, "origin_statement": "Increasing the Chebyshev filter degree m reduces FLP iterations but raises per-iteration work, shifting total compute time towards MV products, which dominate GPU cost: for Ga41As41H72 at m=500, 93% of compute time is spent on MV due to extensive sparse matrix nonzeros.", "perturbed_statement": "As m increases, FLP iterations reduce, but higher-degree filters shift cost to preprocessing, causing PREPROC to dominate at 90% of compute time for Ga41As41H72 at m=500.", "perturbed_explanation": "The statement is incorrect because Table 3 shows PREPROC accounts for only 2% (not 90%) of compute time at m=500 for Ga41As41H72. In fact, MV products dominate at 93%, and ORTH is just 3%, so preprocessing does not dominate.", "claim": "As m increases, FLP iterations reduce, but higher-degree filters shift cost to preprocessing, causing PREPROC to dominate at 90% of compute time for Ga41As41H72 at m=500.", "label": false }, { "paperid": "2410.01631v1", "paper_path": "./SciVer/papers/2410.01631v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "2.2", "2.4", "3.1" ], "image_path": "./SciVer/images/2410.01631v1_figure_1.png", "request_id": 574, "origin_statement": "The measured hyperfine field of 26 T combined with the typical on-site coupling (~10 T/μB) implies a Cr moment of 2.6 μB at 3.4 K, indicating a ~13% reduction from the spin-only 3 μB value, consistent with covalent Nb–Cr hybridization.", "perturbed_statement": "The measured hyperfine field of 24 T combined with a typical on-site coupling (~10 T/μB) implies a Cr moment of 2.4 μB at 3.4 K, indicating a ~20% reduction from the spin-only 3 μB value, consistent with covalent Nb–Cr hybridization.", "perturbed_explanation": "This is incorrect because the context specifies the hyperfine field is 26 T (Table 2), not 24 T. 
Using 26 T with A≈10 T/μB yields a Cr moment of 2.6 μB (∼13% below 3 μB), so the perturbed values of 24 T, 2.4 μB, and 20% reduction contradict the reported data.", "claim": "The measured hyperfine field of 24 T combined with a typical on-site coupling (~10 T/μB) implies a Cr moment of 2.4 μB at 3.4 K, indicating a ~20% reduction from the spin-only 3 μB value, consistent with covalent Nb–Cr hybridization.", "label": false }, { "paperid": "2410.20320v1", "paper_path": "./SciVer/papers/2410.20320v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "5.5" ], "image_path": "./SciVer/images/2410.20320v1_figure_4.png", "request_id": 576, "origin_statement": "In complex 5-way-5-shot tasks with a NOTA rate of 0.15, incorporating an adaptive margin amplifies total accuracy by over 6%, as it dynamically scales prototype boundaries based on negative instances; additionally, a pseudo-negative sampling rate of 0.2 consistently maximizes performance before significant decline beyond 0.4.", "perturbed_statement": "In complex 5-way-5-shot tasks with a NOTA rate of 0.15, incorporating an adaptive margin amplifies total accuracy by only 2%, as it dynamically scales prototype boundaries based on negative instances; additionally, a pseudo-negative sampling rate of 0.5 consistently maximizes performance before significant decline beyond 0.4.", "perturbed_explanation": "The perturbed statement is incorrect because the adaptive margin yields over 6% accuracy improvement in the 5-way-5-shot 0.15 setting (not only 2%), and the optimal pseudo-negative sampling rate is 0.2 (not 0.5), as shown in Figure 4 and the ablation study results.", "claim": "In complex 5-way-5-shot tasks with a NOTA rate of 0.15, incorporating an adaptive margin amplifies total accuracy by only 2%, as it dynamically scales prototype boundaries based on negative instances; additionally, a pseudo-negative sampling rate of 0.5 consistently maximizes performance before significant decline beyond 0.4.", "label": false }, { "paperid": "2411.09118v1", "paper_path": "./SciVer/papers/2411.09118v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.09118v1_figure_4.png", "request_id": 578, "origin_statement": "TSNE clustering reveals that FxTS-Net produces more compact, well-separated class features under Gaussian perturbations compared to Neural ODE, illustrating how fixed-time stability constrains feature drift and maintains robust decision boundaries despite additive noise.", "perturbed_statement": "TSNE clustering reveals that Neural ODE yields more compact, well-separated class features under Gaussian perturbations compared to FxTS-Net, indicating that fixed-time stability may not effectively constrain feature drift under additive noise.", "perturbed_explanation": "The perturbed statement contradicts the context’s observation that under Gaussian noise, Neural ODE decision boundaries become highly blurred while FxTS-Net preserves sharp cluster separation. 
Claiming that Neural ODE yields more compact, well-separated features under Gaussian perturbations is therefore inaccurate.", "claim": "TSNE clustering reveals that Neural ODE yields more compact, well-separated class features under Gaussian perturbations compared to FxTS-Net, indicating that fixed-time stability may not effectively constrain feature drift under additive noise.", "label": false }, { "paperid": "2409.03769v1", "paper_path": "./SciVer/papers/2409.03769v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.3" ], "image_path": "./SciVer/images/2409.03769v1-Table3-1.png", "request_id": 579, "origin_statement": "The DistMult-Ensemble model organizes processors by core count rather than precise frequency, grouping a 24-core Xeon with 32-core AMD variants; it learns HDD unit equivalence by matching 2000GB to 2TB; and via negative sampling, it identifies a 5.49 kΩ resistor as similar to 2.37 kΩ parts.", "perturbed_statement": "By clustering embeddings by core count rather than frequency, the DistMult-Ensemble model groups a 24-core Xeon with 32-core AMD parts; it learns HDD unit equivalence by matching 2000GB to 3TB; and via negative sampling, it identifies a 5.49 kΩ resistor as similar to 4.49 kΩ parts.", "perturbed_explanation": "The perturbation is incorrect because the model actually matches 2000GB HDDs to 2TB, not 3TB, and the top resistor neighbor for a 5.49 kΩ part is 2.37 kΩ, not 4.49 kΩ, as shown in Table 3.", "claim": "By clustering embeddings by core count rather than frequency, the DistMult-Ensemble model groups a 24-core Xeon with 32-core AMD parts; it learns HDD unit equivalence by matching 2000GB to 3TB; and via negative sampling, it identifies a 5.49 kΩ resistor as similar to 4.49 kΩ parts.", "label": false }, { "paperid": "2411.00028v2", "paper_path": "./SciVer/papers/2411.00028v2.json", "claim_type": "analytical", "type": "chart", "item": "4(d)", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.00028v2_figure_4(d).png", "request_id": 586, "origin_statement": "On the Shanghai dataset, replacing the semantic-guided fusion module with standard self-attention reduces accuracy by around 2%, from roughly 87% to 85%, indicating that guided fusion better tailors meta-path representations to each task's semantics.", "perturbed_statement": "On Shanghai, omitting the embedding transfer module instead of semantic fusion decreases accuracy by approximately 4%, from roughly 87% to 83%, suggesting cross-task embedding exchange is crucial for aligning meta-path features with task contexts.", "perturbed_explanation": "This statement is incorrect because the chart shows that removing embedding transfer (w/o trans.) only lowers accuracy from about 87% to 86% (a ≈1% drop), not from 87% to 83% (4%). 
The module attribution and percentage are both wrong.", "claim": "On Shanghai, omitting the embedding transfer module instead of semantic fusion decreases accuracy by approximately 4%, from roughly 87% to 83%, suggesting cross-task embedding exchange is crucial for aligning meta-path features with task contexts.", "label": false }, { "paperid": "2409.01854v1", "paper_path": "./SciVer/papers/2409.01854v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.4.2" ], "image_path": "./SciVer/images/2409.01854v1-Table4-1.png", "request_id": 587, "origin_statement": "The ablation in Table 4 indicates that excluding labelled sample retrieval reduces DuIE F1 by 10.67 points relative to full AgentRE, underscoring that exemplar-based contextual cues are more critical than annotation guidelines or KG facts, likely because they directly supply pattern variants optimizing the model’s decision boundary.", "perturbed_statement": "The ablation in Table 4 indicates that excluding labelled sample retrieval reduces DuIE F1 by 15 points relative to full AgentRE, underscoring that exemplar-based contextual cues are more critical than annotation guidelines or KG facts, likely because they directly supply pattern variants optimizing the model’s decision boundary.", "perturbed_explanation": "Table 4 reports DuIE F1 scores of 42.97 for full AgentRE versus 32.30 for -samples, a drop of 10.67 points. The perturbed statement’s claim of a 15-point reduction contradicts these values.", "claim": "The ablation in Table 4 indicates that excluding labelled sample retrieval reduces DuIE F1 by 15 points relative to full AgentRE, underscoring that exemplar-based contextual cues are more critical than annotation guidelines or KG facts, likely because they directly supply pattern variants optimizing the model’s decision boundary.", "label": false }, { "paperid": "2411.18432v1", "paper_path": "./SciVer/papers/2411.18432v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "5.1.1" ], "image_path": "./SciVer/images/2411.18432v1_figure_7.png", "request_id": 588, "origin_statement": "Grid 62’s tri-modal hourly demand peaks (around 8:00–9:00, 12:00–13:00, and 22:00–23:00) reflect commuting, lunch-time mobility, and entertainment-driven travel, consistent with high mixed-use density in west-central Kowloon, where dense residential and commercial land parcels intensify taxi requests during rush and leisure hours.", "perturbed_statement": "Grid 62’s tri-modal hourly demand peaks (around 10:00–11:00, 14:00–15:00, and 19:00–20:00) reflect commuting, lunch-time mobility, and entertainment-driven travel, consistent with high mixed-use density in west-central Kowloon, where dense residential and commercial land parcels intensify taxi requests during rush and leisure hours.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 7(c) shows Grid 62’s demand peaks at approximately 8:00–9:00, 12:00–13:00, and 22:00–23:00, not at 10:00–11:00, 14:00–15:00, and 19:00–20:00 as claimed.", "claim": "Grid 62’s tri-modal hourly demand peaks (around 10:00–11:00, 14:00–15:00, and 19:00–20:00) reflect commuting, lunch-time mobility, and entertainment-driven travel, consistent with high mixed-use density in west-central Kowloon, where dense residential and commercial land parcels intensify taxi requests during rush and leisure hours.", "label": false }, { "paperid": "2409.10775v1", "paper_path": "./SciVer/papers/2409.10775v1.json", "claim_type": "analytical", "type": "chart", 
"item": "9(b)", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.10775v1_figure_9(b).png", "request_id": 590, "origin_statement": "Under heavy occlusion (>50%), all top-three models under real occlusion are Swin-B variants, and they deviate by at most two ranks under synthetic occlusions, implying hierarchical windowed self-attention preserves critical features even with substantial object occlusion.", "perturbed_statement": "Under heavy occlusion (>50%), the top-three models under real occlusion are ResNeXt-50 + Mixup, VGG-16, and DeiT-B, and they shift by at most one rank under synthetic occlusions, implying convolutional architectures sustain robustness via Mixup’s regularization.", "perturbed_explanation": "This is incorrect because Figure 9(b) shows Swin-B + Mixup, Swin-B, and Swin-B + CutMix—not ResNeXt-50 + Mixup, VGG-16, and DeiT-B—occupy the top three ranks under real occlusion. Moreover, several convolutional and transformer models move by up to three or four positions across occlusion types, not at most one rank.", "claim": "Under heavy occlusion (>50%), the top-three models under real occlusion are ResNeXt-50 + Mixup, VGG-16, and DeiT-B, and they shift by at most one rank under synthetic occlusions, implying convolutional architectures sustain robustness via Mixup’s regularization.", "label": false }, { "paperid": "2411.15091v1", "paper_path": "./SciVer/papers/2411.15091v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.15091v1-Table4-1.png", "request_id": 593, "origin_statement": "Despite fluctuations in total domains, the proportion of Tranco top-100k sites with accessible robots.txt remains stable at roughly 75%, indicating consistent Common Crawl coverage over two years and validating reliability for robots.txt–based crawling policy analyses.", "perturbed_statement": "Despite fluctuations in total domains, the proportion of Tranco top-100k sites with accessible robots.txt remains stable at roughly 85%, indicating consistent Common Crawl coverage over two years and validating reliability for robots.txt–based crawling policy analyses.", "perturbed_explanation": "The perturbed statement falsely inflates the robots.txt coverage to 85%. 
Table 4 shows the actual proportion is around 74–78% (e.g., 31,494/40,177 ≈78.5% in Sep/Oct 2022 and 29,867/40,420 ≈73.9% in Oct 2024), not 85%.", "claim": "Despite fluctuations in total domains, the proportion of Tranco top-100k sites with accessible robots.txt remains stable at roughly 85%, indicating consistent Common Crawl coverage over two years and validating reliability for robots.txt–based crawling policy analyses.", "label": false }, { "paperid": "2409.19872v3", "paper_path": "./SciVer/papers/2409.19872v3.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "1" ], "image_path": "./SciVer/images/2409.19872v3_figure_2.png", "request_id": 596, "origin_statement": "By integrating intrinsic and external knowledge in a unified latent-space paradigm, UniKE yields a generality of 0.946 and locality of 0.934, confirming that synergistic key-value memory alignment overcomes the generality-locality compromises seen in TP+IKE.", "perturbed_statement": "By integrating intrinsic and external knowledge in a unified latent-space paradigm, UniKE yields a generality of 0.856 and locality of 0.934, confirming that synergistic key-value memory alignment overcomes the generality-locality compromises seen in TP+IKE.", "perturbed_explanation": "The perturbed claim misstates UniKE’s generality score as 0.856, whereas Figure 2(a) reports a generality of 0.946 for UniKE, making the altered figure contradictory to the context.", "claim": "By integrating intrinsic and external knowledge in a unified latent-space paradigm, UniKE yields a generality of 0.856 and locality of 0.934, confirming that synergistic key-value memory alignment overcomes the generality-locality compromises seen in TP+IKE.", "label": false }, { "paperid": "2409.03247v1", "paper_path": "./SciVer/papers/2409.03247v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.03247v1_figure_1.png", "request_id": 598, "origin_statement": "The Rule and Prompt Systems both support interactive filtering by allowing users to view how filter definitions apply to training examples, leveraging immediate feedback to refine filters, while the Label System omits this feature to prevent user confusion during active learning.", "perturbed_statement": "All three systems—including the Label System—allow users to interactively filter examples by displaying how filters apply to training data, providing immediate feedback that accelerates the active learning process.", "perturbed_explanation": "The perturbation is incorrect because the Label System does not implement interactive filter creation. 
Figure 1 explicitly notes that interactive filtering is “Not implemented” for the Label System, so it cannot display filter applications or offer this feedback.", "claim": "All three systems—including the Label System—allow users to interactively filter examples by displaying how filters apply to training data, providing immediate feedback that accelerates the active learning process.", "label": false }, { "paperid": "2409.11927v1", "paper_path": "./SciVer/papers/2409.11927v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "3", "4.1" ], "image_path": "./SciVer/images/2409.11927v1-Table5-1.png", "request_id": 599, "origin_statement": "The inverse correlation between assumed distance and measured spin arises because the continuum normalization scales with D^2: increasing distance lifts the required mass accretion rate, which shifts the disk peak to higher energies, so the model compensates by reducing spin to maintain the observed peak flux energy.", "perturbed_statement": "In Table 5, the black hole spin a* increases monotonically from ~0.97 at 2.4 kpc to ~1.03 at 8 kpc, because larger assumed distances lower the intrinsic disk flux, forcing a higher spin to preserve the peak emission energy.", "perturbed_explanation": "This statement is incorrect because Table 5 actually shows the spin a* decreasing from 0.972 at 2.4 kpc to 0.393 at 8 kpc, not increasing. Furthermore, a* cannot exceed unity (the Kerr limit), and larger distances increase the inferred accretion rate rather than lowering disk flux.", "claim": "In Table 5, the black hole spin a* increases monotonically from ~0.97 at 2.4 kpc to ~1.03 at 8 kpc, because larger assumed distances lower the intrinsic disk flux, forcing a higher spin to preserve the peak emission energy.", "label": false }, { "paperid": "2410.02010v1", "paper_path": "./SciVer/papers/2410.02010v1.json", "claim_type": "analytical", "type": "chart", "item": "4(b)", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.02010v1_figure_4(b).png", "request_id": 600, "origin_statement": "PriorCELoss achieves the highest performance among normalization-based methods (~0.85 AUROC) but shows the largest convergence instability with an 8% drop from the selected to final epoch, suggesting that class-prior adjusted losses may overfit early and require careful checkpoint selection under validation imbalance.", "perturbed_statement": "PriorCELoss achieves the highest performance among normalization-based methods (~0.85 AUROC) but shows a minimal convergence instability with a 2% drop from the selected to final epoch, suggesting that class-prior adjusted losses may maintain stable performance under validation imbalance.", "perturbed_explanation": "The perturbation incorrectly states that PriorCELoss has only a 2% drop between the selected and final epochs, whereas Figure 4(b) shows an actual gap of approximately 8%, making the claim of minimal instability inconsistent with the data.", "claim": "PriorCELoss achieves the highest performance among normalization-based methods (~0.85 AUROC) but shows a minimal convergence instability with a 2% drop from the selected to final epoch, suggesting that class-prior adjusted losses may maintain stable performance under validation imbalance.", "label": false }, { "paperid": "2409.06998v2", "paper_path": "./SciVer/papers/2409.06998v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.06998v2-Table1-1.png", "request_id": 601, 
"origin_statement": "Adaptive Selection (AS) boosts average accuracy by 6.04% on ACM-GCN versus only 2.70% on GCNII, reflecting that AS more effectively mitigates overfitting in models like ACM-GCN by leveraging greater representation variance absent in residual-rich architectures.", "perturbed_statement": "Adaptive Selection (AS) boosts average accuracy by 6.04% on GCNII versus only 2.70% on ACM-GCN, reflecting that AS more effectively mitigates overfitting in models like GCNII by leveraging greater representation variance absent in residual-rich architectures.", "perturbed_explanation": "The perturbation incorrectly swaps the improvement percentages: Table 1 shows GCNII-AS yields a 2.70% average gain (not 6.04%) and ACM-GCN-AS yields a 6.04% gain (not 2.70%). Consequently, the claim that GCNII benefits more from AS is contradicted by the reported results.", "claim": "Adaptive Selection (AS) boosts average accuracy by 6.04% on GCNII versus only 2.70% on ACM-GCN, reflecting that AS more effectively mitigates overfitting in models like GCNII by leveraging greater representation variance absent in residual-rich architectures.", "label": false }, { "paperid": "2411.14215v1", "paper_path": "./SciVer/papers/2411.14215v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.14215v1-Table5-1.png", "request_id": 603, "origin_statement": "GPT-4’s 14-point accuracy drop on paraphrased problems, compared to an 8-point human decline, implies model reliance on surface lexical patterns; humans leverage deeper conceptual schemas, yielding more stable performance under linguistic variation.", "perturbed_statement": "GPT-4’s accuracy decreases by only 5 percentage points on paraphrased stories, whereas human performance falls by 12 points, indicating that GPT-4 is more robust to linguistic rephrasing than human solvers.", "perturbed_explanation": "This is incorrect because GPT-4’s accuracy actually drops by 14 percentage points (from 86% to 72%), not 5 points, and human accuracy declines by 8 points (78% to 70%), not 12 points, contradicting the stated values.", "claim": "GPT-4’s accuracy decreases by only 5 percentage points on paraphrased stories, whereas human performance falls by 12 points, indicating that GPT-4 is more robust to linguistic rephrasing than human solvers.", "label": false }, { "paperid": "2411.02653v1", "paper_path": "./SciVer/papers/2411.02653v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.02653v1_figure_5.png", "request_id": 606, "origin_statement": "On both Lutetia and 1996 HW1, DeepONet reproduces temperature distributions within 1% error on most facets, with localized errors up to 2% in concave, self-shadowing regions, demonstrating its capacity to model complex radiative flux for accurate Yarkovsky force estimations.", "perturbed_statement": "DeepONet yields surface temperature predictions within 0.5% error overall, with maximum errors under 1% even in highly shadowed concave regions on both asteroids, confirming its near-perfect capture of complex radiative interactions for Yarkovsky force calculations.", "perturbed_explanation": "This claim is incorrect because Figure 5 and the accompanying text report relative errors reaching up to 2% in self-shadowing concave areas, not uniformly below 1%. 
A small number of facets exhibit errors around 2%, contradicting the perturbed thresholds of 0.5% and 1%.", "claim": "DeepONet yields surface temperature predictions within 0.5% error overall, with maximum errors under 1% even in highly shadowed concave regions on both asteroids, confirming its near-perfect capture of complex radiative interactions for Yarkovsky force calculations.", "label": false }, { "paperid": "2411.06096v1", "paper_path": "./SciVer/papers/2411.06096v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2411.06096v1-Table1-1.png", "request_id": 607, "origin_statement": "ZhoBLiMP triples paradigm coverage relative to previous Chinese benchmarks—CLiMP’s 16 and SLING’s 38 paradigms—by offering 118 paradigms across 35 k minimal pairs, achieved via linguist-designed grammar templates that capture missing structures like '把' and '被', indicating enhanced linguistic breadth.", "perturbed_statement": "ZhoBLiMP triples paradigm coverage relative to previous Chinese benchmarks—CLiMP’s 16 and SLING’s 38 paradigms—by offering 48 paradigms across 50 k minimal pairs, achieved via linguist-designed grammar templates that capture missing structures like '把' and '被', indicating enhanced linguistic breadth.", "perturbed_explanation": "The perturbed statement is incorrect because ZhoBLiMP actually provides 118 paradigms (not 48) and 35 k minimal pairs (not 50 k), as specified in the context and Table 1.", "claim": "ZhoBLiMP triples paradigm coverage relative to previous Chinese benchmarks—CLiMP’s 16 and SLING’s 38 paradigms—by offering 48 paradigms across 50 k minimal pairs, achieved via linguist-designed grammar templates that capture missing structures like '把' and '被', indicating enhanced linguistic breadth.", "label": false }, { "paperid": "2411.16471v1", "paper_path": "./SciVer/papers/2411.16471v1.json", "claim_type": "analytical", "type": "chart", "item": "6(b)", "section": [ "5.3", "5.4", "6.1" ], "image_path": "./SciVer/images/2411.16471v1_figure_6(b).png", "request_id": 608, "origin_statement": "At 375 nm, the squared visibility of Eta Centauri decreases to half its zero-baseline amplitude at ~70 m projected baseline, while at 470 nm it reaches half-amplitude around ~100 m, consistent with spatial coherence scaling inversely with wavelength.", "perturbed_statement": "At 375 nm, the squared visibility of Eta Centauri decreases to half its zero-baseline amplitude at ~120 m projected baseline, while at 470 nm it reaches half-amplitude around ~150 m, consistent with spatial coherence scaling inversely with wavelength.", "perturbed_explanation": "The half-amplitude baselines are wrongly stated. From Fig. 6 the visibility drops to half its zero-baseline value at roughly 70 m for 375 nm and about 100 m for 470 nm, not 120 m and 150 m as claimed.", "claim": "At 375 nm, the squared visibility of Eta Centauri decreases to half its zero-baseline amplitude at ~120 m projected baseline, while at 470 nm it reaches half-amplitude around ~150 m, consistent with spatial coherence scaling inversely with wavelength.", "label": false }, { "paperid": "2409.04723v1", "paper_path": "./SciVer/papers/2409.04723v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5" ], "image_path": "./SciVer/images/2409.04723v1-Table2-1.png", "request_id": 609, "origin_statement": "Integrating post-night sleep metrics with ECG-based mood classifiers leverages REM sleep’s role in emotional memory consolidation, boosting F1 performance by 11% compared to linear evaluation without sleep features.", "perturbed_statement": "Integrating post-night sleep metrics with ECG-based mood classifiers leverages deep sleep’s role in emotional memory consolidation, boosting F1 performance by 15% compared to linear evaluation without sleep features.", "perturbed_explanation": "This statement is incorrect because the actual F1 improvement for ECG-based mood classification with sleep measures is 11%, not 15%, and REM sleep—not deep sleep—was identified as having the highest impact on emotion regulation (Figure 5 and related literature).", "claim": "Integrating post-night sleep metrics with ECG-based mood classifiers leverages deep sleep’s role in emotional memory consolidation, boosting F1 performance by 15% compared to linear evaluation without sleep features.", "label": false }, { "paperid": "2410.20609v1", "paper_path": "./SciVer/papers/2410.20609v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "3.4" ], "image_path": "./SciVer/images/2410.20609v1_figure_5.png", "request_id": 610, "origin_statement": "The preferred slow-jet solution peaks at ~10^39.7 W jet power, ~5×10^4 yr age and ~2×10^10 M⊙ gas mass, implying jet-head deceleration primarily arises from momentum transfer with the dense ISM beyond the galaxy’s half-light radius.", "perturbed_statement": "The preferred slow-jet solution peaks at ~10^46 W jet power, ~5×10^4 yr age and ~2×10^10 M⊙ gas mass, implying jet-head deceleration primarily arises from magnetic tension in the ISM beyond the galaxy’s half-light radius.", "perturbed_explanation": "This statement is incorrect because Figure 5 shows the slow‐jet power PDF peaks at log₁₀(jet power)≈39.7 W (not 10^46 W).
Moreover, the RAiSE modelling attributes jet deceleration to momentum transfer with the ISM density contrast, not magnetic tension.", "claim": "The preferred slow-jet solution peaks at ~10^46 W jet power, ~5×10^4 yr age and ~2×10^10 M⊙ gas mass, implying jet-head deceleration primarily arises from magnetic tension in the ISM beyond the galaxy’s half-light radius.", "label": false }, { "paperid": "2410.23409v1", "paper_path": "./SciVer/papers/2410.23409v1.json", "claim_type": "analytical", "type": "chart", "item": "5(a)", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.23409v1_figure_5(a).png", "request_id": 612, "origin_statement": "TPP-Gaze’s return offset distribution closely aligns with human patterns, peaking at one-intervening fixation and exhibiting a similar heavy-tailed decay, indicating its LGMM effectively models human revisitation dynamics.", "perturbed_statement": "TPP-Gaze’s return offset distribution peaks at three intervening fixations and is modeled by an exponential mixture, indicating its exponential mixture model accurately captures mid-range revisit dynamics akin to human observers.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows the highest return frequency at one-intervening fixation, not three. Moreover, TPP-Gaze uses a Gaussian mixture model (LGMM) rather than an exponential mixture to fit return fixation intervals.", "claim": "TPP-Gaze’s return offset distribution peaks at three intervening fixations and is modeled by an exponential mixture, indicating its exponential mixture model accurately captures mid-range revisit dynamics akin to human observers.", "label": false }, { "paperid": "2410.02475v1", "paper_path": "./SciVer/papers/2410.02475v1.json", "claim_type": "analytical", "type": "chart", "item": "2(d)", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.02475v1_figure_2(d).png", "request_id": 614, "origin_statement": "The geometry-unaware policy achieves a 43.8% success rate on the video game console, exceeding the full-observation policy's 12.4% by 31.4% and the full-pose policy's 27.6% by 16.2%, indicating that abstracting away shape specifics enhances transfer via robust contact heuristics.", "perturbed_statement": "The geometry-unaware policy achieves a 50.0% success rate on the video game console, surpassing both the full-observation and full-pose policies, indicating that abstracting shape-specific information enhances transfer via robust contact heuristics.", "perturbed_explanation": "The perturbed statement incorrectly reports the geometry-unaware policy's success rate on the video game console as 50.0%, whereas Figure 2 shows it is actually 43.8%.", "claim": "The geometry-unaware policy achieves a 50.0% success rate on the video game console, surpassing both the full-observation and full-pose policies, indicating that abstracting shape-specific information enhances transfer via robust contact heuristics.", "label": false }, { "paperid": "2410.15678v1", "paper_path": "./SciVer/papers/2410.15678v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.15678v1-Table2-1.png", "request_id": 615, "origin_statement": "Mamba achieves over 99.9% accuracy when trained on or above the test’s n-gram length (e.g., 99.96% on K1V2→K1V2 and 99.98% on K2V2→K2V2), but it fails completely (0.00%) on tests requiring multi-token recall when trained on K1V1, revealing n-gram shortcut reliance.", "perturbed_statement": "Mamba maintains over 99% accuracy whenever its training n-gram length matches testing, but accuracy on K1V1→K2V2 only drops to around 66.82%, suggesting partial recall flexibility.", "perturbed_explanation": "This is incorrect because Table 2 reports 0.00% accuracy for the K1V1→K2V2 configuration, not 66.82%. The 66.82% value actually corresponds to testing K1V1 after training on K2V2, so the stated drop for K1V1→K2V2 is false.", "claim": "Mamba maintains over 99% accuracy whenever its training n-gram length matches testing, but accuracy on K1V1→K2V2 only drops to around 66.82%, suggesting partial recall flexibility.", "label": false }, { "paperid": "2409.03550v1", "paper_path": "./SciVer/papers/2409.03550v1.json", "claim_type": "analytical", "type": "chart", "item": "4(b)", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.03550v1_figure_4(b).png", "request_id": 616, "origin_statement": "Integrating DDIM with 160 denoising steps into a 14M DKDM-derived student DM increases sampling throughput to over 1500 images per second, while maintaining an FID around 25 on CIFAR10, illustrating the non-linear trade-off between iterative step count and sample quality in diffusion models.", "perturbed_statement": "Integrating DDIM with 80 denoising steps into a 14M DKDM-derived student DM increases sampling throughput to over 2500 images per second, while maintaining an FID around 15 on CIFAR10, illustrating the quadratic performance–speed scaling in diffusion models.", "perturbed_explanation": "At 80 DDIM steps the throughput is approximately 950 images/sec (not over 2500), and the FID is about 35 (not 15). Both the claimed speed and quality metrics are inconsistent with the experimental results shown in Figure 4(b).", "claim": "Integrating DDIM with 80 denoising steps into a 14M DKDM-derived student DM increases sampling throughput to over 2500 images per second, while maintaining an FID around 15 on CIFAR10, illustrating the quadratic performance–speed scaling in diffusion models.", "label": false }, { "paperid": "2409.04477v1", "paper_path": "./SciVer/papers/2409.04477v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.04477v1_figure_4.png", "request_id": 622, "origin_statement": "The final signed-bias update in iteration 10 induces a near-deterministic convergence: the approximation ratio jumps from ~0.56 to ~0.9, and the residual Hamming distance collapses to near zero, demonstrating that introducing bias sign substantially enhances the algorithm’s ability to escape local minima on noisy superconducting hardware.", "perturbed_statement": "The final signed-bias update in iteration 9 induces a near-deterministic convergence: the approximation ratio jumps from ~0.55 to ~0.85, and the residual Hamming distance collapses to near zero, demonstrating that introducing bias sign substantially enhances the algorithm’s ability to escape local minima on noisy superconducting hardware.", "perturbed_explanation": "This statement is incorrect because the signed-bias update actually occurs at iteration 10 (not 9), and the observed jump in approximation ratio is from approximately 0.56 to 0.9 (not from 0.55 to 0.85), as shown by the shaded region and data points in Fig. 4(b).", "claim": "The final signed-bias update in iteration 9 induces a near-deterministic convergence: the approximation ratio jumps from ~0.55 to ~0.85, and the residual Hamming distance collapses to near zero, demonstrating that introducing bias sign substantially enhances the algorithm’s ability to escape local minima on noisy superconducting hardware.", "label": false }, { "paperid": "2410.21357v1", "paper_path": "./SciVer/papers/2410.21357v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.21357v1-Table2-1.png", "request_id": 625, "origin_statement": "On OpenWebText, EDLM-coAR achieves a perplexity of 17.58, nearly matching AR’s 17.56, demonstrating that diffusion models with carry-over can, via exact ELBO optimization, approach autoregressive performance.", "perturbed_statement": "On OpenWebText, EDLM-coAR attains a perplexity of 16.58, outperforming AR’s 17.56 by nearly 1.0, indicating that diffusion with exact ELBO yields superior modeling efficiency.", "perturbed_explanation": "The perturbed statement incorrectly reports EDLM-coAR’s OpenWebText perplexity as 16.58. According to Table 2, its actual perplexity is 17.58, not 16.58, so it does not outperform AR by that margin.", "claim": "On OpenWebText, EDLM-coAR attains a perplexity of 16.58, outperforming AR’s 17.56 by nearly 1.0, indicating that diffusion with exact ELBO yields superior modeling efficiency.", "label": false }, { "paperid": "2411.10545v1", "paper_path": "./SciVer/papers/2411.10545v1.json", "claim_type": "analytical", "type": "chart", "item": "2(b)", "section": [ "4" ], "image_path": "./SciVer/images/2411.10545v1_figure_2(b).png", "request_id": 626, "origin_statement": "ISA's two-stage GMM-based clustering with entropy sampling yields a 21.73% win rate—higher than density (19.65%) and random (15.23%)—by ensuring both sample diversity and informativeness, demonstrating efficient resource allocation for alignment.", "perturbed_statement": "ISA's three-component GMM-based clustering with variance-based sampling achieves a 21.73% win rate—surpassing density (19.65%) and random (15.23%)—by ensuring both sample diversity and informativeness, demonstrating efficient resource allocation for alignment.", "perturbed_explanation": "The ISA methodology uses a 2-component Gaussian Mixture Model to distinguish desired versus undesired outputs and relies on entropy-based sampling. The perturbed statement incorrectly claims a three-component GMM and substitutes variance-based sampling, both contradicting the described ISA approach.", "claim": "ISA's three-component GMM-based clustering with variance-based sampling achieves a 21.73% win rate—surpassing density (19.65%) and random (15.23%)—by ensuring both sample diversity and informativeness, demonstrating efficient resource allocation for alignment.", "label": false }, { "paperid": "2411.18473v1", "paper_path": "./SciVer/papers/2411.18473v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.18473v1-Table1-1.png", "request_id": 627, "origin_statement": "At a fixed PSNR of 24.40 dB on the Tank&Temples dataset, HEMGS cuts storage by 48.8% compared to HAC (5.75 MB vs. 11.24 MB), demonstrating a Pareto‐efficient rate–distortion operating point.", "perturbed_statement": "At a fixed PSNR of 24.40 dB on the Tank&Temples dataset, HEMGS cuts storage by 65% compared to HAC (5.75 MB vs. 11.24 MB), demonstrating a Pareto‐efficient rate–distortion operating point.", "perturbed_explanation": "The perturbation overstates the storage savings: the actual reduction from HAC’s 11.24 MB to HEMGS’s 5.75 MB is approximately 48.8%, not 65% as claimed.", "claim": "At a fixed PSNR of 24.40 dB on the Tank&Temples dataset, HEMGS cuts storage by 65% compared to HAC (5.75 MB vs. 11.24 MB), demonstrating a Pareto‐efficient rate–distortion operating point.", "label": false }, { "paperid": "2410.13605v1", "paper_path": "./SciVer/papers/2410.13605v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4" ], "image_path": "./SciVer/images/2410.13605v1_figure_4.png", "request_id": 628, "origin_statement": "Figure 4 shows that inference time correlates with model depth and attention use: pure transformers like ViT-SAM incur up to twice the latency (9.0 s) of convolution-fused models such as CA-ViT-SAM (1.5 s), reflecting the quadratic O(n^2) computational cost of self-attention over sequence length.", "perturbed_statement": "Figure 4 suggests that pure transformer models like ViT-SAM (4.5 s) exhibit 30% lower inference latency than convolution-fusion variants like CA-ViT-SAM (6.5 s), due to self-attention’s linear O(n) complexity in sequence length.", "perturbed_explanation": "This is incorrect because Figure 4 indicates that ViT-SAM has approximately 9 s inference time and CA-ViT-SAM around 1.5 s, not 4.5 s and 6.5 s. Additionally, self-attention’s computational cost scales quadratically O(n^2), not linearly O(n).", "claim": "Figure 4 suggests that pure transformer models like ViT-SAM (4.5 s) exhibit 30% lower inference latency than convolution-fusion variants like CA-ViT-SAM (6.5 s), due to self-attention’s linear O(n) complexity in sequence length.", "label": false }, { "paperid": "2409.13499v2", "paper_path": "./SciVer/papers/2409.13499v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.13499v2_figure_2.png", "request_id": 630, "origin_statement": "Regularization with 100h supervised data cuts WERs by up to 8% absolute on small TT models (Whisper-tiny to base) by stabilizing noisy pseudo-labels, while models using high-quality PLs from Whisper-large v3 (>1B parameters) only improve marginally (<1%).", "perturbed_statement": "Regularization with 100h supervised data cuts WERs by up to 15% absolute on small TT models (Whisper-tiny to base) by stabilizing noisy pseudo-labels, and yields over 10% absolute improvements on mid-sized models (700M parameters), while Whisper-large v3 sees no gain.", "perturbed_explanation": "The perturbed statement exaggerates the absolute WER reduction.
Figure 2 shows a maximum ~8% drop for small models (Whisper-tiny) and only ~1% gain for mid-sized models (around 700M parameters), not the claimed 15% and 10% improvements.", "claim": "Regularization with 100h supervised data cuts WERs by up to 15% absolute on small TT models (Whisper-tiny to base) by stabilizing noisy pseudo-labels, and yields over 10% absolute improvements on mid-sized models (700M parameters), while Whisper-large v3 sees no gain.", "label": false }, { "paperid": "2410.15705v1", "paper_path": "./SciVer/papers/2410.15705v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "6" ], "image_path": "./SciVer/images/2410.15705v1-Table2-1.png", "request_id": 633, "origin_statement": "In Model (c) with n=5000, r=0.2, and m=0.2, the SIS median minimum model size matches the true active set size (4), yet the 95th percentile remains at 21, indicating that in heavy-tailed regimes, extreme value selection may require up to five times more variables under rare high-variance scenarios.", "perturbed_statement": "In Model (c) with n=5000, r=0.2, and m=0.2, the SIS median minimum model size matches the true active set size (4), yet the 95th percentile soars to 75.2, indicating that heavy-tailed sampling forces selection of almost nineteen times more covariates in rare high-variance scenarios.", "perturbed_explanation": "The perturbed statement wrongly cites 75.2 as the 95th percentile of the minimum model size. In fact, Table 2 shows 75.2 as the discrepancy measure P, whereas the actual 95th percentile of the minimum model size S for n=5000,r=0.2,m=0.2 is 21.", "claim": "In Model (c) with n=5000, r=0.2, and m=0.2, the SIS median minimum model size matches the true active set size (4), yet the 95th percentile soars to 75.2, indicating that heavy-tailed sampling forces selection of almost nineteen times more covariates in rare high-variance scenarios.", "label": false }, { "paperid": "2411.11293v1", "paper_path": "./SciVer/papers/2411.11293v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.11293v1_figure_5.png", "request_id": 634, "origin_statement": "Anchors’ execution time increases from roughly 50 seconds on ISCXTor2016 (28 features) to about 100 seconds on CIC-DoHBrw-2020 (33 features), illustrating the combinatorial explosion in rule search with added features, whereas KAM’s nearly linear rise (from ~30 to ~60 s) demonstrates its scalability advantage.", "perturbed_statement": "Anchors’ execution time grows modestly from about 5 seconds on ISCXTor2016 (28 features) to around 10 seconds on CIC-DoHBrw-2020 (33 features), indicating only linear complexity, while KAM’s runtime increases by just 20% (30 to 36 seconds), underscoring its efficiency.", "perturbed_explanation": "This statement is incorrect because, according to Figure 5, Anchors actually takes about 50 seconds on ISCXTor2016 and about 100 seconds on CIC-DoHBrw-2020—not 5 and 10 seconds—and KAM’s runtime doubles from approximately 30 to 60 seconds (a 100% increase), not a mere 20% rise.", "claim": "Anchors’ execution time grows modestly from about 5 seconds on ISCXTor2016 (28 features) to around 10 seconds on CIC-DoHBrw-2020 (33 features), indicating only linear complexity, while KAM’s runtime increases by just 20% (30 to 36 seconds), underscoring its efficiency.", "label": false }, { "paperid": "2410.19279v1", "paper_path": "./SciVer/papers/2410.19279v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": 
[ "3.4.1" ], "image_path": "./SciVer/images/2410.19279v1-Table1-1.png", "request_id": 639, "origin_statement": "PhysNet’s MAE increases from 1.86 in slight head motion to 8.20 when the user is speaking, a 4.4× rise, highlighting that short-range spatio-temporal methods lack the capacity to exploit periodic rPPG characteristics over longer sequences, reducing robustness against high-intensity motion noise.", "perturbed_statement": "PhysNet’s MAE increases from 1.86 in slight head motion to 5.20 when the user is speaking, a 2.8× rise, highlighting that short-range spatio-temporal methods lack the capacity to exploit periodic rPPG characteristics over longer sequences, reducing robustness against high-intensity motion noise.", "perturbed_explanation": "This statement is incorrect because Table 1 reports PhysNet’s MAE under noisy speaking (Task 2) as 8.20, not 5.20, and the true increase is approximately 4.4× rather than 2.8×.", "claim": "PhysNet’s MAE increases from 1.86 in slight head motion to 5.20 when the user is speaking, a 2.8× rise, highlighting that short-range spatio-temporal methods lack the capacity to exploit periodic rPPG characteristics over longer sequences, reducing robustness against high-intensity motion noise.", "label": false }, { "paperid": "2410.10880v1", "paper_path": "./SciVer/papers/2410.10880v1.json", "claim_type": "analytical", "type": "chart", "item": "3(b)", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.10880v1_figure_3(b).png", "request_id": 640, "origin_statement": "Applying FSD-based Min-k%, the non-member distribution mean rose to ≈5.9% while the member mean remained ≈5.3%, expanding their separation by ~12% and shifting the level-set threshold, which reduces classification ambiguity through asymmetric perplexity adaptation across seen and unseen data.", "perturbed_statement": "Applying FSD-based Min-k%, the non-member distribution mean rose to ≈6.9% while the member mean remained ≈4.3%, expanding their separation by ~30% and shifting the level-set threshold, which reduces classification ambiguity through asymmetric perplexity adaptation across seen and unseen data.", "perturbed_explanation": "The perturbed statement misreports both class means and the separation: the actual non-member and member means are approximately 5.9% and 5.3%, not 6.9% and 4.3%. 
Consequently, the true inter-class gap is ~0.6% (~12%), not ~2.6% (~30%).", "claim": "Applying FSD-based Min-k%, the non-member distribution mean rose to ≈6.9% while the member mean remained ≈4.3%, expanding their separation by ~30% and shifting the level-set threshold, which reduces classification ambiguity through asymmetric perplexity adaptation across seen and unseen data.", "label": false }, { "paperid": "2410.21526v1", "paper_path": "./SciVer/papers/2410.21526v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "5" ], "image_path": "./SciVer/images/2410.21526v1-Table5-1.png", "request_id": 641, "origin_statement": "With 10012 generated samples versus 3392 real training instances in the Financial dataset, the synthetic augmentation triples the corpus size, which may shift the model’s decision boundary toward artifacts introduced by generator bias, necessitating careful regularization.", "perturbed_statement": "Since the MRPC synthetic set contains 3005 samples—exactly twice its 408-instance development set—it delivers a modest generalization boost without significant overfitting risks.", "perturbed_explanation": "The statement incorrectly asserts the synthetic MRPC data (3005) is exactly twice the development set size (408); 3005 is actually about 7.37 times larger, refuting the claimed ratio.", "claim": "Since the MRPC synthetic set contains 3005 samples—exactly twice its 408-instance development set—it delivers a modest generalization boost without significant overfitting risks.", "label": false }, { "paperid": "2409.04073v2", "paper_path": "./SciVer/papers/2409.04073v2.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.04073v2_figure_1.png", "request_id": 648, "origin_statement": "By modeling zero-shot entity matching as transfer learning sequence classification and fine-tuning a lightweight GPT-2, AnyMatch attains an average F1 only 4.4% below GPT-4-powered MatchGPT while cutting inference cost per 1,000 tokens by about 3,899×, illustrating efficient utilization of limited parameters for robust, schema-agnostic matching.", "perturbed_statement": "By modeling zero-shot entity matching as transfer learning sequence classification and fine-tuning a lightweight GPT-2, AnyMatch attains an average F1 only 9.8% below GPT-4-powered MatchGPT while cutting inference cost per 1,000 tokens by about 1,000×, illustrating efficient utilization of limited parameters for robust, schema-agnostic matching.", "perturbed_explanation": "The perturbation is incorrect because the actual average F1 gap reported is 4.4%, not 9.8%, and the cost reduction is about 3,899× per 1,000 tokens, not 1,000×, according to the figure and text.", "claim": "By modeling zero-shot entity matching as transfer learning sequence classification and fine-tuning a lightweight GPT-2, AnyMatch attains an average F1 only 9.8% below GPT-4-powered MatchGPT while cutting inference cost per 1,000 tokens by about 1,000×, illustrating efficient utilization of limited parameters for robust, schema-agnostic matching.", "label": false }, { "paperid": "2410.16908v1", "paper_path": "./SciVer/papers/2410.16908v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.16908v1-Table3-1.png", "request_id": 649, "origin_statement": "Pruning 25% of capsules in CapsNet[C=20, D=8, I=7] reduces dead capsule proportion by suppressing low-activity paths, boosting validation accuracy from 
10% to 79.6%, demonstrating that structured pruning enhances information flow in deep capsule layers by alleviating vanishing activations.", "perturbed_statement": "Pruning 50% of capsules in CapsNet[C=20, D=8, I=7] reduces dead capsule proportion by suppressing low-activity paths, boosting validation accuracy from 10% to 79.6%, demonstrating that structured pruning enhances information flow in deep capsule layers by alleviating vanishing activations.", "perturbed_explanation": "The statement is incorrect because Table 3 only reports pruning ratios up to 0.375 (37.5%). A 50% pruning ratio is not evaluated in the context, and no data supports a 79.6% accuracy at 50% pruning.", "claim": "Pruning 50% of capsules in CapsNet[C=20, D=8, I=7] reduces dead capsule proportion by suppressing low-activity paths, boosting validation accuracy from 10% to 79.6%, demonstrating that structured pruning enhances information flow in deep capsule layers by alleviating vanishing activations.", "label": false }, { "paperid": "2410.15658v2", "paper_path": "./SciVer/papers/2410.15658v2.json", "claim_type": "analytical", "type": "chart", "item": "6(b)", "section": [ "4.3", "4.3.1" ], "image_path": "./SciVer/images/2410.15658v2_figure_6(b).png", "request_id": 650, "origin_statement": "During mistaken predictions on Adience, ORCU's softmax peaks at 38–43 with ~0.45 probability and smoothly distributes residual mass over neighboring intervals (25–32: ~0.38; 48–53: ~0.10), exemplifying unimodal ordinal smoothing. LS, however, assigns >75% confidence exclusively to 38–43, breaching unimodality and inflating miscalibration risk.", "perturbed_statement": "During mistaken predictions on Adience, ORCU's softmax peaks at 38–43 with ~0.85 probability and neglects neighboring classes entirely, assigning zero probability to 25–32 and 48–53. LS, however, spreads ~0.6 confidence to 25–32 and ~0.25 to 38–43, thus maintaining unimodal ordinal smoothing.", "perturbed_explanation": "The perturbed statement misreports ORCU’s distribution: Fig. 6 shows ORCU peaks at ~0.45 on 38–43 and allocates ~0.38 to 25–32 and ~0.10 to 48–53, not ~0.85 with zero mass on neighbors. It also mischaracterizes LS: LS actually concentrates >75% on 38–43 and gives minimal probability to 25–32, rather than spreading 0.6 to 25–32 and 0.25 to 38–43.", "claim": "During mistaken predictions on Adience, ORCU's softmax peaks at 38–43 with ~0.85 probability and neglects neighboring classes entirely, assigning zero probability to 25–32 and 48–53. LS, however, spreads ~0.6 confidence to 25–32 and ~0.25 to 38–43, thus maintaining unimodal ordinal smoothing.", "label": false }, { "paperid": "2410.12378v1", "paper_path": "./SciVer/papers/2410.12378v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.1.2" ], "image_path": "./SciVer/images/2410.12378v1-Table2-1.png", "request_id": 651, "origin_statement": "The FEM results (Table 2) confirm linear finite elements yield first-order convergence (EOC≈1.00–1.03) for both temperature profiles and interface displacement, consistent with the method’s theoretical accuracy. Computing times escalate superlinearly—roughly tenfold when Δy halves—due to the growing cost of assembling and solving larger global systems.", "perturbed_statement": "The FEM results (Table 2) confirm quadratic finite elements yield second-order convergence (EOC≈2.00–2.03) for both temperature profiles and interface displacement, consistent with the method’s theoretical accuracy. 
Computing times double when Δy halves due to the cost of solving larger global systems.", "perturbed_explanation": "The perturbation is incorrect because Table 2 reports EOC values around 1.00–1.03, not around 2.00, indicating first-order convergence from linear elements rather than second-order from quadratic elements. Additionally, computing times increase by roughly tenfold per halving of Δy, not merely doubling.", "claim": "The FEM results (Table 2) confirm quadratic finite elements yield second-order convergence (EOC≈2.00–2.03) for both temperature profiles and interface displacement, consistent with the method’s theoretical accuracy. Computing times double when Δy halves due to the cost of solving larger global systems.", "label": false }, { "paperid": "2409.01700v2", "paper_path": "./SciVer/papers/2409.01700v2.json", "claim_type": "analytical", "type": "chart", "item": "10", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.01700v2_figure_10.png", "request_id": 652, "origin_statement": "Direct cosmic-ray sputtering from CO-rich or CO2-rich ices, due to lower H2CO binding energies (~1200–1500 K) than in pure ice, yields ~1e-9 gas-phase abundance in L1689B, counteracting freeze-out at n~1e5 cm–3 and Av>10 mag, unlike inefficient photodesorption.", "perturbed_statement": "Direct cosmic-ray sputtering from H2O-rich ices, due to lower H2CO binding energies (~1200–1500 K) than in pure ice, yields ~1e-9 gas-phase abundance in L1689B, counteracting freeze-out at n~1e5 cm–3 and Av>5 mag, unlike inefficient photodesorption.", "perturbed_explanation": "Figure 10 shows that the H2O-rich ice model (blue line) remains well below the observed abundance (hatched zone), so H2O-rich matrices cannot produce ~1e-9 H2CO. Moreover, the gas-phase abundance only levels off above Av≈8 mag, not at Av>5 mag as stated.", "claim": "Direct cosmic-ray sputtering from H2O-rich ices, due to lower H2CO binding energies (~1200–1500 K) than in pure ice, yields ~1e-9 gas-phase abundance in L1689B, counteracting freeze-out at n~1e5 cm–3 and Av>5 mag, unlike inefficient photodesorption.", "label": false }, { "paperid": "2411.15375v1", "paper_path": "./SciVer/papers/2411.15375v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.15375v1-Table2-1.png", "request_id": 653, "origin_statement": "By leveraging adaptive second-moment estimation with higher-order correction, AdamZ achieves a 0.07% median accuracy improvement over Adam on the non-linearly separable make_circles dataset, indicating precise gradient variance normalization enhances classification at the cost of a 73% longer training time due to extra parameter updates.", "perturbed_statement": "By leveraging adaptive second-moment estimation with higher-order correction, AdamZ achieves a 0.17% median accuracy improvement over Adam on the non-linearly separable make_circles dataset, indicating precise gradient variance normalization enhances classification at the cost of a 27% longer training time due to extra parameter updates.", "perturbed_explanation": "The perturbation incorrectly states a 0.17% accuracy improvement and a 27% longer training time. 
The actual median accuracy gain is only 0.07% (from 97.76% to 97.83%), and the training duration increases by approximately 73% (from 1.74s to 3.02s), not 27%.", "claim": "By leveraging adaptive second-moment estimation with higher-order correction, AdamZ achieves a 0.17% median accuracy improvement over Adam on the non-linearly separable make_circles dataset, indicating precise gradient variance normalization enhances classification at the cost of a 27% longer training time due to extra parameter updates.", "label": false }, { "paperid": "2410.13650v1", "paper_path": "./SciVer/papers/2410.13650v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "3" ], "image_path": "./SciVer/images/2410.13650v1_figure_7.png", "request_id": 654, "origin_statement": "Pulsars in the pink branch have the lowest orbital periods (log P_B ~ -0.5) and the highest spin-down luminosities, indicating tightly bound binaries where gravitational-wave-driven angular momentum loss accelerates mass transfer in black widow systems.", "perturbed_statement": "Pulsars in the pink branch have the highest orbital periods (log P_B ~ 1.0) and the lowest spin-down luminosities, indicating wide binaries where magnetic braking dominates angular momentum loss, slowing mass transfer in black widow systems.", "perturbed_explanation": "This is incorrect because Fig. 7 shows pink-branch pulsars cluster at the smallest orbital periods (log P_B < 0) and highest spin-down luminosities (log ṀE > 34), not the largest P_B and lowest ṀE. Moreover, mass transfer in these tight binaries is driven by gravitational-wave losses, not magnetic braking.", "claim": "Pulsars in the pink branch have the highest orbital periods (log P_B ~ 1.0) and the lowest spin-down luminosities, indicating wide binaries where magnetic braking dominates angular momentum loss, slowing mass transfer in black widow systems.", "label": false }, { "paperid": "2411.01023v1", "paper_path": "./SciVer/papers/2411.01023v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "6.2" ], "image_path": "./SciVer/images/2411.01023v1-Table4-1.png", "request_id": 655, "origin_statement": "Using the TPE algorithm, the TransH model’s Hits@3 reached 0.5727 with a 42-dimensional embedding, learning rate 0.0012, and 50 NPP, indicating that moderate dimensionality alongside extended negative sampling fosters stable convergence and richer relational encoding, enhancing tail entity ranking.", "perturbed_statement": "Using the TPE algorithm, the TransH model’s Hits@3 reached 0.5727 with a 64-dimensional embedding, learning rate 0.012, and 50 NPP, indicating that moderate dimensionality alongside extended negative sampling fosters stable convergence and richer relational encoding, enhancing tail entity ranking.", "perturbed_explanation": "The perturbation is incorrect because Table 4 reports an embedding dimension of 42 (not 64) and a learning rate of 0.0012 (not 0.012) for the optimized TransH configuration.", "claim": "Using the TPE algorithm, the TransH model’s Hits@3 reached 0.5727 with a 64-dimensional embedding, learning rate 0.012, and 50 NPP, indicating that moderate dimensionality alongside extended negative sampling fosters stable convergence and richer relational encoding, enhancing tail entity ranking.", "label": false }, { "paperid": "2411.01424v1", "paper_path": "./SciVer/papers/2411.01424v1.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", "section": [ "7.2" ], "image_path": 
"./SciVer/images/2411.01424v1_figure_3(a).png", "request_id": 656, "origin_statement": "The CD-SBN algorithm’s runtime decreases from 0.277s to 0.053s as the bitruss support threshold increases, since stronger support pruning exponentially reduces candidate communities, showcasing how threshold-based filtering can exploit graph sparsity to achieve sub-300ms response times.", "perturbed_statement": "The CD-SBN algorithm’s runtime decreases from 277ms to 53ms as the bitruss support threshold increases, since stronger support pruning linearly reduces candidate communities, showcasing how threshold-based filtering can leverage graph density to achieve sub-1ms response times.", "perturbed_explanation": "This is incorrect because Figure 4(b) reports runtimes of 0.053–0.277 seconds (53–277 ms), not sub-1 ms. The pruning effect is described as exponential in the context, not linear, and it exploits graph sparsity, not density.", "claim": "The CD-SBN algorithm’s runtime decreases from 277ms to 53ms as the bitruss support threshold increases, since stronger support pruning linearly reduces candidate communities, showcasing how threshold-based filtering can leverage graph density to achieve sub-1ms response times.", "label": false }, { "paperid": "2411.01217v1", "paper_path": "./SciVer/papers/2411.01217v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.01217v1_figure_6.png", "request_id": 658, "origin_statement": "In three-player Texas Hold’em, the loose-passive AI boosts its calling frequency from 0% to 23.9%, converting folds into calls, which yields a 0.2 mBB/h advantage over the normal strategy; this indicates that added player complexity magnifies style-based exploitability.", "perturbed_statement": "In three-player Texas Hold’em, the loose-passive AI boosts its calling frequency from 0% to 33.9%, converting folds into calls, which yields a 1.5 mBB/h advantage over the normal strategy; this indicates that added player complexity magnifies style-based exploitability.", "perturbed_explanation": "The perturbed statement is incorrect because the true calling frequency increase for the loose-passive AI is from 0% to 23.9% (not 33.9%), and the actual advantage over the normal strategy is 0.2 mBB/h (not 1.5 mBB/h). These details contradict the reported results in Figure 6 and the text.", "claim": "In three-player Texas Hold’em, the loose-passive AI boosts its calling frequency from 0% to 33.9%, converting folds into calls, which yields a 1.5 mBB/h advantage over the normal strategy; this indicates that added player complexity magnifies style-based exploitability.", "label": false }, { "paperid": "2410.24145v1", "paper_path": "./SciVer/papers/2410.24145v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.24145v1-Table2-1.png", "request_id": 659, "origin_statement": "Our conformal predictor uses the sine–cosine representation of wind direction and meteorological covariates, achieving nominal 90% coverage on 15,000 combined samples. This exploits circular continuity to reduce median prediction-interval arc lengths by roughly 15% compared to linear baseline methods.", "perturbed_statement": "The conformal predictor uses sine-only encoding of wind direction and meteorological covariates, achieving nominal 95% coverage on 20,000 combined training samples. 
This exploits circular continuity to reduce median arc lengths by roughly 25% compared to linear baseline methods.", "perturbed_explanation": "This statement is incorrect because the calibration procedure combines 10,000 training and 5,000 calibration units into 15,000 samples—not 20,000—and the method encodes wind direction with both sine and cosine, not sine only. Moreover, the nominal miscoverage level is 10%, yielding 90% coverage, not 95%.", "claim": "The conformal predictor uses sine-only encoding of wind direction and meteorological covariates, achieving nominal 95% coverage on 20,000 combined training samples. This exploits circular continuity to reduce median arc lengths by roughly 25% compared to linear baseline methods.", "label": false }, { "paperid": "2409.08056v1", "paper_path": "./SciVer/papers/2409.08056v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.08056v1-Table2-1.png", "request_id": 661, "origin_statement": "By maximizing permutation entropy through content-aware shuffling, the proposed P* method achieves 32.55 dB PSNR—just 0.18 dB below the random upper bound—and lowers angular L(A) error from 0.085 (sequential) to 0.031, illustrating that higher entropy enhances 3D NeRF reconstruction fidelity.", "perturbed_statement": "By maximizing permutation entropy through content-aware shuffling, the proposed P* method achieves 32.55 dB PSNR—0.5 dB below the random upper bound—and lowers angular L(A) error from 0.085 to 0.052, illustrating that higher entropy enhances 3D NeRF reconstruction fidelity.", "perturbed_explanation": "This statement is incorrect because the actual PSNR difference between Random (32.73 dB) and P* (32.55 dB) is 0.18 dB, not 0.5 dB, and the angular loss L(A) for P* is 0.031, not 0.052, as reported in Table 2.", "claim": "By maximizing permutation entropy through content-aware shuffling, the proposed P* method achieves 32.55 dB PSNR—0.5 dB below the random upper bound—and lowers angular L(A) error from 0.085 to 0.052, illustrating that higher entropy enhances 3D NeRF reconstruction fidelity.", "label": false }, { "paperid": "2409.10502v1", "paper_path": "./SciVer/papers/2409.10502v1.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", "section": [ "3.1", "3.4" ], "image_path": "./SciVer/images/2409.10502v1_figure_3(a).png", "request_id": 664, "origin_statement": "Incorporating solver-decomposed reasoning during training accelerates model convergence, achieving over 90% cell-level accuracy within 500K steps, whereas fixed and random order require over 2M steps to exceed 55% accuracy, suggesting explicit search heuristics expedite cell selection learning.", "perturbed_statement": "Training with solver-decomposed reasoning requires around 1M steps to surpass 90% cell accuracy, while fixed order only peaks at 40% after 2M steps, indicating search heuristics accelerate selection learning.", "perturbed_explanation": "The perturbed statement is incorrect because the decomposed reasoning model actually reaches over 90% cell accuracy within 500K steps (not 1M), and the fixed-order model peaks around 60% accuracy (not 40%) even after 4M steps.", "claim": "Training with solver-decomposed reasoning requires around 1M steps to surpass 90% cell accuracy, while fixed order only peaks at 40% after 2M steps, indicating search heuristics accelerate selection learning.", "label": false }, { "paperid": "2411.09899v1", "paper_path": "./SciVer/papers/2411.09899v1.json", "claim_type": 
"analytical", "type": "table", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.09899v1-Table4-1.png", "request_id": 665, "origin_statement": "The ANN’s flexible architecture yields a mean terminal utility 0.00092 higher and a 0.00005 lower standard error than the analytic myopic weight under power utility, reflecting its ability to exploit stochastic volatility clustering that the log-utility-derived analytic solution cannot capture.", "perturbed_statement": "The analytic myopic weight achieves a mean terminal utility 0.00092 higher and a 0.00005 lower standard error than the ANN, reflecting its superior exploitation of stochastic volatility clustering under power utility.", "perturbed_explanation": "This statement is incorrect because Table 4 shows the ANN mean utility (0.07840) is actually 0.00092 above the analytic mean (0.07748), and its standard error (0.00055) is 0.00005 lower than the analytic error (0.00060).", "claim": "The analytic myopic weight achieves a mean terminal utility 0.00092 higher and a 0.00005 lower standard error than the ANN, reflecting its superior exploitation of stochastic volatility clustering under power utility.", "label": false }, { "paperid": "2411.05087v1", "paper_path": "./SciVer/papers/2411.05087v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.4.1" ], "image_path": "./SciVer/images/2411.05087v1_figure_2.png", "request_id": 668, "origin_statement": "Figure 2 shows that major releases exhibit a median log-difference of ~2.1 in direct dependents, roughly tenfold more than minor or patch updates; strong network externalities and semantic versioning signals likely amplify adoption only when backward-incompatible changes are introduced.", "perturbed_statement": "Figure 2 shows that major releases exhibit a median log-difference of ~2.1 in direct dependents, roughly twentyfold more than minor or patch updates, which cluster near -0.1; strong network externalities and semantic versioning signals likely amplify adoption only when backward-incompatible changes are introduced.", "perturbed_explanation": "The perturbation is wrong because Figure 2 shows minor and patch releases cluster around a median log-difference of +0.15 (not –0.1), and the fold-increase of major over minor/patch is about 14× (2.1÷0.15), not twentyfold.", "claim": "Figure 2 shows that major releases exhibit a median log-difference of ~2.1 in direct dependents, roughly twentyfold more than minor or patch updates, which cluster near -0.1; strong network externalities and semantic versioning signals likely amplify adoption only when backward-incompatible changes are introduced.", "label": false }, { "paperid": "2410.23537v1", "paper_path": "./SciVer/papers/2410.23537v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.23537v1-Table1-1.png", "request_id": 669, "origin_statement": "By using FP16 precision, OPT-13B’s 24 GB parameter size compresses to 12 GB in GPU HBM (37.5% of a 32 GB V100), freeing headroom for larger batch sizes under high Poisson arrival rates and leveraging INT8 KV caching to mitigate latency spikes.", "perturbed_statement": "By using FP16 precision, OPT-13B’s 24 GB parameter size compresses to 8 GB in GPU HBM (25% of a 32 GB V100), freeing headroom for larger batch sizes under high Poisson arrival rates and leveraging INT8 KV caching to mitigate latency spikes.", "perturbed_explanation": "FP16 halves the 24 GB model size to 12 GB, not 8 GB. 
Consequently, OPT-13B occupies 37.5% of a 32 GB V100 HBM (12/32 GB), not 25%.", "claim": "By using FP16 precision, OPT-13B’s 24 GB parameter size compresses to 8 GB in GPU HBM (25% of a 32 GB V100), freeing headroom for larger batch sizes under high Poisson arrival rates and leveraging INT8 KV caching to mitigate latency spikes.", "label": false }, { "paperid": "2411.17927v1", "paper_path": "./SciVer/papers/2411.17927v1.json", "claim_type": "analytical", "type": "chart", "item": "8", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.17927v1_figure_8.png", "request_id": 670, "origin_statement": "Figure 8 reveals a modestly positive correlation between Levenshtein distance and CodeBLEU improvement, suggesting that more extensive perturbations—like replacing placeholders with realistic tokens—enhance naturalness and guide the model toward better code completions. Yet the substantial scatter and heteroscedasticity indicate that code complexity also influences performance gains.", "perturbed_statement": "Figure 8 reveals a strong negative correlation (slope ≈ -0.6) between Levenshtein distance and CodeBLEU improvement, indicating that larger code perturbations consistently degrade the model’s code completion performance due to overfitting to unnatural edit patterns.", "perturbed_explanation": "This claim contradicts Figure 8, where the red dashed trend lines in each subplot slope upward slightly, showing a loose positive correlation. The actual slopes are around +0.2, not -0.6, so larger perturbations correspond to modest performance gains rather than consistent degradation.", "claim": "Figure 8 reveals a strong negative correlation (slope ≈ -0.6) between Levenshtein distance and CodeBLEU improvement, indicating that larger code perturbations consistently degrade the model’s code completion performance due to overfitting to unnatural edit patterns.", "label": false }, { "paperid": "2409.08512v1", "paper_path": "./SciVer/papers/2409.08512v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.1.2" ], "image_path": "./SciVer/images/2409.08512v1-Table1-1.png", "request_id": 671, "origin_statement": "Leveraging Merge-CPGs to encode patch structures and NE-GCN for graph feature extraction, GRAPE achieves a 14.86% accuracy and 14.54% precision boost over Transformer, demonstrating that structural code graph representations enhance vulnerability fix identification compared to sequential models.", "perturbed_statement": "Leveraging Merge-CPGs and NE-GCN, GRAPE achieves a 16.02% accuracy and 15.23% precision boost over Transformer, demonstrating that structural code graph representations more effectively identify vulnerability fixes than sequence models.", "perturbed_explanation": "The perturbed statement misreports GRAPE’s gains over Transformer: actual accuracy improvement is 14.86% (not 16.02%) and precision improvement is 14.54% (not 15.23%), contradicting the values shown in Table 1 and the context description.", "claim": "Leveraging Merge-CPGs and NE-GCN, GRAPE achieves a 16.02% accuracy and 15.23% precision boost over Transformer, demonstrating that structural code graph representations more effectively identify vulnerability fixes than sequence models.", "label": false }, { "paperid": "2409.09099v2", "paper_path": "./SciVer/papers/2409.09099v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3", "3.1", "3.2", "4" ], "image_path": "./SciVer/images/2409.09099v2_figure_2.png", "request_id": 672, "origin_statement": "In Fig. 
2(d), the empirical cumulative distribution of AoD reveals that soft-thresholding (red) limits negative AoD instances to below 1%, matching dense training (green), while hard-thresholding (blue) still incurs roughly 5% negative descents, evidencing S-STE’s mitigation of discontinuity-induced failures.", "perturbed_statement": "In Fig. 2(d), the empirical CDF of AoD indicates that soft-thresholding (red) limits negative AoD instances to below 10%, matching dense training (green), whereas hard-thresholding (blue) incurs only about 1% negative descents, suggesting S-STE offers minimal improvement over hard-thresholding.", "perturbed_explanation": "The perturbation incorrectly swaps the failure rates: soft-thresholding actually reduces negative AoD to below 1%, not 10%, and hard-thresholding incurs approximately 5% negative descents, not 1%, contradicting the CDF curves in Fig. 2(d).", "claim": "In Fig. 2(d), the empirical CDF of AoD indicates that soft-thresholding (red) limits negative AoD instances to below 10%, matching dense training (green), whereas hard-thresholding (blue) incurs only about 1% negative descents, suggesting S-STE offers minimal improvement over hard-thresholding.", "label": false }, { "paperid": "2411.12704v1", "paper_path": "./SciVer/papers/2411.12704v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2.1" ], "image_path": "./SciVer/images/2411.12704v1-Table1-1.png", "request_id": 675, "origin_statement": "By restricting flares to limb positions beyond 850 arcsec and excluding events within six hours of a prior C5.0 flare, the study isolates individual coronal heating episodes in XRT images, minimizing line-of-sight overlap and magnetic complexity biases in active region loop analyses.", "perturbed_statement": "By restricting flares to limb positions beyond 700 arcsec and excluding events within six hours of a prior C3.0 flare, the study isolates individual coronal heating episodes in XRT images, minimizing line-of-sight overlap and magnetic complexity biases in active region loop analyses.", "perturbed_explanation": "The perturbed statement misstates two key selection criteria: the study actually required limb locations beyond 850 arcsec (not 700 arcsec) and only excluded flares occurring within six hours of a prior C5.0 or greater event (not C3.0), contradicting the documented thresholds.", "claim": "By restricting flares to limb positions beyond 700 arcsec and excluding events within six hours of a prior C3.0 flare, the study isolates individual coronal heating episodes in XRT images, minimizing line-of-sight overlap and magnetic complexity biases in active region loop analyses.", "label": false }, { "paperid": "2411.00969v1", "paper_path": "./SciVer/papers/2411.00969v1.json", "claim_type": "analytical", "type": "chart", "item": "2(a)", "section": [ "4.7" ], "image_path": "./SciVer/images/2411.00969v1_figure_2(a).png", "request_id": 676, "origin_statement": "MGPP’s spike component intensifies weight shrinkage, evident by a 15% parameter concentration within ±0.05, lowering the dynamic pruning threshold by 20% versus L2; this yields sparser representations that reduce overfitting and improve generalization on MNLI.", "perturbed_statement": "MGPP’s spike component intensifies weight shrinkage, evident by a 15% parameter concentration within ±0.05, raising the dynamic pruning threshold by 20% versus L2; this yields sparser representations that reduce overfitting and improve generalization on MNLI.", "perturbed_explanation": "This is wrong 
because Figure 2(b) shows MGPP actually reduces the pruning threshold compared to L2, rather than raising it. The spike component drives thresholds downward, not upward.", "claim": "MGPP’s spike component intensifies weight shrinkage, evident by a 15% parameter concentration within ±0.05, raising the dynamic pruning threshold by 20% versus L2; this yields sparser representations that reduce overfitting and improve generalization on MNLI.", "label": false }, { "paperid": "2409.12757v2", "paper_path": "./SciVer/papers/2409.12757v2.json", "claim_type": "analytical", "type": "chart", "item": "4(b)", "section": [ "3.1.1" ], "image_path": "./SciVer/images/2409.12757v2_figure_4(b).png", "request_id": 678, "origin_statement": "In AuH, beyond MO 40, polaritonic corrections (~5×10⁻⁴ a.u.) exceed Breit (~3×10⁻⁵ a.u.) and Gaunt (~1×10⁻⁴ a.u.) contributions, validating the neglect of retardation when modeling valence excitations due to minimal relativistic coupling in diffuse orbitals.", "perturbed_statement": "In AuH, beyond MO 40, Gaunt corrections (~5×10⁻⁴ a.u.) exceed polaritonic (~3×10⁻⁵ a.u.) and Breit (~1×10⁻⁴ a.u.) contributions, validating the neglect of retardation when modeling valence excitations.", "perturbed_explanation": "The perturbed statement reverses the relative magnitudes: Figure 4 shows polaritonic corrections near 10⁻³.³ a.u. (~5×10⁻⁴) are larger than Gaunt (~10⁻⁴) and Breit (~10⁻⁵). It incorrectly asserts Gaunt (~5×10⁻⁴) dominates and misstates all three values, contradicting the plotted contributions.", "claim": "In AuH, beyond MO 40, Gaunt corrections (~5×10⁻⁴ a.u.) exceed polaritonic (~3×10⁻⁵ a.u.) and Breit (~1×10⁻⁴ a.u.) contributions, validating the neglect of retardation when modeling valence excitations.", "label": false }, { "paperid": "2410.21129v1", "paper_path": "./SciVer/papers/2410.21129v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.1.1" ], "image_path": "./SciVer/images/2410.21129v1-Table4-1.png", "request_id": 681, "origin_statement": "Under both Gaussian and uniform noise, initialization time increases roughly linearly with scale factor (from ~21 s at scale = 1 to ~27 s at scale = 10), reflecting proportional growth in Monte Carlo sampling workload.", "perturbed_statement": "Under both Gaussian and uniform noise, initialization time increases quadratically with scale factor (from ~21 s at scale = 1 to ~27 s at scale = 10), reflecting proportional growth in Monte Carlo sampling workload.", "perturbed_explanation": "The perturbation incorrectly claims a quadratic increase. 
The table shows initialization time rising by about 6 s over a tenfold scale factor change (21 s to 27 s), indicating an approximately linear, not quadratic, relationship.", "claim": "Under both Gaussian and uniform noise, initialization time increases quadratically with scale factor (from ~21 s at scale = 1 to ~27 s at scale = 10), reflecting proportional growth in Monte Carlo sampling workload.", "label": false }, { "paperid": "2411.06214v1", "paper_path": "./SciVer/papers/2411.06214v1.json", "claim_type": "analytical", "type": "chart", "item": "3", "section": [ "3.5" ], "image_path": "./SciVer/images/2411.06214v1_figure_3.png", "request_id": 682, "origin_statement": "PCA results show features 6 and 12 contribute 74.56% to PC1; with PC2 and PC3 capturing 15.85% and 5.29%, the top three components retain approximately 95.7% of variance, indicating dimensionality reduction preserves dominant signals and reduces noise, thus enhancing downstream model generalization.", "perturbed_statement": "PCA results show features 6 and 12 contribute 54.56% to PC1; with PC2 and PC3 capturing 15.85% and 5.29%, the top three components retain approximately 85% of variance, indicating dimensionality reduction may discard significant signals, potentially impairing model generalization.", "perturbed_explanation": "This statement is incorrect because the actual contribution of features 6 and 12 to PC1 is 74.56%, not 54.56%, and the cumulative variance captured by the first three principal components is about 95.7%, not 85%, so dimensionality reduction retains more signal than claimed.", "claim": "PCA results show features 6 and 12 contribute 54.56% to PC1; with PC2 and PC3 capturing 15.85% and 5.29%, the top three components retain approximately 85% of variance, indicating dimensionality reduction may discard significant signals, potentially impairing model generalization.", "label": false }, { "paperid": "2410.08289v1", "paper_path": "./SciVer/papers/2410.08289v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1", "4.3" ], "image_path": "./SciVer/images/2410.08289v1-Table1-1.png", "request_id": 683, "origin_statement": "By allocating merely 500 out of 18,891 contexts (≈2.6%) to the test split and employing only a single training epoch, the framework robustly prevents data leakage and overfitting, while dedicating 10% of dev comparison contexts to unbiased reward model evaluation.", "perturbed_statement": "By allocating 950 out of 18,891 contexts (≈5%) to the test split and employing three training epochs, the framework robustly prevents data leakage and overfitting, while dedicating 10% of dev comparison contexts to unbiased reward model evaluation.", "perturbed_explanation": "The perturbed statement misstates the test split size: the table shows 500 test contexts, not 950. 
It also incorrectly claims three training epochs, whereas the context specifies exactly one epoch to avoid overfitting.", "claim": "By allocating 950 out of 18,891 contexts (≈5%) to the test split and employing three training epochs, the framework robustly prevents data leakage and overfitting, while dedicating 10% of dev comparison contexts to unbiased reward model evaluation.", "label": false }, { "paperid": "2410.23168v1", "paper_path": "./SciVer/papers/2410.23168v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.23168v1_figure_7.png", "request_id": 684, "origin_statement": "Our method decouples token-token interaction costs from width scaling by keeping channel dimension fixed, so only token-parameter interactions grow, resulting in a consistent ~0.5 perplexity improvement over Net2Net at 3900 TPU hours, showcasing efficient long-context scalability.", "perturbed_statement": "Our method decouples token-token interaction costs from width scaling by doubling channel dimension, so only token-parameter interactions grow, resulting in a consistent ~1.5 perplexity improvement over Net2Net at 3900 TPU hours, showcasing efficient long-context scalability.", "perturbed_explanation": "The perturbed statement is incorrect because our approach maintains the original channel dimension rather than doubling it, and the observed perplexity improvement over Net2Net at 3900 TPU hours is approximately 0.5, not 1.5.", "claim": "Our method decouples token-token interaction costs from width scaling by doubling channel dimension, so only token-parameter interactions grow, resulting in a consistent ~1.5 perplexity improvement over Net2Net at 3900 TPU hours, showcasing efficient long-context scalability.", "label": false }, { "paperid": "2409.12892v1", "paper_path": "./SciVer/papers/2409.12892v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.12892v1_figure_4.png", "request_id": 686, "origin_statement": "Using 8000 ADAM initialization iterations, the two-stage method reaches a 24.3 dB PSNR plateau in under 300 seconds on the “garden” scene, leveraging early Gaussian densification to reduce non-linear solver load and accelerate convergence.", "perturbed_statement": "Using 8000 ADAM initialization iterations, the two-stage method reaches a 24.3 dB PSNR plateau in under 2 minutes on the “garden” scene, leveraging early Gaussian densification to reduce non-linear solver load and accelerate convergence.", "perturbed_explanation": "The claim of convergence in under 2 minutes (120 s) is incorrect. 
Figure 4 shows that the 8000-iteration initialization curve reaches ~24.3 dB only after about 300 seconds, not within 120 seconds, so the stated time is too short.", "claim": "Using 8000 ADAM initialization iterations, the two-stage method reaches a 24.3 dB PSNR plateau in under 2 minutes on the “garden” scene, leveraging early Gaussian densification to reduce non-linear solver load and accelerate convergence.", "label": false },
{ "paperid": "2409.19014v4", "paper_path": "./SciVer/papers/2409.19014v4.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.19014v4-Table5-1.png", "request_id": 687, "origin_statement": "Excluding the ground truth query and results causes Kappa to plummet from 74.08 to 29.36 and accuracy to drop from 87.0% to 64.0%, illustrating that without a reliable reference, the model’s ability to distinguish equivalent and non-equivalent SQL queries collapses.", "perturbed_statement": "Excluding the ground truth causes Kappa to drop by only 10 points (from 74.08 to 64.08) and accuracy to fall marginally (87.0% to 85.0%), implying that ground truth references exert minimal influence on model evaluation.", "perturbed_explanation": "The perturbed statement is incorrect because Table 5 shows Kappa actually drops from 74.08 to 29.36 (a 44.72-point decline), not to 64.08, and accuracy falls to 64.0%, not 85.0%. Therefore, ground truth references have a substantial, not minimal, impact.", "claim": "Excluding the ground truth causes Kappa to drop by only 10 points (from 74.08 to 64.08) and accuracy to fall marginally (87.0% to 85.0%), implying that ground truth references exert minimal influence on model evaluation.", "label": false },
{ "paperid": "2409.16492v1", "paper_path": "./SciVer/papers/2409.16492v1.json", "claim_type": "analytical", "type": "chart", "item": "9", "section": [ "3" ], "image_path": "./SciVer/images/2409.16492v1_figure_9.png", "request_id": 688, "origin_statement": "Rotational diagram fitting of HC5N and HC7N spectra, accounting for a beam‐filling factor of 30″, yields rotational temperatures of ~8–10 K, exceeding the canonical 6.5 K in TMC-1 CP, indicating warmer gas and a higher H₂ density enhancing collisional excitation.", "perturbed_statement": "Rotational diagram fitting of HC5N and HC7N spectra, assuming a beam‐filling factor of 60″, yields rotational temperatures of ~7–8 K, lower than the canonical 5 K in TMC-1 CP, suggesting cooler gas due to reduced collisional excitation at lower H₂ density.", "perturbed_explanation": "The perturbed statement is incorrect because the analysis used a beam‐filling factor of 30″, not 60″, and the canonical rotational temperature in TMC-1 CP is 6.5 K, not 5 K. Moreover, the actual rotational temperatures are ~8–10 K, which are higher, not lower, than 6.5 K.", "claim": "Rotational diagram fitting of HC5N and HC7N spectra, assuming a beam‐filling factor of 60″, yields rotational temperatures of ~7–8 K, lower than the canonical 5 K in TMC-1 CP, suggesting cooler gas due to reduced collisional excitation at lower H₂ density.", "label": false },
{ "paperid": "2409.12314v1", "paper_path": "./SciVer/papers/2409.12314v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.12314v1-Table3-1.png", "request_id": 689, "origin_statement": "With SD2.1, increasing poison from 0 to 500 concepts elevates AD from 0.441 to 0.592, causing generation aesthetics to drop from 0.950 to 0.286. This suggests high-quality pretrained VAE decoders overfit poisoned embeddings, leading to semantic collapse when confronted with undefined visual representations.", "perturbed_statement": "With SD2.1, increasing poison from 0 to 500 concepts elevates AD from 0.441 to 0.814, causing generation aesthetics to drop from 0.950 to 0.150. This suggests high-quality pretrained VAE decoders overfit poisoned embeddings, leading to semantic collapse when confronted with undefined visual representations.", "perturbed_explanation": "The perturbed statement misreports the AD and aesthetics values: Table 3 shows AD rises only to 0.592 for SD2.1 at 500 poisoned concepts (not 0.814) and generation aesthetics falls to 0.286 (not 0.150). These discrepancies make the claim factually incorrect.", "claim": "With SD2.1, increasing poison from 0 to 500 concepts elevates AD from 0.441 to 0.814, causing generation aesthetics to drop from 0.950 to 0.150. This suggests high-quality pretrained VAE decoders overfit poisoned embeddings, leading to semantic collapse when confronted with undefined visual representations.", "label": false },
{ "paperid": "2411.11000v1", "paper_path": "./SciVer/papers/2411.11000v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "2.4" ], "image_path": "./SciVer/images/2411.11000v1-Table2-1.png", "request_id": 691, "origin_statement": "By 75–115 days post-explosion, with γ-ray deposition below 20% above the photosphere, TARDIS’s dilute-blackbody radiative rate and downbranch line interaction converge plasma ionization and excitation in ~30 iterations using 10^5 Monte Carlo packets, accurately reproducing SESNe spectra under the recomb-NLTE helium treatment.", "perturbed_statement": "By 75–115 days post-explosion, with γ-ray deposition below 50% above the photosphere, TARDIS’s dilute-blackbody radiative rate and downbranch line interaction converge plasma ionization and excitation in ~50 iterations using 10^4 Monte Carlo packets, accurately reproducing SESNe spectra under the recomb-NLTE helium treatment.", "perturbed_explanation": "The perturbation is incorrect because the context specifies γ-ray deposition must be below 20%, not 50%, the simulation converges in ~30 iterations rather than ~50, and uses 10^5 Monte Carlo packets (with 10^6 in the final iteration), not 10^4 packets.", "claim": "By 75–115 days post-explosion, with γ-ray deposition below 50% above the photosphere, TARDIS’s dilute-blackbody radiative rate and downbranch line interaction converge plasma ionization and excitation in ~50 iterations using 10^4 Monte Carlo packets, accurately reproducing SESNe spectra under the recomb-NLTE helium treatment.", "label": false },
{ "paperid": "2410.20297v1", "paper_path": "./SciVer/papers/2410.20297v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "3.2.1" ], "image_path": "./SciVer/images/2410.20297v1_figure_1.png", "request_id": 692, "origin_statement": "The training loss falls from ~3.2 to ~2.0 within the first 50 steps due to AdamW optimizing cross-entropy gradients, while the flat region around 1.8 after step 80 suggests vanishing gradients constrained further learning.", "perturbed_statement": "The training loss falls from ~3.8 to ~2.5 within the first 100 steps due to AdamW optimizing cross-entropy gradients, while the flat region around 1.2 after step 80 suggests vanishing gradients constrained further learning.", "perturbed_explanation": "This is incorrect because Figure 1 shows the loss starting near 3.3 (not 3.8) and dropping to about 2.0 by step 50 (not 2.5 by step 100), and the plateau occurs around 1.8 rather than 1.2.", "claim": "The training loss falls from ~3.8 to ~2.5 within the first 100 steps due to AdamW optimizing cross-entropy gradients, while the flat region around 1.2 after step 80 suggests vanishing gradients constrained further learning.", "label": false },
{ "paperid": "2409.14201v1", "paper_path": "./SciVer/papers/2409.14201v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4.1.1" ], "image_path": "./SciVer/images/2409.14201v1-Table1-1.png", "request_id": 693, "origin_statement": "By applying a single refinement step, LATTE2 leverages error-driven feedback to correct 823 misparsed LaTeX expressions, boosting structural fidelity (CW-SSIM +0.0382) and translation fluency (BLEU +0.34%), which collectively raise the match rate from 82.27% to 90.44%.", "perturbed_statement": "By applying a single refinement step, LATTE2 leverages error-driven feedback to correct 1,200 misparsed LaTeX expressions, boosting structural fidelity (CW-SSIM +0.1382) and translation fluency (BLEU +1.34%), which collectively raise the match rate from 82.27% to 90.44%.", "perturbed_explanation": "The perturbed statement is incorrect because LATTE2 actually corrects 823 misparsed expressions (not 1,200), the CW-SSIM improvement is 0.0382 (not 0.1382), and the BLEU gain is 0.34% (not 1.34%), as shown in Table 1 and the text.", "claim": "By applying a single refinement step, LATTE2 leverages error-driven feedback to correct 1,200 misparsed LaTeX expressions, boosting structural fidelity (CW-SSIM +0.1382) and translation fluency (BLEU +1.34%), which collectively raise the match rate from 82.27% to 90.44%.", "label": false },
{ "paperid": "2410.16597v1", "paper_path": "./SciVer/papers/2410.16597v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "7.5" ], "image_path": "./SciVer/images/2410.16597v1_figure_4.png", "request_id": 694, "origin_statement": "Embedding 2-hop relation paths alongside top-K chunks yields a 0.027 EM gain (0.406→0.433) and 0.020 F1 gain (0.549→0.569), demonstrating that compositional multi-hop relations supply critical evidence for cross-sentence inference in multi-hop QA.", "perturbed_statement": "Embedding only 1-hop propositions alongside top-K chunks yields a 0.072 EM gain (0.406→0.478) and 0.030 F1 gain (0.549→0.579), demonstrating that atomic propositions supply sufficient cross-sentence evidence for multi-hop QA.", "perturbed_explanation": "The perturbed statement is incorrect because the ablation study does not evaluate a \"1-hop propositions\" condition, only full propositions and 2-hop paths. Furthermore, adding propositions to chunks actually increases EM by 0.020 (from 0.406 to 0.426) and F1 by 0.016 (from 0.549 to 0.565), not by 0.072 and 0.030 as claimed.", "claim": "Embedding only 1-hop propositions alongside top-K chunks yields a 0.072 EM gain (0.406→0.478) and 0.030 F1 gain (0.549→0.579), demonstrating that atomic propositions supply sufficient cross-sentence evidence for multi-hop QA.", "label": false },
{ "paperid": "2409.17536v1", "paper_path": "./SciVer/papers/2409.17536v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.17536v1-Table4-1.png", "request_id": 695, "origin_statement": "In LIS scenarios, MUSE’s integration of semantic priors yields a 7% H@1 rise on WN18RR and 13.9% on NELL995, indicating that augmenting sparse graphs with external semantic knowledge particularly strengthens long-tail entity representations more than in richer graph settings.", "perturbed_statement": "In RIS scenarios, MUSE’s integration of semantic priors yields an 8% H@1 rise on WN18RR and a 15% rise on NELL995, indicating that augmenting sparse graphs with external semantic knowledge particularly strengthens head entity representations in dense graphs.", "perturbed_explanation": "The perturbation is incorrect because the original improvements of 0.07 (7%) on WN18RR and 0.139 (13.9%) on NELL995 occur in the LIS (limited information) scenario, not the RIS scenario. Additionally, the benefit primarily targets long-tail entities, not head entities.", "claim": "In RIS scenarios, MUSE’s integration of semantic priors yields an 8% H@1 rise on WN18RR and a 15% rise on NELL995, indicating that augmenting sparse graphs with external semantic knowledge particularly strengthens head entity representations in dense graphs.", "label": false },
{ "paperid": "2411.17987v1", "paper_path": "./SciVer/papers/2411.17987v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "5.1.2" ], "image_path": "./SciVer/images/2411.17987v1_figure_5.png", "request_id": 696, "origin_statement": "By leveraging pipeline parallelism and register arrays for field export, P4-NIDS limits per-packet processing overhead, resulting in only ~7.8% throughput drop at nine NetFlow fields compared to FlowStalker’s ~11.2%, indicating P4-NIDS performs ~30% fewer memory-access operations per flow export stage.", "perturbed_statement": "By leveraging TCAM caching and register arrays for field export, P4-NIDS limits per-packet processing overhead, resulting in only ~5.8% throughput drop at eleven NetFlow fields compared to FlowStalker’s ~9.2%, indicating P4-NIDS performs ~45% fewer memory-access operations per flow export stage.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows P4-NIDS has about an 8.0% throughput drop at eleven NetFlow fields, not 5.8%. FlowStalker’s throughput drop at eleven fields isn’t reported (only up to nine fields), and its drop at nine fields is approximately 11.2%, not 9.2%. Furthermore, P4-NIDS does not use TCAM caching for NetFlow exports but relies on pipeline parallelism and register arrays.", "claim": "By leveraging TCAM caching and register arrays for field export, P4-NIDS limits per-packet processing overhead, resulting in only ~5.8% throughput drop at eleven NetFlow fields compared to FlowStalker’s ~9.2%, indicating P4-NIDS performs ~45% fewer memory-access operations per flow export stage.", "label": false },
{ "paperid": "2411.16421v1", "paper_path": "./SciVer/papers/2411.16421v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "5.2.3", "5.3" ], "image_path": "./SciVer/images/2411.16421v1_figure_5.png", "request_id": 698, "origin_statement": "In Figure 5, the ViT’s mean absolute pressure error increases linearly for the WP basin (0.75→2.26 hPa) but surges exponentially for the AU basin beyond grade 3 (6.39→24.45 hPa), implying the model’s limited capacity to represent extreme intensity-induced structural variability in AU cyclones.", "perturbed_statement": "In Figure 5, the ViT’s mean absolute pressure error increases linearly for the AU basin (5.77→6.39 hPa) but surges exponentially for the WP basin beyond grade 3 (1.06→2.26 hPa), implying the model’s limited capacity to represent extreme intensity-induced structural variability in AU cyclones.", "perturbed_explanation": "The perturbation is incorrect because Figure 5 shows the exponential surge in mean absolute error (from 6.39 to 23.84 to 24.45 hPa) for the AU basin (red curve), not the WP basin. The WP basin (blue curve) only increases modestly from 1.06 to 2.26 hPa.", "claim": "In Figure 5, the ViT’s mean absolute pressure error increases linearly for the AU basin (5.77→6.39 hPa) but surges exponentially for the WP basin beyond grade 3 (1.06→2.26 hPa), implying the model’s limited capacity to represent extreme intensity-induced structural variability in AU cyclones.", "label": false },
{ "paperid": "2410.19796v1", "paper_path": "./SciVer/papers/2410.19796v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.2.2" ], "image_path": "./SciVer/images/2410.19796v1-Table4-1.png", "request_id": 701, "origin_statement": "ViT-L-16 produces zero test samples exceeding 0.99 confidence, while ResNet-50 retains 3,173 such cases, suggesting that attention-based architectures inherently distribute representation uncertainty more evenly, thereby reducing extreme overconfidence compared to convolutional models.", "perturbed_statement": "ViT-L-16 produces 3,173 test samples exceeding 0.99 confidence, while ResNet-50 has none, suggesting that attention-based architectures inherently induce extreme overconfidence compared to convolutional models.", "perturbed_explanation": "The perturbed statement is incorrect because Table 4 shows that ResNet-50 has 3,173 samples with confidence >0.99 and ViT-L-16 has 0 such samples, not the other way around.", "claim": "ViT-L-16 produces 3,173 test samples exceeding 0.99 confidence, while ResNet-50 has none, suggesting that attention-based architectures inherently induce extreme overconfidence compared to convolutional models.", "label": false },
{ "paperid": "2410.17226v2", "paper_path": "./SciVer/papers/2410.17226v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2410.17226v2-Table1-1.png", "request_id": 703, "origin_statement": "Parallel BFS completes in D rounds, yielding span Θ(D). By applying directional optimization when |frontier| > n/D, BFS switches to reverse exploration, scanning O(n) vertices instead of O(m) edges per round, thus reducing per-round work on low-diameter graphs.", "perturbed_statement": "Parallel BFS completes in n rounds, yielding span Θ(n). By applying directional optimization when |frontier| > m/D, BFS switches to reverse exploration, scanning O(n) vertices instead of O(m) edges per round, thus reducing per-round work even on large-diameter graphs.", "perturbed_explanation": "Parallel BFS actually finishes in at most D rounds (D is the graph diameter), not n. Also, directional optimization should trigger when the frontier size exceeds n/D, not m/D.", "claim": "Parallel BFS completes in n rounds, yielding span Θ(n). By applying directional optimization when |frontier| > m/D, BFS switches to reverse exploration, scanning O(n) vertices instead of O(m) edges per round, thus reducing per-round work even on large-diameter graphs.", "label": false },
{ "paperid": "2411.04451v1", "paper_path": "./SciVer/papers/2411.04451v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2411.04451v1-Table2-1.png", "request_id": 711, "origin_statement": "Infrared surveys show LMC protostars exhibit ∼2× higher mean CO2/H2O ratios than Milky Way sources, consistent with warm ice chemistry: elevated dust temperatures in low-metallicity environments enhance CO surface diffusion, facilitating CO2 formation, while concurrently inhibiting CO hydrogenation and methanol synthesis.", "perturbed_statement": "Infrared surveys show LMC protostars exhibit ∼3× higher mean CO2/H2O ratios than Milky Way sources, consistent with warm ice chemistry: elevated dust temperatures in low-metallicity environments enhance CO surface diffusion, facilitating both CO2 and CH3OH formation via surface hydrogenation.", "perturbed_explanation": "The perturbed statement is incorrect because observations indicate the mean CO2/H2O ratio in LMC sources is about twofold higher—not threefold—than in the Milky Way. Moreover, warm dust actually suppresses CO hydrogenation, reducing CH3OH ice formation rather than enhancing it.", "claim": "Infrared surveys show LMC protostars exhibit ∼3× higher mean CO2/H2O ratios than Milky Way sources, consistent with warm ice chemistry: elevated dust temperatures in low-metallicity environments enhance CO surface diffusion, facilitating both CO2 and CH3OH formation via surface hydrogenation.", "label": false },
{ "paperid": "2411.05253v1", "paper_path": "./SciVer/papers/2411.05253v1.json", "claim_type": "analytical", "type": "chart", "item": "4(b)", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.05253v1_figure_4(b).png", "request_id": 712, "origin_statement": "As code-mixing intensifies, BLEU scores for all models decline—especially past two additional languages—due to increased n-gram sparsity and diminished lexical overlap hindering precise matching of multi-language segments.", "perturbed_statement": "As code-mixing intensifies, BLEU scores for all models decline—especially past one additional language—due to increased n-gram sparsity and diminished lexical overlap hindering precise matching of multi-language segments.", "perturbed_explanation": "The perturbation incorrectly claims the steepest BLEU drop occurs after one additional language. In Figure 4, the most pronounced decline in BLEU happens when moving from two to three additional languages, not after the first.", "claim": "As code-mixing intensifies, BLEU scores for all models decline—especially past one additional language—due to increased n-gram sparsity and diminished lexical overlap hindering precise matching of multi-language segments.", "label": false },
{ "paperid": "2410.22086v2", "paper_path": "./SciVer/papers/2410.22086v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5.4" ], "image_path": "./SciVer/images/2410.22086v2-Table3-1.png", "request_id": 713, "origin_statement": "AutoLR's dynamic rate adjustment reduces Verbmem by up to 75% (e.g., from 0.81 to 0.20 in GDiff c=0.9) and boosts TruthRatio by over 60%, illustrating that adaptive learning rates mitigate gradient noise and stabilize unlearning convergence.", "perturbed_statement": "AutoLR’s dynamic rate adjustment reduces Verbmem by up to 90% (e.g., from 0.81 to 0.20 in GDiff c=0.9) and boosts TruthRatio by over 80%, illustrating that adaptive learning rates introduce L2 regularization effects and stabilize unlearning convergence.", "perturbed_explanation": "The purported 90% reduction is incorrect: going from 0.81 to 0.20 corresponds to about a 75% decrease, not 90%. Likewise, the TruthRatio rise from 0.21 to 0.31 is roughly a 48% improvement, not over 80%. Moreover, AutoLR adapts learning rates but does not perform L2 regularization.", "claim": "AutoLR’s dynamic rate adjustment reduces Verbmem by up to 90% (e.g., from 0.81 to 0.20 in GDiff c=0.9) and boosts TruthRatio by over 80%, illustrating that adaptive learning rates introduce L2 regularization effects and stabilize unlearning convergence.", "label": false },
{ "paperid": "2411.12248v2", "paper_path": "./SciVer/papers/2411.12248v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.4" ], "image_path": "./SciVer/images/2411.12248v2-Table1-1.png", "request_id": 715, "origin_statement": "By offering resting-state, static, and dynamic EEG responses alongside synchronized high-res videos, 3D shapes with color labels, and textual captions, EEG-3D enables nuanced decoding of neural mechanisms underlying spatiotemporal integration and color-shape binding in visual processing beyond capabilities of prior datasets.", "perturbed_statement": "By offering only resting-state and dynamic EEG responses alongside synchronized high-res videos, 3D shapes with color labels, and textual captions, EEG-3D enables nuanced decoding of neural mechanisms underlying spatiotemporal integration and color-shape binding in visual processing beyond capabilities of prior datasets.", "perturbed_explanation": "The perturbed statement incorrectly claims EEG-3D provides only resting-state and dynamic responses. In fact, Table 1 and the text confirm EEG-3D also includes EEG responses to static image stimuli (St), so omitting static responses contradicts the dataset attributes.", "claim": "By offering only resting-state and dynamic EEG responses alongside synchronized high-res videos, 3D shapes with color labels, and textual captions, EEG-3D enables nuanced decoding of neural mechanisms underlying spatiotemporal integration and color-shape binding in visual processing beyond capabilities of prior datasets.", "label": false },
{ "paperid": "2411.13900v1", "paper_path": "./SciVer/papers/2411.13900v1.json", "claim_type": "analytical", "type": "chart", "item": "5(b)", "section": [ "3.4.2" ], "image_path": "./SciVer/images/2411.13900v1_figure_5(b).png", "request_id": 716, "origin_statement": "Figure 5.b’s heatmap shows that toggling branch address bits B[2:5] only alters Qualcomm Oryon’s misprediction rate after 28–31 dummy branches, confirming a one-bit shift per taken branch and that its 32-bit PHRB exclusively tracks bits 2–5 of the branch address.", "perturbed_statement": "Figure 5.b’s heatmap shows that toggling branch address bits B[1:4] only alters Qualcomm Oryon’s misprediction rate after 16–19 dummy branches, confirming a two-bit shift per taken branch and that its 30-bit PHRB exclusively tracks bits 1–4 of the branch address.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5.b demonstrates that bits 2–5 (not 1–4) influence misprediction; the change occurs after 28–31 (not 16–19) dummy branches; the PHR shifts by one bit per branch (not two); and Oryon’s PHRB is 32 bits wide (not 30).", "claim": "Figure 5.b’s heatmap shows that toggling branch address bits B[1:4] only alters Qualcomm Oryon’s misprediction rate after 16–19 dummy branches, confirming a two-bit shift per taken branch and that its 30-bit PHRB exclusively tracks bits 1–4 of the branch address.", "label": false },
{ "paperid": "2410.11305v1", "paper_path": "./SciVer/papers/2410.11305v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.11305v1-Table4-1.png", "request_id": 717, "origin_statement": "By parallelizing multi-token verification, QSpec mitigates the O(n^2) self-attention cost in larger transformers, achieving an average speedup increase from 1.33× on the 8B model to 1.49× on the 13B model, reflecting improved amortization of non-attention overhead as model size grows.", "perturbed_statement": "By parallelizing multi-token verification, QSpec mitigates the O(n^3) self-attention cost in larger transformers, achieving an average speedup increase from 1.26× on the 8B model to 1.75× on the 13B model, reflecting improved amortization of non-attention overhead as model size grows.", "perturbed_explanation": "This statement is incorrect because transformer self-attention has O(n^2) complexity, not O(n^3), and the average speedup figures for 8B and 13B models are 1.31× and 1.49× respectively (Table 4), not 1.26× and 1.75×.", "claim": "By parallelizing multi-token verification, QSpec mitigates the O(n^3) self-attention cost in larger transformers, achieving an average speedup increase from 1.26× on the 8B model to 1.75× on the 13B model, reflecting improved amortization of non-attention overhead as model size grows.", "label": false },
{ "paperid": "2411.00690v1", "paper_path": "./SciVer/papers/2411.00690v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.4" ], "image_path":
"./SciVer/images/2411.00690v1_figure_4.png", "request_id": 718, "origin_statement": "Inclusion of disconnected triple and quadruple excitations in QCCSD yields a reference weight that monotonically decreases from near unity at equilibrium to ~0.13 at R=5 Å, preventing CCSD’s unphysical negative reference amplitude beyond ~3.8 Å and thus preserving bounded separability.", "perturbed_statement": "Inclusion of disconnected triple and quadruple excitations in QCCSD yields a reference weight that monotonically decreases from near unity at equilibrium to ~0.30 at R=5 Å, preventing CCSD’s unphysical negative reference amplitude beyond ~4.5 Å and thus preserving bounded separability.", "perturbed_explanation": "The perturbed statement is incorrect because the QCCSD reference weight at R=5 Å is approximately 0.13 (solid black curve), not 0.30. Furthermore, the CCSD reference weight becomes negative just past R≈3.8 Å, not beyond 4.5 Å, as indicated by the dotted black line.", "claim": "Inclusion of disconnected triple and quadruple excitations in QCCSD yields a reference weight that monotonically decreases from near unity at equilibrium to ~0.30 at R=5 Å, preventing CCSD’s unphysical negative reference amplitude beyond ~4.5 Å and thus preserving bounded separability.", "label": false }, { "paperid": "2409.02292v1", "paper_path": "./SciVer/papers/2409.02292v1.json", "claim_type": "analytical", "type": "chart", "item": "9", "section": [ "5.6" ], "image_path": "./SciVer/images/2409.02292v1_figure_9.png", "request_id": 720, "origin_statement": "At 10000 bps, the rapid symbol transitions lower the average SNR below 5%, causing a bit error rate exceeding 5% due to insufficient sampling intervals and increased ISI in the demodulated waveform.", "perturbed_statement": "At 10000 bps, the rapid symbol transitions increase the average SNR above 15 dB, causing the bit error rate to drop below 2% thanks to oversampling and reduced intersymbol interference.", "perturbed_explanation": "This statement contradicts the context: at 10000 bps the measured SNR is below 5%, not above 15 dB, and the bit error rate exceeds 5%, not drops below 2%. 
Additionally, oversampling cannot overcome the low SNR levels observed, so ISI remains high.", "claim": "At 10000 bps, the rapid symbol transitions increase the average SNR above 15 dB, causing the bit error rate to drop below 2% thanks to oversampling and reduced intersymbol interference.", "label": false }, { "paperid": "2411.05816v1", "paper_path": "./SciVer/papers/2411.05816v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.05816v1-Table1-1.png", "request_id": 721, "origin_statement": "The QNN exploits quaternion algebra's intrinsic coupling of four-dimensional inputs, enabling it to disentangle real and imaginary components correspondingly, as shown by mapping inputs like 2+2i+2j+2k to k; this structural alignment underpins its faster convergence relative to the Real-NN.", "perturbed_statement": "The QNN relies on commutative quaternion multiplication to isolate real and imaginary parts, as seen by mapping 2+2i+2j+2k to j; this enables threefold faster convergence than the Real-NN.", "perturbed_explanation": "This statement is incorrect because quaternion multiplication is non-commutative (not commutative), the dataset maps the input 2+2i+2j+2k to output k (not j), and experimental results indicate roughly a twofold speedup, not threefold.", "claim": "The QNN relies on commutative quaternion multiplication to isolate real and imaginary parts, as seen by mapping 2+2i+2j+2k to j; this enables threefold faster convergence than the Real-NN.", "label": false }, { "paperid": "2410.07242v1", "paper_path": "./SciVer/papers/2410.07242v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "5" ], "image_path": "./SciVer/images/2410.07242v1_figure_2.png", "request_id": 722, "origin_statement": "When the observed control response (29.3%) in MTX-treated patients is close to the historical MTX-specific rate (31.4%), SPx assigns over 70% combined weight to borrowing submodels, shrinking the posterior mean towards 31.4% and reducing posterior uncertainty compared to no borrowing.", "perturbed_statement": "When the observed control response (29.3%) in MTX-treated patients is close to the historical MTX-specific rate (31.4%), SPx assigns over 90% combined weight to borrowing submodels, shrinking the posterior mean towards 31.4% and reducing posterior uncertainty compared to no borrowing.", "perturbed_explanation": "The perturbed claim incorrectly reports a combined borrowing weight over 90%. 
In Figure 2A, the actual SPx weights are 41% for the historical-only submodel and 33% for the regression submodel, totaling approximately 74%, not over 90%.", "claim": "When the observed control response (29.3%) in MTX-treated patients is close to the historical MTX-specific rate (31.4%), SPx assigns over 90% combined weight to borrowing submodels, shrinking the posterior mean towards 31.4% and reducing posterior uncertainty compared to no borrowing.", "label": false }, { "paperid": "2411.08494v1", "paper_path": "./SciVer/papers/2411.08494v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.08494v1_figure_6.png", "request_id": 726, "origin_statement": "During the 507.cactuBSSN_r workload, specific grid sizes (e.g., x0 and x0+8) induce L1I cache MPKI spikes due to conflict misses that exceed cache associativity, creating local execution time peaks despite overall dataset growth, highlighting microarchitectural sensitivity in SPEC CPU2017 evaluations.", "perturbed_statement": "During the 507.cactuBSSN_r workload, at grid sizes x0 and x0+8, L2 cache MPKI spikes to 42.7 with a miss ratio of 28%, causing execution time peaks due to insufficient 2-way associativity, illustrating microarchitectural sensitivity masked by SPEC CPU2017’s fixed datasets.", "perturbed_explanation": "The perturbed statement is wrong because the observed spikes occur in the L1I cache, not L2. The actual L1I MPKI at the x0 peak is 35.2 with a 31% miss ratio, and both CPUs use 4-way associativity rather than 2-way, contradicting the altered details.", "claim": "During the 507.cactuBSSN_r workload, at grid sizes x0 and x0+8, L2 cache MPKI spikes to 42.7 with a miss ratio of 28%, causing execution time peaks due to insufficient 2-way associativity, illustrating microarchitectural sensitivity masked by SPEC CPU2017’s fixed datasets.", "label": false }, { "paperid": "2411.03304v1", "paper_path": "./SciVer/papers/2411.03304v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.03304v1_figure_2.png", "request_id": 728, "origin_statement": "In the Scenario 2 simulation, the posterior distribution of W_j for true active variables concentrates tightly around 1.0, while distributions for inactive variables center near zero; this separation enables the Bayesian knockoff procedure to control FDR at the preset 0.2 threshold by applying the cutoff from Equation (18).", "perturbed_statement": "In the Scenario 2 simulation, the posterior distribution of W_j for true active variables concentrates tightly around 1.0, while distributions for inactive variables center near zero; this separation enables the Bayesian knockoff procedure to control FDR at the preset 0.05 threshold by applying the cutoff from Equation (18).", "perturbed_explanation": "The perturbation is incorrect because the context specifies that the FDR threshold was set at 0.2, not 0.05. 
Therefore, stating a 0.05 threshold contradicts the documented experimental setup.", "claim": "In the Scenario 2 simulation, the posterior distribution of W_j for true active variables concentrates tightly around 1.0, while distributions for inactive variables center near zero; this separation enables the Bayesian knockoff procedure to control FDR at the preset 0.05 threshold by applying the cutoff from Equation (18).", "label": false }, { "paperid": "2411.03122v2", "paper_path": "./SciVer/papers/2411.03122v2.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "4", "9" ], "image_path": "./SciVer/images/2411.03122v2-Table1-1.png", "request_id": 729, "origin_statement": "The 1/T dependence of the magnetic moment cross section amplifies low-energy electron recoils, enabling reactor EES experiments like GEMMA to achieve the most stringent ν̄_e magnetic moment limit (μ_ν̄e <2.9×10^−11 μ_B), surpassing recent CENS bounds by over an order of magnitude.", "perturbed_statement": "The 1/E_ν dependence of the magnetic moment cross section amplifies low-energy electron recoils, enabling reactor EES experiments like GEMMA to achieve the most stringent ν̄_e magnetic moment limit (μ_ν̄e <2.9×10^−11 μ_B), surpassing recent CENS bounds by over an order of magnitude.", "perturbed_explanation": "The perturbation is wrong because the magnetic moment interaction cross section scales inversely with the recoil energy T (σ_μ ∝1/T, as shown in Eq.34 and discussed in Fig.3), not with the neutrino energy E_ν.", "claim": "The 1/E_ν dependence of the magnetic moment cross section amplifies low-energy electron recoils, enabling reactor EES experiments like GEMMA to achieve the most stringent ν̄_e magnetic moment limit (μ_ν̄e <2.9×10^−11 μ_B), surpassing recent CENS bounds by over an order of magnitude.", "label": false }, { "paperid": "2410.15632v1", "paper_path": "./SciVer/papers/2410.15632v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "8", "8.1" ], "image_path": "./SciVer/images/2410.15632v1_figure_4.png", "request_id": 730, "origin_statement": "As the spectator quark mass increases, the indirect environment-driven quark-sector magnetic polarizability difference Δβ increases due to reduced octet–decuplet baryon mass splitting enhancing transition strengths, and all Δβ values trend toward zero as mπ² approaches 0.4 GeV².", "perturbed_statement": "As the spectator quark mass increases, the indirect environment-driven quark-sector magnetic polarizability difference Δβ decreases due to increased octet–decuplet baryon mass splitting reducing transition strengths, and all Δβ values trend toward zero as mπ² approaches 0.4 MeV².", "perturbed_explanation": "This statement is incorrect because Figure 4 demonstrates that Δβ actually increases—not decreases—with heavier spectator quark masses. 
In addition, the pion-mass-squared axis is labeled in GeV², not MeV², so the unit “0.4 MeV²” contradicts the context.", "claim": "As the spectator quark mass increases, the indirect environment-driven quark-sector magnetic polarizability difference Δβ decreases due to increased octet–decuplet baryon mass splitting reducing transition strengths, and all Δβ values trend toward zero as mπ² approaches 0.4 MeV².", "label": false }, { "paperid": "2409.09884v1", "paper_path": "./SciVer/papers/2409.09884v1.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", "section": [ "5.1.1", "5.1.4", "5.1.5" ], "image_path": "./SciVer/images/2409.09884v1_figure_3(a).png", "request_id": 732, "origin_statement": "The algorithm’s weight histogram peaks around the default weight (1.0) with a 17% lower tail below 0.95 down to 0.6, reflecting a soft-punting strategy where marginal losses in a category’s expected value are counterbalanced by multivariate normal gradient effects, rather than extreme zero-weight allocations.", "perturbed_statement": "The algorithm’s weight histogram peaks around 1.2 with a 25% lower tail below 0.9 down to 0.4, reflecting a soft-punting strategy where marginal losses in a category’s expected value are counterbalanced by multivariate normal gradient effects, rather than extreme zero-weight allocations.", "perturbed_explanation": "This is incorrect because the observed peak weight is 1.0, not 1.2; only 17% of weights fall below 0.95 (not 25% below 0.9); and the lower tail extends to about 0.6 rather than down to 0.4.", "claim": "The algorithm’s weight histogram peaks around 1.2 with a 25% lower tail below 0.9 down to 0.4, reflecting a soft-punting strategy where marginal losses in a category’s expected value are counterbalanced by multivariate normal gradient effects, rather than extreme zero-weight allocations.", "label": false }, { "paperid": "2409.13671v1", "paper_path": "./SciVer/papers/2409.13671v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "3.2", "4" ], "image_path": "./SciVer/images/2409.13671v1_figure_7.png", "request_id": 734, "origin_statement": "Between 15 and 20 iterations, the CB algorithm’s median accuracy jumps from about 0.73 to 0.83 while its IQR widens, indicating that the observed performance boost stems partly from increased variance in sample-based reward estimation during exploration.", "perturbed_statement": "Between 15 and 20 iterations, the CB algorithm’s median accuracy jumps from about 0.63 to 0.93 while its IQR narrows, indicating that the observed performance boost stems partly from reduced variance in sample-based reward estimation during exploration.", "perturbed_explanation": "The perturbation is incorrect because the actual median accuracies for CB are approximately 0.73 at 15 iterations and 0.83 at 20 iterations, not 0.63 and 0.93. 
Additionally, the IQR in the figure actually increases from 15 to 20 iterations, rather than narrows.", "claim": "Between 15 and 20 iterations, the CB algorithm’s median accuracy jumps from about 0.63 to 0.93 while its IQR narrows, indicating that the observed performance boost stems partly from reduced variance in sample-based reward estimation during exploration.", "label": false }, { "paperid": "2411.10013v1", "paper_path": "./SciVer/papers/2411.10013v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "1", "5.5" ], "image_path": "./SciVer/images/2411.10013v1-Table4-1.png", "request_id": 737, "origin_statement": "By embedding homography estimation and 2D rectification position encoding, HomoDepth reduces DTU_df AbsRel by 52% (0.183→0.087) versus Argos, bypassing GPU rectification (saving 312.6ms) and decreasing total pipeline latency by 43%, demonstrating robustness to variable focal lengths without external preprocessing.", "perturbed_statement": "By embedding homography estimation and 2D rectification position encoding, HomoDepth reduces DTU_df D1 error by 64% (0.64→0.32) versus Argos, bypassing CPU rectification (saving 811ms) and decreasing total pipeline latency by 30%, demonstrating robustness to variable focal lengths without external preprocessing.", "perturbed_explanation": "The statement miscalculates the D1 error reduction: going from 0.64 to 0.32 on DTU_df is a 50% relative improvement, not 64%. Also, rectification savings refer to GPU preprocessing time (~312.5ms), not the full CPU latency of 811ms.", "claim": "By embedding homography estimation and 2D rectification position encoding, HomoDepth reduces DTU_df D1 error by 64% (0.64→0.32) versus Argos, bypassing CPU rectification (saving 811ms) and decreasing total pipeline latency by 30%, demonstrating robustness to variable focal lengths without external preprocessing.", "label": false }, { "paperid": "2411.04041v2", "paper_path": "./SciVer/papers/2411.04041v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.04041v2-Table3-1.png", "request_id": 739, "origin_statement": "The negative correlation parameter ρ remains stable around –0.68 across maturities, consistently generating the negative skew typical in equity markets. Meanwhile, the variance of the randomized parameter Var Γ decays from 3.371 for August to 0.361 for November, reflecting diminishing need for randomization at longer expiries.", "perturbed_statement": "The variance of the randomized parameter Var Γ increases from 3.371 for August to 0.361 for November, indicating growing randomization needed at longer expiries. The correlation parameter ρ remains stable around +0.68, reinforcing persistent volatility skew typical in equity markets.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 shows Var Γ decreases from 3.371 in August to 0.361 in November, not increases. Also, the calibrated correlation ρ is around –0.68 (negative), not +0.68.", "claim": "The variance of the randomized parameter Var Γ increases from 3.371 for August to 0.361 for November, indicating growing randomization needed at longer expiries. 
The correlation parameter ρ remains stable around +0.68, reinforcing persistent volatility skew typical in equity markets.", "label": false }, { "paperid": "2410.23000v2", "paper_path": "./SciVer/papers/2410.23000v2.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "6.4" ], "image_path": "./SciVer/images/2410.23000v2-Table4-1.png", "request_id": 741, "origin_statement": "The 0.323 KPP for GPT-4o, combined with a 939.35-token average length, indicates that expanding parametric knowledge in longer responses dilutes alignment with retrieved documents, leading to lower precision compared to the more concise Phi-3-mini (0.337 KPP, 832.93 tokens).", "perturbed_statement": "The 0.343 KPP for GPT-4o, combined with a 950-token average length, suggests that expanding parametric knowledge in longer responses dilutes alignment with retrieved documents, leading to lower precision compared to the more concise Phi-3-mini (0.327 KPP, 820 tokens).", "perturbed_explanation": "This statement is incorrect because GPT-4o’s actual KPP is 0.323, not 0.343, and its average response length is 939.35 tokens, not 950. Also, Phi-3-mini’s KPP is 0.337 and its average length is 832.93 tokens, not 0.327 and 820 tokens.", "claim": "The 0.343 KPP for GPT-4o, combined with a 950-token average length, suggests that expanding parametric knowledge in longer responses dilutes alignment with retrieved documents, leading to lower precision compared to the more concise Phi-3-mini (0.327 KPP, 820 tokens).", "label": false }, { "paperid": "2411.03396v1", "paper_path": "./SciVer/papers/2411.03396v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1", "3.2" ], "image_path": "./SciVer/images/2411.03396v1-Table1-1.png", "request_id": 743, "origin_statement": "Positive ΔΦ in Janus-TMDC heterobilayers induces Z-scheme band alignment, boosting the effective band gap by ΔΦ; for WS2-SMoSe, Eg (0.937 eV) plus ΔΦ (0.79 eV) yields E_eff 1.717 eV, surpassing the 1.23 eV water-splitting threshold for enhanced photocatalysis.", "perturbed_statement": "Positive ΔΦ in Janus-TMDC heterobilayers induces Z-scheme band alignment, boosting the effective band gap by ΔΦ; for WS2-SMoSe, Eg (0.937 eV) plus ΔΦ (-0.79 eV) yields E_eff 1.717 eV, surpassing the 1.5 eV water-splitting threshold for enhanced photocatalysis.", "perturbed_explanation": "The perturbed statement misstates ΔΦ as -0.79 eV, whereas Table 1 reports ΔΦ = +0.79 eV for WS2-SMoSe. 
It also uses a 1.5 eV threshold for water splitting, but the correct theoretical band gap requirement is 1.23 eV.", "claim": "Positive ΔΦ in Janus-TMDC heterobilayers induces Z-scheme band alignment, boosting the effective band gap by ΔΦ; for WS2-SMoSe, Eg (0.937 eV) plus ΔΦ (-0.79 eV) yields E_eff 1.717 eV, surpassing the 1.5 eV water-splitting threshold for enhanced photocatalysis.", "label": false }, { "paperid": "2411.11440v1", "paper_path": "./SciVer/papers/2411.11440v1.json", "claim_type": "analytical", "type": "chart", "item": "13", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.11440v1_figure_13.png", "request_id": 744, "origin_statement": "For S4-like survey depth with 1% lensing residual, KS + LiteBIRD-like E/B separation achieves similar ~50% reduction in Fisher σ(r) compared to matrix purification, as lowered residuals shift the error budget from irreducible noise to leakage that these optimized algorithms efficiently remove.", "perturbed_statement": "For S4-like survey depth with 5% lensing residual, KS + Planck-like E/B separation achieves similar ~60% reduction in Fisher σ(r) compared to matrix purification, as lower residuals shift the error budget from irreducible noise to leakage that these algorithms remove.", "perturbed_explanation": "This statement is incorrect because the study only reports 10% and 1% lensing residual scenarios—no 5% case exists—and the KS + Planck-like method for S4-like depth at 1% residual yields roughly a 40% reduction in σ(r), not 60%, and does not match matrix purification in performance.", "claim": "For S4-like survey depth with 5% lensing residual, KS + Planck-like E/B separation achieves similar ~60% reduction in Fisher σ(r) compared to matrix purification, as lower residuals shift the error budget from irreducible noise to leakage that these algorithms remove.", "label": false }, { "paperid": "2409.13221v2", "paper_path": "./SciVer/papers/2409.13221v2.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "7.3" ], "image_path": "./SciVer/images/2409.13221v2-Table3-1.png", "request_id": 745, "origin_statement": "RLHFuse’s simulated annealing-based intra-stage fusion achieves the theoretical lower bound latency for five of six pipeline settings by maximizing parallel task overlap, delivering up to 1.5× speedup, and consistently reduces peak activation memory to the serial 1F1B baseline through optimal stage scheduling.", "perturbed_statement": "RLHFuse’s simulated annealing-based intra-stage fusion achieves the theoretical lower bound latency for all six pipeline settings by maximizing parallel task overlap, delivering up to 2.0× speedup, and consistently reduces peak activation memory to 0.8× the serial 1F1B baseline.", "perturbed_explanation": "The perturbed statement is incorrect on three counts. First, Table 3 shows the maximum observed speedup is 1.5×, not 2.0×. Second, the algorithm fails to reach the theoretical lower bound in the 65B/33B configuration with 16 pipeline stages and 64 micro-batches. 
Third, memory usage never drops below the 1F1B baseline of 1.0×.", "claim": "RLHFuse’s simulated annealing-based intra-stage fusion achieves the theoretical lower bound latency for all six pipeline settings by maximizing parallel task overlap, delivering up to 2.0× speedup, and consistently reduces peak activation memory to 0.8× the serial 1F1B baseline.", "label": false }, { "paperid": "2411.07321v1", "paper_path": "./SciVer/papers/2411.07321v1.json", "claim_type": "analytical", "type": "chart", "item": "4(b)", "section": [ "5" ], "image_path": "./SciVer/images/2411.07321v1_figure_4(b).png", "request_id": 746, "origin_statement": "In weak scaling tests, the OpenMPI Ireduce time increases from ~1 s at 4 tasks to ~20 s at 32 tasks, reflecting O(N_data) communication scaling, while the NCCL GPU reduce remains under 2 s even at 32 GPUs, owing to NVLink bandwidth and GPU compute throughput.", "perturbed_statement": "In weak scaling tests, the OpenMPI Ireduce time scales quadratically (from ~1 s at 4 tasks to ~20 s at 32 tasks), reflecting an O(N_tasks^2) overhead from its logarithmic tree algorithm, while the NCCL GPU reduce remains constant at ~0.5 s due to its ring algorithm eliminating communication growth.", "perturbed_explanation": "This is incorrect because Fig. 4 shows the OpenMPI Ireduce time grows roughly linearly with data size (not quadratically), following the O(N_data) term. Furthermore, the NCCL reduce time actually increases from about 0.4 s to 1.5 s between 4 and 32 GPUs, so it is not constant at ~0.5 s.", "claim": "In weak scaling tests, the OpenMPI Ireduce time scales quadratically (from ~1 s at 4 tasks to ~20 s at 32 tasks), reflecting an O(N_tasks^2) overhead from its logarithmic tree algorithm, while the NCCL GPU reduce remains constant at ~0.5 s due to its ring algorithm eliminating communication growth.", "label": false }, { "paperid": "2409.07192v1", "paper_path": "./SciVer/papers/2409.07192v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "6" ], "image_path": "./SciVer/images/2409.07192v1-Table5-1.png", "request_id": 747, "origin_statement": "Despite growing industry interest from 2019, RE4AI remains solution-dominated with sparse empirical validation, indicating that integrating industry-led case studies and human-centric elicitation frameworks is crucial to improve stakeholder trust and maturity in AI-based system requirements engineering.", "perturbed_statement": "Since 2018, industry contributions in RE4AI eliminated the solution bias, with publications now predominantly empirical and requiring no additional validation; moreover, existing RE tools fully support AI-based system requirements, reflecting a mature and tool-backed practice.", "perturbed_explanation": "This is incorrect because industry contributions in RE4AI started in 2019, not 2018; the majority of publications still require additional industry validation (they remain solution-dominated), and no existing RE tool has been used to support AI-based system requirements, contradicting the claim of full tool support.", "claim": "Since 2018, industry contributions in RE4AI eliminated the solution bias, with publications now predominantly empirical and requiring no additional validation; moreover, existing RE tools fully support AI-based system requirements, reflecting a mature and tool-backed practice.", "label": false }, { "paperid": "2409.00210v1", "paper_path": "./SciVer/papers/2409.00210v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "2.1", 
"2.2", "2.5", "4.1" ], "image_path": "./SciVer/images/2409.00210v1_figure_2.png", "request_id": 748, "origin_statement": "The viscous friction component in dynamic pulling causes the free-energy difference ΔF to increase linearly with pulling velocity v for v≤0.06 m/s, enabling extrapolation to an equilibrium desorption free energy ΔF(v=0)≈10.4 kBT, consistent with umbrella sampling and static pulling.", "perturbed_statement": "The viscous friction component in dynamic pulling causes the free-energy difference ΔF to increase linearly with pulling velocity v for v≤0.6 m/s, enabling extrapolation to an equilibrium desorption free energy ΔF(v=0)≈40 kBT, in agreement with umbrella sampling and static pulling values.", "perturbed_explanation": "The perturbed statement is incorrect because linear scaling of ΔF with v is only observed in the low-velocity regime up to 0.06 m/s (not 0.6 m/s), and the extrapolated equilibrium desorption free energy is approximately 10.4 kBT (not 40 kBT).", "claim": "The viscous friction component in dynamic pulling causes the free-energy difference ΔF to increase linearly with pulling velocity v for v≤0.6 m/s, enabling extrapolation to an equilibrium desorption free energy ΔF(v=0)≈40 kBT, in agreement with umbrella sampling and static pulling values.", "label": false }, { "paperid": "2409.16294v1", "paper_path": "./SciVer/papers/2409.16294v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.16294v1-Table1-1.png", "request_id": 749, "origin_statement": "By leveraging autoregressive latent encoding, GenCAD reduces the mean Chamfer distance from 0.783 to 0.762 and lowers the invalid ratio by 0.12 points, illustrating that modeling sequential dependencies in CAD commands enhances both geometric fidelity and program validity.", "perturbed_statement": "By leveraging autoregressive latent encoding, GenCAD reduces the mean Chamfer distance from 0.783 to 0.722 and lowers the invalid ratio by 0.12 points, illustrating that modeling sequential dependencies in CAD commands enhances both geometric fidelity and program validity.", "perturbed_explanation": "The perturbed statement incorrectly reports GenCAD’s mean Chamfer distance as 0.722, but Table 1 shows it is 0.762 for GenCAD. This misstates the actual performance improvement.", "claim": "By leveraging autoregressive latent encoding, GenCAD reduces the mean Chamfer distance from 0.783 to 0.722 and lowers the invalid ratio by 0.12 points, illustrating that modeling sequential dependencies in CAD commands enhances both geometric fidelity and program validity.", "label": false }, { "paperid": "2411.11786v1", "paper_path": "./SciVer/papers/2411.11786v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "2.2.2" ], "image_path": "./SciVer/images/2411.11786v1_figure_1.png", "request_id": 750, "origin_statement": "By t=10000, the generator repeatedly collapses to a single mode 1.5 units from the origin, ignoring the other seven modes. The ReLU-activated 5-layer network cannot approximate multiple disconnected supports with tiny variance (0.01), biasing gradients toward the highest-density cluster.", "perturbed_statement": "By t=10000, the generator repeatedly collapses to a single mode 0.5 units from the origin, ignoring the other seven modes. 
The ReLU-activated 5-layer network cannot approximate multiple disconnected supports with moderate variance (0.1), biasing gradients toward the highest-density cluster.", "perturbed_explanation": "The perturbation is incorrect because the target modes lie 1.5 units from the origin (not 0.5 units), and each mode has variance 0.01 (not 0.1) as stated in the figure caption.", "claim": "By t=10000, the generator repeatedly collapses to a single mode 0.5 units from the origin, ignoring the other seven modes. The ReLU-activated 5-layer network cannot approximate multiple disconnected supports with moderate variance (0.1), biasing gradients toward the highest-density cluster.", "label": false }, { "paperid": "2411.16095v1", "paper_path": "./SciVer/papers/2411.16095v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.16095v1-Table4-1.png", "request_id": 751, "origin_statement": "Label smoothing calibrates predictions by penalizing overconfident outputs, yielding a 17% relative MAPE reduction and 8.6% CR rise compared to -wo-smoothing. VRMP then dynamically reweights variance across long-tail buckets, further decreasing MAPE to 0.2228 and maximizing the ensemble’s consistency.", "perturbed_statement": "Label smoothing calibrates predictions by penalizing overconfident outputs, yielding a 25% relative MAPE reduction and 14% CR rise compared to -wo-smoothing. VRMP then dynamically reweights variance across long-tail buckets, further decreasing MAPE to 0.1928 and maximizing the ensemble’s consistency.", "perturbed_explanation": "The perturbed statement is incorrect because label smoothing reduces MAPE from 0.2692 to 0.2228, a 17.27% drop (not 25%), and increases CR from 57.38% to 62.32%, an 8.61% rise (not 14%). The final MAPE after VRMP is 0.2228, not 0.1928.", "claim": "Label smoothing calibrates predictions by penalizing overconfident outputs, yielding a 25% relative MAPE reduction and 14% CR rise compared to -wo-smoothing. VRMP then dynamically reweights variance across long-tail buckets, further decreasing MAPE to 0.1928 and maximizing the ensemble’s consistency.", "label": false }, { "paperid": "2411.13247v1", "paper_path": "./SciVer/papers/2411.13247v1.json", "claim_type": "analytical", "type": "chart", "item": "4(b)", "section": [ "5" ], "image_path": "./SciVer/images/2411.13247v1_figure_4(b).png", "request_id": 752, "origin_statement": "Excluding TW Hya, our mean A_V for R_V =3.1 and literature values correlate with Pearson r =0.97, implying strong consistency. The systematic elevation of measurements above the identity line reflects residual self-absorption in the H2 [0,1] progression, biasing extinction estimates upward, especially for inclined disks.", "perturbed_statement": "Excluding TW Hya, our mean A_V for R_V =3.1 and literature values correlate with Pearson r =0.82, implying moderate consistency. The systematic elevation of measurements above the identity line reflects residual self-absorption in the H2 [1,7] progression, biasing extinction estimates upward, especially for inclined disks.", "perturbed_explanation": "The perturbation is incorrect because the Pearson correlation coefficient excluding TW Hya is reported as 0.97 (not 0.82) in the text. 
Moreover, the text identifies self-absorption contamination in the [0,1] progression, whereas the statement wrongly attributes it to the [1,7] progression.", "claim": "Excluding TW Hya, our mean A_V for R_V =3.1 and literature values correlate with Pearson r =0.82, implying moderate consistency. The systematic elevation of measurements above the identity line reflects residual self-absorption in the H2 [1,7] progression, biasing extinction estimates upward, especially for inclined disks.", "label": false }, { "paperid": "2409.12720v1", "paper_path": "./SciVer/papers/2409.12720v1.json", "claim_type": "analytical", "type": "table", "item": "4", "section": [ "4.6" ], "image_path": "./SciVer/images/2409.12720v1-Table4-1.png", "request_id": 753, "origin_statement": "Knowledge distillation transfers nuanced soft-label information from the ConvNext-B teacher, enabling ConvNext-V2-T to achieve 68.7% AR, a 0.7 percentage-point gain over direct training and nearing teacher-level performance.", "perturbed_statement": "Knowledge distillation transfers nuanced soft-label information from the ConvNext-B teacher, enabling ConvNext-V2-T to achieve 69.3% AR, a 1.3 percentage-point gain over direct training and surpassing teacher-level performance.", "perturbed_explanation": "The perturbed statement is incorrect because Table 4 shows ConvNext-V2-T reaches only 68.7% AR with distillation (not 69.3%), and this represents a 0.7 percentage-point gain over its 68.0% AR without distillation (not 1.3 points).", "claim": "Knowledge distillation transfers nuanced soft-label information from the ConvNext-B teacher, enabling ConvNext-V2-T to achieve 69.3% AR, a 1.3 percentage-point gain over direct training and surpassing teacher-level performance.", "label": false }, { "paperid": "2410.04465v2", "paper_path": "./SciVer/papers/2410.04465v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2410.04465v2_figure_2.png", "request_id": 754, "origin_statement": "The recoil mass distribution exhibits a prominent signal peak at ~125 GeV with over 90% efficiency due to precise muon isolation, while Z-peak backgrounds at ~91 GeV are suppressed by angular and mass-window cuts, demonstrating effective ZH separation in CEPC 240 GeV collisions with 5600 fb⁻¹.", "perturbed_statement": "The recoil mass distribution exhibits a prominent signal peak at ~115 GeV with over 95% efficiency due to precise muon isolation, while Z-peak backgrounds at ~91 GeV are suppressed by angular and mass-window cuts, demonstrating effective ZH separation in CEPC 240 GeV collisions with 5600 fb⁻¹.", "perturbed_explanation": "The perturbed statement misplaces the Higgs recoil peak at ~115 GeV instead of the actual ~125 GeV and overstates the selection efficiency as >95%, whereas the context and Figure 2 report a signal efficiency exceeding 90%, not 95%.", "claim": "The recoil mass distribution exhibits a prominent signal peak at ~115 GeV with over 95% efficiency due to precise muon isolation, while Z-peak backgrounds at ~91 GeV are suppressed by angular and mass-window cuts, demonstrating effective ZH separation in CEPC 240 GeV collisions with 5600 fb⁻¹.", "label": false }, { "paperid": "2409.16316v1", "paper_path": "./SciVer/papers/2409.16316v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.16316v1-Table1-1.png", "request_id": 757, "origin_statement": "The IEA-PVPS dataset's 1-minute interval SSI measurements from 14 
stations with stringent quality control provide a robust temporal resolution that captures rapid irradiance fluctuations caused by transient cloud cover, enhancing emulator validation against satellite-derived albedo retrievals over 3×3 km SEVIRI pixels.", "perturbed_statement": "The IEA-PVPS dataset's 1-hour interval SSI measurements from 14 stations with stringent quality control provide a robust temporal resolution that captures rapid irradiance fluctuations caused by transient cloud cover, enhancing emulator validation against satellite-derived albedo retrievals over 3×3 km SEVIRI pixels.", "perturbed_explanation": "The perturbed statement is incorrect because, according to Table 1 and the context, the IEA-PVPS dataset records SSI at 1-minute intervals, not 1-hour intervals, so its temporal resolution is misrepresented.", "claim": "The IEA-PVPS dataset's 1-hour interval SSI measurements from 14 stations with stringent quality control provide a robust temporal resolution that captures rapid irradiance fluctuations caused by transient cloud cover, enhancing emulator validation against satellite-derived albedo retrievals over 3×3 km SEVIRI pixels.", "label": false }, { "paperid": "2410.07728v1", "paper_path": "./SciVer/papers/2410.07728v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.07728v1-Table3-1.png", "request_id": 759, "origin_statement": "In the mixed model, each one-unit increase in perceived autonomy yields a 5% increase in correct decisions, reflecting that greater self-determination enhances cognitive control during drone handover tasks, mitigating decision complexity impacts.", "perturbed_statement": "In the mixed model, each one-unit increase in perceived autonomy yields a 10% increase in correct decisions, reflecting that greater self-determination enhances cognitive control during drone handover tasks, mitigating decision complexity impacts.", "perturbed_explanation": "This statement is incorrect because Table 3 reports a coefficient of 0.05 (5%) for autonomy on decision accuracy, not 0.10 (10%). 
The actual effect size is half of what the perturbed statement claims.", "claim": "In the mixed model, each one-unit increase in perceived autonomy yields a 10% increase in correct decisions, reflecting that greater self-determination enhances cognitive control during drone handover tasks, mitigating decision complexity impacts.", "label": false }, { "paperid": "2410.17484v1", "paper_path": "./SciVer/papers/2410.17484v1.json", "claim_type": "analytical", "type": "chart", "item": "2(b)", "section": [ "3" ], "image_path": "./SciVer/images/2410.17484v1_figure_2(b).png", "request_id": 762, "origin_statement": "The DLUC assigns the abdomen client’s two image prompt heads nearly equal weights (~51% vs ~49%), indicating that early transformer blocks integrate both localized tissue textures and broader organ contexts equally for CT-based abdominal VQA.", "perturbed_statement": "For chest images, DLUC assigns ~75% weight to the second question prompt head (w2) over the first (w1), reflecting a stronger reliance on w2 for cross-modal semantic integration.", "perturbed_explanation": "This statement is wrong because Figure 2 shows the chest client’s question prompt weights are about 55% for w2 and 45% for w1, not 75% vs 25% as claimed.", "claim": "For chest images, DLUC assigns ~75% weight to the second question prompt head (w2) over the first (w1), reflecting a stronger reliance on w2 for cross-modal semantic integration.", "label": false }, { "paperid": "2410.05295v3", "paper_path": "./SciVer/papers/2410.05295v3.json", "claim_type": "analytical", "type": "chart", "item": "4(a)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.05295v3_figure_4(a).png", "request_id": 764, "origin_statement": "Despite distributional shifts from Harmbench to unseen malicious requests, the ASR reduction remains below 5% across all target models, indicating that jailbreak strategies exploit model-agnostic vulnerabilities rather than dataset-specific artifacts.", "perturbed_statement": "Despite distributional shifts from Harmbench to unseen malicious requests, the ASR reduction remains below 2% across all target models, indicating that jailbreak strategies exploit model-agnostic vulnerabilities rather than dataset-specific artifacts.", "perturbed_explanation": "The perturbation is incorrect because the observed ASR drop for several target models exceeds 2%. 
For example, Gemini’s ASR falls by 4.2% (66.3% to 62.1%), which contradicts the claimed maximum reduction of 2%.", "claim": "Despite distributional shifts from Harmbench to unseen malicious requests, the ASR reduction remains below 2% across all target models, indicating that jailbreak strategies exploit model-agnostic vulnerabilities rather than dataset-specific artifacts.", "label": false }, { "paperid": "2409.18011v1", "paper_path": "./SciVer/papers/2409.18011v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.18011v1_figure_4.png", "request_id": 766, "origin_statement": "The entropy-based aggregation reduces false positives by summarizing variability over adaptive windows, revealing a robust ~0.6 K cooling from mid-April to early November 1992, in contrast to daily tests that produce numerous transient significant dips due to high-frequency noise.", "perturbed_statement": "The monthly aggregation reduces false positives more effectively than the entropy-based method, demonstrating a consistent ~0.8 K cooling from May to December 1992, while entropy means still show many spurious fluctuations from daily variability.", "perturbed_explanation": "This is incorrect because Figure 4 and the context state that monthly means actually introduce more false positives due to multiple monthly tests and detect the eruption impact later than the entropy method. Additionally, the observed mean cooling is about 0.6 K, not 0.8 K.", "claim": "The monthly aggregation reduces false positives more effectively than the entropy-based method, demonstrating a consistent ~0.8 K cooling from May to December 1992, while entropy means still show many spurious fluctuations from daily variability.", "label": false }, { "paperid": "2410.10723v1", "paper_path": "./SciVer/papers/2410.10723v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.10723v1_figure_5.png", "request_id": 768, "origin_statement": "Parametric CMI produced attenuated TIME coefficients relative to naive and semiparametric CMI, reflecting the stronger log-normal imputation assumption that shrinks variability; however, this approach also underestimates uncertainty, as evidenced by narrower confidence intervals, potentially leading to overconfident inference about time-to-diagnosis effects.", "perturbed_statement": "Parametric CMI produced amplified TIME coefficients relative to naive and semiparametric CMI, reflecting the stronger log-normal imputation assumption that inflates variability; moreover, this approach overestimates uncertainty, as evidenced by wider confidence intervals, potentially leading to overcautious inference about time-to-diagnosis effects.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows that parametric CMI yields smaller (not amplified) TIME coefficient magnitudes compared to naive and semiparametric CMI, and its confidence intervals are narrower (not wider), so it underestimates rather than overestimates uncertainty.", "claim": "Parametric CMI produced amplified TIME coefficients relative to naive and semiparametric CMI, reflecting the stronger log-normal imputation assumption that inflates variability; moreover, this approach overestimates uncertainty, as evidenced by wider confidence intervals, potentially leading to overcautious inference about time-to-diagnosis effects.", "label": false }, { "paperid": "2411.16662v1", "paper_path": 
"./SciVer/papers/2411.16662v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.2.1" ], "image_path": "./SciVer/images/2411.16662v1-Table2-1.png", "request_id": 769, "origin_statement": "By fine-tuning separate binary classifiers, the SPECTER2 model achieves consistently high macro-F1 across both prevalent (e.g., Proposal, 0.83) and rare labels (Applicant: Quantity, 0.93). This isolated decision boundary optimizes class-specific thresholds, mitigating imbalance effects that degrade multi-task performance (e.g., 0.50 F1 for rare classes).", "perturbed_statement": "By fine-tuning separate binary classifiers, the SPECTER2 model achieves consistently high macro-F1 across both prevalent (e.g., Proposal, 0.83) and rare labels (Applicant: Quantity, 0.83). This isolated decision boundary optimizes class-specific thresholds, mitigating imbalance effects that degrade multi-task performance (e.g., 0.63 F1 for rare classes).", "perturbed_explanation": "The binary F1 score for 'Applicant: Quantity' is actually 0.93 (not 0.83) according to Table 2, and the multi-task F1 for that rare category is 0.50 (not 0.63). These misreported values contradict the context’s reported metrics.", "claim": "By fine-tuning separate binary classifiers, the SPECTER2 model achieves consistently high macro-F1 across both prevalent (e.g., Proposal, 0.83) and rare labels (Applicant: Quantity, 0.83). This isolated decision boundary optimizes class-specific thresholds, mitigating imbalance effects that degrade multi-task performance (e.g., 0.63 F1 for rare classes).", "label": false }, { "paperid": "2411.07529v1", "paper_path": "./SciVer/papers/2411.07529v1.json", "claim_type": "analytical", "type": "chart", "item": "6(a)", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.07529v1_figure_6(a).png", "request_id": 770, "origin_statement": "Over 55% of unsolved easy problems had a percent-solved below 20%, indicating that early-stage parsing failures disproportionately compromise overall test performance due to error accumulation, as smaller errors in prompt interpretation cascade through subsequent reasoning steps.", "perturbed_statement": "Over 85% of unsolved easy problems had a percent-solved below 20%, indicating that early-stage parsing failures disproportionately compromise overall test performance due to error accumulation, as smaller errors in prompt interpretation cascade through subsequent reasoning steps.", "perturbed_explanation": "This is incorrect because the histogram shows 17 out of 31 unsolved easy problems—approximately 55%—fall in the 0–20% solved bin, not 85% as claimed.", "claim": "Over 85% of unsolved easy problems had a percent-solved below 20%, indicating that early-stage parsing failures disproportionately compromise overall test performance due to error accumulation, as smaller errors in prompt interpretation cascade through subsequent reasoning steps.", "label": false }, { "paperid": "2411.01747v1", "paper_path": "./SciVer/papers/2411.01747v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "4.3", "4.3.2" ], "image_path": "./SciVer/images/2411.01747v1-Table2-1.png", "request_id": 771, "origin_statement": "Integrating action accumulation with initial expert-designed actions yields synergistic performance gains, boosting average exact match from 35.15% to 41.82%, reflecting a 6.67% relative increase. 
This synergy likely arises because accumulation exploits IA's diverse tooling via iterative refinement across episodes.", "perturbed_statement": "Integrating action accumulation with initial expert-designed actions yields synergistic performance gains, boosting average exact match from 33.96% to 41.82%, reflecting a 9.2% relative increase. This synergy likely arises because accumulation exploits IA's diverse tooling via iterative refinement across episodes.", "perturbed_explanation": "The perturbation is incorrect because the baseline without action accumulation is 35.15%, not 33.96%, and the actual relative increase from 35.15% to 41.82% is 6.67%, not 9.2%.", "claim": "Integrating action accumulation with initial expert-designed actions yields synergistic performance gains, boosting average exact match from 33.96% to 41.82%, reflecting a 9.2% relative increase. This synergy likely arises because accumulation exploits IA's diverse tooling via iterative refinement across episodes.", "label": false }, { "paperid": "2411.15645v1", "paper_path": "./SciVer/papers/2411.15645v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.7" ], "image_path": "./SciVer/images/2411.15645v1_figure_4.png", "request_id": 772, "origin_statement": "Under MC-NEST with 16 importance-sampling rollouts, GPT-4o solved 40 Number Theory and approximately 28 Geometry problems—far exceeding its 15 Algebra, 10 Combinatorics, and 12 Others—suggesting that importance sampling amplifies its capacity for hierarchical structural reasoning in number-theoretic contexts relative to symbolically diverse tasks.", "perturbed_statement": "Under MC-NEST with 16 importance-sampling rollouts, GPT-4o solved only 25 Number Theory and 20 Geometry problems—far exceeding its 15 Algebra, 10 Combinatorics, and 12 Others—suggesting that importance sampling amplifies its capacity for hierarchical structural reasoning in geometry relative to symbolically diverse tasks.", "perturbed_explanation": "Figure 4 reports 40 Number Theory and ~28 Geometry solves, not 25 and 20. The perturbed counts contradict the bar heights, and geometry never outperforms number theory, invalidating the claimed relative advantage.", "claim": "Under MC-NEST with 16 importance-sampling rollouts, GPT-4o solved only 25 Number Theory and 20 Geometry problems—far exceeding its 15 Algebra, 10 Combinatorics, and 12 Others—suggesting that importance sampling amplifies its capacity for hierarchical structural reasoning in geometry relative to symbolically diverse tasks.", "label": false }, { "paperid": "2409.03843v1", "paper_path": "./SciVer/papers/2409.03843v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "4.1", "4.2", "4.3", "4.4" ], "image_path": "./SciVer/images/2409.03843v1_figure_2.png", "request_id": 774, "origin_statement": "Under a Republican identity, GPT-4o exhibited a 2.43-point ingroup bias, increasing its mean agreement with Republican statements from –1.53 to 0.90. This shift aligns with social identity theory, where self-categorization amplifies endorsement of ingroup values.", "perturbed_statement": "Under a Republican identity, GPT-4o exhibited a 1.43-point ingroup bias, increasing its mean agreement with Republican statements from –1.53 to 0.90. This shift aligns with social identity theory, where self-categorization amplifies endorsement of ingroup values.", "perturbed_explanation": "The ingroup bias magnitude is incorrect. 
According to Figure 2B and the context, the bias increased by 2.43 points, not 1.43 points, when adopting the Republican identity.", "claim": "Under a Republican identity, GPT-4o exhibited a 1.43-point ingroup bias, increasing its mean agreement with Republican statements from –1.53 to 0.90. This shift aligns with social identity theory, where self-categorization amplifies endorsement of ingroup values.", "label": false }, { "paperid": "2410.17657v1", "paper_path": "./SciVer/papers/2410.17657v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2410.17657v1-Table1-1.png", "request_id": 775, "origin_statement": "ReflecTool uniquely offers all five agent capacities and methods, leveraging both tool-wise reflection and long-term memory to dynamically optimize tool selection across multimodal, numerical analysis, data understanding, and trustworthiness tasks, thereby overcoming the modality-specific and tool-limited constraints of existing clinical agents.", "perturbed_statement": "ReflecTool uniquely offers all five agent capacities and methods but lacks long-term memory, relying solely on tool-wise reflection to dynamically optimize tool selection across multimodal, numerical analysis, data understanding, and trustworthiness tasks, thereby overcoming the modality-specific and tool-limited constraints of existing clinical agents.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 shows that ReflecTool has a checkmark under 'Long-Term Memory'. Therefore, it does support long-term memory in addition to tool-wise reflection, contradicting the claim that it lacks long-term memory.", "claim": "ReflecTool uniquely offers all five agent capacities and methods but lacks long-term memory, relying solely on tool-wise reflection to dynamically optimize tool selection across multimodal, numerical analysis, data understanding, and trustworthiness tasks, thereby overcoming the modality-specific and tool-limited constraints of existing clinical agents.", "label": false }, { "paperid": "2411.06723v1", "paper_path": "./SciVer/papers/2411.06723v1.json", "claim_type": "analytical", "type": "chart", "item": "11(a)", "section": [ "5.3.2", "5.3.3" ], "image_path": "./SciVer/images/2411.06723v1_figure_11(a).png", "request_id": 778, "origin_statement": "LLM-powered chatbots’ contextual coherence, reflected in rising average daily steps (peaking on Day 4 for LLM-SAG), suggests that dynamic alignment fosters self-efficacy via personalized motivational feedback, thus sustaining user engagement and boosting physical activity.", "perturbed_statement": "LLM-powered chatbots’ contextual coherence, reflected in rising average daily steps (peaking on Day 2 for LLM-SAG), suggests that dynamic alignment fosters self-efficacy via personalized motivational feedback, thus sustaining user engagement and boosting physical activity.", "perturbed_explanation": "The perturbation incorrectly states that LLM-SAG daily steps peaked on Day 2, whereas the context and Figure 11 show the highest average steps occurred on Day 4 for the LLM-SAG condition.", "claim": "LLM-powered chatbots’ contextual coherence, reflected in rising average daily steps (peaking on Day 2 for LLM-SAG), suggests that dynamic alignment fosters self-efficacy via personalized motivational feedback, thus sustaining user engagement and boosting physical activity.", "label": false }, { "paperid": "2410.16343v1", "paper_path": "./SciVer/papers/2410.16343v1.json", "claim_type": 
"analytical", "type": "table", "item": "2", "section": [ "4" ], "image_path": "./SciVer/images/2410.16343v1-Table2-1.png", "request_id": 779, "origin_statement": "High-elevation catchments above 2,000 m such as Taylor River (2,800 m) and Blue River (2,670 m) exhibit mean temperatures around 272.8 K and precipitation below 1.9 mm/day, consistent with the adiabatic lapse rate cooling and reduced orographic moisture flux.", "perturbed_statement": "High-elevation catchments above 2,000 m such as Taylor River (2,800 m) and Blue River (2,670 m) exhibit mean temperatures around 280 K and precipitation above 4 mm/day, suggesting enhanced moisture flux instead of the expected adiabatic cooling.", "perturbed_explanation": "This is incorrect because the Taylor River and Blue River catchments actually have mean precipitation of 1.67 mm/day and 1.90 mm/day—well below 4 mm/day—and mean temperatures of approximately 272.9 K and 272.7 K, not around 280 K, contradicting the perturbed values.", "claim": "High-elevation catchments above 2,000 m such as Taylor River (2,800 m) and Blue River (2,670 m) exhibit mean temperatures around 280 K and precipitation above 4 mm/day, suggesting enhanced moisture flux instead of the expected adiabatic cooling.", "label": false }, { "paperid": "2410.02502v1", "paper_path": "./SciVer/papers/2410.02502v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "5" ], "image_path": "./SciVer/images/2410.02502v1_figure_5.png", "request_id": 782, "origin_statement": "The LHCb 13 TeV determination of sin²θ_eff has total uncertainty dominated by statistical errors, exceeding PDF-induced theoretical uncertainty, indicating that measurement precision at forward rapidities is limited more by data statistics than parton distribution knowledge, obviating the need for PDF profiling.", "perturbed_statement": "The LHCb 13 TeV determination of sin²θ_eff has total uncertainty dominated by PDF-induced theoretical errors, exceeding statistical uncertainty, indicating that measurement precision at forward rapidities is limited more by parton distribution knowledge than data statistics, necessitating PDF profiling.", "perturbed_explanation": "This is incorrect because the context explicitly states that the theoretical uncertainty (dominated by PDFs) is significantly smaller than the statistical uncertainty. 
Therefore, the total uncertainty is not dominated by PDF errors, and no PDF profiling is required.", "claim": "The LHCb 13 TeV determination of sin²θ_eff has total uncertainty dominated by PDF-induced theoretical errors, exceeding statistical uncertainty, indicating that measurement precision at forward rapidities is limited more by parton distribution knowledge than data statistics, necessitating PDF profiling.", "label": false }, { "paperid": "2409.01435v1", "paper_path": "./SciVer/papers/2409.01435v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "5" ], "image_path": "./SciVer/images/2409.01435v1-Table3-1.png", "request_id": 783, "origin_statement": "By applying pre-aggregation sparsification before direction-based adaptive filtering (Spar+Di), LASA synergistically enhances robustness in non-IID Federated Learning: on FEMNIST, Spar+Di raises Min-Max accuracy from 44.39% to 84.28% and Noise accuracy from 54.91% to 69.03%, underscoring sparsification's amplification of filtering efficacy.", "perturbed_statement": "By applying pre-aggregation sparsification before direction-based adaptive filtering (Spar+Di), LASA synergistically enhances robustness in non-IID Federated Learning: on FEMNIST, Spar+Di raises Min-Max accuracy from 44.39% to 94.28% and Noise accuracy from 54.91% to 79.03%, underscoring sparsification's amplification of filtering efficacy.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 reports Spar+Di achieves 84.28% accuracy under Min-Max and 69.03% under Noise on FEMNIST, not the erroneously stated 94.28% and 79.03% values.", "claim": "By applying pre-aggregation sparsification before direction-based adaptive filtering (Spar+Di), LASA synergistically enhances robustness in non-IID Federated Learning: on FEMNIST, Spar+Di raises Min-Max accuracy from 44.39% to 94.28% and Noise accuracy from 54.91% to 79.03%, underscoring sparsification's amplification of filtering efficacy.", "label": false }, { "paperid": "2409.17424v1", "paper_path": "./SciVer/papers/2409.17424v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.17424v1_figure_5.png", "request_id": 784, "origin_statement": "At recall ~0.88, PyANNs sustains ≈8500 QPS, about twice SHNSW’s ≈4200 QPS, demonstrating that half-precision quantization with a refinement stage effectively offsets graph search’s memory-access overhead.", "perturbed_statement": "At recall ~0.88, PyANNs sustains ≈15000 QPS, nearly quadruple SHNSW’s ≈4200 QPS, demonstrating that half-precision quantization with a refinement stage effectively offsets graph search’s memory-access overhead.", "perturbed_explanation": "The statement incorrectly reports PyANNs’s QPS at recall ~0.88 as ≈15000; the actual performance is around 8500 QPS, yielding only a ~2× speed-up over SHNSW, not 4×.", "claim": "At recall ~0.88, PyANNs sustains ≈15000 QPS, nearly quadruple SHNSW’s ≈4200 QPS, demonstrating that half-precision quantization with a refinement stage effectively offsets graph search’s memory-access overhead.", "label": false }, { "paperid": "2409.05923v1", "paper_path": "./SciVer/papers/2409.05923v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.05923v1_figure_5.png", "request_id": 786, "origin_statement": "CodeLlama-7b achieves its highest Pass@1 score at θ=0.6, illustrating an optimal balance where the USCD mechanism sufficiently mitigates noisy tokens
without over-regularizing the output distribution; values below or above this threshold respectively fail to repair noise or overly constrain model diversity, reducing code generation performance.", "perturbed_statement": "CodeLlama-7b achieves its highest Pass@1 score at θ=0.8, indicating the USCD mechanism's optimal regime; values below 0.4 inadequately suppress noise, while values above 1.0 induce gradient vanishing, excessively smoothing logits and harming model diversity.", "perturbed_explanation": "The perturbed statement is wrong because Figure 5 clearly shows the peak Pass@1 score at θ=0.6, not θ=0.8. Additionally, performance degradation at high θ values is due to over-regularization of the output distribution, not gradient vanishing.", "claim": "CodeLlama-7b achieves its highest Pass@1 score at θ=0.8, indicating the USCD mechanism's optimal regime; values below 0.4 inadequately suppress noise, while values above 1.0 induce gradient vanishing, excessively smoothing logits and harming model diversity.", "label": false }, { "paperid": "2409.06390v1", "paper_path": "./SciVer/papers/2409.06390v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.06390v1-Table2-1.png", "request_id": 787, "origin_statement": "Environment-based generation via cdxgen captures installed runtime packages and transient dependencies that metadata-based tools like Syft and Trivy might omit, thereby enhancing vulnerability coverage in containerized applications by leveraging actual environment inspection.", "perturbed_statement": "Dependency graph-based SBOM generation via GH-sbom captures installed runtime packages and transient dependencies that metadata-based tools like Syft and Trivy might omit, thereby enhancing vulnerability coverage in containerized applications through actual environment inspection.", "perturbed_explanation": "The perturbed statement is incorrect because GH-sbom uses a dependency graph-based generation method (Table 2) rather than environment-based inspection.
Dependency graph-based tools reconstruct dependencies from manifest data and do not capture installed runtime packages directly, so they cannot inspect the actual environment.", "claim": "Dependency graph-based SBOM generation via GH-sbom captures installed runtime packages and transient dependencies that metadata-based tools like Syft and Trivy might omit, thereby enhancing vulnerability coverage in containerized applications through actual environment inspection.", "label": false }, { "paperid": "2409.07981v1", "paper_path": "./SciVer/papers/2409.07981v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.07981v1_figure_4.png", "request_id": 788, "origin_statement": "In the 14–58% shock range, each additional 1% gas supply reduction cuts gross output by ~0.25% because limited flexibility in PBL production intensifies upstream supply disruptions, triggering network spillovers and Keynesian income effects that amplify direct industrial shortfalls into broader economic losses.", "perturbed_statement": "In the 10–60% shock range, each additional 1% gas supply reduction cuts gross output by ~0.5% because inflexible Leontief production intensifies downstream bottlenecks, triggering network spillovers and demand-side feedback that amplify direct shortfalls into systemic economic losses.", "perturbed_explanation": "The perturbed statement misstates the shock interval and marginal impact: the context specifies a 14–58% range with a 0.25% output reduction per 1% gas shock, not 10–60% with a 0.5% effect. It also incorrectly attributes amplification to Leontief production and downstream bottlenecks instead of PBL production and upstream disruptions.", "claim": "In the 10–60% shock range, each additional 1% gas supply reduction cuts gross output by ~0.5% because inflexible Leontief production intensifies downstream bottlenecks, triggering network spillovers and demand-side feedback that amplify direct shortfalls into systemic economic losses.", "label": false }, { "paperid": "2409.07335v1", "paper_path": "./SciVer/papers/2409.07335v1.json", "claim_type": "analytical", "type": "chart", "item": "1", "section": [ "4.1", "4.1.1" ], "image_path": "./SciVer/images/2409.07335v1_figure_1.png", "request_id": 792, "origin_statement": "The linear increase in NLP PGR with model size reflects transformers’ improved capacity to internalize linguistic abstractions, while the inverse trend in chess indicates that combinatorial game complexity overwhelms larger models’ naive finetuning. Reward modeling’s sub-20% PGR implies weak preference labels insufficiently constrain policy learning.", "perturbed_statement": "The linear increase in NLP PGR with model size reflects transformers’ improved capacity to internalize linguistic abstractions, while the peak in chess PGR at 80% for large models indicates combinatorial game complexity no longer hinders finetuning. Reward modeling’s 60% PGR implies preference labels suffice without advanced methods.", "perturbed_explanation": "This is incorrect because Figure 1 shows chess PGR actually decreases from 60% (small) to 20% (large), so it never peaks at 80% for large models. 
Additionally, reward modeling PGR remains below 20% (0.1→0.15→0.2), contradicting the claimed 60% recovery.", "claim": "The linear increase in NLP PGR with model size reflects transformers’ improved capacity to internalize linguistic abstractions, while the peak in chess PGR at 80% for large models indicates combinatorial game complexity no longer hinders finetuning. Reward modeling’s 60% PGR implies preference labels suffice without advanced methods.", "label": false }, { "paperid": "2410.18321v1", "paper_path": "./SciVer/papers/2410.18321v1.json", "claim_type": "analytical", "type": "chart", "item": "7", "section": [ "5.5.2" ], "image_path": "./SciVer/images/2410.18321v1_figure_7.png", "request_id": 794, "origin_statement": "The miscalibration curve reveals a systematic underconfidence for ground-truth probabilities between 0.4 and 0.8, where the blue line falls up to 0.1 below the diagonal; applying a calibration regularizer λ of 0.5 increases these mid-range predictions by ~10%, reducing ECE through Brier score gradient alignment.", "perturbed_statement": "The miscalibration curve reveals a systematic underconfidence for ground-truth probabilities between 0.1 and 0.4; applying a calibration regularizer λ of 0.8 decreases these small-range predictions by 10%, improving ECE through cross-entropy gradient descent.", "perturbed_explanation": "This is incorrect because the underconfidence region in Fig. 7 spans yₖ from 0.4 to 0.8 (not 0.1–0.4), the paper uses λ=0.5 (not 0.8) which increases mid-range predictions (not decreases small-range probabilities), and calibration gains stem from a Brier-score-based loss rather than cross-entropy gradients.", "claim": "The miscalibration curve reveals a systematic underconfidence for ground-truth probabilities between 0.1 and 0.4; applying a calibration regularizer λ of 0.8 decreases these small-range predictions by 10%, improving ECE through cross-entropy gradient descent.", "label": false }, { "paperid": "2411.08788v1", "paper_path": "./SciVer/papers/2411.08788v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2411.08788v1-Table1-1.png", "request_id": 795, "origin_statement": "Experiments in Table 1 show that static plastic bedforms with sand coating (wavelength 0.914 m, height 0.137 m) under flow velocities up to 0.653 m/s produce localized pressure peaks halfway along the stoss face; according to Darcy's law, these pressure heterogeneities can enhance hyporheic exchange flux beneath the SWI.", "perturbed_statement": "Experiments in Table 1 show that static plastic bedforms with sand coating (wavelength 0.914 m, height 0.237 m) under flow velocities up to 0.653 m/s produce localized pressure peaks at the crest; according to Darcy's law, these pressure heterogeneities can enhance hyporheic exchange flux beneath the SWI.", "perturbed_explanation": "The perturbed statement incorrectly reports the bedform height as 0.237 m instead of the actual 0.137 m given in Table 1. 
It also erroneously locates the pressure peak at the crest, whereas multiple sources (e.g., van Mierlo and de Ruiter 1988 and preceding studies) confirm the local maximum occurs halfway along the stoss face.", "claim": "Experiments in Table 1 show that static plastic bedforms with sand coating (wavelength 0.914 m, height 0.237 m) under flow velocities up to 0.653 m/s produce localized pressure peaks at the crest; according to Darcy's law, these pressure heterogeneities can enhance hyporheic exchange flux beneath the SWI.", "label": false }, { "paperid": "2411.15453v1", "paper_path": "./SciVer/papers/2411.15453v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.15453v1-Table1-1.png", "request_id": 797, "origin_statement": "Spatial downsampling enhances LLaVA-1.5-7B’s AVG from 34.7 to 38.4 (+3.7%) and 13B’s from 42.1 to 47.4 (+5.3%), yet GQA accuracy drops by 2.1% and 1.6%, indicating that reduced spatial resolution sacrifices fine-grained cues essential for compositional reasoning.", "perturbed_statement": "Spatial downsampling enhances LLaVA-1.5-7B’s AVG from 34.7 to 41.8 (+7.3%) and 13B’s from 42.1 to 51.6 (+9.5%), yet GQA accuracy drops by 2.5% and 2.0%, suggesting a trade-off between token reduction and spatial detail.", "perturbed_explanation": "The perturbed statement is incorrect: SPD-7B’s AVG increases from 34.7 to 38.4 (not to 41.8), which is +3.7% (not +7.3%), and SPD-13B’s AVG goes from 42.1 to 47.4 (+5.3%, not +9.5%). GQA accuracy drops by 2.1% and 1.6%, not 2.5% and 2.0%.", "claim": "Spatial downsampling enhances LLaVA-1.5-7B’s AVG from 34.7 to 41.8 (+7.3%) and 13B’s from 42.1 to 51.6 (+9.5%), yet GQA accuracy drops by 2.5% and 2.0%, suggesting a trade-off between token reduction and spatial detail.", "label": false }, { "paperid": "2411.15268v1", "paper_path": "./SciVer/papers/2411.15268v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "6.2" ], "image_path": "./SciVer/images/2411.15268v1-Table3-1.png", "request_id": 801, "origin_statement": "Applying activation shift vectors from LLaVA-v1.5 on Qwen-VL raises its GQA F1 score by an average of ∼4.6% over the regular baseline across Random, Popular, and Adversarial settings, yet still trails the original ICT method’s ∼7.2% average F1 gain, indicating only partial transferability.", "perturbed_statement": "Applying activation shift vectors from LLaVA-v1.5 on Qwen-VL raises its GQA F1 score by an average of ∼6.5% over the regular baseline across Random, Popular, and Adversarial settings, outperforming the original ICT method’s ∼5.2% average F1 gain, indicating robust transferability.", "perturbed_explanation": "The perturbed statement misreports the average F1 improvement: the true gain for ICT-LLaVA-v1.5 is ∼4.6%, not ∼6.5%. 
It also incorrectly claims it outperforms ICT, whereas ICT’s actual average F1 gain is ∼7.2%, not ∼5.2%.", "claim": "Applying activation shift vectors from LLaVA-v1.5 on Qwen-VL raises its GQA F1 score by an average of ∼6.5% over the regular baseline across Random, Popular, and Adversarial settings, outperforming the original ICT method’s ∼5.2% average F1 gain, indicating robust transferability.", "label": false }, { "paperid": "2411.12858v2", "paper_path": "./SciVer/papers/2411.12858v2.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.12858v2_figure_2.png", "request_id": 804, "origin_statement": "CDI’s p-value for the U-ViT256-T2I-Deep model trained on COCO drops nonlinearly from ~0.1 at 50 samples to <0.01 by 70, demonstrating that diffusion models trained on smaller datasets exhibit stronger per-sample gradient signals, requiring fewer examples for membership verification.", "perturbed_statement": "CDI’s p-value for the U-ViT256-T2I-Deep model trained on COCO drops nonlinearly from ~0.1 at 50 samples to <0.005 by 30, demonstrating that diffusion models trained on smaller datasets exhibit stronger per-sample gradient signals, requiring fewer examples for membership verification.", "perturbed_explanation": "The perturbed statement misreports both the sample count and significance threshold: according to Fig. 2, the p-value only falls below 0.01 at around 70 samples, never reaching <0.005 by 30 samples.", "claim": "CDI’s p-value for the U-ViT256-T2I-Deep model trained on COCO drops nonlinearly from ~0.1 at 50 samples to <0.005 by 30, demonstrating that diffusion models trained on smaller datasets exhibit stronger per-sample gradient signals, requiring fewer examples for membership verification.", "label": false }, { "paperid": "2411.14588v1", "paper_path": "./SciVer/papers/2411.14588v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "3.3.2" ], "image_path": "./SciVer/images/2411.14588v1-Table5-1.png", "request_id": 805, "origin_statement": "Under the U-B approximation with identical two-way switching rate p, the mix subpopulation’s weighted mean copy number is dominated by the exponential term e^{4p^2 t -6pt}, leading to faster growth than pure yellow or red cells whenever p exceeds 1.5, reflecting enhanced eigenvalue separation from mixing.", "perturbed_statement": "Under the U-B approximation with identical two-way switching rate p, the mix subpopulation’s weighted mean copy number is dominated by the exponential term e^{4p^2 t +6pt}, leading to faster growth than pure yellow or red cells whenever p exceeds 0.5, reflecting enhanced eigenvalue separation from mixing.", "perturbed_explanation": "The perturbation incorrectly changes the exponent from e^{4p^2 t -6pt} (as given in Table 5) to e^{4p^2 t +6pt}, and it misstates the dominance threshold as p > 0.5 instead of deriving it from 4p^2 - 6p > 0, which yields p > 1.5.", "claim": "Under the U-B approximation with identical two-way switching rate p, the mix subpopulation’s weighted mean copy number is dominated by the exponential term e^{4p^2 t +6pt}, leading to faster growth than pure yellow or red cells whenever p exceeds 0.5, reflecting enhanced eigenvalue separation from mixing.", "label": false }, { "paperid": "2409.09929v1", "paper_path": "./SciVer/papers/2409.09929v1.json", "claim_type": "analytical", "type": "chart", "item": "6", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.09929v1_figure_6.png", "request_id": 806, 
"origin_statement": "The semi-analytical model employing 42-point Gaussian integration and truncated modal series predicts TL within 1.5 dB of FE-SEA across 0–2000 Hz, demonstrating that increased modal density at high frequencies smooths random modal fluctuation and validates the hybrid fluid–structure coupling treatment.", "perturbed_statement": "The semi-analytical model employing 60-point Gaussian integration and truncated modal series predicts TL within 0.5 dB of FE-SEA across 0–2000 Hz, demonstrating that increased modal density at high frequencies smooths random modal fluctuation and validates the hybrid fluid–structure coupling treatment.", "perturbed_explanation": "This statement is incorrect because the context specifies using 42 sampling points for Gaussian integration, not 60, and reports agreement within about 1.5 dB, not within 0.5 dB, across the 0–2000 Hz range.", "claim": "The semi-analytical model employing 60-point Gaussian integration and truncated modal series predicts TL within 0.5 dB of FE-SEA across 0–2000 Hz, demonstrating that increased modal density at high frequencies smooths random modal fluctuation and validates the hybrid fluid–structure coupling treatment.", "label": false }, { "paperid": "2409.01123v1", "paper_path": "./SciVer/papers/2409.01123v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.01123v1_figure_5.png", "request_id": 808, "origin_statement": "As the R3+ ionic radius decreases from Pr3+ to Yb3+, the on-site Coulomb interaction U rises by about 20% due to weaker dielectric screening, outpacing the ~5% reduction in bandwidth W; this U/W increase drives the Mott transition when U/W surpasses unity in smaller‐ion R2Ir2O7 compounds.", "perturbed_statement": "As the R3+ ionic radius decreases from Pr3+ to Yb3+, the on-site Coulomb interaction U rises by about 50% due to weaker dielectric screening, outpacing the ~15% reduction in bandwidth W; this U/W increase drives the Mott transition when U/W surpasses 1.5 in smaller‐ion R2Ir2O7 compounds.", "perturbed_explanation": "The perturbed statement is incorrect because the actual cRPA U increases only by about 8–13% (from ~2.3 eV to ~2.6 eV) rather than 50%, the bandwidth reduction is closer to ~5% not 15%, and the Mott transition threshold U/W is near 1, not 1.5.", "claim": "As the R3+ ionic radius decreases from Pr3+ to Yb3+, the on-site Coulomb interaction U rises by about 50% due to weaker dielectric screening, outpacing the ~15% reduction in bandwidth W; this U/W increase drives the Mott transition when U/W surpasses 1.5 in smaller‐ion R2Ir2O7 compounds.", "label": false }, { "paperid": "2409.15466v1", "paper_path": "./SciVer/papers/2409.15466v1.json", "claim_type": "analytical", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.15466v1-Table2-1.png", "request_id": 809, "origin_statement": "The data-dependent Matérn 1/2 kernel’s low smoothness enables rapid convergence—achieving 95.3% IoU and halving NKF’s training time to three days—consistent with theoretical results that less smooth kernels learn faster in neural kernel fields.", "perturbed_statement": "According to observed convergence, the infinitely smooth Matérn kernel (Matérn ∞) converges in just 50 epochs—faster than Matérn 1/2—and achieves a peak IoU of 96.0%.", "perturbed_explanation": "This statement is incorrect because Table 2 reports that the Matérn ∞ kernel actually achieves only 92.1% IoU, not 96.0%, and the analysis in 
Section 5.2 (and Figure 9) shows that smoother kernels like Matérn ∞ converge more slowly, with Matérn 1/2 converging the fastest rather than in just 50 epochs.", "claim": "According to observed convergence, the infinitely smooth Matérn kernel (Matérn ∞) converges in just 50 epochs—faster than Matérn 1/2—and achieves a peak IoU of 96.0%.", "label": false }, { "paperid": "2411.13892v1", "paper_path": "./SciVer/papers/2411.13892v1.json", "claim_type": "analytical", "type": "table", "item": "5", "section": [ "5.6" ], "image_path": "./SciVer/images/2411.13892v1-Table5-1.png", "request_id": 811, "origin_statement": "By scaling sublinearly—preprocessing grows 36× (2.5s→91s) and inference grows only 6× (0.098s→0.65s) from Adressa to ML10M—TSP operates in near-linear time, reducing relative computational overhead compared to LightGCN’s 54× training growth, thus ensuring efficient large-scale recommendation performance.", "perturbed_statement": "TSP’s inference overhead increases from 0.098s on Adressa to over 2s on ML10M, indicating superlinear time complexity that hampers scalability in large-scale recommendation systems.", "perturbed_explanation": "This is incorrect because the inference time on ML10M is reported as 0.650s, not over 2s; moreover, the observed growth (approximately 6.6×) is sublinear rather than superlinear, directly contradicting Table 5.", "claim": "TSP’s inference overhead increases from 0.098s on Adressa to over 2s on ML10M, indicating superlinear time complexity that hampers scalability in large-scale recommendation systems.", "label": false }, { "paperid": "2411.04487v1", "paper_path": "./SciVer/papers/2411.04487v1.json", "claim_type": "analytical", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.04487v1_figure_5.png", "request_id": 812, "origin_statement": "High R2 values (0.949 for yield, 0.935 for inverted thresholds, 0.985 for wavelength) combined with low MSEs indicate the Gaussian process regressor effectively captures smooth variations in lasing wavelength, yield deposition kinetics, and threshold behavior, though threshold predictions exhibit larger variance suggesting nonlinear cavity loss contributions.", "perturbed_statement": "High R2 values (0.949 for yield, 0.75 for inverted thresholds, 0.985 for wavelength) with low MSEs (0.05 for yield, 603.9 for threshold, 100.6 for wavelength) suggest the GP effectively captures deposition kinetics and spectral shifts, though threshold noise remains significant.", "perturbed_explanation": "The perturbed statement is incorrect because the yield MSE is 0.005 (not 0.05) and the lasing threshold R2 is 0.935 (not 0.75) as reported in Figure 5.", "claim": "High R2 values (0.949 for yield, 0.75 for inverted thresholds, 0.985 for wavelength) with low MSEs (0.05 for yield, 603.9 for threshold, 100.6 for wavelength) suggest the GP effectively captures deposition kinetics and spectral shifts, though threshold noise remains significant.", "label": false }, { "paperid": "2411.05174v1", "paper_path": "./SciVer/papers/2411.05174v1.json", "claim_type": "analytical", "type": "chart", "item": "2", "section": [ "7" ], "image_path": "./SciVer/images/2411.05174v1_figure_2.png", "request_id": 814, "origin_statement": "In Randomworlds standard tasks, BITL’s nearly linear rise in normalized value towards 1 as coverage increases indicates that Bayesian uncertainty modeling over dynamics effectively mitigates sparse sampling, balancing exploration and exploitation and avoiding local optima that hinder 
gradient-based MCE methods.", "perturbed_statement": "In Randomworlds standard tasks, BITL’s logarithmic rise in normalized value towards 1 as coverage increases indicates that Gaussian priors over dynamics effectively mitigate sparse sampling, balancing exploration and exploitation and avoiding local optima that hinder gradient-based MCE methods.", "perturbed_explanation": "This statement is incorrect because Figure 2 (bottom left) shows that BITL’s performance grows almost linearly with coverage, not logarithmically. Additionally, BITL uses a uniform Dirichlet prior over transition probabilities rather than Gaussian priors, so attributing its gains to Gaussian priors contradicts the method’s actual prior specification.", "claim": "In Randomworlds standard tasks, BITL’s logarithmic rise in normalized value towards 1 as coverage increases indicates that Gaussian priors over dynamics effectively mitigate sparse sampling, balancing exploration and exploitation and avoiding local optima that hinder gradient-based MCE methods.", "label": false }, { "paperid": "2409.07732v1", "paper_path": "./SciVer/papers/2409.07732v1.json", "claim_type": "analytical", "type": "chart", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.07732v1_figure_4.png", "request_id": 818, "origin_statement": "The horizontal rule before \"Machine Learning Mastery\" in Fig. 4 delineates foundational topics (rows 1–6) from advanced modules (rows 7–11), leveraging LaTeX booktabs conventions to enhance the table’s hierarchical clarity.", "perturbed_statement": "The midrule appears after row 5, separating novice modules (rows 1–5) from intermediate courses (rows 6–11), applying LaTeX’s standard tabular spacing to emphasize a stepwise progression.", "perturbed_explanation": "This is incorrect because the horizontal line in Fig. 4 is actually inserted after the sixth row, not the fifth, and the lower block lists advanced modules (Machine Learning Mastery onward), not intermediate courses. Additionally, the table uses the booktabs package’s midrule for grouping—not default tabular spacing.", "claim": "The midrule appears after row 5, separating novice modules (rows 1–5) from intermediate courses (rows 6–11), applying LaTeX’s standard tabular spacing to emphasize a stepwise progression.", "label": false }, { "paperid": "2409.08534v1", "paper_path": "./SciVer/papers/2409.08534v1.json", "claim_type": "analytical", "type": "table", "item": "3", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.08534v1-Table3-1.png", "request_id": 819, "origin_statement": "Reducing dropout voltage in an LDO limits minimum headroom, but by concurrently increasing loop bandwidth and minimizing quiescent current, the regulator achieves faster transient recovery and improved efficiency due to enhanced error-amplifier responsiveness and lower idle power draw.", "perturbed_statement": "Reducing dropout voltage in an LDO limits minimum headroom, but by concurrently decreasing loop bandwidth and minimizing quiescent current, the regulator achieves faster transient recovery and improved efficiency due to enhanced error-amplifier responsiveness and lower idle power draw.", "perturbed_explanation": "The perturbed statement incorrectly claims that decreasing loop bandwidth improves transient recovery. 
Table 3 specifies that loop bandwidth should increase (↑) for a low dropout regulator to enhance transient response; reducing bandwidth would actually slow recovery and degrade regulation speed.", "claim": "Reducing dropout voltage in an LDO limits minimum headroom, but by concurrently decreasing loop bandwidth and minimizing quiescent current, the regulator achieves faster transient recovery and improved efficiency due to enhanced error-amplifier responsiveness and lower idle power draw.", "label": false }, { "paperid": "2409.10708v1", "paper_path": "./SciVer/papers/2409.10708v1.json", "claim_type": "analytical", "type": "chart", "item": "8(b)", "section": [ "5.3.2" ], "image_path": "./SciVer/images/2409.10708v1_figure_8(b).png", "request_id": 822, "origin_statement": "Honest UAVs detect no neighbors with 83.86% accuracy, but accuracy falls to 36.55% when two neighbors are present, showing that increased neighborhood size degrades neighbor discovery due to higher signal interference and contention.", "perturbed_statement": "Honest UAVs detect no neighbors with 83.86% accuracy, but accuracy falls to 75% when two neighbors are present, showing that increased neighborhood size degrades neighbor discovery due to higher signal interference and contention.", "perturbed_explanation": "The perturbed statement erroneously claims a 75% accuracy at two neighbors, whereas the context and Fig. 8(a) report a 36.55% discovery accuracy when two neighbors are present, making the 75% figure incorrect.", "claim": "Honest UAVs detect no neighbors with 83.86% accuracy, but accuracy falls to 75% when two neighbors are present, showing that increased neighborhood size degrades neighbor discovery due to higher signal interference and contention.", "label": false }, { "paperid": "2411.02020v1", "paper_path": "./SciVer/papers/2411.02020v1.json", "claim_type": "analytical", "type": "table", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.02020v1-Table1-1.png", "request_id": 823, "origin_statement": "Table 1 shows that as metallicity decreases from [Fe/H]=0.5 to −1.25, the quadratic coefficient a at f=1 increases from 0.1313 to 0.3324, indicating a stronger dependence of instability mass ratio on primary mass in metal-poor binaries due to reduced opacity and enhanced radiative flux gradients.", "perturbed_statement": "Table 1 shows that as metallicity decreases from [Fe/H]=0.5 to −1.25, the quadratic coefficient a at f=1 decreases from 0.3324 to 0.1313, indicating a weaker dependence of instability mass ratio on primary mass in metal-poor binaries due to reduced opacity and enhanced radiative flux gradients.", "perturbed_explanation": "This statement is wrong because Table 1 actually shows a at f=1 rises from 0.1313 at [Fe/H]=0.5 to 0.3324 at [Fe/H]=−1.25, not the other way around, so the dependence becomes stronger, not weaker.", "claim": "Table 1 shows that as metallicity decreases from [Fe/H]=0.5 to −1.25, the quadratic coefficient a at f=1 decreases from 0.3324 to 0.1313, indicating a weaker dependence of instability mass ratio on primary mass in metal-poor binaries due to reduced opacity and enhanced radiative flux gradients.", "label": false }, { "paperid": "2411.00551v1", "paper_path": "./SciVer/papers/2411.00551v1.json", "claim_type": "analytical", "type": "chart", "item": "3(a)", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.00551v1_figure_3(a).png", "request_id": 824, "origin_statement": "TACS consistently attains an MAE of approximately 0.10 while maintaining molecular 
stability above 0.6, approaching the Pareto front and demonstrating that guided denoising effectively balances property accuracy with structural integrity.", "perturbed_statement": "TACS consistently attains an MAE below 0.05 while maintaining molecular stability above 0.9, surpassing the Pareto front and demonstrating that guided denoising achieves flawless property targeting with perfect structural integrity.", "perturbed_explanation": "This statement is incorrect because Figure 3(a) shows the lowest MAE for TACS is about 0.10, not below 0.05, and its molecular stability peaks around 0.8, not above 0.9. Moreover, no method can surpass the Pareto front, which denotes the optimal trade-off boundary.", "claim": "TACS consistently attains an MAE below 0.05 while maintaining molecular stability above 0.9, surpassing the Pareto front and demonstrating that guided denoising achieves flawless property targeting with perfect structural integrity.", "label": false }, { "paperid": "2411.11340v1", "paper_path": "./SciVer/papers/2411.11340v1.json", "claim_type": "direct", "type": "chart", "item": "2(a)", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.11340v1_figure_2(a).png", "request_id": 34, "origin_statement": "In the seasonal subplot, across the three most prominent peaks, the predicted values (yellow) deviate from the true values (green) by less than 0.1 amplitude units each, indicating high accuracy in capturing peak magnitudes.", "perturbed_statement": "In the seasonal subplot, across the three most prominent peaks, the predicted values (yellow) deviate from the true values (green) by less than 0.02 amplitude units each, indicating near-perfect accuracy in capturing peak magnitudes.", "perturbed_explanation": "This is incorrect because the visual deviations at the seasonal peaks are approximately 0.05 to 0.1 amplitude units, which exceeds the claimed 0.02 unit threshold.", "claim": "In the seasonal subplot, across the three most prominent peaks, the predicted values (yellow) deviate from the true values (green) by less than 0.1 amplitude units each, indicating high accuracy in capturing peak magnitudes.", "label": true }, { "paperid": "2410.04203v1", "paper_path": "./SciVer/papers/2410.04203v1.json", "claim_type": "direct", "type": "chart", "item": "1(c)", "section": [ "3.1", "4.1" ], "image_path": "./SciVer/images/2410.04203v1_figure_1(c).png", "request_id": 122, "origin_statement": "Between temperature τ=0.2 and τ=2.0, the LC win rate declines by 1.8 percentage points (from 42.8% to 41.0%), whereas the standard GPT4 win rate decreases by about 1.25 points (from 42.5% to 41.25%).", "perturbed_statement": "Between τ=0.2 and τ=2.0, the LC win rate declines by only 0.9 percentage points (from 42.8% to 41.9%), whereas the GPT4 win rate decreases by 2.3 points (from 42.5% to 40.2%).", "perturbed_explanation": "This is incorrect because the LC win rate at τ=2.0 is actually 41.0%, not 41.9%, and the GPT4 win rate at τ=2.0 is 41.25%, not 40.2%, so both drop magnitudes are misstated.", "claim": "Between temperature τ=0.2 and τ=2.0, the LC win rate declines by 1.8 percentage points (from 42.8% to 41.0%), whereas the standard GPT4 win rate decreases by about 1.25 points (from 42.5% to 41.25%).", "label": true }, { "paperid": "2411.02640v1", "paper_path": "./SciVer/papers/2411.02640v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "3.2.2" ], "image_path": "./SciVer/images/2411.02640v1_figure_6.png", "request_id": 126, "origin_statement": "Between 1×10^5 and 1×10^7 
thermochemical states, the AMD MI250X’s transport kernel throughput for the CH₄ GRI3.0 mechanism rises from about 0.4 GDOF/s to approximately 4.0 GDOF/s, representing a tenfold increase.", "perturbed_statement": "Between 1×10^5 and 1×10^7 thermochemical states, the AMD MI250X’s transport kernel throughput for the CH₄ GRI3.0 mechanism rises from about 0.2 GDOF/s to approximately 5.0 GDOF/s, representing a twenty-fivefold increase.", "perturbed_explanation": "This is incorrect because the plot shows the throughput at 1×10^5 states is around 0.4 GDOF/s (not 0.2), and at 1×10^7 states it is about 4.0 GDOF/s (not 5.0).", "claim": "Between 1×10^5 and 1×10^7 thermochemical states, the AMD MI250X’s transport kernel throughput for the CH₄ GRI3.0 mechanism rises from about 0.4 GDOF/s to approximately 4.0 GDOF/s, representing a tenfold increase.", "label": true }, { "paperid": "2409.09622v1", "paper_path": "./SciVer/papers/2409.09622v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5" ], "image_path": "./SciVer/images/2409.09622v1-Table2-1.png", "request_id": 135, "origin_statement": "For n=4, the range in the total number of regions is 28 when k=4 and 73 when k=5, showing the range nearly triples as k increases by 1.", "perturbed_statement": "For n=4, the range in the total number of regions is 28 when k=4 and 63 when k=5, showing the range more than doubles as k increases by 1.", "perturbed_explanation": "The table shows that for n=4, k=5 the min-max number of regions is 46 and 119, giving a range of 119−46=73, not 63 as claimed in the perturbed statement.", "claim": "For n=4, the range in the total number of regions is 28 when k=4 and 73 when k=5, showing the range nearly triples as k increases by 1.", "label": true }, { "paperid": "2411.03025v1", "paper_path": "./SciVer/papers/2411.03025v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.03025v1-Table3-1.png", "request_id": 147, "origin_statement": "On ogbn-proteins, DA-MoE's ROC-AUC improvement of 2.30% is nearly twice as large as the 1.29% gain achieved by GMoE.", "perturbed_statement": "On ogbn-proteins, DA-MoE's ROC-AUC improvement of 2.30% is less than the 1.29% gain achieved by GMoE.", "perturbed_explanation": "This is incorrect because the table shows DA-MoE improves ROC-AUC by 2.30%, which is greater than GMoE's 1.29% improvement, not less.", "claim": "On ogbn-proteins, DA-MoE's ROC-AUC improvement of 2.30% is nearly twice as large as the 1.29% gain achieved by GMoE.", "label": true }, { "paperid": "2410.02810v1", "paper_path": "./SciVer/papers/2410.02810v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "6.3" ], "image_path": "./SciVer/images/2410.02810v1_figure_7.png", "request_id": 172, "origin_statement": "StateAct’s state+thought+action setup achieves 0.85 accuracy, representing a 13.92% increase over the base state+action method’s 0.72 accuracy.", "perturbed_statement": "StateAct’s state+thought+action setup achieves 0.82 accuracy, representing a 12% increase over the base state+action method’s 0.72 accuracy.", "perturbed_explanation": "This statement is incorrect because the chart indicates the state+thought+action configuration actually achieves 0.85 accuracy with a 13.92% increase over the base 0.72 accuracy, not 0.82 and 12%.", "claim": "StateAct’s state+thought+action setup achieves 0.85 accuracy, representing a 13.92% increase over the base state+action method’s 0.72 accuracy.", "label": true }, { 
"paperid": "2409.19351v1", "paper_path": "./SciVer/papers/2409.19351v1.json", "claim_type": "direct", "type": "chart", "item": "10", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.19351v1_figure_10.png", "request_id": 190, "origin_statement": "At 16:50 with PR=40%, there is an outlier where the estimated velocity is approximately 35 m/s while the corresponding simulated velocity is close to 0 m/s.", "perturbed_statement": "At 14:50 with PR=100%, there is an outlier where the estimated velocity is approximately 35 m/s while the corresponding simulated velocity is close to 0 m/s.", "perturbed_explanation": "This statement is incorrect because in the 14:50 plot at PR = 100 % there is no data point showing an estimated velocity of ~35 m/s for a simulated velocity near 0 m/s. The extreme outlier appears only in the 16:50 PR = 40 % subplot.", "claim": "At 16:50 with PR=40%, there is an outlier where the estimated velocity is approximately 35 m/s while the corresponding simulated velocity is close to 0 m/s.", "label": true }, { "paperid": "2409.04043v1", "paper_path": "./SciVer/papers/2409.04043v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.04043v1_figure_4.png", "request_id": 212, "origin_statement": "In the Reddit 'eat X cal' community, the median sentiment score under the modeling civility intervention is around 0.65, which is roughly 0.7 points higher than the median score of about -0.05 observed under the reset insight intervention.", "perturbed_statement": "In the Reddit 'eat X cal' community, the median sentiment score under the modeling civility intervention is around 0.5, which is roughly 0.4 points higher than the median score of about 0.1 observed under the reset insight intervention.", "perturbed_explanation": "The figure shows the modeling civility median at approximately 0.65 (not 0.5) and the reset insight median around -0.05 (not 0.1), so the true difference is about 0.7 points, not 0.4.", "claim": "In the Reddit 'eat X cal' community, the median sentiment score under the modeling civility intervention is around 0.65, which is roughly 0.7 points higher than the median score of about -0.05 observed under the reset insight intervention.", "label": true }, { "paperid": "2410.02936v1", "paper_path": "./SciVer/papers/2410.02936v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3" ], "image_path": "./SciVer/images/2410.02936v1-Table3-1.png", "request_id": 223, "origin_statement": "Each successive DownBlock2D doubles its channel count (32→64→128), culminating in the AttnDownBlock2D at 256 channels; the up-sampling path mirrors this with UpBlock2D layers doubling from 32 to 128 before the AttnUpBlock2D’s 256 channels.", "perturbed_statement": "Each successive DownBlock2D doubles its channel count (32→64→128), culminating in the AttnDownBlock2D at 256 channels; the up-sampling path mirrors this with UpBlock2D layers doubling from 32 to 128 before the AttnUpBlock2D’s 128 channels.", "perturbed_explanation": "The perturbed claim incorrectly states that the AttnUpBlock2D has 128 channels, whereas Table 3 shows the AttnUpBlock2D actually uses 256 channels.", "claim": "Each successive DownBlock2D doubles its channel count (32→64→128), culminating in the AttnDownBlock2D at 256 channels; the up-sampling path mirrors this with UpBlock2D layers doubling from 32 to 128 before the AttnUpBlock2D’s 256 channels.", "label": true }, { "paperid": "2410.14059v2", "paper_path": 
"./SciVer/papers/2410.14059v2.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "5.5" ], "image_path": "./SciVer/images/2410.14059v2_figure_8.png", "request_id": 276, "origin_statement": "Palmyra-Fin-70B-32k achieves an Overall Elo score of approximately 1.14, about 0.22 higher than Llama3-XuanYuan3-70B-Chat’s score of 0.92.", "perturbed_statement": "Palmyra-Fin-70B-32k achieves an Overall Elo score of approximately 1.05, about 0.13 higher than Llama3-XuanYuan3-70B-Chat’s score of 0.92.", "perturbed_explanation": "The perturbed statement is incorrect because in the figure Palmyra-Fin-70B-32k actually scores about 1.14, not 1.05, and its margin over Llama3-XuanYuan3-70B-Chat (0.92) is roughly 0.22, not 0.13.", "claim": "Palmyra-Fin-70B-32k achieves an Overall Elo score of approximately 1.14, about 0.22 higher than Llama3-XuanYuan3-70B-Chat’s score of 0.92.", "label": true }, { "paperid": "2411.16506v1", "paper_path": "./SciVer/papers/2411.16506v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.1", "4.2" ], "image_path": "./SciVer/images/2411.16506v1_figure_5.png", "request_id": 286, "origin_statement": "At m=20 on the warehouse map, static task distribution yields about 8.2 throughput, roughly 2.8 units higher than the 5.4 throughput under dynamic distribution.", "perturbed_statement": "At m=50 on the warehouse map, static task distribution yields about 8.2 throughput, roughly 2.8 units higher than the 5.4 throughput under dynamic distribution.", "perturbed_explanation": "This is wrong because at m=50 the chart shows static throughput around 8.0 and dynamic throughput around 5.0, not 8.2 and 5.4 as claimed.", "claim": "At m=20 on the warehouse map, static task distribution yields about 8.2 throughput, roughly 2.8 units higher than the 5.4 throughput under dynamic distribution.", "label": true }, { "paperid": "2409.04257v1", "paper_path": "./SciVer/papers/2409.04257v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2409.04257v1_figure_3.png", "request_id": 332, "origin_statement": "The synthetic data disclosure measure for capital.loss is about 8 percentage points lower than that of the original data, representing the largest reduction among the nine targets.", "perturbed_statement": "The synthetic data disclosure measure for native.country is about 12 percentage points lower than that of the original data, the largest gap among the nine variables.", "perturbed_explanation": "The figure shows native.country's disclosure drops from about 26% to 23%, a 3-point difference, not 12. 
The largest reduction is actually for capital.loss (about 42% to 34%, an 8-point drop).", "claim": "The synthetic data disclosure measure for capital.loss is about 8 percentage points lower than that of the original data, representing the largest reduction among the nine targets.", "label": true }, { "paperid": "2411.10703v1", "paper_path": "./SciVer/papers/2411.10703v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.10703v1_figure_3.png", "request_id": 346, "origin_statement": "The SSR transformation preserves the number of carbohydrate intake events: both the raw-carb and effective-carb plots display approximately 30 distinct peaks across the 0–2500 time stamps.", "perturbed_statement": "The SSR transformation increases the number of carbohydrate intake events: while the raw-carb plot has approximately 30 spikes, the effective-carb plot shows about 40 continuous peaks over the same time frame.", "perturbed_explanation": "The perturbed claim is incorrect because both the raw-carb and effective-carb plots in Figure 3 display the same number of peaks (around 30), not an increase to 40 peaks after SSR transformation.", "claim": "The SSR transformation preserves the number of carbohydrate intake events: both the raw-carb and effective-carb plots display approximately 30 distinct peaks across the 0–2500 time stamps.", "label": true }, { "paperid": "2410.20807v2", "paper_path": "./SciVer/papers/2410.20807v2.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.20807v2_figure_3.png", "request_id": 368, "origin_statement": "At 0% true OOD samples, AdaptOD achieves approximately 81.2 AUC, about 5.5 points higher than AUTO’s 75.7 and 5.2 points higher than AdaOOD’s 76.0.", "perturbed_statement": "At 0% true OOD samples, AdaptOD achieves approximately 82.3 AUC, about 8.3 points higher than AUTO’s 74.0 and 7.0 points higher than AdaOOD’s 75.0.", "perturbed_explanation": "This claim misstates the performance values: on the figure AdaptOD’s AUC at 0% is about 81.2 (not 82.3), AUTO’s is about 75.7 (not 74.0), and AdaOOD’s is about 76.0 (not 75.0).", "claim": "At 0% true OOD samples, AdaptOD achieves approximately 81.2 AUC, about 5.5 points higher than AUTO’s 75.7 and 5.2 points higher than AdaOOD’s 76.0.", "label": true }, { "paperid": "2410.06313v1", "paper_path": "./SciVer/papers/2410.06313v1.json", "claim_type": "direct", "type": "chart", "item": "6(b)", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.06313v1_figure_6(b).png", "request_id": 372, "origin_statement": "In 2008, health economics papers achieved an impact rating of roughly 1.6 SD, surpassing other fields by about 1.95 SD (as other fields were at about –0.35 SD).", "perturbed_statement": "In 2007, health economics papers achieved an impact rating of roughly 1.2 SD, surpassing other fields by about 1.0 SD (as other fields were at about 0.2 SD).", "perturbed_explanation": "This is incorrect because in 2007, health economics papers scored about 1.05 SD—not 1.2 SD—and other fields were around –0.35 SD, not 0.2 SD, as shown in Figure 5.", "claim": "In 2008, health economics papers achieved an impact rating of roughly 1.6 SD, surpassing other fields by about 1.95 SD (as other fields were at about –0.35 SD).", "label": true }, { "paperid": "2409.03054v1", "paper_path": "./SciVer/papers/2409.03054v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.1" ], "image_path": 
"./SciVer/images/2409.03054v1-Table1-1.png", "request_id": 375, "origin_statement": "The Content Appearance category lists 7 types, which is more than twice the 3 types listed under Image Appearance.", "perturbed_statement": "The Content Appearance category lists 9 types, which is more than triple the 2 types listed under Image Appearance.", "perturbed_explanation": "This is incorrect because the table shows Content Appearance actually has 7 types (Size, Position, Color, Font, Visibility, Other), not 9, and Image Appearance has 3 types (Size, Position, Other), not 2.", "claim": "The Content Appearance category lists 7 types, which is more than twice the 3 types listed under Image Appearance.", "label": true }, { "paperid": "2409.10995v1", "paper_path": "./SciVer/papers/2409.10995v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2.2" ], "image_path": "./SciVer/images/2409.10995v1-Table1-1.png", "request_id": 379, "origin_statement": "Pianississimo (ppp) covers 15 distinct MIDI velocity values, whereas every other dynamic mark in the table (pp, p, mp, mf, f, ff, fff) covers 16 values each.", "perturbed_statement": "Pianississimo (ppp) covers 16 distinct MIDI velocity values, while pianissimo (pp), piano (p), mezzo piano (mp), mezzo forte (mf), forte (f), fortissimo (ff), and fortississimo (fff) each cover only 15 values.", "perturbed_explanation": "This is incorrect because the table shows ppp spans [1,16), which includes only 15 values. Meanwhile, all other dynamics use intervals like [16,32), [32,48), etc., each of which spans 16 values, not 15.", "claim": "Pianississimo (ppp) covers 15 distinct MIDI velocity values, whereas every other dynamic mark in the table (pp, p, mp, mf, f, ff, fff) covers 16 values each.", "label": true }, { "paperid": "2409.06123v1", "paper_path": "./SciVer/papers/2409.06123v1.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "6.1.2" ], "image_path": "./SciVer/images/2409.06123v1_figure_9.png", "request_id": 394, "origin_statement": "In the syn dataset, SubTab FL’s delta accuracy falls from about 0.50 at the first data size imbalance setting to 0.48 at the second, a drop of roughly 0.02—the largest decrease among the four methods over that interval.", "perturbed_statement": "In the syn dataset, SubTab FL’s delta accuracy rises from about 0.50 at the first data size imbalance setting to 0.48 at the second, a gain of roughly 0.02 and the largest improvement among the four methods over that interval.", "perturbed_explanation": "This is incorrect because the chart shows SubTab FL’s delta accuracy actually decreases from approximately 0.50 at the first setting to 0.48 at the second, representing a drop of about 0.02 rather than a rise.", "claim": "In the syn dataset, SubTab FL’s delta accuracy falls from about 0.50 at the first data size imbalance setting to 0.48 at the second, a drop of roughly 0.02—the largest decrease among the four methods over that interval.", "label": true }, { "paperid": "2409.15568v1", "paper_path": "./SciVer/papers/2409.15568v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.15568v1-Table1-1.png", "request_id": 397, "origin_statement": "In the inter-domain cold-start scenario, the Video training set contains 19,240 users and 156,091 ratings, compared to Games’ 24,799 users and 150,306 ratings. 
Thus, Video has 5,559 fewer users but 5,785 more ratings than Games.", "perturbed_statement": "In the inter-domain cold-start scenario, the Video training set contains 19,240 users and 150,306 ratings, compared to Games’ 24,799 users and 156,091 ratings. Thus, Video has 5,559 fewer users and 5,785 fewer ratings than Games.", "perturbed_explanation": "This is incorrect because the table shows Video has 156,091 ratings (not 150,306), so Video actually has 5,785 more ratings than Games (which has 150,306).", "claim": "In the inter-domain cold-start scenario, the Video training set contains 19,240 users and 156,091 ratings, compared to Games’ 24,799 users and 150,306 ratings. Thus, Video has 5,559 fewer users but 5,785 more ratings than Games.", "label": true }, { "paperid": "2411.13694v1", "paper_path": "./SciVer/papers/2411.13694v1.json", "claim_type": "direct", "type": "chart", "item": "11(b)", "section": [ "7.6" ], "image_path": "./SciVer/images/2411.13694v1_figure_11(b).png", "request_id": 424, "origin_statement": "For participants who first encountered PairSonic, the median System Usability Scale (SUS) score for SafeSlinger is approximately 75, about 10 points higher than PairSonic’s median SUS score of around 65.", "perturbed_statement": "For participants who first encountered PairSonic, the median System Usability Scale (SUS) score for SafeSlinger is approximately 55, about 10 points lower than PairSonic’s median SUS score of around 65.", "perturbed_explanation": "Figure 11(b) shows the median SUS score for SafeSlinger is around 75, not 55, and it exceeds PairSonic’s median of about 65. The perturbed statement incorrectly lowers SafeSlinger’s median by 20 points and reverses the comparison.", "claim": "For participants who first encountered PairSonic, the median System Usability Scale (SUS) score for SafeSlinger is approximately 75, about 10 points higher than PairSonic’s median SUS score of around 65.", "label": true }, { "paperid": "2411.00174v1", "paper_path": "./SciVer/papers/2411.00174v1.json", "claim_type": "direct", "type": "chart", "item": "2(c)", "section": [ "4" ], "image_path": "./SciVer/images/2411.00174v1_figure_2(c).png", "request_id": 448, "origin_statement": "In the easy protocol’s validation set, ETH-M has approximately 23,000 missing coordinates compared to about 8,500 in UNIV-M, meaning UNIV-M contains roughly 37% of the missing data present in ETH-M.", "perturbed_statement": "In the easy protocol’s validation set, ETH-M has approximately 23,000 missing coordinates compared to about 11,500 in UNIV-M, meaning UNIV-M contains roughly 50% of the missing data present in ETH-M.", "perturbed_explanation": "The chart indicates that UNIV-M has around 8,500 missing coordinates, not 11,500; therefore, the stated number and resulting 50% ratio are incorrect according to the provided data.", "claim": "In the easy protocol’s validation set, ETH-M has approximately 23,000 missing coordinates compared to about 8,500 in UNIV-M, meaning UNIV-M contains roughly 37% of the missing data present in ETH-M.", "label": true }, { "paperid": "2409.02076v6", "paper_path": "./SciVer/papers/2409.02076v6.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.02076v6_figure_2.png", "request_id": 480, "origin_statement": "By the end of the 16K sequence length panel, GPT_4o_mini’s accuracy declines to about 25%, whereas LongWriter’s falls to roughly 8%, creating a 17 percentage-point gap.", "perturbed_statement": "By the end of the 16K 
sequence length panel, GPT_4o_mini’s accuracy declines to about 22%, whereas LongWriter’s falls to roughly 10%, creating a 12 percentage-point gap.", "perturbed_explanation": "The perturbed statement is incorrect because the figure shows GPT_4o_mini at around 25% accuracy and LongWriter at about 8% accuracy by 16K tokens, not 22% and 10% as stated.", "claim": "By the end of the 16K sequence length panel, GPT_4o_mini’s accuracy declines to about 25%, whereas LongWriter’s falls to roughly 8%, creating a 17 percentage-point gap.", "label": true }, { "paperid": "2410.20335v1", "paper_path": "./SciVer/papers/2410.20335v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.20335v1-Table2-1.png", "request_id": 501, "origin_statement": "In Table 2, IFUTSVM-ID wins at least 36 datasets against each baseline, peaking at 43 wins against IFW-LSTSVM, while losing at most 5 datasets in any pairwise comparison.", "perturbed_statement": "In Table 2, IFUTSVM-ID wins at least 36 datasets against each baseline, peaking at 42 wins against IFW-LSTSVM, while losing at most 4 datasets in any pairwise comparison.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 shows that IFUTSVM-ID actually achieves 43 wins (not 42) against IFW-LSTSVM, and its maximum number of losses in any pairwise comparison is 5 (against UTSVM), not 4.", "claim": "In Table 2, IFUTSVM-ID wins at least 36 datasets against each baseline, peaking at 43 wins against IFW-LSTSVM, while losing at most 5 datasets in any pairwise comparison.", "label": true }, { "paperid": "2410.19055v1", "paper_path": "./SciVer/papers/2410.19055v1.json", "claim_type": "direct", "type": "chart", "item": "4(a)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.19055v1_figure_4(a).png", "request_id": 508, "origin_statement": "The Fisher variant of Newton Loss achieves its highest element-wise ranking accuracy of about 0.935 at λ≈0.1, which is approximately 0.005 higher than the Hessian variant at the same λ.", "perturbed_statement": "The Fisher variant of Newton Loss achieves its highest element-wise ranking accuracy of about 0.945 at λ≈0.1, which is approximately 0.015 higher than the Hessian variant at the same λ.", "perturbed_explanation": "This is incorrect because the figure shows the Fisher variant peaks at around 0.935 accuracy at λ≈0.1, not 0.945, and its advantage over the Hessian variant there is about 0.005, not 0.015.", "claim": "The Fisher variant of Newton Loss achieves its highest element-wise ranking accuracy of about 0.935 at λ≈0.1, which is approximately 0.005 higher than the Hessian variant at the same λ.", "label": true }, { "paperid": "2411.06096v1", "paper_path": "./SciVer/papers/2411.06096v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.4" ], "image_path": "./SciVer/images/2411.06096v1_figure_5.png", "request_id": 590, "origin_statement": "The FCI curve dips from about 0.4 to roughly 0.2 around 200M tokens, then climbs sharply to approximately 0.8 at 1B tokens.", "perturbed_statement": "The FCI curve dips from about 0.4 to roughly 0.3 around 200M tokens, then climbs sharply to approximately 0.9 at 1B tokens.", "perturbed_explanation": "This claim is incorrect because in the figure the FCI curve actually dips to about 0.2, not 0.3, around 200M tokens, and it rises to roughly 0.8, not 0.9, by the 1B token checkpoint.", "claim": "The FCI curve dips from about 0.4 to roughly 0.2 around 200M tokens, then climbs 
sharply to approximately 0.8 at 1B tokens.", "label": true }, { "paperid": "2411.11293v1", "paper_path": "./SciVer/papers/2411.11293v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.11293v1_figure_5.png", "request_id": 618, "origin_statement": "Anchors takes about 8,000 efficiency units on the ISCXTor2016 dataset and about 12,000 on the CIC-DoHBrw-2020 dataset, making it roughly 1.5 times slower on the latter due to increased feature count.", "perturbed_statement": "Anchors has an efficiency of approximately 4,000 units on the CIC-DoHBrw-2020 dataset, which is about half of its roughly 8,000 performance on ISCXTor2016, implying it runs faster with more features.", "perturbed_explanation": "This is incorrect because the figure shows Anchors’ efficiency at around 12,000 units on CIC-DoHBrw-2020—not 4,000—and about 8,000 units on ISCXTor2016; therefore, it is actually slower on the CIC-DoHBrw-2020 dataset, not faster.", "claim": "Anchors takes about 8,000 efficiency units on the ISCXTor2016 dataset and about 12,000 on the CIC-DoHBrw-2020 dataset, making it roughly 1.5 times slower on the latter due to increased feature count.", "label": true }, { "paperid": "2409.04073v2", "paper_path": "./SciVer/papers/2409.04073v2.json", "claim_type": "direct", "type": "chart", "item": "3(b)", "section": [ "6.1", "6.2.2" ], "image_path": "./SciVer/images/2409.04073v2_figure_3(b).png", "request_id": 632, "origin_statement": "AnyMatch yields an average F1 score of about 81 with an inference cost of roughly $0.00001 per 1K tokens, making it approximately 3,899 times cheaper than GPT-4’s $0.02 per 1K tokens, while sacrificing only about 4.4% in prediction quality.", "perturbed_statement": "AnyMatch yields an average F1 score of about 81 with an inference cost of roughly $0.00001 per 1K tokens, making it approximately 1,000 times cheaper than GPT-4’s $0.02 per 1K tokens, while sacrificing only about 10% in prediction quality.", "perturbed_explanation": "The perturbation is wrong because the actual cost difference shown in the figure is 3,899× cheaper, not 1,000×. 
Likewise, the true F1 score drop is 4.4%, not 10%, as explicitly annotated on the plot.", "claim": "AnyMatch yields an average F1 score of about 81 with an inference cost of roughly $0.00001 per 1K tokens, making it approximately 3,899 times cheaper than GPT-4’s $0.02 per 1K tokens, while sacrificing only about 4.4% in prediction quality.", "label": true }, { "paperid": "2411.03396v1", "paper_path": "./SciVer/papers/2411.03396v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.4" ], "image_path": "./SciVer/images/2411.03396v1-Table3-1.png", "request_id": 725, "origin_statement": "The electron mobility of WSe2-TeWSe along the zigzag direction (13274.89 cm2/V·s) is about 1.2 times higher than that of WSe2-SWSe (11120.74 cm2/V·s).", "perturbed_statement": "The electron mobility of WSe2-TeWSe along the zigzag direction (13274.89 cm2/V·s) is about 1.4 times higher than that of WSe2-SWSe (11120.74 cm2/V·s).", "perturbed_explanation": "According to Table 3, WSe2-TeWSe and WSe2-SWSe have electron mobilities of 13274.89 and 11120.74 cm2/V·s respectively, giving a ratio of ≈1.19, not 1.4 as stated.", "claim": "The electron mobility of WSe2-TeWSe along the zigzag direction (13274.89 cm2/V·s) is about 1.2 times higher than that of WSe2-SWSe (11120.74 cm2/V·s).", "label": true }, { "paperid": "2409.14337v2", "paper_path": "./SciVer/papers/2409.14337v2.json", "claim_type": "direct", "type": "chart", "item": "7(a)", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.14337v2_figure_7(a).png", "request_id": 748, "origin_statement": "In MobileViews, around 68% of labels consist of 11–30 words, whereas in Rico fewer than 5% of labels reach that length.", "perturbed_statement": "In MobileViews, around 85% of labels consist of 11–30 words, whereas in Rico fewer than 2% of labels reach that length.", "perturbed_explanation": "The perturbed claim is incorrect because MobileViews only has about 68% of labels in the 11–30 word range (not 85%), and Rico has around 4–5% in that range (not fewer than 2%).", "claim": "In MobileViews, around 68% of labels consist of 11–30 words, whereas in Rico fewer than 5% of labels reach that length.", "label": true }, { "paperid": "2411.06723v1", "paper_path": "./SciVer/papers/2411.06723v1.json", "claim_type": "direct", "type": "chart", "item": "11(b)", "section": [ "5.3.2", "5.3.3" ], "image_path": "./SciVer/images/2411.06723v1_figure_11(b).png", "request_id": 762, "origin_statement": "On Day-4, participants using the LLM-SAG chatbot averaged about 8,500 daily steps, roughly 3,000 more than those using the rule-based chatbot, who averaged around 5,500 steps.", "perturbed_statement": "On Day-4, participants using the rule-based chatbot averaged about 8,500 daily steps, roughly 3,000 more than those using the LLM-SAG chatbot, who averaged around 5,500 steps.", "perturbed_explanation": "This statement is incorrect because Figure 11 shows that on Day-4, the LLM-SAG condition averaged about 8,500 steps while the rule-based condition averaged about 5,500 steps, so the roles are reversed in the perturbed claim.", "claim": "On Day-4, participants using the LLM-SAG chatbot averaged about 8,500 daily steps, roughly 3,000 more than those using the rule-based chatbot, who averaged around 5,500 steps.", "label": true }, { "paperid": "2411.00551v1", "paper_path": "./SciVer/papers/2411.00551v1.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.00551v1_figure_3(a).png", "request_id": 810, 
"origin_statement": "TCS (dark red circles) achieves molecule stability above 0.88 with MAE between 0.17 and 0.20, whereas EDM (yellow circles) exhibits stability below 0.30 with MAE around 0.16 to 0.27.", "perturbed_statement": "TCS (dark red circles) achieves molecule stability above 0.95 with MAE between 0.17 and 0.20, whereas EDM (yellow circles) exhibits stability below 0.30 with MAE around 0.16 to 0.27.", "perturbed_explanation": "The perturbed statement incorrectly claims that TCS reaches stability above 0.95. In the figure, the dark red circles (TCS) only reach stability up to about 0.92, never exceeding 0.95.", "claim": "TCS (dark red circles) achieves molecule stability above 0.88 with MAE between 0.17 and 0.20, whereas EDM (yellow circles) exhibits stability below 0.30 with MAE around 0.16 to 0.27.", "label": true }, { "paperid": "2410.15049v1", "paper_path": "./SciVer/papers/2410.15049v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "6", "7.4" ], "image_path": "./SciVer/images/2410.15049v1_figure_4.png", "request_id": 814, "origin_statement": "In Figure 4’s right column, as sample size increases from 20 to 100, the most frequently selected number of knots shifts from around 5 to around 7.", "perturbed_statement": "In Figure 4’s right column, as sample size increases from 20 to 100, the most frequently selected number of knots shifts from around 5 to around 10.", "perturbed_explanation": "The perturbed claim is incorrect because the histograms for sample size 100 (dark bars) peak at about 7 knots, not 10. There are far fewer replicates selecting 10 knots, so the mode does not shift to 10.", "claim": "In Figure 4’s right column, as sample size increases from 20 to 100, the most frequently selected number of knots shifts from around 5 to around 7.", "label": true }, { "paperid": "2410.03053v1", "paper_path": "./SciVer/papers/2410.03053v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "6.1" ], "image_path": "./SciVer/images/2410.03053v1-Table4-1.png", "request_id": 863, "origin_statement": "Between p=500 and p=128000, E[|E_p(H_b)|] decreases from 0.278 to 0.200 (a 0.078 drop), while p·E[|E_p(H_b)|]^2 increases from 38.7 to 5138, showing that p-weighted squared optimization bias grows faster than p itself.", "perturbed_statement": "Between p=500 and p=128000, E[|E_p(H_b)|] decreases from 0.278 to 0.200 (a 0.17 drop), while p·E[|E_p(H_b)|]^2 increases from 38.7 to 4380, indicating a sublinear growth in squared bias with dimension.", "perturbed_explanation": "The decrease in E[|E_p(H_b)|] is actually 0.078 (0.278 minus 0.200), not 0.17, and the value of p·E[|E_p(H_b)|]^2 at p=128000 is 5138, not 4380, according to the table.", "claim": "Between p=500 and p=128000, E[|E_p(H_b)|] decreases from 0.278 to 0.200 (a 0.078 drop), while p·E[|E_p(H_b)|]^2 increases from 38.7 to 5138, showing that p-weighted squared optimization bias grows faster than p itself.", "label": true }, { "paperid": "2409.07717v1", "paper_path": "./SciVer/papers/2409.07717v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.5" ], "image_path": "./SciVer/images/2409.07717v1_figure_4.png", "request_id": 868, "origin_statement": "In phishing emails where participants fell for the attack, 36% inspected attachments, over five times the 7% inspection rate in legitimate emails.", "perturbed_statement": "In phishing emails where participants fell for the attack, only 18.8% inspected attachments, about 2.7 times the 7% inspection rate in 
legitimate emails.", "perturbed_explanation": "The perturbed statement incorrectly uses 18.8% instead of the correct 36% for participants who fell for phishing; 18.8% actually corresponds to those who did not fall for the phishing attack, not those who did.", "claim": "In phishing emails where participants fell for the attack, 36% inspected attachments, over five times the 7% inspection rate in legitimate emails.", "label": true }, { "paperid": "2411.15411v1", "paper_path": "./SciVer/papers/2411.15411v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2411.15411v1-Table1-1.png", "request_id": 881, "origin_statement": "Among the 11 models compared in Table 1, 7 support mask referencing, but only 2 support high-resolution encoding.", "perturbed_statement": "Among the 11 models compared in Table 1, 8 support mask referencing, but only 1 supports high-resolution encoding.", "perturbed_explanation": "This is incorrect because Table 1 shows exactly 7 models have a ✓ under Mask Referencing (VCoder, Osprey, Alpha-CLIP, GLaMM, RegionGPT, OMG-LLaVA, FineCaption), not 8, and 2 models (Ferret-v2 and FineCaption) support High Resolution, not just 1.", "claim": "Among the 11 models compared in Table 1, 7 support mask referencing, but only 2 support high-resolution encoding.", "label": true }, { "paperid": "2411.01844v1", "paper_path": "./SciVer/papers/2411.01844v1.json", "claim_type": "direct", "type": "chart", "item": "5(a)", "section": [ "5.2.1", "5.2.2", "5.2.3" ], "image_path": "./SciVer/images/2411.01844v1_figure_5(a).png", "request_id": 882, "origin_statement": "63% of participants rated detection accuracy as a 4 and 31% rated it a 5 (totaling 94%), whereas for explanation accuracy only 34% rated 4 and 17% rated 5 (totaling 51%).", "perturbed_statement": "63% of participants rated detection accuracy as a 4 and 31% rated it a 5 (totaling 94%), whereas for explanation accuracy 46% rated 4 and 17% rated 5 (totaling 63%).", "perturbed_explanation": "The perturbed statement incorrectly claims that 46% of participants rated explanation accuracy a 4. 
The actual chart shows only 34% (12 participants) rated explanation accuracy as 4, not 46%.", "claim": "63% of participants rated detection accuracy as a 4 and 31% rated it a 5 (totaling 94%), whereas for explanation accuracy only 34% rated 4 and 17% rated 5 (totaling 51%).", "label": true }, { "paperid": "2411.15737v1", "paper_path": "./SciVer/papers/2411.15737v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.4.1" ], "image_path": "./SciVer/images/2411.15737v1_figure_5.png", "request_id": 902, "origin_statement": "On the CR dataset, the MAN method achieves 0.98 accuracy, which is 0.04 higher than DTW’s 0.94.", "perturbed_statement": "On the CR dataset, the MAN method achieves 0.94 accuracy, which is 0.04 higher than DTW’s 0.90.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows MAN’s accuracy on CR is 0.98 (not 0.94) and DTW’s accuracy is 0.94 (not 0.90).", "claim": "On the CR dataset, the MAN method achieves 0.98 accuracy, which is 0.04 higher than DTW’s 0.94.", "label": true }, { "paperid": "2410.00016v1", "paper_path": "./SciVer/papers/2410.00016v1.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "2.3", "2.4" ], "image_path": "./SciVer/images/2410.00016v1_figure_8.png", "request_id": 972, "origin_statement": "The predicted SHR for subcritical units spans approximately 11.0 to 14.2 MMBtu/MWh, about 1.2–3.0 MMBtu/MWh higher than supercritical units, which range roughly from 10.0 to 11.0 MMBtu/MWh.", "perturbed_statement": "The predicted SHR for subcritical units spans approximately 12.5 to 16.0 MMBtu/MWh, about 4.0–6.0 MMBtu/MWh higher than supercritical units, which span roughly from 9.0 to 10.0 MMBtu/MWh.", "perturbed_explanation": "The perturbation is incorrect because the colorbar in Figure 8(a) shows subcritical SHR actually ranges from about 11.0 to 14.2 MMBtu/MWh (not 12.5–16.0), and in Figure 8(b) supercritical SHR ranges from about 10.0 to 11.0 MMBtu/MWh (not 9.0–10.0).", "claim": "The predicted SHR for subcritical units spans approximately 11.0 to 14.2 MMBtu/MWh, about 1.2–3.0 MMBtu/MWh higher than supercritical units, which range roughly from 10.0 to 11.0 MMBtu/MWh.", "label": true }, { "paperid": "2410.03225v2", "paper_path": "./SciVer/papers/2410.03225v2.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.1", "4.2" ], "image_path": "./SciVer/images/2410.03225v2_figure_4.png", "request_id": 1006, "origin_statement": "For real-world CVE tasks, the assisted agent’s exploitation success rate (82%) is more than four times higher than the autonomous agent’s exploitation rate (20%).", "perturbed_statement": "For real-world CVE tasks, the assisted agent’s exploitation success rate (75%) is nearly triple the autonomous agent’s rate (30%).", "perturbed_explanation": "The perturbed statement is incorrect because the figure shows the assisted agent’s exploitation success rate is actually 82%, not 75%, and the autonomous agent’s exploitation rate is 20%, not 30%.", "claim": "For real-world CVE tasks, the assisted agent’s exploitation success rate (82%) is more than four times higher than the autonomous agent’s exploitation rate (20%).", "label": true }, { "paperid": "2410.19128v2", "paper_path": "./SciVer/papers/2410.19128v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2410.19128v2-Table1-1.png", "request_id": 1073, "origin_statement": "The Joy and Sad categories both include twice as many implicitly 
annotated events as explicitly annotated ones, while the Angry category has twice as many explicit events as implicit ones.", "perturbed_statement": "The Joy and Sad categories both include twice as many explicitly annotated events as implicitly annotated ones, while the Angry category has twice as many implicit events as explicit ones.", "perturbed_explanation": "According to the table, Joy and Sad each have 1 explicitly annotated event and 2 implicitly annotated events (not twice as many explicit). Meanwhile, Angry has 2 explicit and 1 implicit event, so it does not have twice as many implicit events.", "claim": "The Joy and Sad categories both include twice as many implicitly annotated events as explicitly annotated ones, while the Angry category has twice as many explicit events as implicit ones.", "label": true }, { "paperid": "2410.19874v2", "paper_path": "./SciVer/papers/2410.19874v2.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "2.1" ], "image_path": "./SciVer/images/2410.19874v2-Table6-1.png", "request_id": 1155, "origin_statement": "The Geospatial Attributes section lists eleven distinct attributes, exceeding the seven DL related attributes by four.", "perturbed_statement": "The Geospatial Attributes section lists twelve distinct attributes, exceeding the seven DL related attributes by five.", "perturbed_explanation": "The table’s Geospatial Attributes section actually contains 11 entries (continent; country; longlat; points_tiles; z14_tiles; ID_HDCGO; UC_NM_LST; agglosID; agglosName; year_and_month; temporal_index), not 12, and the DL related attributes section lists 7, making the difference four, not five.", "claim": "The Geospatial Attributes section lists eleven distinct attributes, exceeding the seven DL related attributes by four.", "label": true }, { "paperid": "2409.01192v1", "paper_path": "./SciVer/papers/2409.01192v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.01192v1-Table4-1.png", "request_id": 1249, "origin_statement": "On the KuaiRand dataset, SSD4Rec’s MRR@20 (0.1076) is 0.0082 higher than Mamba4Rec’s MRR@20 score of 0.0994.", "perturbed_statement": "On the KuaiRand dataset, SSD4Rec’s MRR@20 (0.1056) is 0.0062 higher than Mamba4Rec’s MRR@20 score of 0.0994.", "perturbed_explanation": "The perturbed statement is incorrect because the actual MRR@20 value for SSD4Rec on KuaiRand is 0.1076 (not 0.1056), and the difference from Mamba4Rec’s 0.0994 is 0.0082 (not 0.0062).", "claim": "On the KuaiRand dataset, SSD4Rec’s MRR@20 (0.1076) is 0.0082 higher than Mamba4Rec’s MRR@20 score of 0.0994.", "label": true }, { "paperid": "2411.12490v1", "paper_path": "./SciVer/papers/2411.12490v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2411.12490v1-Table1-1.png", "request_id": 1283, "origin_statement": "CME 4’s soft X-ray flare peaks 3 minutes after its first LASCO/C2 observation, whereas the flares of CMEs 1, 2, and 5 all peaked between 11 and 27 minutes before their respective first C2 appearances.", "perturbed_statement": "CME 4’s soft X-ray flare peaks 10 minutes after its first LASCO/C2 observation, whereas the flares of CMEs 1, 2, and 5 all peaked between 11 and 27 minutes before their respective first C2 appearances.", "perturbed_explanation": "Table 1 lists CME 4’s first LASCO/C2 observation at 2024-05-08T22:24Z and its peak flare time at 2024-05-08T22:27Z, indicating a 3-minute delay. 
The statement’s claim of a 10-minute delay for CME 4 contradicts these exact times.", "claim": "CME 4’s soft X-ray flare peaks 3 minutes after its first LASCO/C2 observation, whereas the flares of CMEs 1, 2, and 5 all peaked between 11 and 27 minutes before their respective first C2 appearances.", "label": true }, { "paperid": "2410.09487v1", "paper_path": "./SciVer/papers/2410.09487v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.09487v1_figure_5.png", "request_id": 1286, "origin_statement": "Southern Germany's median hourly consumption falls from approximately 0.51 kWh in January to 0.15 kWh in August (a decline of ~71%), while East Midlands only decreases from 0.39 kWh in January to 0.28 kWh in July (around 28%).", "perturbed_statement": "Southern Germany's median hourly consumption falls from approximately 0.48 kWh in January to 0.20 kWh in August (a decline of ~58%), while East Midlands only decreases from 0.39 kWh in January to 0.28 kWh in July (around 28%).", "perturbed_explanation": "The perturbed statement misreports Southern Germany’s values: the chart shows 0.51 kWh in January (not 0.48 kWh) and 0.15 kWh in August (not 0.20 kWh), so both the values and the calculated decline (~71% vs. ~58%) are incorrect.", "claim": "Southern Germany's median hourly consumption falls from approximately 0.51 kWh in January to 0.15 kWh in August (a decline of ~71%), while East Midlands only decreases from 0.39 kWh in January to 0.28 kWh in July (around 28%).", "label": true }, { "paperid": "2410.20749v1", "paper_path": "./SciVer/papers/2410.20749v1.json", "claim_type": "direct", "type": "chart", "item": "6(a)", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.20749v1_figure_6(a).png", "request_id": 1294, "origin_statement": "With 50% of training data, Matryoshka’s success rate is 96.2%, which is 8.2 percentage points higher than the 88% achieved with 0% training data.", "perturbed_statement": "With 50% of training data, Matryoshka’s success rate is 96.2%, which is 6.2 percentage points higher than the 88% achieved with 0% training data.", "perturbed_explanation": "The perturbed statement understates the increase: the chart shows a success rate of 96.2% at 50% training versus 88% at 0%, an actual difference of 8.2 percentage points, not 6.2.", "claim": "With 50% of training data, Matryoshka’s success rate is 96.2%, which is 8.2 percentage points higher than the 88% achieved with 0% training data.", "label": true }, { "paperid": "2409.10570v1", "paper_path": "./SciVer/papers/2409.10570v1.json", "claim_type": "direct", "type": "chart", "item": "6(c)", "section": [ "4.3.2" ], "image_path": "./SciVer/images/2409.10570v1_figure_6(c).png", "request_id": 1338, "origin_statement": "Between 10% and 50% sparsity, ACC falls by 12 percentage points (from 19% to 7%), whereas WACC only declines by 14 percentage points (from 76% to 62%), indicating the watermark remains more stable under pruning.", "perturbed_statement": "Between 10% and 50% sparsity, ACC falls by 10 percentage points (from 19% to 9%), while WACC drops by 26 percentage points (from 76% to 50%), suggesting a greater vulnerability of the watermark.", "perturbed_explanation": "The perturbed statement incorrectly reports ACC at 50% sparsity as 9% instead of the true 7% shown in the figure, and misstates WACC at 50% sparsity as 50% rather than the actual ~62%.", "claim": "Between 10% and 50% sparsity, ACC falls by 12 percentage points (from 19% to 7%), whereas WACC only 
declines by 14 percentage points (from 76% to 62%), indicating the watermark remains more stable under pruning.", "label": true }, { "paperid": "2409.20054v1", "paper_path": "./SciVer/papers/2409.20054v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.20054v1_figure_4.png", "request_id": 1376, "origin_statement": "The POA method exhibits a stronger negative correlation (R=-0.92) between OTDD distance and performance than the PSE method (R=-0.88), indicating that POA performance declines more steeply as OTDD increases.", "perturbed_statement": "The POA method exhibits a weaker negative correlation (R=-0.88) than the PSE method (R=-0.92), indicating that POA performance declines less steeply as OTDD increases.", "perturbed_explanation": "This statement is incorrect because Figure 4 shows that the POA method actually has an R-value of -0.92 and the PSE method has an R-value of -0.88, not the swapped values claimed.", "claim": "The POA method exhibits a stronger negative correlation (R=-0.92) between OTDD distance and performance than the PSE method (R=-0.88), indicating that POA performance declines more steeply as OTDD increases.", "label": true }, { "paperid": "2409.11261v3", "paper_path": "./SciVer/papers/2409.11261v3.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.11261v3_figure_3.png", "request_id": 1382, "origin_statement": "XTTSv2’s average score across the five metrics for female voice is approximately 1.66, about 0.22 points higher than StyleTTS2’s average of 1.44.", "perturbed_statement": "XTTSv2’s average score across the five metrics for female voice is approximately 1.50, only 0.05 points higher than StyleTTS2’s average of 1.45.", "perturbed_explanation": "The perturbed statement is incorrect because the actual metric scores for XTTSv2 on female voice are [1.4, 1.9, 1.6, 2.4, 1.0], which sum to 8.3 and average 1.66, and StyleTTS2’s scores are [1.2, 1.5, 1.3, 2.2, 1.0], which sum to 7.2 and average 1.44—not the stated 1.50 and 1.45.", "claim": "XTTSv2’s average score across the five metrics for female voice is approximately 1.66, about 0.22 points higher than StyleTTS2’s average of 1.44.", "label": true }, { "paperid": "2410.02762v1", "paper_path": "./SciVer/papers/2410.02762v1.json", "claim_type": "direct", "type": "chart", "item": "5(b)", "section": [ "4.1.2" ], "image_path": "./SciVer/images/2410.02762v1_figure_5(b).png", "request_id": 1388, "origin_statement": "The mass removal rate surpasses 80% when the weight factor α exceeds 4, while the hallucination percentage drops below 50% at α = 5.", "perturbed_statement": "The mass removal rate surpasses 90% when the weight factor α exceeds 4, while the hallucination percentage falls below 40% at α = 5.", "perturbed_explanation": "The perturbed statement is incorrect because at α = 5 the mass removal rate is about 85%, not above 90%, and the hallucination percentage is around 45%, not below 40%, as shown in the chart.", "claim": "The mass removal rate surpasses 80% when the weight factor α exceeds 4, while the hallucination percentage drops below 50% at α = 5.", "label": true }, { "paperid": "2410.15168v1", "paper_path": "./SciVer/papers/2410.15168v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.15168v1-Table1-1.png", "request_id": 1453, "origin_statement": "Among the six methods, only Ranked Pairs satisfies the criterion 
shown in the rightmost column; the other five methods all fail that criterion.", "perturbed_statement": "Ranked Pairs and IRV are the only methods that satisfy the criterion shown in the rightmost column.", "perturbed_explanation": "In the table, IRV’s entry under the rightmost column is marked with a cross (✗), indicating it does not satisfy that criterion, so IRV cannot be listed alongside Ranked Pairs as satisfying it.", "claim": "Among the six methods, only Ranked Pairs satisfies the criterion shown in the rightmost column; the other five methods all fail that criterion.", "label": true }, { "paperid": "2410.19803v1", "paper_path": "./SciVer/papers/2410.19803v1.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.19803v1_figure_9.png", "request_id": 1526, "origin_statement": "At a 50% human rating difference in harmful F–M stereotypes, the LMRA assigns about a 90% harmful probability for male ratings and about an 85% harmful probability for female ratings.", "perturbed_statement": "At a 50% human rating difference in harmful F–M stereotypes, the LMRA assigns about a 70% harmful probability for male ratings and about a 65% harmful probability for female ratings.", "perturbed_explanation": "The perturbation is incorrect because, on the plot at +50% on the x-axis, the blue (male) fitted curve is near 90% on the y-axis and the red (female) curve is near 85%, not 70% and 65% as stated.", "claim": "At a 50% human rating difference in harmful F–M stereotypes, the LMRA assigns about a 90% harmful probability for male ratings and about an 85% harmful probability for female ratings.", "label": true }, { "paperid": "2410.04979v2", "paper_path": "./SciVer/papers/2410.04979v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2410.04979v2-Table1-1.png", "request_id": 1615, "origin_statement": "The 1.5 mm pellet diameter is roughly 3.1 million times larger than the 4.8 Å nominal pore size when both are expressed in nanometers (1,500,000 nm vs. 0.48 nm).", "perturbed_statement": "The 1.5 mm pellet diameter is roughly 3.1 thousand times larger than the 4.8 Å nominal pore size when both are expressed in nanometers (1,500,000 nm vs. 0.48 nm).", "perturbed_explanation": "The actual ratio of 1,500,000 nm (1.5 mm) to 0.48 nm (4.8 Å) is about 3,125,000, or 3.1 million, not 3.1 thousand; the perturbed statement underestimates the ratio by a factor of 1,000.", "claim": "The 1.5 mm pellet diameter is roughly 3.1 million times larger than the 4.8 Å nominal pore size when both are expressed in nanometers (1,500,000 nm vs. 
0.48 nm).", "label": true }, { "paperid": "2411.10322v1", "paper_path": "./SciVer/papers/2411.10322v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4" ], "image_path": "./SciVer/images/2411.10322v1_figure_5.png", "request_id": 20, "origin_statement": "Applying uncertainty-based rejection reduces false positives in ISIC’18 by 119 cases, more than any other benchmark.", "perturbed_statement": "Applying uncertainty-based rejection reduces false positives in ISIC’18 by 133 cases, more than any other benchmark.", "perturbed_explanation": "The false positives for ISIC’18 decrease from 203 to 84 after rejection, which is a reduction of 119 cases, not 133 as stated.", "claim": "Applying uncertainty-based rejection reduces false positives in ISIC’18 by 119 cases, more than any other benchmark.", "label": true }, { "paperid": "2411.09289v1", "paper_path": "./SciVer/papers/2411.09289v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.09289v1_figure_3.png", "request_id": 36, "origin_statement": "On the unseen ARC-E task, H2O’s accuracy jumps from about 45% with one demonstration to roughly 80% with five demonstrations, an increase of approximately 35 percentage points.", "perturbed_statement": "On the unseen ARC-E task, H2O’s accuracy jumps from about 55% with one demonstration to roughly 80% with five demonstrations, an increase of approximately 25 percentage points.", "perturbed_explanation": "According to the figure, H2O’s accuracy at one demonstration on ARC-E is actually around 45%, not 55%, so both the stated initial accuracy and the implied 25-point increase are incorrect.", "claim": "On the unseen ARC-E task, H2O’s accuracy jumps from about 45% with one demonstration to roughly 80% with five demonstrations, an increase of approximately 35 percentage points.", "label": true }, { "paperid": "2409.13587v1", "paper_path": "./SciVer/papers/2409.13587v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "6.3" ], "image_path": "./SciVer/images/2409.13587v1-Table1-1.png", "request_id": 55, "origin_statement": "28qubits_03 uses 82,966 sampling_shots, over 11.6 times fewer than the 964,853 sampling_shots of 20qubits_01.", "perturbed_statement": "28qubits_03 uses 291,401 sampling_shots, over 11.6 times fewer than the 964,853 sampling_shots of 20qubits_01.", "perturbed_explanation": "This is incorrect because Table 1 shows that 28qubits_03 actually uses 82,966 sampling_shots, not 291,401, so the stated sampling_shots value is wrong and the comparison is invalid.", "claim": "28qubits_03 uses 82,966 sampling_shots, over 11.6 times fewer than the 964,853 sampling_shots of 20qubits_01.", "label": true }, { "paperid": "2409.01239v1", "paper_path": "./SciVer/papers/2409.01239v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "2.2" ], "image_path": "./SciVer/images/2409.01239v1_figure_6.png", "request_id": 58, "origin_statement": "In the follow-up light curves, the transit depth observed in the Sinistro i' band (~1.6%) is about 0.35 percentage points deeper than that in the Sinistro r' band (~1.25%).", "perturbed_statement": "In the follow-up light curves, the transit depth observed in the Sinistro i' band (~1.1%) is about 0.2 percentage points shallower than that in the Sinistro r' band (~1.3%).", "perturbed_explanation": "This is incorrect because panels showing the Sinistro i' band have a flux minimum around 0.984 (depth ≃1.6%), while the r' band panels reach only 
about 0.9885 (depth ≃1.25%), so the i' depth is actually deeper, not shallower.", "claim": "In the follow-up light curves, the transit depth observed in the Sinistro i' band (~1.6%) is about 0.35 percentage points deeper than that in the Sinistro r' band (~1.25%).", "label": true }, { "paperid": "2409.03735v1", "paper_path": "./SciVer/papers/2409.03735v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.03735v1_figure_2.png", "request_id": 74, "origin_statement": "At T=0, the IoT dataset had about 75% of responses rated unacceptable, which is 25 percentage points more than the COPPA dataset’s roughly 50% unacceptable responses.", "perturbed_statement": "At T=0, the IoT dataset had about 85% of responses rated unacceptable, which is 35 percentage points more than the COPPA dataset’s roughly 50% unacceptable responses.", "perturbed_explanation": "This statement is incorrect because Figure 2 shows the IoT dataset had approximately 75% unacceptable responses at T=0 (not 85%), making the difference with COPPA’s ~50% unacceptable responses 25 percentage points, not 35.", "claim": "At T=0, the IoT dataset had about 75% of responses rated unacceptable, which is 25 percentage points more than the COPPA dataset’s roughly 50% unacceptable responses.", "label": true }, { "paperid": "2410.04088v1", "paper_path": "./SciVer/papers/2410.04088v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.04088v1-Table3-1.png", "request_id": 91, "origin_statement": "By integrating OSMA and CRAM, the model’s APS increases by 4.9 points (from 17.3 to 22.2) with only a 9G rise in FLOPs over the baseline.", "perturbed_statement": "By integrating OSMA and CRAM, the model’s APS increases by 6.2 points (from 17.3 to 22.2) with only a 9G rise in FLOPs over the baseline.", "perturbed_explanation": "The table shows APS rises from 17.3 to 22.2, which is a gain of 4.9 points, not 6.2 points, so the claimed 6.2-point increase is incorrect.", "claim": "By integrating OSMA and CRAM, the model’s APS increases by 4.9 points (from 17.3 to 22.2) with only a 9G rise in FLOPs over the baseline.", "label": true }, { "paperid": "2411.00915v1", "paper_path": "./SciVer/papers/2411.00915v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2411.00915v1-Table1-1.png", "request_id": 99, "origin_statement": "Under Input ② (8192×4096, 4096×128), Config ② achieves 0.10ms latency, a 47% reduction compared to Punica’s 0.19ms latency.", "perturbed_statement": "Under Input ② (8192×4096, 4096×128), Config ② achieves 0.12ms latency, a 47% reduction compared to Punica’s 0.19ms latency.", "perturbed_explanation": "The perturbed statement incorrectly states Config ②’s latency as 0.12ms under Input ②, but Table 1 reports its latency as 0.10ms for that configuration.", "claim": "Under Input ② (8192×4096, 4096×128), Config ② achieves 0.10ms latency, a 47% reduction compared to Punica’s 0.19ms latency.", "label": true }, { "paperid": "2410.04797v1", "paper_path": "./SciVer/papers/2410.04797v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.5" ], "image_path": "./SciVer/images/2410.04797v1-Table4-1.png", "request_id": 103, "origin_statement": "The multi-stage strategy increases recall on SVD-vow from 81.24% to 87.84%, a 6.6 percentage point gain, which is the largest recall improvement among all four subsets.", "perturbed_statement": 
"The multi-stage strategy increases recall on SVD-vow from 81.24% to 88.84%, a 7.6 percentage point gain, which is the largest recall improvement among all four subsets.", "perturbed_explanation": "The perturbation is incorrect because Table 4 shows the multi-stage recall for SVD-vow is 87.84%, not 88.84%, so the stated 7.6-point gain is unsupported by the data.", "claim": "The multi-stage strategy increases recall on SVD-vow from 81.24% to 87.84%, a 6.6 percentage point gain, which is the largest recall improvement among all four subsets.", "label": true }, { "paperid": "2411.02542v1", "paper_path": "./SciVer/papers/2411.02542v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.02542v1-Table1-1.png", "request_id": 109, "origin_statement": "As k increases from 1 to 10, the p-value for ANCD increases by about ten orders of magnitude, from 1.13E-29 to 4.53E-19.", "perturbed_statement": "As k increases from 1 to 10, the p-value for ANCD increases by about twenty orders of magnitude, from 1.13E-29 to 4.53E-19.", "perturbed_explanation": "This statement is wrong because the p-value for ANCD goes from 1.13×10⁻²⁹ to 4.53×10⁻¹⁹, an increase of ten orders of magnitude, not twenty.", "claim": "As k increases from 1 to 10, the p-value for ANCD increases by about ten orders of magnitude, from 1.13E-29 to 4.53E-19.", "label": true }, { "paperid": "2409.12479v1", "paper_path": "./SciVer/papers/2409.12479v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "4.5.1" ], "image_path": "./SciVer/images/2409.12479v1_figure_2(b).png", "request_id": 116, "origin_statement": "The average AUC across all OOD datasets increases from approximately 89% for N=1 to about 91% for N=5, indicating a two-percentage-point improvement when enrolling five OOD samples over a single sample.", "perturbed_statement": "The average AUC across all OOD datasets increases from approximately 89% for N=1 to about 94% for N=5, indicating a five-percentage-point improvement when enrolling five OOD samples over a single sample.", "perturbed_explanation": "This is incorrect because the bar for N=5 in the figure shows an average AUC near 91%, not 94%, so the improvement is about two percentage points rather than five.", "claim": "The average AUC across all OOD datasets increases from approximately 89% for N=1 to about 91% for N=5, indicating a two-percentage-point improvement when enrolling five OOD samples over a single sample.", "label": true }, { "paperid": "2409.16016v2", "paper_path": "./SciVer/papers/2409.16016v2.json", "claim_type": "direct", "type": "chart", "item": "5(b)", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.16016v2_figure_5(b).png", "request_id": 120, "origin_statement": "In macula-centered images, VascX’s median artery-vein segmentation Dice score is approximately 0.85, about 0.15 higher than LWNet’s median of around 0.70.", "perturbed_statement": "In macula-centered images, VascX’s median artery-vein segmentation Dice score is approximately 0.80, about 0.10 higher than LWNet’s median of around 0.70.", "perturbed_explanation": "The perturbed claim is incorrect because, according to the figure, VascX’s actual median A/V segmentation Dice score on macula-centered images is about 0.85 (not 0.80), making the true difference roughly 0.15 rather than 0.10.", "claim": "In macula-centered images, VascX’s median artery-vein segmentation Dice score is approximately 0.85, about 0.15 higher than LWNet’s median of around 0.70.", 
"label": true }, { "paperid": "2410.13762v1", "paper_path": "./SciVer/papers/2410.13762v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.4" ], "image_path": "./SciVer/images/2410.13762v1-Table3-1.png", "request_id": 121, "origin_statement": "The maximum pressure relative L2 error (2.04×10⁻²) exceeds the minimum (2.01×10⁻²) by just 0.0003, or about 1.5%, across different train-test splits, highlighting minimal sensitivity of pressure predictions to training data size.", "perturbed_statement": "The maximum pressure relative L2 error (2.08×10⁻²) exceeds the minimum (2.01×10⁻²) by 0.007, or about 3.5%, across different train-test splits, highlighting minimal sensitivity of pressure predictions to training data size.", "perturbed_explanation": "The table shows the highest pressure relative L2 error as 2.04×10⁻² and the lowest as 2.01×10⁻², giving a difference of 0.0003. There is no 2.08×10⁻² value and the actual difference is not 0.007, so the perturbed values are incorrect.", "claim": "The maximum pressure relative L2 error (2.04×10⁻²) exceeds the minimum (2.01×10⁻²) by just 0.0003, or about 1.5%, across different train-test splits, highlighting minimal sensitivity of pressure predictions to training data size.", "label": true }, { "paperid": "2410.21131v1", "paper_path": "./SciVer/papers/2410.21131v1.json", "claim_type": "direct", "type": "chart", "item": "3(c)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.21131v1_figure_3(c).png", "request_id": 124, "origin_statement": "The model correctly classifies true 'High' instances 90% of the time—5 percentage points higher than the 85% correct rate for true 'Low' instances—while true 'Medium' instances are misclassified as 'High' 49% of the time, which is over four times the 11% correct 'Medium' rate.", "perturbed_statement": "The model correctly classifies true 'High' instances 95% of the time—10 percentage points higher than the 85% correct rate for true 'Low' instances—while true 'Medium' instances are misclassified as 'High' 45% of the time, which is four times the 11% correct 'Medium' rate.", "perturbed_explanation": "According to the confusion matrix, the accuracy for true 'High' instances is 90%, not 95% (bottom-right cell shows 0.90), and the misclassification rate of true 'Medium' as 'High' is 49%, not 45% (middle row, rightmost cell shows 0.49).", "claim": "The model correctly classifies true 'High' instances 90% of the time—5 percentage points higher than the 85% correct rate for true 'Low' instances—while true 'Medium' instances are misclassified as 'High' 49% of the time, which is over four times the 11% correct 'Medium' rate.", "label": true }, { "paperid": "2410.19218v1", "paper_path": "./SciVer/papers/2410.19218v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.19218v1-Table1-1.png", "request_id": 163, "origin_statement": "On CSFCube with SPECTER-v2, TaxoIndex++ achieves N@5 of 0.469, which is 6.3 percentage points higher than FFT w/ ToTER's 0.406, representing approximately a 15.5% relative increase.", "perturbed_statement": "On CSFCube with SPECTER-v2, TaxoIndex++ achieves an N@5 of 0.445, which is 3.9 percentage points higher than FFT w/ ToTER's 0.406, representing approximately a 9.6% relative increase.", "perturbed_explanation": "The statement is incorrect because the table shows TaxoIndex++ has an N@5 of 0.469 (not 0.445), and FFT w/ ToTER has 0.406, so the actual point difference is 0.063 (6.3 points) and the 
relative increase is about 15.5%, not 3.9 points or 9.6%.", "claim": "On CSFCube with SPECTER-v2, TaxoIndex++ achieves N@5 of 0.469, which is 6.3 percentage points higher than FFT w/ ToTER's 0.406, representing approximately a 15.5% relative increase.", "label": true }, { "paperid": "2410.13500v1", "paper_path": "./SciVer/papers/2410.13500v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.13500v1-Table2-1.png", "request_id": 177, "origin_statement": "The jaccard index improves by 0.002 from cosine similarity (0.763) to trained similarity (0.765) and by a further 0.004 to 0.769 with sub-pixel enhancement.", "perturbed_statement": "The jaccard index improves by 0.002 from cosine similarity (0.763) to trained similarity (0.766) and by a further 0.003 to 0.768 with sub-pixel enhancement.", "perturbed_explanation": "The table shows a jaccardIndex of 0.765 for the trained similarity method and 0.769 for the trained similarity plus sub-pixel enhancement, not 0.766 and 0.768 as stated.", "claim": "The jaccard index improves by 0.002 from cosine similarity (0.763) to trained similarity (0.765) and by a further 0.004 to 0.769 with sub-pixel enhancement.", "label": true }, { "paperid": "2411.15871v1", "paper_path": "./SciVer/papers/2411.15871v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.15871v1-Table2-1.png", "request_id": 217, "origin_statement": "Table 2 lists seven computation operators versus four communication operators, showing computation bases are almost twice as many as communication bases.", "perturbed_statement": "Table 2 lists six computation operators versus five communication operators, showing computation bases barely outnumber communication bases.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 actually lists seven computation operators (GEMM, FlashAttention, Group-GEMM, Fused BDA, Layernorm, Router, Permute) and four communication operators (AllGather, ReduceScatter, AlltoAll, Send/Recv), not six and five respectively.", "claim": "Table 2 lists seven computation operators versus four communication operators, showing computation bases are almost twice as many as communication bases.", "label": true }, { "paperid": "2410.20399v1", "paper_path": "./SciVer/papers/2410.20399v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.20399v1_figure_6.png", "request_id": 228, "origin_statement": "At three worker warpgroups, the LCSF kernel reaches a peak of 440 TFLOPs—99 TFLOPs more than its 341 TFLOPs at four warpgroups and 185 TFLOPs more than the synchronous kernel's 255 TFLOPs at three warpgroups.", "perturbed_statement": "At three worker warpgroups, the LCSF kernel reaches a peak of 400 TFLOPs—59 TFLOPs more than its 341 TFLOPs at four warpgroups and 145 TFLOPs more than the synchronous kernel's 255 TFLOPs at three warpgroups.", "perturbed_explanation": "The perturbed statement is wrong because the LCSF kernel actually peaks at 440 TFLOPs at three warpgroups, not 400. 
The true gap to four warpgroups is 440−341=99 TFLOPs (not 59), and the gap over the synchronous 255 TFLOPs is 185 TFLOPs (not 145).", "claim": "At three worker warpgroups, the LCSF kernel reaches a peak of 440 TFLOPs—99 TFLOPs more than its 341 TFLOPs at four warpgroups and 185 TFLOPs more than the synchronous kernel's 255 TFLOPs at three warpgroups.", "label": true }, { "paperid": "2411.14497v1", "paper_path": "./SciVer/papers/2411.14497v1.json", "claim_type": "direct", "type": "chart", "item": "4(b)", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.14497v1_figure_4(b).png", "request_id": 242, "origin_statement": "On MT-Bench, Llama-2-7B-star_instruct scores about 9.0 on humanities, roughly 1.0 point higher than Llama-2-7B-evol_instruct (~8.0).", "perturbed_statement": "On MT-Bench, Llama-2-7B-star_instruct scores about 10.0 on humanities, roughly 2.0 points higher than Llama-2-7B-evol_instruct (~8.0).", "perturbed_explanation": "The perturbed statement is incorrect because the radar plot shows Llama-2-7B-star_instruct’s humanities score is about 9.0, not 10.0, and the actual difference from Llama-2-7B-evol_instruct (~8.0) is approximately 1.0 point, not 2.0.", "claim": "On MT-Bench, Llama-2-7B-star_instruct scores about 9.0 on humanities, roughly 1.0 point higher than Llama-2-7B-evol_instruct (~8.0).", "label": true }, { "paperid": "2410.01839v1", "paper_path": "./SciVer/papers/2410.01839v1.json", "claim_type": "direct", "type": "chart", "item": "2(d)", "section": [ "3.1", "3.3" ], "image_path": "./SciVer/images/2410.01839v1_figure_2(d).png", "request_id": 260, "origin_statement": "In the true H1 persistence diagram (blue squares), the feature born at around 2.1 has the largest lifetime of about 4.7 (death ≈6.8), whereas the other three features have lifetimes below 0.6.", "perturbed_statement": "In the true H1 persistence diagram (blue squares), the feature born at around 4.6 has the largest lifetime of about 4.7 (death ≈9.2), whereas the other three features have lifetimes below 0.6.", "perturbed_explanation": "This is incorrect because the feature born at ≈4.6 actually dies at ≈5.1, giving a lifetime of ≈0.5, and no feature in the diagram has a death time near 9.2, so the claimed 4.7 persistence is unsupported by the plot.", "claim": "In the true H1 persistence diagram (blue squares), the feature born at around 2.1 has the largest lifetime of about 4.7 (death ≈6.8), whereas the other three features have lifetimes below 0.6.", "label": true }, { "paperid": "2411.16474v1", "paper_path": "./SciVer/papers/2411.16474v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.16474v1_figure_5.png", "request_id": 282, "origin_statement": "The peak desorption flux for the 0.75 ML O-covered W surface is about 0.8×10^17 m−2 s−1 at ≈370 K, roughly 18% of the clean surface’s ≈4.5×10^17 m−2 s−1 peak at ≈430 K, with a 60 K lower peak temperature.", "perturbed_statement": "The peak desorption flux for the 0.75 ML O-covered W surface is about 0.8×10^17 m−2 s−1 at ≈370 K, roughly 30% of the clean surface’s ≈4.5×10^17 m−2 s−1 peak at ≈430 K, with a 50 K lower peak temperature.", "perturbed_explanation": "This statement is incorrect because the flux ratio is about 0.8/4.5 ≈ 18%, not 30%, and the peak temperature difference is ≈430 K − 370 K = 60 K, not 50 K, as shown in the TDS spectra.", "claim": "The peak desorption flux for the 0.75 ML O-covered W surface is about 0.8×10^17 m−2 s−1 at ≈370 K, roughly 18% of the clean surface’s 
≈4.5×10^17 m−2 s−1 peak at ≈430 K, with a 60 K lower peak temperature.", "label": true }, { "paperid": "2409.02554v1", "paper_path": "./SciVer/papers/2409.02554v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "2.2" ], "image_path": "./SciVer/images/2409.02554v1-Table3-1.png", "request_id": 295, "origin_statement": "Cycle 23 recorded 30 type II bursts ending beyond 50 kHz, which is 37% of its 81 total events—more than triple the 12% (7 of 57) observed in cycle 24.", "perturbed_statement": "Cycle 23 recorded 25 type II bursts ending beyond 50 kHz, which is 35% of its 81 total events—double the 12% (7 of 57) observed in cycle 24.", "perturbed_explanation": "The statement is incorrect because Table 3 shows cycle 23 actually had 30 events (not 25) ending beyond 50 kHz, accounting for 37% (not 35%) of 81 events. Additionally, 37% is more than triple 12%, not double.", "claim": "Cycle 23 recorded 30 type II bursts ending beyond 50 kHz, which is 37% of its 81 total events—more than triple the 12% (7 of 57) observed in cycle 24.", "label": true }, { "paperid": "2409.19942v2", "paper_path": "./SciVer/papers/2409.19942v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5" ], "image_path": "./SciVer/images/2409.19942v2-Table4-1.png", "request_id": 297, "origin_statement": "The CCD-trained R(2+1)D model achieves 76.14% accuracy on CCD but only 55.76% when finetuned and tested on CycleCrash, a drop of 20.38 percentage points.", "perturbed_statement": "The CCD-trained R(2+1)D model achieves 76.14% accuracy on CCD but only 65.76% when finetuned and tested on CycleCrash, a drop of 10.38 percentage points.", "perturbed_explanation": "According to the table, the finetuned accuracy on CycleCrash is actually 55.76%, not 65.76%, making the true drop from 76.14% equal to 20.38 points rather than 10.38.", "claim": "The CCD-trained R(2+1)D model achieves 76.14% accuracy on CCD but only 55.76% when finetuned and tested on CycleCrash, a drop of 20.38 percentage points.", "label": true }, { "paperid": "2411.15553v1", "paper_path": "./SciVer/papers/2411.15553v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.15553v1-Table1-1.png", "request_id": 301, "origin_statement": "Using RN-50 as the source model, RDI-FTM-E achieves an average targeted attack success rate of 79.5%, outperforming RDI-CFM's 74.6% by 4.9 percentage points.", "perturbed_statement": "Using RN-50 as the source model, RDI-FTM-E achieves an average targeted attack success rate of 82.5%, outperforming RDI-CFM's 74.6% by 7.9 percentage points.", "perturbed_explanation": "The table shows that RDI-FTM-E's average success rate is 79.5%, not 82.5%, and the actual difference over RDI-CFM's 74.6% is 4.9 points, not 7.9.", "claim": "Using RN-50 as the source model, RDI-FTM-E achieves an average targeted attack success rate of 79.5%, outperforming RDI-CFM's 74.6% by 4.9 percentage points.", "label": true }, { "paperid": "2409.05249v1", "paper_path": "./SciVer/papers/2409.05249v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.05249v1-Table1-1.png", "request_id": 317, "origin_statement": "NetDPSyn achieves a 0.90 Spearman correlation on both TON and CIDDS, 0.20 higher than PGM's 0.70 on those datasets.", "perturbed_statement": "NetDPSyn achieves a 0.90 Spearman correlation on both TON and CIDDS, 0.10 higher than PGM's 0.80 on those datasets.", "perturbed_explanation": 
"This statement is incorrect because Table 1 shows PGM’s Spearman correlation on TON and CIDDS is 0.70, not 0.80, so the difference with NetDPSyn’s 0.90 is 0.20, not 0.10.", "claim": "NetDPSyn achieves a 0.90 Spearman correlation on both TON and CIDDS, 0.20 higher than PGM's 0.70 on those datasets.", "label": true }, { "paperid": "2409.13175v1", "paper_path": "./SciVer/papers/2409.13175v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.2", "5.3" ], "image_path": "./SciVer/images/2409.13175v1-Table1-1.png", "request_id": 329, "origin_statement": "RPAF-TD3-MSE outperforms RPAF-TD3-KL by 18 seconds in average watch time and is 201 seconds below the ideal All Real-Time strategy.", "perturbed_statement": "RPAF-TD3-MSE outperforms RPAF-TD3-KL by 28 seconds in average watch time and is 191 seconds below the ideal All Real-Time strategy.", "perturbed_explanation": "The perturbed statement misreports both differences. The table shows RPAF-TD3-MSE at 2146 s versus RPAF-TD3-KL at 2128 s, a difference of 18 seconds (not 28), and 2146 s is 201 seconds below the ideal 2347 s (not 191).", "claim": "RPAF-TD3-MSE outperforms RPAF-TD3-KL by 18 seconds in average watch time and is 201 seconds below the ideal All Real-Time strategy.", "label": true }, { "paperid": "2409.04050v1", "paper_path": "./SciVer/papers/2409.04050v1.json", "claim_type": "direct", "type": "chart", "item": "6(b)", "section": [ "4.3.3" ], "image_path": "./SciVer/images/2409.04050v1_figure_6(b).png", "request_id": 336, "origin_statement": "At 5 iterations, inference time (~4 s) is more than triple that at 1 iteration (~1.2 s), while SAM score decreases from ~2.25 at 1 iteration to ~1.90, surpassing EigenSR-α's constant SAM of 2.22.", "perturbed_statement": "At 5 iterations, inference time (~4 s) is only twice that at 1 iteration (~1.2 s), while SAM score decreases from ~2.25 at 1 iteration to ~1.90, surpassing EigenSR-α's constant SAM of 2.22.", "perturbed_explanation": "The claim is incorrect because, according to the figure, the inference time at 1 iteration is approximately 1.2 s and at 5 iterations is about 4.0 s, which is more than three times (4.0 s/1.2 s ≈ 3.33), not just twice.", "claim": "At 5 iterations, inference time (~4 s) is more than triple that at 1 iteration (~1.2 s), while SAM score decreases from ~2.25 at 1 iteration to ~1.90, surpassing EigenSR-α's constant SAM of 2.22.", "label": true }, { "paperid": "2409.02246v1", "paper_path": "./SciVer/papers/2409.02246v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.02246v1-Table2-1.png", "request_id": 359, "origin_statement": "Compared to the heuristic, the equitable policy (ρ_large=0.5) increases the coverage ratio by 0.49, from 0.91 to 1.40.", "perturbed_statement": "Compared to the heuristic, the equitable policy (ρ_large=0.5) increases the coverage ratio by 0.55, from 0.92 to 1.40.", "perturbed_explanation": "This statement is wrong because the table shows the heuristic coverage ratio is 0.91 (not 0.92) and the actual difference from 0.91 to 1.40 is 0.49 (not 0.55).", "claim": "Compared to the heuristic, the equitable policy (ρ_large=0.5) increases the coverage ratio by 0.49, from 0.91 to 1.40.", "label": true }, { "paperid": "2411.02359v1", "paper_path": "./SciVer/papers/2411.02359v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.02359v1_figure_4.png", "request_id": 362, "origin_statement": "DeeR-S 
reduces peak GPU memory by 4.0× relative to RoboFlamingo++ 9B, lowering the requirement from 32 GB to 8 GB.", "perturbed_statement": "DeeR-S reduces peak GPU memory by 2.7× relative to RoboFlamingo++ 9B, lowering the requirement from 32 GB to 8 GB.", "perturbed_explanation": "The chart shows the GPU memory requirement drops from 32 GB to 8 GB for DeeR-S, which is a 4× reduction (32/8), not a 2.7× decrease. The cited factor is therefore incorrect.", "claim": "DeeR-S reduces peak GPU memory by 4.0× relative to RoboFlamingo++ 9B, lowering the requirement from 32 GB to 8 GB.", "label": true }, { "paperid": "2409.15329v1", "paper_path": "./SciVer/papers/2409.15329v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "7.2" ], "image_path": "./SciVer/images/2409.15329v1-Table4-1.png", "request_id": 363, "origin_statement": "For TD3-INVASE, the communication beam gain falls from 15122.5 at (0,0,15) tilt to 338.8 at (0,0,90), a drop of about 98% as z-axis tilt increases.", "perturbed_statement": "For TD3-INVASE, the communication beam gain falls from 8974.5 at (0,0,15) tilt to 338.8 at (0,0,90), a drop of about 96% as z-axis tilt increases.", "perturbed_explanation": "This is incorrect because the actual TD3-INVASE gain at (0,0,15) is 15122.5 (not 8974.5), and the drop from 15122.5 to 338.8 corresponds to about 97.8%, not 96%.", "claim": "For TD3-INVASE, the communication beam gain falls from 15122.5 at (0,0,15) tilt to 338.8 at (0,0,90), a drop of about 98% as z-axis tilt increases.", "label": true }, { "paperid": "2410.09432v1", "paper_path": "./SciVer/papers/2410.09432v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.09432v1-Table2-1.png", "request_id": 365, "origin_statement": "In the r=4 setting, FedEx-LoRA attains a BLEU score of 68.15, which is 1.36 points higher than FFA-LoRA (66.79) and 0.76 points below Centralized LoRA (68.91).", "perturbed_statement": "In the r=4 setting, FedEx-LoRA attains a BLEU score of 69.15, which is 2.36 points higher than FFA-LoRA (66.79) and 0.24 points below Centralized LoRA (68.91).", "perturbed_explanation": "This is incorrect because the table reports FedEx-LoRA’s BLEU as 68.15, not 69.15. Therefore, its difference from FFA-LoRA (66.79) is actually +1.36 (not +2.36) and its gap to Centralized LoRA (68.91) is –0.76 (not –0.24).", "claim": "In the r=4 setting, FedEx-LoRA attains a BLEU score of 68.15, which is 1.36 points higher than FFA-LoRA (66.79) and 0.76 points below Centralized LoRA (68.91).", "label": true }, { "paperid": "2411.04188v3", "paper_path": "./SciVer/papers/2411.04188v3.json", "claim_type": "direct", "type": "chart", "item": "23", "section": [ "4.2", "7.3" ], "image_path": "./SciVer/images/2411.04188v3_figure_23.png", "request_id": 382, "origin_statement": "By epoch 100, the training loss for the evaluation subset (773 sources) remains about 0.22, more than double the roughly 0.11 loss achieved by training on the full dataset (4621 sources).", "perturbed_statement": "By epoch 100, the training loss for the evaluation subset (773 sources) is about 0.15, nearly three times the approximately 0.05 loss achieved by training on the full dataset (4621 sources).", "perturbed_explanation": "The perturbed statement is incorrect because the chart shows the evaluation subset training loss at epoch 100 is roughly 0.22 (not 0.15) and the full dataset training loss is around 0.11 (not 0.05). 
These values contradict the plotted losses in Figure 23.", "claim": "By epoch 100, the training loss for the evaluation subset (773 sources) remains about 0.22, more than double the roughly 0.11 loss achieved by training on the full dataset (4621 sources).", "label": true }, { "paperid": "2409.13828v1", "paper_path": "./SciVer/papers/2409.13828v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.13828v1_figure_4.png", "request_id": 402, "origin_statement": "Under the Attention-Fool attack on CIFAR-10, ViTGuard’s fooling rate (0.5%) represents a 46.4 percentage point decrease from the 46.9% rate observed without any detection.", "perturbed_statement": "Under the Attention-Fool attack on CIFAR-10, ViTGuard’s fooling rate (5.0%) represents a 41.9 percentage point decrease from the 46.9% rate observed without any detection.", "perturbed_explanation": "The perturbed statement is incorrect because the source figure shows ViTGuard’s fooling rate on CIFAR-10 under Attention-Fool is 0.5%, not 5.0%, so the claimed rate and resulting decrease are wrong.", "claim": "Under the Attention-Fool attack on CIFAR-10, ViTGuard’s fooling rate (0.5%) represents a 46.4 percentage point decrease from the 46.9% rate observed without any detection.", "label": true }, { "paperid": "2410.16928v2", "paper_path": "./SciVer/papers/2410.16928v2.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.16928v2_figure_4.png", "request_id": 416, "origin_statement": "Increasing the hidden dimension from 2^5 to 2^11 reduces MSE by about 7% at prediction length 96 (from 0.14 to 0.13) but by about 16% at length 720 (from 0.22 to 0.185), indicating larger benefit for longer horizons.", "perturbed_statement": "Increasing the hidden dimension from 2^5 to 2^11 reduces MSE by about 20% at prediction length 96 (from 0.14 to 0.13) but only about 8% at length 720 (from 0.22 to 0.185), indicating larger benefit for shorter horizons.", "perturbed_explanation": "The perturbed percentages are incorrect. 
The actual MSE drop at length 96 is (0.14–0.13)/0.14 ≈ 7%, not 20%, and at length 720 is (0.22–0.185)/0.22 ≈ 16%, not 8%, as seen from the plotted values.", "claim": "Increasing the hidden dimension from 2^5 to 2^11 reduces MSE by about 7% at prediction length 96 (from 0.14 to 0.13) but by about 16% at length 720 (from 0.22 to 0.185), indicating larger benefit for longer horizons.", "label": true }, { "paperid": "2409.17732v1", "paper_path": "./SciVer/papers/2409.17732v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "2.1" ], "image_path": "./SciVer/images/2409.17732v1_figure_3.png", "request_id": 426, "origin_statement": "The Italian trend line shows a warming slope of 0.0445°C per year (R²=0.1802) from 2000 to 2022, while the UK trend line shows a slight cooling slope of -0.0047°C per year (R²=0.0086).", "perturbed_statement": "The Italian trend line shows a warming slope of 0.0745°C per year (R²=0.3000) from 2000 to 2022, while the UK trend line shows a slight warming slope of 0.003°C per year (R²=0.0086).", "perturbed_explanation": "The perturbed statement is incorrect because the figure reports the Italian slope as 0.0445°C/year (not 0.0745°C/year) with R²=0.1802, and the UK slope as -0.0047°C/year (cooling), not +0.003°C/year.", "claim": "The Italian trend line shows a warming slope of 0.0445°C per year (R²=0.1802) from 2000 to 2022, while the UK trend line shows a slight cooling slope of -0.0047°C per year (R²=0.0086).", "label": true }, { "paperid": "2409.08732v1", "paper_path": "./SciVer/papers/2409.08732v1.json", "claim_type": "direct", "type": "chart", "item": "5(b)", "section": [ "4.7" ], "image_path": "./SciVer/images/2409.08732v1_figure_5(b).png", "request_id": 436, "origin_statement": "On UK GDP nowcasting, NCDENow uses about 50k parameters—around 3% of DFM-LSTM’s 1.7M—but achieves a 2.5 percentage point lower MAPE (6.3% vs 8.8%).", "perturbed_statement": "On UK GDP nowcasting, NCDENow uses about 500k parameters—around 30% of DFM-LSTM’s 1.7M—but achieves a 2.5 percentage point lower MAPE (6.3% vs 8.8%).", "perturbed_explanation": "The perturbed statement is wrong because Figure 5 indicates that NCDENow actually uses about 0.05 million (50k) parameters, not 0.5 million (500k). 
It therefore does not use 500k parameters.", "claim": "On UK GDP nowcasting, NCDENow uses about 50k parameters—around 3% of DFM-LSTM’s 1.7M—but achieves a 2.5 percentage point lower MAPE (6.3% vs 8.8%).", "label": true }, { "paperid": "2411.15524v1", "paper_path": "./SciVer/papers/2411.15524v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "3", "4.1" ], "image_path": "./SciVer/images/2411.15524v1_figure_2.png", "request_id": 460, "origin_statement": "IRAS02438+2135’s spectrum peaks at ~35 mJy, exceeding CGCG147-020’s ~25 mJy by ~10 mJy, TGN229Z166’s ~22 mJy by ~13 mJy, and J0425−2521’s ~12 mJy by ~23 mJy.", "perturbed_statement": "IRAS02438+2135’s spectrum peaks at ~15 mJy, which is lower than CGCG147-020’s ~25 mJy and TGN229Z166’s ~22 mJy.", "perturbed_explanation": "This is incorrect because the Figure 2 spectrum for IRAS02438+2135 in the top-right panel shows a peak near 35 mJy, not 15 mJy, making it the highest among the four detections.", "claim": "IRAS02438+2135’s spectrum peaks at ~35 mJy, exceeding CGCG147-020’s ~25 mJy by ~10 mJy, TGN229Z166’s ~22 mJy by ~13 mJy, and J0425−2521’s ~12 mJy by ~23 mJy.", "label": true }, { "paperid": "2409.14836v2", "paper_path": "./SciVer/papers/2409.14836v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.3.1" ], "image_path": "./SciVer/images/2409.14836v2-Table3-1.png", "request_id": 471, "origin_statement": "On Mistral-Base, removing the reverse Givens rotation matrix ('single') reduces the MT-Bench WWR by 10.85 points compared to RoPO.", "perturbed_statement": "On Mistral-Base, removing the reverse Givens rotation matrix ('single') reduces the MT-Bench WWR by 8.23 points compared to RoPO.", "perturbed_explanation": "The actual MT-Bench WWR for RoPO is 22.84 and for 'single' is 11.99, yielding a reduction of 10.85 points, not 8.23, so the stated difference is incorrect.", "claim": "On Mistral-Base, removing the reverse Givens rotation matrix ('single') reduces the MT-Bench WWR by 10.85 points compared to RoPO.", "label": true }, { "paperid": "2409.11074v2", "paper_path": "./SciVer/papers/2409.11074v2.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.11074v2_figure_3.png", "request_id": 476, "origin_statement": "On the Synthetic subset, deepseek-math-7b achieves around 25% accuracy in Probability, which is over 10 percentage points higher than all other models, none of which exceed 15%.", "perturbed_statement": "On the Synthetic subset, deepseek-math-7b achieves around 15% accuracy in Probability, which is over 5 percentage points higher than all other models, none of which exceed 10%.", "perturbed_explanation": "This is incorrect because in the chart the deepseek-math-7b bar for Probability on the Synthetic subset is around 25%, not 15%. 
Additionally, Mathstral-7b-v0.1 (and even RoMistral-7b) exceed 10%, with Mathstral-7b-v0.1 near 15%.", "claim": "On the Synthetic subset, deepseek-math-7b achieves around 25% accuracy in Probability, which is over 10 percentage points higher than all other models, none of which exceed 15%.", "label": true }, { "paperid": "2411.14749v2", "paper_path": "./SciVer/papers/2411.14749v2.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "3.4.1" ], "image_path": "./SciVer/images/2411.14749v2_figure_7.png", "request_id": 488, "origin_statement": "At 23 days post-burst, the SN 1998bw model at z=0.86 shows Z-band flux of about 0.6 µJy, roughly three times the R-band flux density (∼0.2 µJy) at the same epoch.", "perturbed_statement": "At 23 days post-burst, the SN 1998bw model at z=0.86 shows Z-band flux of about 0.4 µJy, roughly twice the R-band flux density (∼0.2 µJy) at the same epoch.", "perturbed_explanation": "This is incorrect because, in Figure 7 at 23 days, the orange Z-band curve is at about 0.6 µJy, not 0.4 µJy, so the Z-band flux is around three times the R-band flux, not two times.", "claim": "At 23 days post-burst, the SN 1998bw model at z=0.86 shows Z-band flux of about 0.6 µJy, roughly three times the R-band flux density (∼0.2 µJy) at the same epoch.", "label": true }, { "paperid": "2409.17455v1", "paper_path": "./SciVer/papers/2409.17455v1.json", "claim_type": "direct", "type": "chart", "item": "9(d)", "section": [ "3.1.2" ], "image_path": "./SciVer/images/2409.17455v1_figure_9(d).png", "request_id": 494, "origin_statement": "At λ=1.0, BERT’s macro F1 drop (0.33) is approximately 5.5 times its drop at λ=0.6 (0.06), compared to llama2-7b’s increase from 0.03 to 0.05 (about 1.7 times) over the same λ interval.", "perturbed_statement": "At λ=1.0, BERT’s macro F1 drop (0.33) is approximately 5.5 times its drop at λ=0.6 (0.06), compared to llama2-7b’s increase from 0.03 to 0.07 (about 2.3 times) over the same λ interval.", "perturbed_explanation": "This statement is incorrect because llama2-7b’s macro F1 drop at λ=1.0 is about 0.05, not 0.07, so its increase is roughly 1.7× (0.03 to 0.05) rather than 2.3×.", "claim": "At λ=1.0, BERT’s macro F1 drop (0.33) is approximately 5.5 times its drop at λ=0.6 (0.06), compared to llama2-7b’s increase from 0.03 to 0.05 (about 1.7 times) over the same λ interval.", "label": true }, { "paperid": "2411.16119v1", "paper_path": "./SciVer/papers/2411.16119v1.json", "claim_type": "direct", "type": "chart", "item": "2(c)", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.16119v1_figure_2(c).png", "request_id": 520, "origin_statement": "At approximately 0.34 bpp, the factorized-LVQ curve achieves about 0.980 dB MS-SSIM, around 0.003 dB higher than the factorized-USQ value of 0.977 dB.", "perturbed_statement": "At approximately 0.34 bpp, the factorized-LVQ curve achieves about 0.980 dB MS-SSIM, around 0.010 dB higher than the factorized-USQ value of 0.977 dB.", "perturbed_explanation": "The perturbed statement exaggerates the gain as 0.010 dB, but the plot shows factorized-LVQ at ~0.980 dB and factorized-USQ at ~0.977 dB, a difference of only ~0.003 dB, not 0.010 dB.", "claim": "At approximately 0.34 bpp, the factorized-LVQ curve achieves about 0.980 dB MS-SSIM, around 0.003 dB higher than the factorized-USQ value of 0.977 dB.", "label": true }, { "paperid": "2410.06169v2", "paper_path": "./SciVer/papers/2410.06169v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.4" ], "image_path": 
"./SciVer/images/2410.06169v2-Table1-1.png", "request_id": 527, "origin_statement": "Skipping cross-modal computation in layers 0–5 causes the largest GQA drop of 24.7 points, whereas skipping layers 20–25 reduces GQA by only 0.9 points, showing deeper blocks have less impact on GQA performance.", "perturbed_statement": "Skipping cross-modal computation in layers 0–5 causes the largest GQA drop of 24.7 points, whereas skipping layers 20–25 reduces GQA by only 1.9 points, showing deeper blocks have less impact on GQA performance.", "perturbed_explanation": "The perturbed statement incorrectly states that skipping layers 20–25 reduces GQA by 1.9 points. The table shows GQA drops from 63.0 to 62.1 when layers 20–25 are skipped, which is a decrease of 0.9 points, not 1.9 points.", "claim": "Skipping cross-modal computation in layers 0–5 causes the largest GQA drop of 24.7 points, whereas skipping layers 20–25 reduces GQA by only 0.9 points, showing deeper blocks have less impact on GQA performance.", "label": true }, { "paperid": "2409.05790v1", "paper_path": "./SciVer/papers/2409.05790v1.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.05790v1_figure_3(a).png", "request_id": 542, "origin_statement": "The CVAE model’s relative error distribution peaks at about 0.26 near 0%, surpassing the DNN’s peak around 0.23, and CVAE frequencies approach zero beyond ±10%, while DNN maintains small frequencies past ±10%, showing CVAE’s tighter error concentration.", "perturbed_statement": "The CVAE model’s relative error distribution peaks at about 0.20 near 0%, below the DNN’s peak around 0.23, and CVAE frequencies approach zero beyond ±5%, while DNN maintains small frequencies past ±5%, showing CVAE’s tighter error concentration.", "perturbed_explanation": "This statement is incorrect because the figure shows the CVAE peak frequency is around 0.26 (not 0.20) which is higher than the DNN’s ~0.23. 
Both distributions only drop to near zero beyond ±10%, not ±5% as claimed.", "claim": "The CVAE model’s relative error distribution peaks at about 0.26 near 0%, surpassing the DNN’s peak around 0.23, and CVAE frequencies approach zero beyond ±10%, while DNN maintains small frequencies past ±10%, showing CVAE’s tighter error concentration.", "label": true }, { "paperid": "2411.13919v1", "paper_path": "./SciVer/papers/2411.13919v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.5" ], "image_path": "./SciVer/images/2411.13919v1-Table3-1.png", "request_id": 553, "origin_statement": "Random Forest with pre-clustering reduces false negatives by 7352 compared to without pre-clustering.", "perturbed_statement": "Random Forest with pre-clustering reduces false negatives by 8000 compared to without pre-clustering.", "perturbed_explanation": "This is incorrect because Table 3 shows false negatives drop from 10000 without pre-clustering to 2648 with pre-clustering, a reduction of 10000 − 2648 = 7352, not 8000.", "claim": "Random Forest with pre-clustering reduces false negatives by 7352 compared to without pre-clustering.", "label": true }, { "paperid": "2409.15053v1", "paper_path": "./SciVer/papers/2409.15053v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.15053v1-Table3-1.png", "request_id": 559, "origin_statement": "For Ga41As41H72, the fraction of compute time spent on matrix-vector products increases by 11 percentage points, rising from 82% at polynomial degree 200 to 93% at degree 500.", "perturbed_statement": "For Ga41As41H72, the fraction of compute time spent on matrix-vector products increases by 8 percentage points, rising from 84% at polynomial degree 200 to 92% at degree 500.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 shows that at degree 200 the MV time is 82%, not 84%, and at degree 500 it is 93%, not 92%, so both the starting and ending percentages (and the computed increase) are wrong.", "claim": "For Ga41As41H72, the fraction of compute time spent on matrix-vector products increases by 11 percentage points, rising from 82% at polynomial degree 200 to 93% at degree 500.", "label": true }, { "paperid": "2410.20320v1", "paper_path": "./SciVer/papers/2410.20320v1.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.20320v1_figure_3(a).png", "request_id": 562, "origin_statement": "Proto-BERT’s accuracy falls from about 81.7% at 0% NOTA to 46.0% at 50% NOTA, a drop of roughly 35.7 points, while BERT-PAIR rises from about 76.8% to 80.3%, a gain of 3.5 points.", "perturbed_statement": "Proto-BERT’s accuracy falls from about 81.7% at 0% NOTA to 46.0% at 50% NOTA, a drop of roughly 25.7 points, while BERT-PAIR declines from about 76.8% to 80.3%, a loss of 3.5 points.", "perturbed_explanation": "This is incorrect because Proto-BERT actually drops by about 35.7 points (81.7% to 46.0%), not 25.7, and BERT-PAIR’s accuracy increases by 3.5 points (76.8% to 80.3%), rather than declining.", "claim": "Proto-BERT’s accuracy falls from about 81.7% at 0% NOTA to 46.0% at 50% NOTA, a drop of roughly 35.7 points, while BERT-PAIR rises from about 76.8% to 80.3%, a gain of 3.5 points.", "label": true }, { "paperid": "2409.01854v1", "paper_path": "./SciVer/papers/2409.01854v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.4.2" ], "image_path": 
"./SciVer/images/2409.01854v1-Table4-1.png", "request_id": 573, "origin_statement": "On DuIE, omitting labelled sample retrieval (-samples) reduces F1 from 42.97 to 32.30, a drop of 10.67 points—larger than the 6.55-point drop without annotation guidelines (-doc) and the 2.45-point drop without KG (-KG), highlighting samples’ greatest impact.", "perturbed_statement": "On DuIE, removing KG retrieval (-KG) causes F1 to fall from 42.97 to 36.54, a decline of 6.43 points, exceeding the impact of removing both samples and documents.", "perturbed_explanation": "The table shows that the F1 score for -KG on DuIE is actually 40.52, not 36.54, and the drop from AgentRE-w/oM’s 42.97 to 40.52 is only 2.45 points, not 6.43, so the perturbed claim is incorrect.", "claim": "On DuIE, omitting labelled sample retrieval (-samples) reduces F1 from 42.97 to 32.30, a drop of 10.67 points—larger than the 6.55-point drop without annotation guidelines (-doc) and the 2.45-point drop without KG (-KG), highlighting samples’ greatest impact.", "label": true }, { "paperid": "2411.18432v1", "paper_path": "./SciVer/papers/2411.18432v1.json", "claim_type": "direct", "type": "chart", "item": "12", "section": [ "5.4.2" ], "image_path": "./SciVer/images/2411.18432v1_figure_12.png", "request_id": 574, "origin_statement": "In the large-scale network under the Gaussian distribution, at a 60% control ratio, the SPO-A framework achieves a 13.75% improvement in matching RMSE over PTO, approximately 1.5% higher than the SPO-C framework’s improvement.", "perturbed_statement": "In the large-scale network under the Gaussian distribution, at a 60% control ratio, the SPO-A framework achieves a 15.75% improvement in matching RMSE over PTO, approximately 3.5% higher than the SPO-C framework’s improvement.", "perturbed_explanation": "The perturbed statement is incorrect because, according to the chart in panel (e), SPO-A’s improvement at the 60% control ratio is about 13.75%, not 15.75%, and SPO-C’s improvement there is around 12%, making the actual difference approximately 1.5%, not 3.5%.", "claim": "In the large-scale network under the Gaussian distribution, at a 60% control ratio, the SPO-A framework achieves a 13.75% improvement in matching RMSE over PTO, approximately 1.5% higher than the SPO-C framework’s improvement.", "label": true }, { "paperid": "2409.10775v1", "paper_path": "./SciVer/papers/2409.10775v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.10775v1-Table2-1.png", "request_id": 575, "origin_statement": "The Friedman Q statistic decreases slightly from 71.0 at occlusion level 1 to 65.5 at level 2, while the corresponding p-value increases from 5.0×10⁻¹⁰ to 5.3×10⁻⁹.", "perturbed_statement": "The Friedman Q statistic decreases slightly from 71.0 at occlusion level 1 to 70.5 at level 2, while the corresponding p-value increases from 5.0×10⁻¹⁰ to 5.3×10⁻⁹.", "perturbed_explanation": "This is incorrect because Table 2 shows the Friedman Q statistic for occlusion level 2 is 65.5, not 70.5.", "claim": "The Friedman Q statistic decreases slightly from 71.0 at occlusion level 1 to 65.5 at level 2, while the corresponding p-value increases from 5.0×10⁻¹⁰ to 5.3×10⁻⁹.", "label": true }, { "paperid": "2411.16471v1", "paper_path": "./SciVer/papers/2411.16471v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "6.1" ], "image_path": "./SciVer/images/2411.16471v1-Table4-1.png", "request_id": 591, "origin_statement": "Nunki’s uniform disk 
angular diameter increases from 0.51 ± 0.03 mas at 375 nm to 0.61 ± 0.02 mas at 470 nm, an increase of 0.10 mas.", "perturbed_statement": "Nunki’s uniform disk angular diameter increases from 0.51 ± 0.03 mas at 375 nm to 0.61 ± 0.02 mas at 470 nm, an increase of 0.15 mas.", "perturbed_explanation": "According to the table, Nunki’s θ_UD values are 0.51 mas at 375 nm and 0.61 mas at 470 nm, so the true increase is 0.61−0.51=0.10 mas, not 0.15 mas as stated.", "claim": "Nunki’s uniform disk angular diameter increases from 0.51 ± 0.03 mas at 375 nm to 0.61 ± 0.02 mas at 470 nm, an increase of 0.10 mas.", "label": true }, { "paperid": "2410.20609v1", "paper_path": "./SciVer/papers/2410.20609v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.1", "2.3.2" ], "image_path": "./SciVer/images/2410.20609v1-Table1-1.png", "request_id": 593, "origin_statement": "The host galaxy lies east of the nearby galaxy by 0.10 seconds of right ascension (≈1.5″) and north by 0.5″ in declination, yielding an angular separation of about 1.6 arcseconds.", "perturbed_statement": "The host galaxy lies east of the nearby galaxy by 0.20 seconds of right ascension (≈3.0″) and north by 1.0″ in declination, yielding an angular separation of about 3.2 arcseconds.", "perturbed_explanation": "The statement is incorrect because the actual RA difference is 0.10s (≈1.5″), not 0.20s, and the declination offset is 0.5″, not 1.0″. Therefore, the true angular separation is ≈1.6″, not 3.2″.", "claim": "The host galaxy lies east of the nearby galaxy by 0.10 seconds of right ascension (≈1.5″) and north by 0.5″ in declination, yielding an angular separation of about 1.6 arcseconds.", "label": true }, { "paperid": "2409.20541v3", "paper_path": "./SciVer/papers/2409.20541v3.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.20541v3-Table2-1.png", "request_id": 595, "origin_statement": "The magnitude of the ω coefficient grows rapidly: it increases from 709 at O(ε^1) to 3573 at O(ε^{3/2})—a fivefold rise—and further to 16375 at O(ε^2), about 4.6 times the previous value.", "perturbed_statement": "The magnitude of the ω coefficient grows rapidly: it increases from 709 at O(ε^1) to 2573 at O(ε^{3/2}), then further to 16375 at O(ε^2).", "perturbed_explanation": "The perturbed statement gives the O(ε^{3/2}) coefficient magnitude as 2573, but Table 2 shows the value is –3573(718), so its magnitude is 3573, not 2573.", "claim": "The magnitude of the ω coefficient grows rapidly: it increases from 709 at O(ε^1) to 3573 at O(ε^{3/2})—a fivefold rise—and further to 16375 at O(ε^2), about 4.6 times the previous value.", "label": true }, { "paperid": "2411.08982v1", "paper_path": "./SciVer/papers/2411.08982v1.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.08982v1_figure_8.png", "request_id": 600, "origin_statement": "In the HumanEval task, dropping experts during decode while keeping prefill experts reduces accuracy from 37.80% to 30.49%, a decrease of 7.31 percentage points.", "perturbed_statement": "In the HumanEval task, dropping experts during decode while keeping prefill experts reduces accuracy from 37.80% to 25.49%, a decrease of 12.31 percentage points.", "perturbed_explanation": "According to Figure 8, the accuracy for Prefill Keep & Decode Drop is actually 30.49%, not 25.49%. 
Therefore, the drop is 7.31 points rather than the 12.31 points claimed.", "claim": "In the HumanEval task, dropping experts during decode while keeping prefill experts reduces accuracy from 37.80% to 30.49%, a decrease of 7.31 percentage points.", "label": true }, { "paperid": "2409.14444v1", "paper_path": "./SciVer/papers/2409.14444v1.json", "claim_type": "direct", "type": "chart", "item": "5(b)", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.14444v1_figure_5(b).png", "request_id": 602, "origin_statement": "Between the first and third epochs, SSBI’s proportion increases from approximately 35% to about 38%, while BI’s proportion drops from around 32% to 30%.", "perturbed_statement": "Between the first and third epochs, SSBI’s proportion decreases from approximately 35% to about 30%, while BI’s proportion rises from around 32% to 38%.", "perturbed_explanation": "This is incorrect because the chart shows SSBI actually increases from about 35% to 38% (not decreases), and BI falls from around 32% to 30% (not rises).", "claim": "Between the first and third epochs, SSBI’s proportion increases from approximately 35% to about 38%, while BI’s proportion drops from around 32% to 30%.", "label": true }, { "paperid": "2410.13650v1", "paper_path": "./SciVer/papers/2410.13650v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.1", "4.3" ], "image_path": "./SciVer/images/2410.13650v1-Table1-1.png", "request_id": 637, "origin_statement": "The median orbital period of black widow pulsars in Table 1 is 0.20 days, about 17% shorter than the median orbital period of redback pulsars at 0.24 days.", "perturbed_statement": "The median orbital period of black widow pulsars in Table 1 is 0.20 days, about 30% shorter than the median orbital period of redback pulsars at 0.28 days.", "perturbed_explanation": "This is incorrect because redback pulsars in Table 1 have a median orbital period of 0.24 days (not 0.28 days), so the true difference is ~17%, not 30%.", "claim": "The median orbital period of black widow pulsars in Table 1 is 0.20 days, about 17% shorter than the median orbital period of redback pulsars at 0.24 days.", "label": true }, { "paperid": "2411.01424v1", "paper_path": "./SciVer/papers/2411.01424v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.4" ], "image_path": "./SciVer/images/2411.01424v1-Table1-1.png", "request_id": 639, "origin_statement": "Table 1 defines three threshold parameters (k, r, σ) and two weight metrics (w_{u,v} and w⟟), indicating that threshold parameters exceed weight definitions by one.", "perturbed_statement": "Table 1 defines only two threshold parameters (k, r) and two weight metrics (w_{u,v} and w⟟), indicating that threshold parameters equal weight definitions.", "perturbed_explanation": "This statement is incorrect because Table 1 actually lists three threshold parameters: k, r, and σ, not just two.", "claim": "Table 1 defines three threshold parameters (k, r, σ) and two weight metrics (w_{u,v} and w⟟), indicating that threshold parameters exceed weight definitions by one.", "label": true }, { "paperid": "2409.10502v1", "paper_path": "./SciVer/papers/2409.10502v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.10502v1-Table3-1.png", "request_id": 647, "origin_statement": "Increasing the beam search width from 1 to 5 raises eval accuracy from 98.15% to 98.37%, and complete puzzle accuracy from 94.76% to 95.43%.", "perturbed_statement": 
"Increasing the beam search width from 1 to 5 raises eval accuracy from 98.15% to 98.65%, and complete puzzle accuracy from 94.76% to 95.75%.", "perturbed_explanation": "The eval accuracy for beam search width 5 is 98.37%, not 98.65%, and the complete puzzle accuracy is 95.43%, not 95.75%, according to the table.", "claim": "Increasing the beam search width from 1 to 5 raises eval accuracy from 98.15% to 98.37%, and complete puzzle accuracy from 94.76% to 95.43%.", "label": true }, { "paperid": "2411.05087v1", "paper_path": "./SciVer/papers/2411.05087v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.3.3" ], "image_path": "./SciVer/images/2411.05087v1-Table3-1.png", "request_id": 651, "origin_statement": "The interquartile range (IQR) of complexity ratings for JavaScript releases is 2, exactly double the IQR of 1 observed in both Python and Ruby.", "perturbed_statement": "The interquartile range (IQR) of complexity ratings for Python releases is 2, which is double the IQR of 1 in both JavaScript and Ruby.", "perturbed_explanation": "This is incorrect because Python’s 75th and 25th percentiles are 5 and 4, yielding an IQR of 1, not 2. Moreover, JavaScript’s IQR is 5−3=2, not 1, so the comparison is reversed and factually wrong.", "claim": "The interquartile range (IQR) of complexity ratings for JavaScript releases is 2, exactly double the IQR of 1 observed in both Python and Ruby.", "label": true }, { "paperid": "2409.09099v2", "paper_path": "./SciVer/papers/2409.09099v2.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.09099v2_figure_1.png", "request_id": 656, "origin_statement": "ΔF2 reaches up to ~0.08—about five times the maximum ΔF1 (~0.015)—showing ΔF2 variations are substantially larger.", "perturbed_statement": "ΔF2 reaches up to ~0.02—about the same as the maximum ΔF1 (~0.015)—showing ΔF2 variations are similar in magnitude to ΔF1.", "perturbed_explanation": "This is incorrect because the scatter shows ΔF2 values extend up to about 0.08, not 0.02, so ΔF2 variations are actually much larger than ΔF1 rather than similar in magnitude.", "claim": "ΔF2 reaches up to ~0.08—about five times the maximum ΔF1 (~0.015)—showing ΔF2 variations are substantially larger.", "label": true }, { "paperid": "2410.19796v1", "paper_path": "./SciVer/papers/2410.19796v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "5.3.2" ], "image_path": "./SciVer/images/2410.19796v1_figure_3.png", "request_id": 684, "origin_statement": "At clipping threshold c=0.30, accuracy rises to approximately 95.32%, about 0.27% above the baseline (95.05%), and ECE falls to around 2.1%, roughly 2.2 percentage points lower than the baseline ECE of 4.3%.", "perturbed_statement": "At clipping threshold c=0.30, accuracy rises to approximately 95.12%, about 0.07% above the baseline (95.05%), and ECE falls to around 7.1%, roughly 2.2 percentage points lower than the baseline ECE of 4.3%.", "perturbed_explanation": "The perturbed statement misreports the values at c=0.30: the figure shows an accuracy of about 95.32% (not 95.12%) and an ECE near 2.1% (not 7.1%).", "claim": "At clipping threshold c=0.30, accuracy rises to approximately 95.32%, about 0.27% above the baseline (95.05%), and ECE falls to around 2.1%, roughly 2.2 percentage points lower than the baseline ECE of 4.3%.", "label": true }, { "paperid": "2411.08494v1", "paper_path": "./SciVer/papers/2411.08494v1.json", "claim_type": "direct", "type": "chart", 
"item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.08494v1_figure_5.png", "request_id": 708, "origin_statement": "Across the 21 EC configurations for the 507.cactuBSSN_r workload, the sample mean SPEC score ratio (x86/ARM) is 1.104 with a 95% confidence interval of 1.053–1.156 and a 99% confidence interval of 1.034–1.175, while the SPEC-R ratio is 0.893, which falls below the 99% interval.", "perturbed_statement": "Across the 21 EC configurations for the 507.cactuBSSN_r workload, the sample mean SPEC score ratio (x86/ARM) is 1.204 with a 95% confidence interval of 1.053–1.156 and a 99% confidence interval of 1.034–1.175, while the SPEC-R ratio is 0.893, which falls below the 99% interval.", "perturbed_explanation": "The perturbed statement incorrectly reports the sample mean as 1.204. The figure and caption clearly show the orange dashed line labeled \"Sample Mean: 1.104\", so the true sample mean is 1.104, not 1.204.", "claim": "Across the 21 EC configurations for the 507.cactuBSSN_r workload, the sample mean SPEC score ratio (x86/ARM) is 1.104 with a 95% confidence interval of 1.053–1.156 and a 99% confidence interval of 1.034–1.175, while the SPEC-R ratio is 0.893, which falls below the 99% interval.", "label": true }, { "paperid": "2411.03122v2", "paper_path": "./SciVer/papers/2411.03122v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4", "9" ], "image_path": "./SciVer/images/2411.03122v2-Table1-1.png", "request_id": 711, "origin_statement": "Reactor EνES experiments lowered the μ_νe limit from 2.4×10^-10 μB in 1992 (Krasnoyarsk) to 2.9×10^-11 μB in 2012 (GEMMA), an 8.3-fold improvement over two decades.", "perturbed_statement": "Reactor EνES experiments lowered the μ_νe limit from 2.4×10^-10 μB in 1992 (Krasnoyarsk) to 1.8×10^-11 μB in 2012 (GEMMA), a thirteen-fold tightening over two decades.", "perturbed_explanation": "The GEMMA experiment’s actual 2012 μ_νe limit is 2.9×10^-11 μB, not 1.8×10^-11 μB as stated, so the claimed final limit and improvement factor are incorrect.", "claim": "Reactor EνES experiments lowered the μ_νe limit from 2.4×10^-10 μB in 1992 (Krasnoyarsk) to 2.9×10^-11 μB in 2012 (GEMMA), an 8.3-fold improvement over two decades.", "label": true }, { "paperid": "2410.17484v1", "paper_path": "./SciVer/papers/2410.17484v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3" ], "image_path": "./SciVer/images/2410.17484v1_figure_4.png", "request_id": 744, "origin_statement": "Between aggregation rates of 10⁻³ and 10⁻¹, the HEAD client’s accuracy increases by approximately 5.4 percentage points, while the ABD client’s accuracy decreases by about 5.1 percentage points.", "perturbed_statement": "Between aggregation rates of 10⁻³ and 10⁻¹, the HEAD client’s accuracy increases by approximately 4.5 percentage points, while the ABD client’s accuracy decreases by about 3.1 percentage points.", "perturbed_explanation": "The actual change for the HEAD client from μ=10⁻³ (≈59.1%) to μ=10⁻¹ (≈64.5%) is a 5.4-point increase, not 4.5. 
The ABD client falls from ≈78.8% to ≈73.7%, a 5.1-point drop, not 3.1.", "claim": "Between aggregation rates of 10⁻³ and 10⁻¹, the HEAD client’s accuracy increases by approximately 5.4 percentage points, while the ABD client’s accuracy decreases by about 5.1 percentage points.", "label": true }, { "paperid": "2409.18011v1", "paper_path": "./SciVer/papers/2409.18011v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.18011v1_figure_5.png", "request_id": 750, "origin_statement": "Between late September and mid-November 1992, the 10 Tg eruption induces a statistically significant entropy-based mean cooling of about −0.9 K, roughly 0.3 K more than the −0.6 K cooling observed in the 5 Tg case.", "perturbed_statement": "Between late September and mid-November 1992, the 10 Tg eruption induces a statistically significant entropy-based mean cooling of about −0.9 K, roughly 0.3 K more than the −1.0 K cooling observed in the 5 Tg case.", "perturbed_explanation": "Figure 5 shows that the 5 Tg eruption panel reaches only about −0.6 K of cooling during this period, not −1.0 K as stated in the perturbed claim.", "claim": "Between late September and mid-November 1992, the 10 Tg eruption induces a statistically significant entropy-based mean cooling of about −0.9 K, roughly 0.3 K more than the −0.6 K cooling observed in the 5 Tg case.", "label": true }, { "paperid": "2410.07905v1", "paper_path": "./SciVer/papers/2410.07905v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "2.2" ], "image_path": "./SciVer/images/2410.07905v1-Table2-1.png", "request_id": 785, "origin_statement": "B3 1307+433 exhibits a [O II] emission-line luminosity of 2.0×10^42 erg/s, which is twenty times higher than the 1.0×10^41 erg/s measured for TXS 0506+056.", "perturbed_statement": "B3 1307+433 exhibits a [O II] emission-line luminosity of 2.0×10^42 erg/s, which is ten times higher than the 1.0×10^41 erg/s measured for TXS 0506+056.", "perturbed_explanation": "The perturbation incorrectly states that the [O II] luminosity of B3 1307+433 is ten times that of TXS 0506+056. According to the table, B3 1307+433 has 20.0×10^41 erg/s for [O II], while TXS 0506+056 has 1.0×10^41 erg/s, making it twenty times higher, not ten.", "claim": "B3 1307+433 exhibits a [O II] emission-line luminosity of 2.0×10^42 erg/s, which is twenty times higher than the 1.0×10^41 erg/s measured for TXS 0506+056.", "label": true }, { "paperid": "2409.15466v1", "paper_path": "./SciVer/papers/2409.15466v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.15466v1-Table1-1.png", "request_id": 793, "origin_statement": "On ShapeNet with bandwidth 2, Matérn 3/2 achieves an F-score of 94.9%, the highest among Matérn kernels, exceeding Matérn 1/2's 93.7% and Matérn 5/2's 92.9%.", "perturbed_statement": "On ShapeNet with bandwidth 2, Matérn 5/2 achieves an F-score of 94.9%, the highest among Matérn kernels, exceeding Matérn 1/2's 93.7% and Matérn 3/2's 92.9%.", "perturbed_explanation": "The table shows that at bandwidth 2, Matérn 3/2 has an F-score of 94.9% and Matérn 5/2 only 92.9%. 
Assigning 94.9% to Matérn 5/2 contradicts the reported values.", "claim": "On ShapeNet with bandwidth 2, Matérn 3/2 achieves an F-score of 94.9%, the highest among Matérn kernels, exceeding Matérn 1/2's 93.7% and Matérn 5/2's 92.9%.", "label": true }, { "paperid": "2411.04487v1", "paper_path": "./SciVer/papers/2411.04487v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "2.1" ], "image_path": "./SciVer/images/2411.04487v1_figure_2.png", "request_id": 796, "origin_statement": "The lowest median lasing threshold across all fields is 102 µJ cm-2 pulse-1 in sample D field 7, 81 µJ lower than the lowest threshold among the 5 QW samples (183 µJ cm-2 pulse-1 in sample C field 9).", "perturbed_statement": "The lowest median lasing threshold across all fields is 95 µJ cm-2 pulse-1 in sample F field 7, which is 88 µJ lower than the lowest threshold among the 5 QW samples (183 µJ cm-2 pulse-1 in sample C field 9).", "perturbed_explanation": "This statement is incorrect because Figure 2(b) shows that sample F field 7 has a median lasing threshold of 114 µJ cm-2 pulse-1, not 95, and the true lowest threshold is 102 µJ cm-2 pulse-1 in sample D field 7. These values contradict the chart.", "claim": "The lowest median lasing threshold across all fields is 102 µJ cm-2 pulse-1 in sample D field 7, 81 µJ lower than the lowest threshold among the 5 QW samples (183 µJ cm-2 pulse-1 in sample C field 9).", "label": true }, { "paperid": "2411.05174v1", "paper_path": "./SciVer/papers/2411.05174v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "7" ], "image_path": "./SciVer/images/2411.05174v1_figure_3.png", "request_id": 798, "origin_statement": "Under ITL, the two most likely next states (O2=1, BP=0, GCS=0, Crea=2 and O2=1, BP=1, GCS=0, Crea=2) each have a probability of approximately 0.055, slightly above the probability (~0.053) of the third state (O2=1, BP=1, GCS=1, Crea=2).", "perturbed_statement": "Under ITL, the next state O2=1, BP=1, GCS=1, Crea=2 has the highest probability of around 0.06, which is substantially higher than the other two states at around 0.05.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 3 shows that the state O2=1, BP=1, GCS=1, Crea=2 actually has the lowest probability (~0.053), while the two other states each have higher probabilities (~0.055).", "claim": "Under ITL, the two most likely next states (O2=1, BP=0, GCS=0, Crea=2 and O2=1, BP=1, GCS=0, Crea=2) each have a probability of approximately 0.055, slightly above the probability (~0.053) of the third state (O2=1, BP=1, GCS=1, Crea=2).", "label": true }, { "paperid": "2410.04698v1", "paper_path": "./SciVer/papers/2410.04698v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.1" ], "image_path": "./SciVer/images/2410.04698v1-Table1-1.png", "request_id": 815, "origin_statement": "Of the twelve existing benchmarks shown, eight support multi-step reasoning whereas only two support mathematical reasoning.", "perturbed_statement": "Of the twelve existing benchmarks shown, nine support multi-step reasoning whereas only three support mathematical reasoning.", "perturbed_explanation": "The table indicates eight benchmarks have a check under Multi-Step Reasoning (not nine) and only two benchmarks (L-Eval and InfiniteBench) have a check under Mathematical Reasoning (not three), so both numbers in the perturbed claim conflict with the table.", "claim": "Of the twelve existing benchmarks shown, eight support multi-step reasoning whereas 
only two support mathematical reasoning.", "label": true }, { "paperid": "2410.03030v1", "paper_path": "./SciVer/papers/2410.03030v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.03030v1_figure_7.png", "request_id": 824, "origin_statement": "On ImageNet with Dense ResNet50, applying a low-frequency attenuation radius of 32 reduces test accuracy to about 15%, while a high-frequency attenuation radius of 32 only lowers accuracy to around 55%.", "perturbed_statement": "On ImageNet with Dense ResNet50, applying a low-frequency attenuation radius of 32 reduces test accuracy to about 5%, while a high-frequency attenuation radius of 32 only lowers accuracy to around 75%.", "perturbed_explanation": "This claim is wrong because the actual low-frequency attenuation accuracy at radius 32 is about 15%, not 5%, and the high-frequency attenuation accuracy at radius 32 is about 55%, not 75%, as shown in the left RA curves of Figure 7.", "claim": "On ImageNet with Dense ResNet50, applying a low-frequency attenuation radius of 32 reduces test accuracy to about 15%, while a high-frequency attenuation radius of 32 only lowers accuracy to around 55%.", "label": true }, { "paperid": "2410.09008v1", "paper_path": "./SciVer/papers/2410.09008v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.09008v1-Table3-1.png", "request_id": 831, "origin_statement": "The CoT and HSFT methods produce final answers of 513 and 511 respectively, which differ by 2 subsets.", "perturbed_statement": "The CoT and HSFT methods produce final answers of 513 and 510 respectively, which differ by 3 subsets.", "perturbed_explanation": "This is incorrect because the HSFT method actually yields 511 subsets (512 − 1), not 510, so the difference between the two counts is 2, not 3.", "claim": "The CoT and HSFT methods produce final answers of 513 and 511 respectively, which differ by 2 subsets.", "label": true }, { "paperid": "2411.02996v1", "paper_path": "./SciVer/papers/2411.02996v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.2.2", "3.2.3", "3.2.4" ], "image_path": "./SciVer/images/2411.02996v1_figure_5.png", "request_id": 834, "origin_statement": "The vdW gap electrostatic potential barrier height (Φ_tb) rises from about 3.86 eV for n=1 to 4.21 eV for n=2, remains near 4.20 eV for n=3, and reaches 4.40 eV for n=4 in the MoSi2N4/MoSi2N4(MoN)_n heterostructures.", "perturbed_statement": "The vdW gap electrostatic potential barrier height (Φ_tb) rises from about 3.86 eV for n=1 to just 3.90 eV for n=2, remains near 4.20 eV for n=3, and reaches 4.40 eV for n=4 in the MoSi2N4/MoSi2N4(MoN)_n heterostructures.", "perturbed_explanation": "The perturbed statement incorrectly reports the barrier height for n=2 as 3.90 eV, whereas the lower panel of Fig. 
5(b) clearly shows a value of 4.21 eV for n=2.", "claim": "The vdW gap electrostatic potential barrier height (Φ_tb) rises from about 3.86 eV for n=1 to 4.21 eV for n=2, remains near 4.20 eV for n=3, and reaches 4.40 eV for n=4 in the MoSi2N4/MoSi2N4(MoN)_n heterostructures.", "label": true }, { "paperid": "2409.13726v1", "paper_path": "./SciVer/papers/2409.13726v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "5.3", "5.3.4" ], "image_path": "./SciVer/images/2409.13726v1_figure_6.png", "request_id": 838, "origin_statement": "In German sessions, novices' engagement decreases by 0.17 from speaking turn (0.66) to silence (0.49), which is the largest drop among all language sessions.", "perturbed_statement": "In German sessions, novices' engagement decreases by 0.13 from speaking turn (0.66) to silence (0.49), which is the largest drop among all language sessions.", "perturbed_explanation": "The actual decrease from 0.66 during speaking turn to 0.49 during silence in German sessions is 0.17, not 0.13, so the stated magnitude is incorrect; furthermore, no other language shows a larger drop, so labeling it the largest is unsupported.", "claim": "In German sessions, novices' engagement decreases by 0.17 from speaking turn (0.66) to silence (0.49), which is the largest drop among all language sessions.", "label": true }, { "paperid": "2409.09205v1", "paper_path": "./SciVer/papers/2409.09205v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.09205v1-Table1-1.png", "request_id": 839, "origin_statement": "In the six treatment sequences, each modality type appears exactly twice in the first position, twice in the second, and twice in the third position.", "perturbed_statement": "In the six treatment sequences, text appears in the first position in three sequences (I, II, and III), in the second position in two sequences (IV and V), and only once in the third position (VI).", "perturbed_explanation": "This is incorrect because the table shows text appears first only in sequences I and II (not III), and appears third in both sequences V and VI (two occurrences), not just once in sequence VI.", "claim": "In the six treatment sequences, each modality type appears exactly twice in the first position, twice in the second, and twice in the third position.", "label": true }, { "paperid": "2410.10469v1", "paper_path": "./SciVer/papers/2410.10469v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.10469v1-Table1-1.png", "request_id": 853, "origin_statement": "Moirai-MoE_B activates just 9.2% of its 935M parameters per token, whereas the dense Moirai_B activates 100% of its 91M parameters.", "perturbed_statement": "Moirai-MoE_B activates just 15% of its 935M parameters per token, whereas the dense Moirai_B activates 95% of its 91M parameters.", "perturbed_explanation": "According to Table 1, Moirai-MoE_B has 86M activated parameters out of 935M total, which is about 9.2%, not 15%. 
The table also shows Moirai_B activates all 91M of its 91M total parameters (100%), not 95%.", "claim": "Moirai-MoE_B activates just 9.2% of its 935M parameters per token, whereas the dense Moirai_B activates 100% of its 91M parameters.", "label": true }, { "paperid": "2409.06649v1", "paper_path": "./SciVer/papers/2409.06649v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4" ], "image_path": "./SciVer/images/2409.06649v1-Table4-1.png", "request_id": 875, "origin_statement": "The KAN model’s state variable error ψ(τ) equals 2.99×10⁻⁴, which is over 16 times lower than fractional KAN’s 4.85×10⁻³.", "perturbed_statement": "The KAN model’s state variable error ψ(τ) equals 2.99×10⁻³, which is roughly 1.6 times smaller than fractional KAN’s 4.85×10⁻³.", "perturbed_explanation": "According to Table 4, the KAN ψ(τ) error is 2.99×10⁻⁴, not 2.99×10⁻³. The perturbed statement misstates this value, so the comparison to the 4.85×10⁻³ error for fractional KAN is invalid.", "claim": "The KAN model’s state variable error ψ(τ) equals 2.99×10⁻⁴, which is over 16 times lower than fractional KAN’s 4.85×10⁻³.", "label": true }, { "paperid": "2409.02850v2", "paper_path": "./SciVer/papers/2409.02850v2.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.1.1" ], "image_path": "./SciVer/images/2409.02850v2_figure_4.png", "request_id": 898, "origin_statement": "In the 1-shot regime at Q=1, the 95% confidence interval for MSCOCO is approximately 0.7, the smallest among datasets, while for DTD it is about 9, the largest.", "perturbed_statement": "In the 1-shot regime at Q=1, the 95% confidence interval for MSCOCO is approximately 9, the largest among datasets, while for DTD it is about 0.7, the smallest.", "perturbed_explanation": "The perturbation swaps the actual CI values: the figure shows MSCOCO’s CI around 0.7 (not 9) and DTD’s CI around 9 (not 0.7), so the statement contradicts the plotted data at Q=1 for 1 shot.", "claim": "In the 1-shot regime at Q=1, the 95% confidence interval for MSCOCO is approximately 0.7, the smallest among datasets, while for DTD it is about 9, the largest.", "label": true }, { "paperid": "2409.01378v1", "paper_path": "./SciVer/papers/2409.01378v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "6.1" ], "image_path": "./SciVer/images/2409.01378v1_figure_6.png", "request_id": 918, "origin_statement": "Approximately 48% of participants in the only-incentive group reported zero phishing emails, compared to about 46% in the all-incentive group and 39% in the control group.", "perturbed_statement": "Approximately 52% of participants in the only-incentive group reported zero phishing emails, compared to about 46% in the all-incentive group and 45% in the control group.", "perturbed_explanation": "The perturbed statement is incorrect because the figure shows the only-incentive group at about 48%, not 52%, and the control group at about 39%, not 45%.", "claim": "Approximately 48% of participants in the only-incentive group reported zero phishing emails, compared to about 46% in the all-incentive group and 39% in the control group.", "label": true }, { "paperid": "2411.10906v1", "paper_path": "./SciVer/papers/2411.10906v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.4" ], "image_path": "./SciVer/images/2411.10906v1-Table2-1.png", "request_id": 941, "origin_statement": "On the Phoenix dataset at 100 episodes, LSVI-UCB-Adaptive consumes 2.561 GiB of space, which is approximately 5.71 
times the 0.449 GiB used by LSVI-UCB-Fixed.", "perturbed_statement": "On the Phoenix dataset at 100 episodes, LSVI-UCB-Adaptive consumes 1.561 GiB of space, which is approximately 3.47 times the 0.449 GiB used by LSVI-UCB-Fixed.", "perturbed_explanation": "The table shows that LSVI-UCB-Adaptive uses 2.561 GiB at 100 episodes on Phoenix, not 1.561 GiB, so the claimed space usage and ratio are incorrect.", "claim": "On the Phoenix dataset at 100 episodes, LSVI-UCB-Adaptive consumes 2.561 GiB of space, which is approximately 5.71 times the 0.449 GiB used by LSVI-UCB-Fixed.", "label": true }, { "paperid": "2411.14713v1", "paper_path": "./SciVer/papers/2411.14713v1.json", "claim_type": "direct", "type": "chart", "item": "1(b)", "section": [ "1" ], "image_path": "./SciVer/images/2411.14713v1_figure_1(b).png", "request_id": 954, "origin_statement": "On Amazon-books, increasing history length from 20 to 100 raises DIEN’s AUC from 0.833 to 0.837 (+0.004) but lowers DIEN+Llama2’s AUC from 0.842 to 0.840 (−0.002), narrowing their performance gap from 0.009 to 0.003.", "perturbed_statement": "On Amazon-books, increasing history length from 20 to 100 raises DIEN’s AUC from 0.833 to 0.842 (+0.009) but lowers DIEN+Llama2’s AUC from 0.842 to 0.839 (−0.003), narrowing their performance gap from 0.009 to 0.002.", "perturbed_explanation": "The perturbation is incorrect because the figure shows DIEN’s AUC at history length 100 is 0.837, not 0.842, and DIEN+Llama2’s AUC at history length 100 is 0.840, not 0.839. These discrepancies falsify the stated changes.", "claim": "On Amazon-books, increasing history length from 20 to 100 raises DIEN’s AUC from 0.833 to 0.837 (+0.004) but lowers DIEN+Llama2’s AUC from 0.842 to 0.840 (−0.002), narrowing their performance gap from 0.009 to 0.003.", "label": true }, { "paperid": "2409.11463v1", "paper_path": "./SciVer/papers/2409.11463v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "2.1" ], "image_path": "./SciVer/images/2409.11463v1_figure_2.png", "request_id": 956, "origin_statement": "In the highest [CII] luminosity bin (log10(L_CII/L_⊙)≈9.8), about 60% of ALMA targets have at least one companion, whereas in the lowest bin (≈8.0) none do.", "perturbed_statement": "In the highest [CII] luminosity bin (log10(L_CII/L_⊙)≈9.8), about 40% of ALMA targets have at least one companion, whereas in the lowest bin (≈8.0) none do.", "perturbed_explanation": "The figure’s black point at log10(L_CII)≈9.8 shows a fraction of roughly 0.6 (60%), not 0.4 (40%), so the perturbed percentage contradicts the plotted data.", "claim": "In the highest [CII] luminosity bin (log10(L_CII/L_⊙)≈9.8), about 60% of ALMA targets have at least one companion, whereas in the lowest bin (≈8.0) none do.", "label": true }, { "paperid": "2410.06898v1", "paper_path": "./SciVer/papers/2410.06898v1.json", "claim_type": "direct", "type": "chart", "item": "2(a)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.06898v1_figure_2(a).png", "request_id": 966, "origin_statement": "At around 10,000 global training steps, the WECHSEL multi-epoch pretraining model’s training loss spikes to about 10.0, more than double the roughly 4.0 training loss maintained by all single-epoch GaMS variants.", "perturbed_statement": "At around 10,000 global training steps, the WECHSEL multi-epoch pretraining model’s training loss spikes to about 5.0, roughly in line with the 4.0 training loss of all single-epoch GaMS variants.", "perturbed_explanation": "This is incorrect because in the figure the 
WECHSEL multi-epoch line actually peaks at approximately 10.0 loss at step 10,000, not at 5.0, making it more than twice the losses of single-epoch variants.", "claim": "At around 10,000 global training steps, the WECHSEL multi-epoch pretraining model’s training loss spikes to about 10.0, more than double the roughly 4.0 training loss maintained by all single-epoch GaMS variants.", "label": true }, { "paperid": "2411.11524v1", "paper_path": "./SciVer/papers/2411.11524v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "3" ], "image_path": "./SciVer/images/2411.11524v1-Table4-1.png", "request_id": 977, "origin_statement": "In region B, at 100 TeV, Cygnus Cocoon contributes 35.8% of the total flux, and the three highest sources together account for 52.9%; at 1 PeV, Cygnus Cocoon’s share rises to 51.8%, representing over half of the source flux.", "perturbed_statement": "In region B, at 100 TeV, Cygnus Cocoon contributes 35.8% of the total flux, and the three highest sources together account for 56.4%; at 1 PeV, Cygnus Cocoon’s share rises to 48.5%, representing under half of the source flux.", "perturbed_explanation": "The table shows that at 100 TeV the top three sources sum to 52.9%, not 56.4%, and at 1 PeV Cygnus Cocoon’s relative contribution is 51.8%, not 48.5%.", "claim": "In region B, at 100 TeV, Cygnus Cocoon contributes 35.8% of the total flux, and the three highest sources together account for 52.9%; at 1 PeV, Cygnus Cocoon’s share rises to 51.8%, representing over half of the source flux.", "label": true }, { "paperid": "2409.05114v2", "paper_path": "./SciVer/papers/2409.05114v2.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.05114v2_figure_5.png", "request_id": 992, "origin_statement": "In configuration II (top), the green probe at x=0.5 shows density decreasing almost linearly from about 2.7 at the bottom (y=0) to about 0.3 at the top (y=1), a drop of approximately 2.4 units.", "perturbed_statement": "In configuration II (top), the green probe at x=0.5 shows density decreasing almost linearly from about 1.8 at the bottom (y=0) to about 0.5 at the top (y=1), a drop of approximately 1.3 units.", "perturbed_explanation": "The perturbed statement is incorrect because the plot shows the green density at x=0.5 actually starts near 2.7 at y=0 and falls to about 0.3 at y=1. The values 1.8 (initial) and 0.5 (final) do not match the figure’s marks of ~2.7 and ~0.3.", "claim": "In configuration II (top), the green probe at x=0.5 shows density decreasing almost linearly from about 2.7 at the bottom (y=0) to about 0.3 at the top (y=1), a drop of approximately 2.4 units.", "label": true }, { "paperid": "2410.11666v3", "paper_path": "./SciVer/papers/2410.11666v3.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.11666v3-Table2-1.png", "request_id": 1011, "origin_statement": "On the RGB-D-D dataset, DORNet’s RMSE of 3.68 is 0.40 lower than SFG’s 4.08; on TOFDSR, DORNet’s 4.47 is 0.61 lower than SUFT’s 5.08, making it the most accurate across both benchmarks.", "perturbed_statement": "On the RGB-D-D dataset, DORNet’s RMSE of 3.68 is 1.00 lower than SFG’s 4.08; on TOFDSR, DORNet’s 4.47 is 0.50 lower than SUFT’s 5.08, making it the most accurate across both benchmarks.", "perturbed_explanation": "The perturbed statement misstates the error reductions. 
According to the table, SFG’s RMSE is 4.08 and DORNet’s is 3.68, a 0.40 difference (not 1.00). Similarly, SUFT’s RMSE is 5.08 and DORNet’s is 4.47, a 0.61 reduction (not 0.50).", "claim": "On the RGB-D-D dataset, DORNet’s RMSE of 3.68 is 0.40 lower than SFG’s 4.08; on TOFDSR, DORNet’s 4.47 is 0.61 lower than SUFT’s 5.08, making it the most accurate across both benchmarks.", "label": true }, { "paperid": "2411.15819v1", "paper_path": "./SciVer/papers/2411.15819v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "4" ], "image_path": "./SciVer/images/2411.15819v1_figure_2(b).png", "request_id": 1014, "origin_statement": "Within the top-left submatrix, the highest p-value is 0.89 (BG vs. NTAP), which is 66 percentage points greater than the lowest p-value of 0.23 (PGR vs. SJM).", "perturbed_statement": "Within the top-left submatrix, the highest p-value is 0.89 (BG vs. NTAP), which is 76 percentage points greater than the lowest p-value of 0.33 (PGR vs. QCOM).", "perturbed_explanation": "The lowest p-value in the top-left submatrix is actually 0.23 for the PGR–SJM pair, not 0.33 for PGR–QCOM, and 0.89 minus 0.23 equals 0.66 (66 percentage points), not 0.76.", "claim": "Within the top-left submatrix, the highest p-value is 0.89 (BG vs. NTAP), which is 66 percentage points greater than the lowest p-value of 0.23 (PGR vs. SJM).", "label": true }, { "paperid": "2409.18783v2", "paper_path": "./SciVer/papers/2409.18783v2.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.18783v2_figure_5.png", "request_id": 1022, "origin_statement": "At noise level K = 0.02, dual-domain denoising outperforms raw denoising by approximately 1.4 dB for SwinIR, 0.8 dB for MIRNet-v2, and 2.2 dB for Restormer, demonstrating its growing advantage at high noise.", "perturbed_statement": "At noise level K = 0.02, dual-domain denoising outperforms raw denoising by approximately 1.4 dB for SwinIR, 1.8 dB for MIRNet-v2, and 2.2 dB for Restormer.", "perturbed_explanation": "The statement is incorrect because for MIRNet-v2 at K = 0.02, the PSNR of dual-domain denoising is about 33.3 dB and raw denoising is about 32.5 dB, yielding a difference of roughly 0.8 dB, not 1.8 dB.", "claim": "At noise level K = 0.02, dual-domain denoising outperforms raw denoising by approximately 1.4 dB for SwinIR, 0.8 dB for MIRNet-v2, and 2.2 dB for Restormer, demonstrating its growing advantage at high noise.", "label": true }, { "paperid": "2409.05840v3", "paper_path": "./SciVer/papers/2409.05840v3.json", "claim_type": "direct", "type": "chart", "item": "10(a)", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.05840v3_figure_10(a).png", "request_id": 1052, "origin_statement": "In the original data, ~470K instructions have one skill, while the evolved data has ~360K. Conversely, at skill length 2, the evolved data (~520K) far exceeds the original (~120K), indicating a pronounced shift toward dual-skill instructions in the evolution.", "perturbed_statement": "In the original data, ~360K instructions have one skill, while the evolved data has ~470K. Conversely, at skill length 2, the evolved data (~360K) only slightly exceeds the original (~100K), suggesting a modest shift toward two-skill sequences.", "perturbed_explanation": "This statement is incorrect because the original data actually has about 470K one-skill instructions (not ~360K), and the evolved data has about 360K one-skill instructions (not ~470K). 
Moreover, at skill length 2 the evolved data has roughly 520K instances—not ~360K—so the shift is far more pronounced.", "claim": "In the original data, ~470K instructions have one skill, while the evolved data has ~360K. Conversely, at skill length 2, the evolved data (~520K) far exceeds the original (~120K), indicating a pronounced shift toward dual-skill instructions in the evolution.", "label": true }, { "paperid": "2409.03165v1", "paper_path": "./SciVer/papers/2409.03165v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.03165v1_figure_7.png", "request_id": 1056, "origin_statement": "The Σ_c K̄ phase shift in Fig.7(a) rises steeply through 90°, peaking near 175° at about 40 MeV, whereas the Ξ_c′ N phase shift in Fig.7(d) remains below 5° for all energies up to 200 MeV.", "perturbed_statement": "The Σ_c K̄ phase shift in Fig.7(a) peaks near 90° at about 80 MeV, and the Ξ_c′ N phase shift in Fig.7(d) exceeds 20° around 150 MeV.", "perturbed_explanation": "This is incorrect because in Fig.7(a) the Σ_c K̄ phase shift actually crosses 90° near 30–40 MeV and reaches about 175°, not 90° at 80 MeV. Also, in Fig.7(d) the Ξ_c′ N phase shift stays under ~5° up to 200 MeV, never exceeding 20°.", "claim": "The Σ_c K̄ phase shift in Fig.7(a) rises steeply through 90°, peaking near 175° at about 40 MeV, whereas the Ξ_c′ N phase shift in Fig.7(d) remains below 5° for all energies up to 200 MeV.", "label": true }, { "paperid": "2411.04588v1", "paper_path": "./SciVer/papers/2411.04588v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "5.3.1" ], "image_path": "./SciVer/images/2411.04588v1-Table6-1.png", "request_id": 1059, "origin_statement": "Punctuation confusion (PC) appears in our automatic data + A7’ta at 31,822 instances (7%), whereas in the Tibyan corpus it occurs 32,091 times (6%).", "perturbed_statement": "Punctuation confusion (PC) appears in our automatic data + A7’ta at 31,822 instances (8%), whereas in the Tibyan corpus it occurs 32,091 times (6%).", "perturbed_explanation": "The perturbed statement incorrectly states 8% for PC in the automatic data + A7’ta, but Table 6 shows it as 7% for that corpus.", "claim": "Punctuation confusion (PC) appears in our automatic data + A7’ta at 31,822 instances (7%), whereas in the Tibyan corpus it occurs 32,091 times (6%).", "label": true }, { "paperid": "2411.06805v1", "paper_path": "./SciVer/papers/2411.06805v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.06805v1-Table2-1.png", "request_id": 1063, "origin_statement": "Removing question decomposition (F_QD) lowers AssistRAG’s F1 on the 2Wiki dataset from 45.6 to 37.8, a decrease of 7.8 points.", "perturbed_statement": "Removing question decomposition (F_QD) lowers AssistRAG’s F1 on the 2Wiki dataset from 45.6 to 37.8, a decrease of 8.8 points.", "perturbed_explanation": "The perturbed statement miscalculates the drop: 45.6 minus 37.8 is 7.8 points, not 8.8 points as claimed.", "claim": "Removing question decomposition (F_QD) lowers AssistRAG’s F1 on the 2Wiki dataset from 45.6 to 37.8, a decrease of 7.8 points.", "label": true }, { "paperid": "2411.01431v1", "paper_path": "./SciVer/papers/2411.01431v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "7.1" ], "image_path": "./SciVer/images/2411.01431v1-Table3-1.png", "request_id": 1083, "origin_statement": "Three frameworks were released in 2015, which is the 
highest number of new releases in any single year compared to 2014 (one), 2016 (two), and 2017 (two).", "perturbed_statement": "Three frameworks were released in 2017, which is the highest number of new releases in any single year compared to 2014 (one), 2015 (three), and 2016 (two).", "perturbed_explanation": "This statement is wrong because, according to the table, only two frameworks (CoreML and BigDL) were released in 2017, not three. Therefore, 2017 did not see three new releases and was not the year with the highest number of framework introductions.", "claim": "Three frameworks were released in 2015, which is the highest number of new releases in any single year compared to 2014 (one), 2016 (two), and 2017 (two).", "label": true }, { "paperid": "2411.18082v1", "paper_path": "./SciVer/papers/2411.18082v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "5.2.2" ], "image_path": "./SciVer/images/2411.18082v1-Table5-1.png", "request_id": 1091, "origin_statement": "The saliency technique raises UM AP from 33.9% to 59.0%, a 25.1 percentage point increase.", "perturbed_statement": "The saliency technique raises UM AP from 33.9% to 62.1%, a 28.2 percentage point increase.", "perturbed_explanation": "According to Table 5, the UM AP with the saliency technique is actually 59.0%, not 62.1%, so the claimed increase to 62.1% (and the 28.2 point gain) is incorrect.", "claim": "The saliency technique raises UM AP from 33.9% to 59.0%, a 25.1 percentage point increase.", "label": true }, { "paperid": "2409.00851v1", "paper_path": "./SciVer/papers/2409.00851v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.3.4" ], "image_path": "./SciVer/images/2409.00851v1_figure_5.png", "request_id": 1100, "origin_statement": "The temporal cue \"Before\" appears in about 2.9% of original AudioCaps validation descriptions and rises to roughly 6.3% in the uniform variant, more than doubling its relative frequency.", "perturbed_statement": "The temporal cue \"Before\" appears in about 2.9% of original AudioCaps validation descriptions and rises to roughly 5.0% in the uniform variant, more than doubling its relative frequency.", "perturbed_explanation": "This is incorrect because the bar chart shows the uniform variant’s \"Before\" percentage at approximately 6.3%, not 5.0%.", "claim": "The temporal cue \"Before\" appears in about 2.9% of original AudioCaps validation descriptions and rises to roughly 6.3% in the uniform variant, more than doubling its relative frequency.", "label": true }, { "paperid": "2411.01062v1", "paper_path": "./SciVer/papers/2411.01062v1.json", "claim_type": "direct", "type": "chart", "item": "1(a)", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.01062v1_figure_1(a).png", "request_id": 1114, "origin_statement": "The He II λ5411 emission line shows red half-max markers at about 5400 Å and 5433 Å, indicating a full width at half maximum of approximately 33 Å.", "perturbed_statement": "The He II λ5411 emission line shows red half-max markers at about 5395 Å and 5433 Å, indicating a full width at half maximum of approximately 38 Å.", "perturbed_explanation": "This is incorrect because the red half-max markers actually occur at around 5400 Å (not 5395 Å), so the FWHM is 5433 Å – 5400 Å = 33 Å, not 38 Å.", "claim": "The He II λ5411 emission line shows red half-max markers at about 5400 Å and 5433 Å, indicating a full width at half maximum of approximately 33 Å.", "label": true }, { "paperid": "2410.00049v2", "paper_path": 
"./SciVer/papers/2410.00049v2.json", "claim_type": "direct", "type": "chart", "item": "5(b)", "section": [ "5.5" ], "image_path": "./SciVer/images/2410.00049v2_figure_5(b).png", "request_id": 1118, "origin_statement": "At k=4, the yellow US-States line reaches its peak RMSE variation (~10), outperforming the green US-Region (~7) by about 3 and the red Australia-COVID (~2) by around 8.", "perturbed_statement": "At k=4, the yellow US-States line reaches its peak RMSE variation (~7), underperforming compared to the green US-Region (~10) by about 3 and the red Australia-COVID (~2) by around 5.", "perturbed_explanation": "This is incorrect because in the figure at k=4 the yellow US-States variation is actually ~10 (not ~7) and the green US-Region is ~7 (not ~10), so the US-States does not underperform the US-Region nor have the stated values.", "claim": "At k=4, the yellow US-States line reaches its peak RMSE variation (~10), outperforming the green US-Region (~7) by about 3 and the red Australia-COVID (~2) by around 8.", "label": true }, { "paperid": "2410.09841v1", "paper_path": "./SciVer/papers/2410.09841v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "7.4" ], "image_path": "./SciVer/images/2410.09841v1-Table3-1.png", "request_id": 1127, "origin_statement": "LieSD achieves an orthogonality error of 1.72×10^−6, which is approximately 340,000 times smaller than the 5.82×10^−1 error of LieGAN with 7 channels.", "perturbed_statement": "LieSD achieves an orthogonality error of 1.72×10^−6, which is approximately 34,000 times smaller than the 5.82×10^−1 error of LieGAN with 7 channels.", "perturbed_explanation": "The factor of 34,000 is incorrect. The table shows LieGAN’s orthogonality error as 5.82×10^−1 and LieSD’s as 1.72×10^−6; dividing 5.82×10^−1 by 1.72×10^−6 yields about 338,000, not 34,000.", "claim": "LieSD achieves an orthogonality error of 1.72×10^−6, which is approximately 340,000 times smaller than the 5.82×10^−1 error of LieGAN with 7 channels.", "label": true }, { "paperid": "2410.23371v1", "paper_path": "./SciVer/papers/2410.23371v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "6.2" ], "image_path": "./SciVer/images/2410.23371v1-Table3-1.png", "request_id": 1141, "origin_statement": "The pure LLM policy achieved a maximum positive preference shift of 50 points (35 to 85), while the UCB policy’s largest positive shift was 40 points (25 to 65).", "perturbed_statement": "The pure LLM policy achieved a maximum positive preference shift of 45 points, while the UCB policy’s largest positive shift was 35 points.", "perturbed_explanation": "The perturbation is incorrect because Table 3 shows the pure LLM’s positive shift is from 35 to 85 (50 points), not 45, and the UCB policy’s positive shift is from 25 to 65 (40 points), not 35.", "claim": "The pure LLM policy achieved a maximum positive preference shift of 50 points (35 to 85), while the UCB policy’s largest positive shift was 40 points (25 to 65).", "label": true }, { "paperid": "2409.06227v1", "paper_path": "./SciVer/papers/2409.06227v1.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "3" ], "image_path": "./SciVer/images/2409.06227v1_figure_3(a).png", "request_id": 1158, "origin_statement": "In the 2021-11-02 spectrum, the peak-to-peak antenna temperature spans about 0.14 K, which is roughly 17% larger than the approximately 0.12 K span observed on 2021-10-18.", "perturbed_statement": "In the 2021-11-02 spectrum, the peak-to-peak antenna temperature 
spans about 0.14 K, which is roughly 25% larger than the approximately 0.12 K span observed on 2021-10-18.", "perturbed_explanation": "The perturbed statement inflates the relative difference: the actual increase from 0.12 K to 0.14 K is (0.14–0.12)/0.12 ≈ 17%, not 25%.", "claim": "In the 2021-11-02 spectrum, the peak-to-peak antenna temperature spans about 0.14 K, which is roughly 17% larger than the approximately 0.12 K span observed on 2021-10-18.", "label": true }, { "paperid": "2410.12102v1", "paper_path": "./SciVer/papers/2410.12102v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "3", "4.2", "5" ], "image_path": "./SciVer/images/2410.12102v1-Table4-1.png", "request_id": 1159, "origin_statement": "The upper limit for C2H column density (3.478×10^11 cm⁻²) in the stacked spectrum is about 38 times higher than the limit for HCO⁺ (9.257×10^9 cm⁻²).", "perturbed_statement": "The upper limit for C2H column density (3.478×10^11 cm⁻²) in the stacked spectrum is about 3 times higher than the limit for HCO⁺ (9.257×10^9 cm⁻²).", "perturbed_explanation": "The perturbed statement underestimates the ratio between the C2H and HCO⁺ limits. Dividing 3.478×10^11 cm⁻² by 9.257×10^9 cm⁻² yields approximately 37.6, not 3, so C2H’s limit is about 38 times higher, not 3 times higher.", "claim": "The upper limit for C2H column density (3.478×10^11 cm⁻²) in the stacked spectrum is about 38 times higher than the limit for HCO⁺ (9.257×10^9 cm⁻²).", "label": true }, { "paperid": "2411.10332v1", "paper_path": "./SciVer/papers/2411.10332v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4.2.3" ], "image_path": "./SciVer/images/2411.10332v1_figure_6.png", "request_id": 1176, "origin_statement": "Our method's event boundary predictions deviate from ground truth by at most 0.32 seconds in both examples, whereas TimeChat's predictions deviate by up to 13.95 seconds and VTimeLLM's by up to 17.86 seconds.", "perturbed_statement": "Our method's event boundary predictions deviate from ground truth by at most 0.5 seconds in both examples, whereas TimeChat's predictions deviate by up to 5 seconds and VTimeLLM's by up to 10 seconds.", "perturbed_explanation": "This is incorrect because TimeChat actually deviates by 13.95 seconds (starting at 10.00s vs ground truth 23.95s) and VTimeLLM deviates by 17.86 seconds (ending at 47.88s vs ground truth 30.02s), not 5 or 10 seconds.", "claim": "Our method's event boundary predictions deviate from ground truth by at most 0.32 seconds in both examples, whereas TimeChat's predictions deviate by up to 13.95 seconds and VTimeLLM's by up to 17.86 seconds.", "label": true }, { "paperid": "2410.03960v1", "paper_path": "./SciVer/papers/2410.03960v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "5.4" ], "image_path": "./SciVer/images/2410.03960v1-Table5-1.png", "request_id": 1181, "origin_statement": "Applying per-token KV quantization without AcrossKV reduces SwiftKV’s average score from 72.70 to 72.30, a drop of 0.40 points.", "perturbed_statement": "Applying per-token KV quantization without AcrossKV increases SwiftKV’s average score from 72.70 to 72.30, a gain of 0.40 points.", "perturbed_explanation": "This is incorrect because the table shows the average score with quantization is 72.30, which is lower than the baseline 72.70, indicating a decrease rather than an increase.", "claim": "Applying per-token KV quantization without AcrossKV reduces SwiftKV’s average score from 72.70 to 72.30, a drop of 0.40 
points.", "label": true }, { "paperid": "2409.00742v1", "paper_path": "./SciVer/papers/2409.00742v1.json", "claim_type": "direct", "type": "chart", "item": "1(c)", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.00742v1_figure_1(c).png", "request_id": 1188, "origin_statement": "Throughout the simulation for b=2, the asset price fluctuates roughly between 9.6 and 10.5, representing a maximum deviation of about ±0.5 from the fundamental price of 10.", "perturbed_statement": "Throughout the simulation for b=2, the asset price fluctuates roughly between 9.0 and 11.0, representing a maximum deviation of about ±1.0 from the fundamental price of 10.", "perturbed_explanation": "Figure 1 shows the asset price remains between approximately 9.6 and 10.5, not between 9.0 and 11.0. The actual maximum deviation from 10 is about ±0.5, so claiming ±1.0 (9.0 to 11.0) contradicts these observed bounds.", "claim": "Throughout the simulation for b=2, the asset price fluctuates roughly between 9.6 and 10.5, representing a maximum deviation of about ±0.5 from the fundamental price of 10.", "label": true }, { "paperid": "2411.17510v1", "paper_path": "./SciVer/papers/2411.17510v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.3.2", "5.3.3" ], "image_path": "./SciVer/images/2411.17510v1-Table2-1.png", "request_id": 1189, "origin_statement": "For (c,i)=(2,1) instances, the mean MIP-Gap increases from 6.88% for 1–75 facilities to 18.36% for 150–200 facilities, showing more than a twofold rise in solution suboptimality as problem size grows.", "perturbed_statement": "For (c,i)=(2,1) instances, the mean MIP-Gap increases from 6.88% for 1–75 facilities to 12.36% for 150–200 facilities, showing a less than twofold rise in solution suboptimality as problem size grows.", "perturbed_explanation": "The perturbed statement understates the gap for 150–200 facilities. 
The table reports a mean MIP-Gap of 18.36% (not 12.36%) for (2,1) instances with 150–200 facilities, making the claimed 12.36% incorrect.", "claim": "For (c,i)=(2,1) instances, the mean MIP-Gap increases from 6.88% for 1–75 facilities to 18.36% for 150–200 facilities, showing more than a twofold rise in solution suboptimality as problem size grows.", "label": true }, { "paperid": "2411.09315v1", "paper_path": "./SciVer/papers/2411.09315v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2411.09315v1_figure_1.png", "request_id": 1190, "origin_statement": "From N28 to N2, the normalized area per cell falls about 7× (1.0→0.15), while embodied CO2e per unit area roughly doubles (1.0→2.0), indicating advanced nodes cut area much faster than they reduce carbon intensity per area.", "perturbed_statement": "From N28 to N2, the normalized area per cell falls about 4× (1.0→0.15), while embodied CO2e per unit area increases by about 20% (1.0→2.0), indicating advanced nodes cut area much faster than they reduce carbon intensity per area.", "perturbed_explanation": "The perturbation is incorrect because the normalized area per cell actually decreases from 1.0 to approximately 0.15—about a 6.7× reduction, not 4×—and the embodied CO2e per unit area increases from 1.0 to around 2.0, a 100% increase, not 20%.", "claim": "From N28 to N2, the normalized area per cell falls about 7× (1.0→0.15), while embodied CO2e per unit area roughly doubles (1.0→2.0), indicating advanced nodes cut area much faster than they reduce carbon intensity per area.", "label": true }, { "paperid": "2410.14725v1", "paper_path": "./SciVer/papers/2410.14725v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2", "5.3" ], "image_path": "./SciVer/images/2410.14725v1-Table2-1.png", "request_id": 1193, "origin_statement": "Under 20% FLOPS reduction on Mamba-2.8B, our method achieves a PPL of 23.97 compared to EViT’s 9784.73, representing over 99.8% lower perplexity, and yields 57.6% average accuracy, a 14.0-point improvement over EViT’s 43.6%.", "perturbed_statement": "Under 20% FLOPS reduction on Mamba-2.8B, our method achieves a PPL of 239.7 compared to EViT’s 9784.73, representing over 97.6% lower perplexity, and yields 54.6% average accuracy, an 11.0-point improvement over EViT’s 43.6%.", "perturbed_explanation": "The table indicates that under 20% FLOPS reduction for Mamba-2.8B, our method’s PPL is actually 23.97 (not 239.7) and its average accuracy is 57.6% (not 54.6%), so the perturbed values for PPL and accuracy contradict the provided data.", "claim": "Under 20% FLOPS reduction on Mamba-2.8B, our method achieves a PPL of 23.97 compared to EViT’s 9784.73, representing over 99.8% lower perplexity, and yields 57.6% average accuracy, a 14.0-point improvement over EViT’s 43.6%.", "label": true }, { "paperid": "2409.08703v1", "paper_path": "./SciVer/papers/2409.08703v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4" ], "image_path": "./SciVer/images/2409.08703v1_figure_4.png", "request_id": 1208, "origin_statement": "On the Avazu dataset, fitness scores fluctuate from a low of about 0.718 at iteration 15 to a high of about 0.758 at iteration 20, resulting in a range of roughly 0.040 across the 100 iterations.", "perturbed_statement": "On the Avazu dataset, fitness scores fluctuate from a low of about 0.712 at iteration 15 to a high of about 0.768 at iteration 20, resulting in a range of roughly 0.056 across the 100 
iterations.", "perturbed_explanation": "The perturbed statement is incorrect because the actual minimum fitness observed is around 0.718 (not 0.712) and the maximum is around 0.758 (not 0.768), so the true range is approximately 0.040, not 0.056.", "claim": "On the Avazu dataset, fitness scores fluctuate from a low of about 0.718 at iteration 15 to a high of about 0.758 at iteration 20, resulting in a range of roughly 0.040 across the 100 iterations.", "label": true }, { "paperid": "2411.11559v1", "paper_path": "./SciVer/papers/2411.11559v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4", "4.1" ], "image_path": "./SciVer/images/2411.11559v1-Table4-1.png", "request_id": 1213, "origin_statement": "Of the five weight properties in Table 4, only scale-normalized and fully-normalized weights have their total outcome weight sum equal to zero, whereas the other three classes feature sums that are non-zero.", "perturbed_statement": "Of the five weight properties in Table 4, only fully-normalized weights have their total outcome weight sum equal to zero, while all other weight classes have non-zero sums.", "perturbed_explanation": "This is incorrect because the table specifies that scale-normalized weights also sum to zero (∑_i ω_i = 0), not just the fully-normalized class.", "claim": "Of the five weight properties in Table 4, only scale-normalized and fully-normalized weights have their total outcome weight sum equal to zero, whereas the other three classes feature sums that are non-zero.", "label": true }, { "paperid": "2411.03745v1", "paper_path": "./SciVer/papers/2411.03745v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "7.3" ], "image_path": "./SciVer/images/2411.03745v1-Table2-1.png", "request_id": 1221, "origin_statement": "As the outlier ratio increases from 10% to 50%, the average RANSAC iterations rise from 125 to 200 and the success rate drops from 100% to 53%.", "perturbed_statement": "As the outlier ratio increases from 10% to 50%, the average RANSAC iterations rise from 100 to 250 and the success rate drops from 100% to 40%.", "perturbed_explanation": "This is incorrect because the table shows 125 iterations at a 10% outlier ratio (not 100), 200 iterations at 50% (not 250), and a 53% success rate at 50% (not 40%).", "claim": "As the outlier ratio increases from 10% to 50%, the average RANSAC iterations rise from 125 to 200 and the success rate drops from 100% to 53%.", "label": true }, { "paperid": "2410.20034v1", "paper_path": "./SciVer/papers/2410.20034v1.json", "claim_type": "direct", "type": "chart", "item": "8(b)", "section": [ "5.2.1", "5.2.3" ], "image_path": "./SciVer/images/2410.20034v1_figure_8(b).png", "request_id": 1230, "origin_statement": "For the first ten seconds (1904–1912), all seven EMG channels show minor fluctuations within ±20 units; in the final two seconds (1916–1918), peak amplitudes reach approximately ±80 units, representing a fourfold increase in muscle activity.", "perturbed_statement": "For the first ten seconds (1904–1912), all eight EMG channels show minor fluctuations within ±20 units; in the final two seconds (1916–1918), peak amplitudes reach approximately ±80 units, representing a fourfold increase in muscle activity.", "perturbed_explanation": "The perturbed statement is incorrect because the EMG graph clearly displays seven channels, not eight. 
The extra channel count contradicts the visible number of traces in the image.", "claim": "For the first ten seconds (1904–1912), all seven EMG channels show minor fluctuations within ±20 units; in the final two seconds (1916–1918), peak amplitudes reach approximately ±80 units, representing a fourfold increase in muscle activity.", "label": true }, { "paperid": "2411.00112v1", "paper_path": "./SciVer/papers/2411.00112v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.00112v1_figure_5.png", "request_id": 1250, "origin_statement": "At 400 sample pairs, Cor-CFD-GD with σ=1 achieves a solution gap of about 6.8, approximately 1.6 units lower than SPSA with σ=1, which remains around 8.4.", "perturbed_statement": "At 400 sample pairs, Cor-CFD-GD with σ=1 achieves a solution gap of about 6.8, approximately 2.2 units lower than SPSA with σ=1, which remains around 8.4.", "perturbed_explanation": "The plot shows Cor-CFD-GD σ=1 at approximately 6.8 and SPSA σ=1 at around 8.4, so the actual gap is about 1.6 units, not 2.2 as stated.", "claim": "At 400 sample pairs, Cor-CFD-GD with σ=1 achieves a solution gap of about 6.8, approximately 1.6 units lower than SPSA with σ=1, which remains around 8.4.", "label": true }, { "paperid": "2411.16694v1", "paper_path": "./SciVer/papers/2411.16694v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.16694v1-Table3-1.png", "request_id": 1251, "origin_statement": "For EC1, GENZYME achieves a Top1 TM-score of 0.62, which is 0.03 higher than ZymCTRL+ESMFold’s 0.59 and 0.14 higher than RFDiffusionAA’s 0.48.", "perturbed_statement": "For EC1, GENZYME achieves a Top1 TM-score of 0.65, which is 0.06 higher than ZymCTRL+ESMFold’s 0.59 and 0.17 higher than RFDiffusionAA’s 0.48.", "perturbed_explanation": "The perturbed claim incorrectly states GENZYME’s Top1 TM-score as 0.65; Table 3 lists it as 0.62, so the provided value is inconsistent with the source data.", "claim": "For EC1, GENZYME achieves a Top1 TM-score of 0.62, which is 0.03 higher than ZymCTRL+ESMFold’s 0.59 and 0.14 higher than RFDiffusionAA’s 0.48.", "label": true }, { "paperid": "2410.00972v2", "paper_path": "./SciVer/papers/2410.00972v2.json", "claim_type": "direct", "type": "chart", "item": "3(b)", "section": [ "2.2" ], "image_path": "./SciVer/images/2410.00972v2_figure_3(b).png", "request_id": 1252, "origin_statement": "In the NuSTAR observation (obs ID 91002313004), the hardness ratio rises from about 1.25 to 1.75 while the count rate decreases from roughly 2000 counts/s to 1200 counts/s, indicating a ~40% drop in intensity.", "perturbed_statement": "In the NuSTAR observation (obs ID 91002313004), the hardness ratio rises from about 1.25 to 1.75 while the count rate decreases from roughly 2400 counts/s to 900 counts/s, indicating a ~62.5% drop in intensity.", "perturbed_explanation": "This is incorrect because the figure shows the count rate falling from around 2000 counts/s at hardness 1.25 to about 1200 counts/s at hardness 1.75, not from 2400 to 900 counts/s.", "claim": "In the NuSTAR observation (obs ID 91002313004), the hardness ratio rises from about 1.25 to 1.75 while the count rate decreases from roughly 2000 counts/s to 1200 counts/s, indicating a ~40% drop in intensity.", "label": true }, { "paperid": "2409.04880v1", "paper_path": "./SciVer/papers/2409.04880v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "6" ], "image_path": 
"./SciVer/images/2409.04880v1-Table6-1.png", "request_id": 1261, "origin_statement": "The leak for the Samsung S10 appeared approximately 228 days before its official press-release, compared to approximately 72 days for the S9 and 25 days for the S8.", "perturbed_statement": "The leak for the Samsung S10 appeared approximately 228 days before its official press-release, compared to approximately 72 days for the S9 and 60 days for the S8.", "perturbed_explanation": "The table lists the S8 leak on Jan 31, 2017 and its press-release on Feb 25, 2017, a 25-day gap—not 60 days—so the claim of a 60-day interval for the S8 is incorrect.", "claim": "The leak for the Samsung S10 appeared approximately 228 days before its official press-release, compared to approximately 72 days for the S9 and 25 days for the S8.", "label": true }, { "paperid": "2411.15546v1", "paper_path": "./SciVer/papers/2411.15546v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "3" ], "image_path": "./SciVer/images/2411.15546v1_figure_2.png", "request_id": 1274, "origin_statement": "The power-law index of the DM structure function is -0.04±0.02 for FRB 20201124A, which is ten times steeper than the -0.004±0.021 index measured for PSR B1744-24A.", "perturbed_statement": "The power-law index of the DM structure function is -0.004±0.021 for FRB 20201124A, which is ten times shallower than the -0.04±0.02 index measured for PSR B1744-24A.", "perturbed_explanation": "This statement is incorrect because the figure caption shows the DM SF index m = -0.004±0.021 applies to PSR B1744-24A (panel b), and m = -0.04±0.02 applies to FRB 20201124A (panel d), not the other way around.", "claim": "The power-law index of the DM structure function is -0.04±0.02 for FRB 20201124A, which is ten times steeper than the -0.004±0.021 index measured for PSR B1744-24A.", "label": true }, { "paperid": "2409.07233v1", "paper_path": "./SciVer/papers/2409.07233v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.4", "4.5" ], "image_path": "./SciVer/images/2409.07233v1-Table1-1.png", "request_id": 1, "origin_statement": "The CN model’s AIC (173.0) is 139.4 units higher than the N model’s AIC (33.6).", "perturbed_statement": "The CN model’s AIC (173.0) is 139.4 units lower than the N model’s AIC (33.6).", "perturbed_explanation": "This statement is wrong because Table 1 shows the CN model’s AIC is 173.0, which is actually 139.4 units higher than the N model’s AIC of 33.6, not lower.", "claim": "The CN model’s AIC (173.0) is 139.4 units higher than the N model’s AIC (33.6).", "label": true }, { "paperid": "2409.06994v2", "paper_path": "./SciVer/papers/2409.06994v2.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "4.1.2" ], "image_path": "./SciVer/images/2409.06994v2_figure_1.png", "request_id": 2, "origin_statement": "In setting 4, the RN method’s ARI drops from about 0.98 at a 0.5 node proportion to around 0.05 at a 0.95 proportion.", "perturbed_statement": "In setting 4, the RN method’s ARI drops from about 0.98 at a 0.5 node proportion to around 0.50 at a 0.95 proportion.", "perturbed_explanation": "The perturbed claim is incorrect because in panel (4) of Figure 1, the orange dotted RN curve falls to approximately 0.05 at a 0.95 proportion, not to 0.50 as stated.", "claim": "In setting 4, the RN method’s ARI drops from about 0.98 at a 0.5 node proportion to around 0.05 at a 0.95 proportion.", "label": true }, { "paperid": "2410.10995v2", "paper_path": 
"./SciVer/papers/2410.10995v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.10995v2-Table4-1.png", "request_id": 5, "origin_statement": "For masculine-inflected sources, context-aware QAD with Kiwi 22 reduces the Comet-22 score from 89.44 to 87.72, a drop of 1.72 points compared to non-context-aware Kiwi 22 reranking.", "perturbed_statement": "For masculine-inflected sources, context-aware QAD with Kiwi 22 reduces the Comet-22 score from 89.44 to 87.72, a drop of 2.72 points compared to non-context-aware Kiwi 22 reranking.", "perturbed_explanation": "The perturbed statement incorrectly states the drop as 2.72 points, but the table shows a decrease from 89.44 to 87.72, which is actually a 1.72-point drop.", "claim": "For masculine-inflected sources, context-aware QAD with Kiwi 22 reduces the Comet-22 score from 89.44 to 87.72, a drop of 1.72 points compared to non-context-aware Kiwi 22 reranking.", "label": true }, { "paperid": "2410.09504v1", "paper_path": "./SciVer/papers/2410.09504v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.09504v1_figure_4.png", "request_id": 6, "origin_statement": "SMK’s posterior means for φ1 and φ2 are around 6.1 and 7.3, overshooting the true value of 4 by about 52% and 83%, while BPS means for both φ1 and φ2 are approximately 4.2 and 4.1, respectively, very close to the truth.", "perturbed_statement": "SMK’s posterior means for φ1 and φ2 are around 5.1 and 6.2, overshooting the true value of 4 by about 27% and 55%, while BPS means for both φ1 and φ2 are approximately 3.5 and 3.9, respectively, underestimating true values.", "perturbed_explanation": "The SMK posterior means shown in the image for φ1 and φ2 are approximately 6.1 and 7.3 (not 5.1 and 6.2), and the BPS posterior means are about 4.2 and 4.1 (not 3.5 and 3.9). 
Thus the stated values contradict the plotted posterior means.", "claim": "SMK’s posterior means for φ1 and φ2 are around 6.1 and 7.3, overshooting the true value of 4 by about 52% and 83%, while BPS means for both φ1 and φ2 are approximately 4.2 and 4.1, respectively, very close to the truth.", "label": true }, { "paperid": "2411.14880v1", "paper_path": "./SciVer/papers/2411.14880v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.14880v1_figure_5.png", "request_id": 8, "origin_statement": "For the ‘Condition’ prototype in PDTB-3, 80% of its top ten neighbors share the Condition label, while 10% are labeled as Substitution and another 10% as Cause.", "perturbed_statement": "For the ‘Condition’ prototype in PDTB-3, 70% of its top ten neighbors share the Condition label, while 20% are labeled as Substitution and another 10% as Cause.", "perturbed_explanation": "This statement is incorrect because the chart shows that 80% of the nearest neighbors of the ‘Condition’ prototype are labeled Condition (not 70%), and only 10% are labeled Substitution (not 20%).", "claim": "For the ‘Condition’ prototype in PDTB-3, 80% of its top ten neighbors share the Condition label, while 10% are labeled as Substitution and another 10% as Cause.", "label": true }, { "paperid": "2411.15223v1", "paper_path": "./SciVer/papers/2411.15223v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.3.4" ], "image_path": "./SciVer/images/2411.15223v1-Table3-1.png", "request_id": 11, "origin_statement": "The Paper Model's AUC (0.7850) exceeds DeepFM's AUC (0.7816) by 0.0034, indicating a modest performance improvement.", "perturbed_statement": "The Paper Model's AUC (0.7880) exceeds DeepFM's AUC (0.7816) by 0.0064, indicating a modest performance improvement.", "perturbed_explanation": "This is incorrect because the Paper Model's AUC is actually 0.7850 (not 0.7880), so the real difference from DeepFM's 0.7816 is 0.0034, not 0.0064.", "claim": "The Paper Model's AUC (0.7850) exceeds DeepFM's AUC (0.7816) by 0.0034, indicating a modest performance improvement.", "label": true }, { "paperid": "2409.10066v1", "paper_path": "./SciVer/papers/2409.10066v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2.1" ], "image_path": "./SciVer/images/2409.10066v1-Table1-1.png", "request_id": 15, "origin_statement": "The Lane changing method defines three parameters—target lane, target speed, and trigger sequence—one more than each of the Accelerate and Decelerate methods, which both accept only two parameters (target speed and trigger sequence).", "perturbed_statement": "The Decelerate method defines three parameters—target speed, trigger sequence, and lane offset—one more than the Lane changing method’s two parameters.", "perturbed_explanation": "This is incorrect because, according to the table, the Decelerate method only has two parameters (target speed and trigger sequence), and the Lane changing method actually has three parameters, not two.", "claim": "The Lane changing method defines three parameters—target lane, target speed, and trigger sequence—one more than each of the Accelerate and Decelerate methods, which both accept only two parameters (target speed and trigger sequence).", "label": true }, { "paperid": "2409.16902v1", "paper_path": "./SciVer/papers/2409.16902v1.json", "claim_type": "direct", "type": "chart", "item": "2(a)", "section": [ "3" ], "image_path": "./SciVer/images/2409.16902v1_figure_2(a).png", 
"request_id": 16, "origin_statement": "SAM2-large achieves an average performance score of 0.587, which is 0.007 points above ARTrack's 0.580 and 0.062 points above OSTrack's 0.525.", "perturbed_statement": "SAM2-large achieves an average performance score of 0.587, which is 0.012 points above ARTrack's 0.575 and 0.062 points above OSTrack's 0.525.", "perturbed_explanation": "The actual ARTrack score is 0.580 rather than 0.575, so the claimed 0.012-point advantage is incorrect; the real difference is 0.007 points.", "claim": "SAM2-large achieves an average performance score of 0.587, which is 0.007 points above ARTrack's 0.580 and 0.062 points above OSTrack's 0.525.", "label": true }, { "paperid": "2411.15098v2", "paper_path": "./SciVer/papers/2411.15098v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.15098v2-Table2-1.png", "request_id": 17, "origin_statement": "Our method requires only 14.5M trainable parameters (0.1%) on a 12B-parameter FLUX.1 model, which is over 60 times fewer parameters than the 918M (7.6%) used by IP-Adapter.", "perturbed_statement": "Our method requires only 48.7M trainable parameters (0.4%) on a 12B-parameter FLUX.1 model, which is about 19 times fewer parameters than the 918M (7.6%) used by IP-Adapter.", "perturbed_explanation": "The perturbed statement incorrectly states that our method uses 48.7M parameters and a 0.4% ratio, whereas the table shows it uses 14.5M parameters (0.1%). Consequently, the claimed 19x reduction versus IP-Adapter is wrong—using the correct 14.5M figure yields over a 60x reduction.", "claim": "Our method requires only 14.5M trainable parameters (0.1%) on a 12B-parameter FLUX.1 model, which is over 60 times fewer parameters than the 918M (7.6%) used by IP-Adapter.", "label": true }, { "paperid": "2409.14672v1", "paper_path": "./SciVer/papers/2409.14672v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.14672v1_figure_4.png", "request_id": 18, "origin_statement": "Compared to GPT-4-ICL, GPT-4-Prompt achieves a 1.2 percentage point higher win rate against GPT-4 (86.2% vs 85%) and completely eliminates ties (0% vs 3.3%).", "perturbed_statement": "Compared to GPT-4-ICL, GPT-4-Prompt achieves a 3.2 percentage point higher win rate against GPT-4 (88.2% vs 85%) and reduces ties to 0.1% from 3.3%.", "perturbed_explanation": "This is incorrect because GPT-4-Prompt’s actual win rate against GPT-4 is 86.2%, not 88.2%, and it eliminates ties entirely (0%), rather than having 0.1% ties.", "claim": "Compared to GPT-4-ICL, GPT-4-Prompt achieves a 1.2 percentage point higher win rate against GPT-4 (86.2% vs 85%) and completely eliminates ties (0% vs 3.3%).", "label": true }, { "paperid": "2409.06367v1", "paper_path": "./SciVer/papers/2409.06367v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.06367v1_figure_5.png", "request_id": 22, "origin_statement": "SimpleNet achieves 59.12% average Image-AUROC on Texture-AD, which is 40.46 percentage points lower than its 99.58% on MVTec.", "perturbed_statement": "SimpleNet achieves 59.12% average Image-AUROC on Texture-AD, which is 30.46 percentage points lower than its 99.58% on MVTec.", "perturbed_explanation": "The perturbed claim is incorrect because the actual difference between 99.58% (MVTec) and 59.12% (Texture-AD) is 40.46 percentage points, not 30.46.", "claim": "SimpleNet achieves 59.12% average 
Image-AUROC on Texture-AD, which is 40.46 percentage points lower than its 99.58% on MVTec.", "label": true }, { "paperid": "2409.18042v2", "paper_path": "./SciVer/papers/2409.18042v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.18042v2-Table2-1.png", "request_id": 23, "origin_statement": "In Stage-2, EMOVA uses 7.4M alignment samples, which is 3M more than Stage-3's 4.4M in the SFT stage, representing a 68% larger dataset.", "perturbed_statement": "In Stage-2, EMOVA uses 7.4M alignment samples, which is 2M more than Stage-3's 5.4M in the SFT stage.", "perturbed_explanation": "This statement is incorrect because the table shows Stage-3 uses 4.4M samples, not 5.4M, so the difference between Stage-2 and Stage-3 is actually 3M, not 2M.", "claim": "In Stage-2, EMOVA uses 7.4M alignment samples, which is 3M more than Stage-3's 4.4M in the SFT stage, representing a 68% larger dataset.", "label": true }, { "paperid": "2411.14321v1", "paper_path": "./SciVer/papers/2411.14321v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.1", "4.4" ], "image_path": "./SciVer/images/2411.14321v1_figure_5.png", "request_id": 24, "origin_statement": "Figure 5 shows RL policy data (first row) clustered between -0.6 to 0.6 joint-relative and -2 to 1 root-relative, while our algorithm (second row) adds blue traces reaching down to about -4 on the root-relative axis.", "perturbed_statement": "Figure 5 shows RL policy data (first row) clustered between -0.2 to 0.2 joint-relative and -1 to 1 root-relative, while our algorithm (second row) adds blue traces reaching down to about -3 on the root-relative axis.", "perturbed_explanation": "The perturbation is incorrect because the RL policy data in the first row actually spans roughly -0.6 to 0.6 on the joint-relative axis and approximately -2 to 1 on the root-relative axis, not the narrower -0.2 to 0.2 and -1 to 1 range. Additionally, the algorithm’s blue traces in the second row extend down to about -4 on the root-relative axis, not -3.", "claim": "Figure 5 shows RL policy data (first row) clustered between -0.6 to 0.6 joint-relative and -2 to 1 root-relative, while our algorithm (second row) adds blue traces reaching down to about -4 on the root-relative axis.", "label": true }, { "paperid": "2410.07110v1", "paper_path": "./SciVer/papers/2410.07110v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "1" ], "image_path": "./SciVer/images/2410.07110v1-Table5-1.png", "request_id": 25, "origin_statement": "ER has the shortest runtime at 1.25 hours, making it roughly 16 times faster than GEM’s 20.42-hour runtime on Split CIFAR-100 with buffer size 2000.", "perturbed_statement": "Ours has the shortest runtime at 1.25 hours, making it roughly 16 times faster than GEM’s 20.42-hour runtime on Split CIFAR-100 with buffer size 2000.", "perturbed_explanation": "The perturbed claim is incorrect because Table 5 shows that ER—not Ours—has a runtime of 1.25 hours. 
The Ours method actually runs for 3.49 hours, so it cannot be the shortest or have a 1.25-hour runtime.", "claim": "ER has the shortest runtime at 1.25 hours, making it roughly 16 times faster than GEM’s 20.42-hour runtime on Split CIFAR-100 with buffer size 2000.", "label": true }, { "paperid": "2411.10431v1", "paper_path": "./SciVer/papers/2411.10431v1.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.10431v1_figure_8.png", "request_id": 26, "origin_statement": "In Figure 8, the scatter plot of estimated synchronous reactance X_d' versus transient open-circuit time constant T_{0d}' shows a clear positive correlation, with both CDI (blue) and JCDI (orange) points aligned along an upward diagonal trend.", "perturbed_statement": "In Figure 8, the scatter plot of estimated synchronous reactance X_d' versus transient open-circuit time constant T_{0d}' shows a clear negative correlation, with T_{0d}' decreasing as X_d' increases.", "perturbed_explanation": "This statement is incorrect because the actual scatter plot displays an upward trend—T_{0d}' increases as X_d' increases—indicating a positive, not negative, correlation between these parameters.", "claim": "In Figure 8, the scatter plot of estimated synchronous reactance X_d' versus transient open-circuit time constant T_{0d}' shows a clear positive correlation, with both CDI (blue) and JCDI (orange) points aligned along an upward diagonal trend.", "label": true }, { "paperid": "2411.10948v1", "paper_path": "./SciVer/papers/2411.10948v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.3", "5.1" ], "image_path": "./SciVer/images/2411.10948v1-Table1-1.png", "request_id": 27, "origin_statement": "Our method is the only one among the four that fully quantizes normalization layers; none of the three prior methods do.", "perturbed_statement": "Both our method and the fine-grained method support fully-quantized normalization layers, while the other two methods do not.", "perturbed_explanation": "This is incorrect because Table 1 shows an ✘ for the fine-grained method under 'Fully-quantized normalization layers', indicating it does not support full quantization of normalization layers.", "claim": "Our method is the only one among the four that fully quantizes normalization layers; none of the three prior methods do.", "label": true }, { "paperid": "2409.02664v1", "paper_path": "./SciVer/papers/2409.02664v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4.4.2" ], "image_path": "./SciVer/images/2409.02664v1_figure_6.png", "request_id": 28, "origin_statement": "With the visual prompt-enhanced CLIP features (CLIP+VP), FF real images (red dots) and FF fake images (blue dots) form two distinct clusters with negligible overlap on the left side, whereas in the original CLIP feature space, their corresponding magenta and cyan markers significantly intermingle.", "perturbed_statement": "In the CLIP-only feature space, FF real images (magenta ×) and FF fake images (cyan ×) form two distinct clusters with negligible overlap on the left side.", "perturbed_explanation": "This is incorrect because in the original CLIP embedding (shown by magenta × and cyan ×), FF real and FF fake samples overlap heavily in the central region rather than forming separate, non-overlapping clusters.", "claim": "With the visual prompt-enhanced CLIP features (CLIP+VP), FF real images (red dots) and FF fake images (blue dots) form two distinct clusters with 
negligible overlap on the left side, whereas in the original CLIP feature space, their corresponding magenta and cyan markers significantly intermingle.", "label": true }, { "paperid": "2410.06795v1", "paper_path": "./SciVer/papers/2410.06795v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.06795v1_figure_2(b).png", "request_id": 30, "origin_statement": "On indirect misleading questions, SFD achieves 0.8164 accuracy, 0.2826 higher than MiniGPT-v2’s 0.5338.", "perturbed_statement": "On indirect misleading questions, SFD achieves 0.7164 accuracy, 0.1826 higher than MiniGPT-v2’s 0.5338.", "perturbed_explanation": "The chart shows SFD’s accuracy on indirect misleading is 0.8164, not 0.7164, and the gap over MiniGPT-v2 (0.5338) is 0.2826, not 0.1826.", "claim": "On indirect misleading questions, SFD achieves 0.8164 accuracy, 0.2826 higher than MiniGPT-v2’s 0.5338.", "label": true }, { "paperid": "2410.09356v1", "paper_path": "./SciVer/papers/2410.09356v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.09356v1-Table1-1.png", "request_id": 31, "origin_statement": "The NYCTaxi dataset has 266 nodes, which is 6.4% more nodes than the NYCBike dataset's 250 nodes, despite both having the same 4,368 samples.", "perturbed_statement": "The NYCTaxi dataset has 280 nodes, which is 12% more nodes than the NYCBike dataset's 250 nodes, despite both having the same 4,368 samples.", "perturbed_explanation": "This is incorrect because Table 1 shows NYCTaxi actually has 266 nodes (not 280), so it is only 6.4% more than NYCBike’s 250 nodes, not 12%.", "claim": "The NYCTaxi dataset has 266 nodes, which is 6.4% more nodes than the NYCBike dataset's 250 nodes, despite both having the same 4,368 samples.", "label": true }, { "paperid": "2411.08298v1", "paper_path": "./SciVer/papers/2411.08298v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5" ], "image_path": "./SciVer/images/2411.08298v1_figure_4.png", "request_id": 32, "origin_statement": "At the first peak near r ≈ 1.0σ, the attractive LJ liquid (PE24) exhibits g(r) ≈ 1.42, while the purely repulsive liquid (PE24r) shows ≈ 1.38, a mere ~3% enhancement in local ordering from attractions.", "perturbed_statement": "At the first peak near r ≈ 1.0σ, the purely repulsive liquid (PE24r) exhibits g(r) ≈ 1.45, whereas the attractive LJ liquid (PE24) has only ≈ 1.38, indicating repulsion boosts local ordering by ~5%.", "perturbed_explanation": "This is incorrect because the plot shows the solid blue line (PE24) peaking at about 1.42 and the dashed red line (PE24r) peaking lower at about 1.38. 
The perturbed statement reverses and misstates these exact peak values.", "claim": "At the first peak near r ≈ 1.0σ, the attractive LJ liquid (PE24) exhibits g(r) ≈ 1.42, while the purely repulsive liquid (PE24r) shows ≈ 1.38, a mere ~3% enhancement in local ordering from attractions.", "label": true }, { "paperid": "2409.15727v1", "paper_path": "./SciVer/papers/2409.15727v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.15727v1-Table3-1.png", "request_id": 33, "origin_statement": "LaPose (Ours) achieves an NIou75 score of 14.1%, which is 3.0 percentage points higher than DMSR's 11.1%.", "perturbed_statement": "LaPose (Ours) achieves an NIou75 score of 16.1%, which is 5.0 percentage points higher than DMSR's 11.1%.", "perturbed_explanation": "The table shows that LaPose (Ours) actually has an NIou75 score of 14.1%, not 16.1%. Therefore, the claimed difference of 5.0 points over DMSR's 11.1% is incorrect; the true difference is 3.0 points.", "claim": "LaPose (Ours) achieves an NIou75 score of 14.1%, which is 3.0 percentage points higher than DMSR's 11.1%.", "label": true }, { "paperid": "2409.14067v1", "paper_path": "./SciVer/papers/2409.14067v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.14067v1-Table3-1.png", "request_id": 37, "origin_statement": "Our method's average PSNR on the test scenes is 30.14 dB, which is 4.30 dB higher than PNeRFLoc's 25.84 dB.", "perturbed_statement": "Our method's average PSNR on the test scenes is 30.14 dB, which is 6.30 dB higher than PNeRFLoc's 25.84 dB.", "perturbed_explanation": "This is incorrect because Table 3 shows our average PSNR is 30.14 dB and PNeRFLoc's is 25.84 dB, a difference of 4.30 dB, not 6.30 dB.", "claim": "Our method's average PSNR on the test scenes is 30.14 dB, which is 4.30 dB higher than PNeRFLoc's 25.84 dB.", "label": true }, { "paperid": "2411.03966v1", "paper_path": "./SciVer/papers/2411.03966v1.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "7.2" ], "image_path": "./SciVer/images/2411.03966v1_figure_9.png", "request_id": 40, "origin_statement": "The predicted anxiety arc using WorryWords has a Spearman correlation of 0.903, which is 0.308 higher than the anger lexicon’s 0.595 and 1.562 higher than the anticipation lexicon’s -0.659.", "perturbed_statement": "The predicted anxiety arc using WorryWords has a Spearman correlation of 0.913, which is 0.318 higher than the anger lexicon’s 0.595 and 1.562 higher than the anticipation lexicon’s -0.659.", "perturbed_explanation": "The perturbed statement is incorrect because the actual Spearman correlation for the anxiety arc (WorryWords) is 0.903—not 0.913—and the true difference from the anger lexicon’s 0.595 score is 0.308, not 0.318.", "claim": "The predicted anxiety arc using WorryWords has a Spearman correlation of 0.903, which is 0.308 higher than the anger lexicon’s 0.595 and 1.562 higher than the anticipation lexicon’s -0.659.", "label": true }, { "paperid": "2409.05755v1", "paper_path": "./SciVer/papers/2409.05755v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.05755v1-Table4-1.png", "request_id": 41, "origin_statement": "For the GCN curves, the H_class metric has the smallest Fréchet distance on GenCat graphs (0.07), compared to all other metrics.", "perturbed_statement": "For the GCN curves, the H_class metric has the smallest Fréchet 
distance on GenCat graphs (0.15), compared to all other metrics.", "perturbed_explanation": "The table reports the Fréchet distance for H_class on GenCat graphs as 0.07, not 0.15, so the stated value of 0.15 is incorrect.", "claim": "For the GCN curves, the H_class metric has the smallest Fréchet distance on GenCat graphs (0.07), compared to all other metrics.", "label": true }, { "paperid": "2410.15355v1", "paper_path": "./SciVer/papers/2410.15355v1.json", "claim_type": "direct", "type": "chart", "item": "7(b)", "section": [ "6.6" ], "image_path": "./SciVer/images/2410.15355v1_figure_7(b).png", "request_id": 42, "origin_statement": "In Figure 7(b), on the Amazon-Photo dataset, the classification accuracy at mask ratio 0.3 is approximately 94.7%, which is about 2.2 percentage points higher than at mask ratio 0.1 (~92.5%).", "perturbed_statement": "In Figure 7(b), on the Amazon-Photo dataset, the classification accuracy at mask ratio 0.3 is approximately 96.5%, which is about 4.3 percentage points higher than at mask ratio 0.1 (~92.2%).", "perturbed_explanation": "The perturbed statement is incorrect because the chart shows the Amazon-Photo accuracy at mask ratio 0.3 is around 94.7%, not 96.5%, and the increase over the ~92.5% accuracy at mask ratio 0.1 is about 2.2 percentage points, not 4.3.", "claim": "In Figure 7(b), on the Amazon-Photo dataset, the classification accuracy at mask ratio 0.3 is approximately 94.7%, which is about 2.2 percentage points higher than at mask ratio 0.1 (~92.5%).", "label": true }, { "paperid": "2410.09729v2", "paper_path": "./SciVer/papers/2410.09729v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2410.09729v2-Table1-1.png", "request_id": 43, "origin_statement": "GPT 4o attains an F1 score of 7.57%, which is 2.04 percentage points higher than Gemini 1.5 Pro’s 5.53% F1 score and 5.57 points above LLaVA 1.6’s 2.00%.", "perturbed_statement": "GPT 4o attains an F1 score of 7.57%, which is 3.04 percentage points higher than Gemini 1.5 Pro’s 5.53% F1 score and 4.57 points above LLaVA 1.6’s 2.00%.", "perturbed_explanation": "The perturbed statement miscalculates both differences. The actual gap between 7.57% and Gemini 1.5 Pro’s 5.53% F1 score is 2.04 points (not 3.04), and the gap between 7.57% and LLaVA 1.6’s 2.00% is 5.57 points (not 4.57).", "claim": "GPT 4o attains an F1 score of 7.57%, which is 2.04 percentage points higher than Gemini 1.5 Pro’s 5.53% F1 score and 5.57 points above LLaVA 1.6’s 2.00%.", "label": true }, { "paperid": "2411.08909v1", "paper_path": "./SciVer/papers/2411.08909v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.08909v1-Table2-1.png", "request_id": 45, "origin_statement": "LC-PLM-790M with BiMamba-S achieves a 20.8% higher TM score on CASP15-multimers than ESM-2-650M (100B), increasing from 0.4228 to 0.5109.", "perturbed_statement": "LC-PLM-790M with BiMamba-S achieves a 25.8% higher TM score on CASP15-multimers than ESM-2-650M (100B), increasing from 0.4228 to 0.5109.", "perturbed_explanation": "The stated percentage increase is incorrect. 
The actual improvement from 0.4228 to 0.5109 is (0.5109–0.4228)/0.4228≈20.8%, not 25.8%.", "claim": "LC-PLM-790M with BiMamba-S achieves a 20.8% higher TM score on CASP15-multimers than ESM-2-650M (100B), increasing from 0.4228 to 0.5109.", "label": true }, { "paperid": "2409.07355v1", "paper_path": "./SciVer/papers/2409.07355v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.07355v1-Table3-1.png", "request_id": 49, "origin_statement": "InteractEval (GPT-4) achieves the highest average Spearman correlation (0.635), surpassing G-Eval (0.632) and CheckEval (0.620), and outperforming Gemini-1.5-Pro by 0.083 and Claude-3.5-Sonnet by 0.099.", "perturbed_statement": "InteractEval (GPT-4) achieves the highest average Spearman correlation (0.640), surpassing G-Eval (0.632) and CheckEval (0.620), and outperforming Gemini-1.5-Pro by 0.088 and Claude-3.5-Sonnet by 0.105.", "perturbed_explanation": "The perturbed statement misstates InteractEval (GPT-4)’s average Spearman correlation: the table reports 0.635, not 0.640. Consequently, the claimed differences (0.088 and 0.105) contradict the actual gaps of 0.083 (0.635–0.552) and 0.099 (0.635–0.536) shown in the table.", "claim": "InteractEval (GPT-4) achieves the highest average Spearman correlation (0.635), surpassing G-Eval (0.632) and CheckEval (0.620), and outperforming Gemini-1.5-Pro by 0.083 and Claude-3.5-Sonnet by 0.099.", "label": true }, { "paperid": "2411.11912v1", "paper_path": "./SciVer/papers/2411.11912v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2411.11912v1-Table1-1.png", "request_id": 51, "origin_statement": "Although OmniMedVQA includes the most modalities (12), our dataset with 9 modalities contains 707,962 VQA triplets, representing about 5.5 times more than the 127,995 triplets in OmniMedVQA.", "perturbed_statement": "Although OmniMedVQA includes the most modalities (12), our dataset with 10 modalities contains 707,962 VQA triplets, representing about 5.5 times more than the 127,995 triplets in OmniMedVQA.", "perturbed_explanation": "The perturbed statement incorrectly claims our dataset has 10 modalities; the table shows it actually has 9 modalities, so this detail contradicts the context.", "claim": "Although OmniMedVQA includes the most modalities (12), our dataset with 9 modalities contains 707,962 VQA triplets, representing about 5.5 times more than the 127,995 triplets in OmniMedVQA.", "label": true }, { "paperid": "2410.21603v2", "paper_path": "./SciVer/papers/2410.21603v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.21603v2-Table2-1.png", "request_id": 53, "origin_statement": "At μ₀=2.75 with n=100, ABC-Stat achieves the lowest MSE of 0.0001 among all ABC methods.", "perturbed_statement": "At μ₀=2.75 with n=100, ABC-Stat achieves the lowest MSE of 0.001 among all ABC methods.", "perturbed_explanation": "The table shows that for μ₀=2.75 and n=100, the MSE of ABC-Stat is 0.0001, not 0.001, so the perturbed value contradicts the actual MSE entry.", "claim": "At μ₀=2.75 with n=100, ABC-Stat achieves the lowest MSE of 0.0001 among all ABC methods.", "label": true }, { "paperid": "2410.17406v1", "paper_path": "./SciVer/papers/2410.17406v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.2.1" ], "image_path": "./SciVer/images/2410.17406v1-Table1-1.png", "request_id": 59, "origin_statement": 
"When provided with NVD+CWE sources, the summarizing technique achieved 379 mitigation true positives (79%), 117 more than the chunking technique’s 262 true positives (54%) with the same evidence.", "perturbed_statement": "When provided with NVD+CWE sources, the summarizing technique achieved 299 mitigation true positives (69%), only 37 more than the chunking technique’s 262 true positives (54%) with the same evidence.", "perturbed_explanation": "The table shows that the summarizing technique actually achieved 379 true positives (79%) in mitigation, not 299 (69%), making the stated counts and percentage incorrect.", "claim": "When provided with NVD+CWE sources, the summarizing technique achieved 379 mitigation true positives (79%), 117 more than the chunking technique’s 262 true positives (54%) with the same evidence.", "label": true }, { "paperid": "2411.01021v1", "paper_path": "./SciVer/papers/2411.01021v1.json", "claim_type": "direct", "type": "chart", "item": "10", "section": [ "3.3.3" ], "image_path": "./SciVer/images/2411.01021v1_figure_10.png", "request_id": 60, "origin_statement": "The median contraction parameter α for RL-optimized no-error simulations is approximately 0.75, about 50% higher than the α_S median near 0.5 and over 75% greater than the near-zero α_C.", "perturbed_statement": "The median contraction parameter α for RL-optimized no-error simulations is approximately 0.45, similar to the α_S median near 0.5 and only marginally higher than the near-zero α_C.", "perturbed_explanation": "This is incorrect because the RL-optimized no-error α distribution visibly centers around 0.75, not 0.45, as shown by the densest region of the violin at about 0.75, whereas α_S centers at about 0.5 and α_C is near zero.", "claim": "The median contraction parameter α for RL-optimized no-error simulations is approximately 0.75, about 50% higher than the α_S median near 0.5 and over 75% greater than the near-zero α_C.", "label": true }, { "paperid": "2411.18383v1", "paper_path": "./SciVer/papers/2411.18383v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.18383v1-Table3-1.png", "request_id": 61, "origin_statement": "Topics No.5 and No.3 both had major video coverage spikes in August 2023, while Topic No.2's second spike occurred six months earlier in February 2023, highlighting a staggered timing of these nuclear issues in news videos.", "perturbed_statement": "Topics No.5 and No.3 both had major video coverage spikes in July 2023, while Topic No.2's second spike occurred six months earlier in February 2023, highlighting a staggered timing of these nuclear issues in news videos.", "perturbed_explanation": "The table indicates that Topics No.5 and No.3 spiked in 2023/08 (August), not in 2023/07 (July). 
The perturbed statement incorrectly shifts these spikes to July 2023.", "claim": "Topics No.5 and No.3 both had major video coverage spikes in August 2023, while Topic No.2's second spike occurred six months earlier in February 2023, highlighting a staggered timing of these nuclear issues in news videos.", "label": true }, { "paperid": "2410.10442v1", "paper_path": "./SciVer/papers/2410.10442v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.10442v1_figure_7.png", "request_id": 62, "origin_statement": "Model accuracy rises from 48.6% at a learning rate of 1e-1 to a maximum of 50.4% at 1e-4, before declining to 49.2% at 1e-5.", "perturbed_statement": "Model accuracy rises from 48.6% at a learning rate of 1e-1 to a maximum of 50.4% at 1e-3, before declining to 49.2% at 1e-5.", "perturbed_explanation": "This is incorrect because Figure 7 shows that the peak accuracy of 50.4% occurs at a learning rate of 1e-4, not at 1e-3 as stated.", "claim": "Model accuracy rises from 48.6% at a learning rate of 1e-1 to a maximum of 50.4% at 1e-4, before declining to 49.2% at 1e-5.", "label": true }, { "paperid": "2409.14762v1", "paper_path": "./SciVer/papers/2409.14762v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.14762v1-Table1-1.png", "request_id": 63, "origin_statement": "The Hard Mode prompt in Table 1 contains six sentences, which is one more than the five sentences in both the Easy and Medium Mode prompts.", "perturbed_statement": "The Easy Mode prompt in Table 1 contains only four sentences, which is one fewer than the five sentences in both the Medium and Hard Mode prompts.", "perturbed_explanation": "This is incorrect because the Easy Mode prompt actually has five sentences, not four. 
You can count five full sentences in the Easy Mode prompt in Table 1.", "claim": "The Hard Mode prompt in Table 1 contains six sentences, which is one more than the five sentences in both the Easy and Medium Mode prompts.", "label": true }, { "paperid": "2410.17694v1", "paper_path": "./SciVer/papers/2410.17694v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.5" ], "image_path": "./SciVer/images/2410.17694v1_figure_3.png", "request_id": 66, "origin_statement": "The performance gap between SynthRAG and baseline models narrows across comparisons: 47.0 percentage points over Qwen-Max, 42.8 over RAG, 10.2 over OutlineRAG, and just 0.4 over the no-generation ablation.", "perturbed_statement": "The performance gap between SynthRAG and baseline models narrows across comparisons: 47.0 percentage points over Qwen-Max, 42.8 over RAG, 20.2 over OutlineRAG, and just 0.4 over the no-generation ablation.", "perturbed_explanation": "The perturbed statement incorrectly reports a 20.2-point gap against OutlineRAG, but the figure shows SynthRAG achieves 55.1% versus OutlineRAG's 44.9%, a 10.2-point difference.", "claim": "The performance gap between SynthRAG and baseline models narrows across comparisons: 47.0 percentage points over Qwen-Max, 42.8 over RAG, 10.2 over OutlineRAG, and just 0.4 over the no-generation ablation.", "label": true }, { "paperid": "2409.14396v1", "paper_path": "./SciVer/papers/2409.14396v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.14396v1-Table2-1.png", "request_id": 67, "origin_statement": "Flat-LoRA with rank 16 achieves 78.24% accuracy on DTD, which is 1.44 percentage points higher than full fine-tuning’s 76.80%.", "perturbed_statement": "Flat-LoRA with rank 16 achieves 79.24% accuracy on DTD, which is 2.44 percentage points higher than full fine-tuning’s 76.80%.", "perturbed_explanation": "This claim is incorrect because Table 2 shows Flat-LoRA (r=16) attains 78.24% on DTD (not 79.24%), and the gap over full fine-tuning’s 76.80% is 1.44 percentage points (not 2.44).", "claim": "Flat-LoRA with rank 16 achieves 78.24% accuracy on DTD, which is 1.44 percentage points higher than full fine-tuning’s 76.80%.", "label": true }, { "paperid": "2410.02401v5", "paper_path": "./SciVer/papers/2410.02401v5.json", "claim_type": "direct", "type": "chart", "item": "5(c)", "section": [ "4.3", "5.5" ], "image_path": "./SciVer/images/2410.02401v5_figure_5(c).png", "request_id": 70, "origin_statement": "SynCo surpasses MoCo-v2 at the two largest tested queue sizes: at 65,536 by 0.68 percentage points (48.42% vs 47.74%) and at 131,072 by 0.90 percentage points (48.50% vs 47.60%).", "perturbed_statement": "SynCo surpasses MoCo-v2 at the two largest tested queue sizes: at 65,536 by 0.68 percentage points (48.42% vs 48.74%) and at 131,072 by 0.90 percentage points (48.50% vs 48.60%).", "perturbed_explanation": "The perturbed statement is incorrect because MoCo-v2’s actual Top-1 accuracies are 47.74% at queue size 65,536 (not 48.74%) and 47.60% at queue size 131,072 (not 48.60%), contradicting the chart values.", "claim": "SynCo surpasses MoCo-v2 at the two largest tested queue sizes: at 65,536 by 0.68 percentage points (48.42% vs 47.74%) and at 131,072 by 0.90 percentage points (48.50% vs 47.60%).", "label": true }, { "paperid": "2409.16400v1", "paper_path": "./SciVer/papers/2409.16400v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5.6" ], "image_path": 
"./SciVer/images/2409.16400v1_figure_7.png", "request_id": 72, "origin_statement": "The model's recall increases from 64.74% in the Top-1 evaluation to 76.69% in the Top-2 evaluation, representing an 11.95 percentage point improvement.", "perturbed_statement": "The model's recall increases from 61.36% in the Top-1 evaluation to 69.98% in the Top-2 evaluation, representing an 8.62 percentage point improvement.", "perturbed_explanation": "This statement is incorrect because Figure 7 shows recall values of 64.74% for Top-1 and 76.69% for Top-2, not 61.36% or 69.98%, and the actual improvement is 11.95 percentage points, not 8.62.", "claim": "The model's recall increases from 64.74% in the Top-1 evaluation to 76.69% in the Top-2 evaluation, representing an 11.95 percentage point improvement.", "label": true }, { "paperid": "2410.19886v1", "paper_path": "./SciVer/papers/2410.19886v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.19886v1-Table4-1.png", "request_id": 75, "origin_statement": "Using the combined kernel reduces RMSE from 46.03 cycles (RBF) to 24.57 cycles, a 46.6% decrease, while covariance uncertainty drops from 4.50 to 2.60 cycles, a 42.2% reduction, showing improved prediction accuracy and confidence.", "perturbed_statement": "Using the combined kernel reduces RMSE from 46.03 cycles (RBF) to 28.57 cycles, a 37.9% decrease, while covariance uncertainty drops from 4.50 to 2.60 cycles, a 42.2% reduction, showing improved prediction accuracy and confidence.", "perturbed_explanation": "The perturbed statement incorrectly states the combined kernel RMSE as 28.57 cycles; Table 4 lists the combined RMSE as 24.57 cycles. Therefore, the claimed 37.9% decrease is not supported by the data.", "claim": "Using the combined kernel reduces RMSE from 46.03 cycles (RBF) to 24.57 cycles, a 46.6% decrease, while covariance uncertainty drops from 4.50 to 2.60 cycles, a 42.2% reduction, showing improved prediction accuracy and confidence.", "label": true }, { "paperid": "2411.07133v2", "paper_path": "./SciVer/papers/2411.07133v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.07133v2-Table1-1.png", "request_id": 77, "origin_statement": "The Qwen2.5 family contains five models with sizes ranging from 3B to 72B, meaning the largest model is 24 times the size of the smallest one.", "perturbed_statement": "The Qwen2.5 family contains five models with sizes ranging from 3B to 32B, making the largest model about 4 times the size of the smallest one.", "perturbed_explanation": "The table shows the largest Qwen2.5 model is 72B, not 32B, and its size is 24 times the smallest (3B), not roughly 4 times.", "claim": "The Qwen2.5 family contains five models with sizes ranging from 3B to 72B, meaning the largest model is 24 times the size of the smallest one.", "label": true }, { "paperid": "2411.10018v1", "paper_path": "./SciVer/papers/2411.10018v1.json", "claim_type": "direct", "type": "chart", "item": "1(b)", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.10018v1_figure_1(b).png", "request_id": 78, "origin_statement": "Joy in film performances dips from about 0.45 at 5% narrative time to around 0.41 between 50–80%, then rises sharply to roughly 0.49 by the end of the film.", "perturbed_statement": "Joy in film performances dips from about 0.45 at 5% narrative time to around 0.41 between 50–80%, then rises sharply to roughly 0.43 by the end of the film.", 
"perturbed_explanation": "The perturbed statement claims joy rises to roughly 0.43 at the film’s end, but Figure 1(b) shows joy at 100% narrative time is approximately 0.49, not 0.43.", "claim": "Joy in film performances dips from about 0.45 at 5% narrative time to around 0.41 between 50–80%, then rises sharply to roughly 0.49 by the end of the film.", "label": true }, { "paperid": "2409.01672v2", "paper_path": "./SciVer/papers/2409.01672v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.01672v2-Table2-1.png", "request_id": 81, "origin_statement": "On the CUB200 dataset, FMR’s classification accuracy improves by 21.8 percentage points from 61.30% at 15% training size to 83.10% at 100% training size.", "perturbed_statement": "On the CUB200 dataset, FMR’s classification accuracy improves by 19.5 percentage points from 61.30% at 15% training size to 83.10% at 100% training size.", "perturbed_explanation": "The perturbed statement is incorrect because the actual increase indicated in the table is 83.10% − 61.30% = 21.8 percentage points, not 19.5.", "claim": "On the CUB200 dataset, FMR’s classification accuracy improves by 21.8 percentage points from 61.30% at 15% training size to 83.10% at 100% training size.", "label": true }, { "paperid": "2409.11491v1", "paper_path": "./SciVer/papers/2409.11491v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.2.2" ], "image_path": "./SciVer/images/2409.11491v1_figure_3.png", "request_id": 84, "origin_statement": "The mean predicted birth year by Claude-3.5-Sonnet is approximately 11 years later than the actual mean (1981 vs. 1970), while Llama-3.1-8b’s mean is about 8 years earlier than the actual mean (1962 vs. 1970).", "perturbed_statement": "The mean predicted birth year by Claude-3.5-Sonnet is about 5 years earlier than the actual mean (1970 vs. 1981), and Llama-3.1-8b’s mean is roughly 20 years later than the actual mean (1970 vs. 1962).", "perturbed_explanation": "This statement is incorrect because the chart shows Claude-3.5-Sonnet’s mean prediction (1981) is actually 11 years later than the actual mean (1970), not 5 years earlier, and Llama-3.1-8b’s mean prediction (1962) is 8 years earlier than the actual mean, not 20 years later.", "claim": "The mean predicted birth year by Claude-3.5-Sonnet is approximately 11 years later than the actual mean (1981 vs. 1970), while Llama-3.1-8b’s mean is about 8 years earlier than the actual mean (1962 vs. 1970).", "label": true }, { "paperid": "2411.12812v1", "paper_path": "./SciVer/papers/2411.12812v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.5" ], "image_path": "./SciVer/images/2411.12812v1-Table1-1.png", "request_id": 85, "origin_statement": "FT-Dense’s MAE decreases by 0.0703 (from 0.1254 at one day to 0.0551 at ten days), the largest reduction among all models across training durations.", "perturbed_statement": "FT-Dense’s MAE decreases by 0.0703 (from 0.1254 at one day to 0.0551 at ten days), the smallest reduction among all models across training durations.", "perturbed_explanation": "This is incorrect because FT-Dense’s MAE reduction of 0.0703 is actually larger than the reductions achieved by Single (0.0544), FT-full (0.0519), and FT-CNN&Dense (0.0374). 
Therefore, it is the largest, not the smallest, reduction.", "claim": "FT-Dense’s MAE decreases by 0.0703 (from 0.1254 at one day to 0.0551 at ten days), the largest reduction among all models across training durations.", "label": true }, { "paperid": "2410.20348v1", "paper_path": "./SciVer/papers/2410.20348v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.5.1", "5" ], "image_path": "./SciVer/images/2410.20348v1_figure_4.png", "request_id": 86, "origin_statement": "On the hippocampus, UTSRMorph achieves a median DSC of about 0.87, compared to roughly 0.80 for VoxelMorph, representing a 0.07 absolute improvement.", "perturbed_statement": "On the hippocampus, UTSRMorph achieves a median DSC of about 0.90, compared to roughly 0.75 for VoxelMorph, representing a 0.15 absolute improvement.", "perturbed_explanation": "The actual boxplot shows UTSRMorph’s median DSC on the hippocampus at approximately 0.87 (not 0.90) and VoxelMorph’s median at around 0.80 (not 0.75), so the claimed values and improvement are incorrect.", "claim": "On the hippocampus, UTSRMorph achieves a median DSC of about 0.87, compared to roughly 0.80 for VoxelMorph, representing a 0.07 absolute improvement.", "label": true }, { "paperid": "2411.14516v1", "paper_path": "./SciVer/papers/2411.14516v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "8" ], "image_path": "./SciVer/images/2411.14516v1-Table6-1.png", "request_id": 87, "origin_statement": "Pattern-based triggers are detected with 100% input AUC for all models and over 99% output AUC, while code-based triggers show average input AUC of about 57.9% but maintain high output AUC above 97.5%.", "perturbed_statement": "Pattern-based triggers are detected with 100% input AUC for all models and over 99% output AUC, while code-based triggers show average input AUC of about 67.9% but maintain high output AUC above 99%.", "perturbed_explanation": "The perturbed statement misreports the average input AUC for code-based triggers as 67.9%, but the actual values are 57.75%, 55%, and 58.82% (around 57.9%). 
It also claims output AUC above 99%, while the real output AUCs for code-based triggers are 100%, 97.94%, and 97.49%, only one of which exceeds 99%.", "claim": "Pattern-based triggers are detected with 100% input AUC for all models and over 99% output AUC, while code-based triggers show average input AUC of about 57.9% but maintain high output AUC above 97.5%.", "label": true }, { "paperid": "2411.05608v1", "paper_path": "./SciVer/papers/2411.05608v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "1", "4.1", "4.2" ], "image_path": "./SciVer/images/2411.05608v1_figure_5.png", "request_id": 88, "origin_statement": "Six of the eight black circles (Baikal-GVD cascade events) lie within the two full magenta curves (|b|<20°), indicating that 75% of these events cluster near the Galactic plane.", "perturbed_statement": "All eight black circles (Baikal-GVD cascade events) lie within the two full magenta curves (|b|<20°), indicating that 100% of these events cluster near the Galactic plane.", "perturbed_explanation": "Figure 5 shows two black circles outside the full magenta curves (|b|<20°), so only six of the eight events actually fall within the Galactic-plane band, not all eight as the perturbed statement claims.", "claim": "Six of the eight black circles (Baikal-GVD cascade events) lie within the two full magenta curves (|b|<20°), indicating that 75% of these events cluster near the Galactic plane.", "label": true }, { "paperid": "2411.16868v1", "paper_path": "./SciVer/papers/2411.16868v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5" ], "image_path": "./SciVer/images/2411.16868v1_figure_5.png", "request_id": 90, "origin_statement": "Between MJD 60085 and MJD 60096, Ψ_R,Q1 rotated by about 90°, flipping from perpendicular to parallel relative to the jet axis over an 11-day interval.", "perturbed_statement": "Between MJD 60090 and MJD 60096, Ψ_R,Q1 rotated by about 90°, flipping from perpendicular to parallel relative to the jet axis over a 6-day interval.", "perturbed_explanation": "The timeline shows Ψ_R,Q1 was perpendicular at MJD 60085 and only became parallel at MJD 60096. 
There is no data point at MJD 60090 for Ψ_R,Q1, and the correct interval is 11 days (60085–60096), not 6 days.", "claim": "Between MJD 60085 and MJD 60096, Ψ_R,Q1 rotated by about 90°, flipping from perpendicular to parallel relative to the jet axis over an 11-day interval.", "label": true }, { "paperid": "2411.01299v1", "paper_path": "./SciVer/papers/2411.01299v1.json", "claim_type": "direct", "type": "chart", "item": "14", "section": [ "3.5.3" ], "image_path": "./SciVer/images/2411.01299v1_figure_14.png", "request_id": 92, "origin_statement": "Angle_Left_3 exhibits the most outliers—about eight points beyond its whiskers—while Pitch_Right_1_90 has the fewest, with just two outliers.", "perturbed_statement": "Angle_Left_4 exhibits the most outliers—about ten points beyond its whiskers—while Pitch_Right_1_90 has the fewest, with just one outlier.", "perturbed_explanation": "This is incorrect because Figure 14 shows Angle_Left_3 actually has the highest number of outliers, not Angle_Left_4, and Pitch_Right_1_90 has two outliers, not one, contradicting both claimed details.", "claim": "Angle_Left_3 exhibits the most outliers—about eight points beyond its whiskers—while Pitch_Right_1_90 has the fewest, with just two outliers.", "label": true }, { "paperid": "2410.23992v1", "paper_path": "./SciVer/papers/2410.23992v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.23992v1-Table5-1.png", "request_id": 93, "origin_statement": "Across all three forecasting horizons (96, 336, 720), Ada-MSHyper achieves the lowest MSE values: 0.372, 0.422, and 0.445.", "perturbed_statement": "Across all three forecasting horizons (96, 336, 720), Ada-MSHyper achieves the lowest MSE values: 0.372, 0.422, and 0.455.", "perturbed_explanation": "This statement is incorrect because Table 5 shows that Ada-MSHyper's MSE at horizon 720 is 0.445, not 0.455.", "claim": "Across all three forecasting horizons (96, 336, 720), Ada-MSHyper achieves the lowest MSE values: 0.372, 0.422, and 0.445.", "label": true }, { "paperid": "2409.17730v1", "paper_path": "./SciVer/papers/2409.17730v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1.1" ], "image_path": "./SciVer/images/2409.17730v1-Table1-1.png", "request_id": 97, "origin_statement": "BeerAdvocate's average interaction length (185.3) is over four times the Steam dataset's (43.4).", "perturbed_statement": "BeerAdvocate's average interaction length (95.5) is about twice the Steam dataset's (43.4).", "perturbed_explanation": "This is incorrect because Table 1 lists BeerAdvocate's average interaction length as 185.3 (not 95.5), making it over four times Steam's 43.4 rather than roughly twice.", "claim": "BeerAdvocate's average interaction length (185.3) is over four times the Steam dataset's (43.4).", "label": true }, { "paperid": "2411.16535v1", "paper_path": "./SciVer/papers/2411.16535v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.16535v1_figure_5.png", "request_id": 102, "origin_statement": "At 4× acceleration, ADOBI’s worst-case PSNR of roughly 30.5 dB surpasses DDS’s worst-case (~26.5 dB) by about 4 dB and DPS’s worst-case (~22.5 dB) by approximately 8 dB, highlighting its superior stability.", "perturbed_statement": "At 4× acceleration, ADOBI’s worst-case PSNR of roughly 28 dB surpasses DDS’s worst-case (~24 dB) by about 4 dB and DPS’s worst-case (~20 dB) by approximately 8 dB, highlighting its superior 
stability.", "perturbed_explanation": "The perturbed statement is wrong because in the figure the lower whiskers show ADOBI’s worst-case PSNR around 30.5 dB (not 28 dB), DDS’s around 26.5 dB (not 24 dB), and DPS’s around 22.5 dB (not 20 dB).", "claim": "At 4× acceleration, ADOBI’s worst-case PSNR of roughly 30.5 dB surpasses DDS’s worst-case (~26.5 dB) by about 4 dB and DPS’s worst-case (~22.5 dB) by approximately 8 dB, highlighting its superior stability.", "label": true }, { "paperid": "2410.06423v1", "paper_path": "./SciVer/papers/2410.06423v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.06423v1_figure_4.png", "request_id": 106, "origin_statement": "On the Student dataset under the Decision Tree model for the Gender feature, FairEdu’s |1−DI| is approximately 0.8, which is 0.1 lower than LTDD’s ~0.9 and 0.6 lower than the baseline ~1.4.", "perturbed_statement": "On the Student dataset under the Decision Tree model for the Gender feature, FairEdu’s |1−DI| is approximately 1.0, which is 0.1 higher than LTDD’s ~0.9 and 0.4 lower than the baseline ~1.4.", "perturbed_explanation": "The figure shows FairEdu’s |1−DI| for the Student Gender feature is about 0.8 (not 1.0) and that it sits below LTDD’s ~0.9, so it cannot be higher than LTDD as stated.", "claim": "On the Student dataset under the Decision Tree model for the Gender feature, FairEdu’s |1−DI| is approximately 0.8, which is 0.1 lower than LTDD’s ~0.9 and 0.6 lower than the baseline ~1.4.", "label": true }, { "paperid": "2409.19514v1", "paper_path": "./SciVer/papers/2409.19514v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.19514v1-Table2-1.png", "request_id": 107, "origin_statement": "The experimental error bound increases by over 100-fold between s=10 and s=1000, rising from under 0.0004 to under 0.05.", "perturbed_statement": "The experimental error bound increases by about 10-fold between s=10 and s=1000, rising from under 0.0004 to under 0.05.", "perturbed_explanation": "The perturbation is incorrect: the error bound rises from 0.0004 at s=10 to 0.05 at s=1000, which is more than a 100-fold increase, not roughly 10-fold as stated.", "claim": "The experimental error bound increases by over 100-fold between s=10 and s=1000, rising from under 0.0004 to under 0.05.", "label": true }, { "paperid": "2411.12078v1", "paper_path": "./SciVer/papers/2411.12078v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "1", "4.1" ], "image_path": "./SciVer/images/2411.12078v1_figure_1.png", "request_id": 108, "origin_statement": "Mol GA attains the highest novelty score on the radar plot at approximately 1.2, which is about 0.3 higher than f-RAG’s novelty (~0.9).", "perturbed_statement": "Mol GA’s novelty score is approximately 0.8, which is about 0.1 lower than f-RAG’s novelty (~0.9).", "perturbed_explanation": "The radar plot shows Mol GA’s novelty value near the outermost ring around 1.2, not 0.8, and it exceeds f-RAG’s novelty (~0.9), so stating it is 0.8 and lower than f-RAG contradicts the plotted values.", "claim": "Mol GA attains the highest novelty score on the radar plot at approximately 1.2, which is about 0.3 higher than f-RAG’s novelty (~0.9).", "label": true }, { "paperid": "2411.16459v1", "paper_path": "./SciVer/papers/2411.16459v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5.2" ], "image_path": 
"./SciVer/images/2411.16459v1_figure_7.png", "request_id": 110, "origin_statement": "In Figure 7, two cyan dots appear above the 8 M⊙ threshold at radii of ~0.04–0.05 pc, while all other cyan cores lie below this threshold at radii between 0.01 and 0.05 pc.", "perturbed_statement": "Only one cyan core in Figure 7 exceeds the 8 M⊙ threshold at a radius of ~0.05 pc, while the rest lie below this threshold at radii between 0.01 and 0.05 pc.", "perturbed_explanation": "The statement is incorrect because the image clearly shows two cyan dots above the horizontal 8 M⊙ threshold line at radii around 0.04–0.05 pc, not just one.", "claim": "In Figure 7, two cyan dots appear above the 8 M⊙ threshold at radii of ~0.04–0.05 pc, while all other cyan cores lie below this threshold at radii between 0.01 and 0.05 pc.", "label": true }, { "paperid": "2409.04384v1", "paper_path": "./SciVer/papers/2409.04384v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.04384v1-Table5-1.png", "request_id": 111, "origin_statement": "At a 50% sampling rate, our method achieves a PSNR of 37.4 dB, which is 0.2 dB higher than DiffPIR’s 37.2 dB.", "perturbed_statement": "At a 50% sampling rate, our method achieves a PSNR of 37.4 dB, which is 0.2 dB lower than DiffPIR’s 37.2 dB.", "perturbed_explanation": "The statement is wrong because the table shows our method’s PSNR is 37.4 dB, exceeding DiffPIR’s 37.2 dB, so it is higher rather than lower.", "claim": "At a 50% sampling rate, our method achieves a PSNR of 37.4 dB, which is 0.2 dB higher than DiffPIR’s 37.2 dB.", "label": true }, { "paperid": "2410.04422v5", "paper_path": "./SciVer/papers/2410.04422v5.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.04422v5-Table2-1.png", "request_id": 113, "origin_statement": "GPT-4o's accuracy in KV retrieval decreases by 92 percentage points from 97% at N=4 to 5% at N=1000, representing the largest drop among the models.", "perturbed_statement": "GPT-4o's accuracy in KV retrieval decreases by 90 percentage points from 97% at N=4 to 7% at N=1000, representing the largest drop among the models.", "perturbed_explanation": "According to the table, GPT-4o's accuracy at N=1000 is 5%, not 7%, so the actual decrease is 92 points (97% to 5%), not 90 points.", "claim": "GPT-4o's accuracy in KV retrieval decreases by 92 percentage points from 97% at N=4 to 5% at N=1000, representing the largest drop among the models.", "label": true }, { "paperid": "2411.04709v1", "paper_path": "./SciVer/papers/2411.04709v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "5.4" ], "image_path": "./SciVer/images/2411.04709v1-Table5-1.png", "request_id": 123, "origin_statement": "The Cross Domain version of our model achieves an average μAP of 84.7%, outperforming the SSCD image copy detection model's 77.5% by 7.2 percentage points.", "perturbed_statement": "The Cross Domain version of our model achieves an average μAP of 84.7%, outperforming the SSCD image copy detection model's 77.5% by 5.2 percentage points.", "perturbed_explanation": "This statement is incorrect because the actual difference, according to Table 5, between 84.7% (Cross Domain ours) and 77.5% (SSCD) is 7.2 percentage points, not 5.2.", "claim": "The Cross Domain version of our model achieves an average μAP of 84.7%, outperforming the SSCD image copy detection model's 77.5% by 7.2 percentage points.", "label": true }, { "paperid": 
"2410.09133v1", "paper_path": "./SciVer/papers/2410.09133v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2410.09133v1_figure_2.png", "request_id": 128, "origin_statement": "In Σ_{t+2}, the log-score model’s covariance block among indices 5–9 has values above 0.5, while the same block under MVG-CRPS stays below 0.3.", "perturbed_statement": "In Σ_{t+2}, the log-score model’s covariance block among indices 5–9 has values above 0.6, while the same block under MVG-CRPS stays below 0.2.", "perturbed_explanation": "The perturbed claim is wrong because the covariance matrices are clipped at 0.6, so no block exceeds 0.6; and in the MVG-CRPS Σ_{t+2} plot, the values in the 5–9 index block are around 0.25–0.35 (not below 0.2).", "claim": "In Σ_{t+2}, the log-score model’s covariance block among indices 5–9 has values above 0.5, while the same block under MVG-CRPS stays below 0.3.", "label": true }, { "paperid": "2409.03904v1", "paper_path": "./SciVer/papers/2409.03904v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "3.1" ], "image_path": "./SciVer/images/2409.03904v1_figure_3.png", "request_id": 132, "origin_statement": "Across all 30,000 CG iterations, the log10 residual curves for the 48^3×96×24 and 96^3×192×24 lattices differ by less than 0.03, despite a 16× volume increase, indicating nearly identical convergence behavior.", "perturbed_statement": "Across all 30,000 CG iterations, the log10 residual curves for the 48^3×96×24 and 96^3×192×24 lattices differ by as much as 0.2, despite an 8× volume increase, indicating nearly identical convergence behavior.", "perturbed_explanation": "This claim is wrong because the two lattices actually differ by a factor of 16 in volume (not 8×), and the maximum observed difference in log10 residual between their curves is under 0.03 (not 0.2), as shown by the nearly overlapping lines in the figure.", "claim": "Across all 30,000 CG iterations, the log10 residual curves for the 48^3×96×24 and 96^3×192×24 lattices differ by less than 0.03, despite a 16× volume increase, indicating nearly identical convergence behavior.", "label": true }, { "paperid": "2409.00718v1", "paper_path": "./SciVer/papers/2409.00718v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2", "3.4" ], "image_path": "./SciVer/images/2409.00718v1-Table1-1.png", "request_id": 133, "origin_statement": "The proposed MCGAEc model achieves an AUC of 0.994, which is 0.001 higher than the next best model (HSV+YCbCr+Fundus+OCT+RFC at σ=1 with 0.993).", "perturbed_statement": "The proposed MCGAEc model achieves an AUC of 0.994, which is 0.003 higher than the next best model (HSV+YCbCr+Fundus+OCT+RFC at σ=1 with 0.991).", "perturbed_explanation": "This statement is incorrect because the HSV+YCbCr+Fundus+OCT+RFC (σ=1) model actually has an AUC of 0.993, not 0.991, making the true difference 0.001 rather than 0.003.", "claim": "The proposed MCGAEc model achieves an AUC of 0.994, which is 0.001 higher than the next best model (HSV+YCbCr+Fundus+OCT+RFC at σ=1 with 0.993).", "label": true }, { "paperid": "2410.07196v1", "paper_path": "./SciVer/papers/2410.07196v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.07196v1_figure_5.png", "request_id": 134, "origin_statement": "Sub6_S01_Exp1.mat exhibits predominantly strong positive correlations (above 0.5) across most channel pairs, whereas Sub23_S01_Exp2.mat displays a 
balanced mix of positive and negative correlations, with several values below -0.5.", "perturbed_statement": "Sub6_S01_Exp1.mat exhibits a balanced mix of positive and negative correlations, with several values below -0.5, whereas Sub23_S01_Exp2.mat displays predominantly strong positive correlations (above 0.5) across most channel pairs.", "perturbed_explanation": "The perturbed claim is incorrect because the heatmap for Sub6_S01_Exp1.mat in the figure is almost entirely red, indicating strong positive correlations above 0.5 and no significant negative values. Conversely, Sub23_S01_Exp2.mat clearly shows blue regions below -0.5, contradicting the assertion of uniformly strong positives.", "claim": "Sub6_S01_Exp1.mat exhibits predominantly strong positive correlations (above 0.5) across most channel pairs, whereas Sub23_S01_Exp2.mat displays a balanced mix of positive and negative correlations, with several values below -0.5.", "label": true }, { "paperid": "2411.07239v1", "paper_path": "./SciVer/papers/2411.07239v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1", "4.1.1" ], "image_path": "./SciVer/images/2411.07239v1-Table1-1.png", "request_id": 137, "origin_statement": "When fine-tuning with PI-LoRA, pretraining using D2NO yields a 3.11% relative error, which is 0.38 percentage points lower than the 3.49% error from single-operator pretraining.", "perturbed_statement": "When fine-tuning with PI-LoRA, pretraining using D2NO yields a 3.11% relative error, which is 0.58 percentage points lower than the 3.49% error from single-operator pretraining.", "perturbed_explanation": "The stated difference of 0.58 percentage points is incorrect. The actual difference between 3.49% and 3.11% is 0.38 percentage points, not 0.58.", "claim": "When fine-tuning with PI-LoRA, pretraining using D2NO yields a 3.11% relative error, which is 0.38 percentage points lower than the 3.49% error from single-operator pretraining.", "label": true }, { "paperid": "2411.10213v1", "paper_path": "./SciVer/papers/2411.10213v1.json", "claim_type": "direct", "type": "chart", "item": "1(a)", "section": [ "4.1", "4.2" ], "image_path": "./SciVer/images/2411.10213v1_figure_1(a).png", "request_id": 138, "origin_statement": "Honeycomb resolves 115 cases, which is 40.2% more than Agentless's 82 cases and only 2.5% fewer than MarsCode Agent's 118 cases.", "perturbed_statement": "Honeycomb resolves 112 cases, which is 36.6% more than Agentless's 82 cases and only 6 fewer than MarsCode Agent's 118 cases.", "perturbed_explanation": "The histogram shows Honeycomb resolves 115 cases, not 112. 
The difference between Honeycomb and MarsCode Agent is 3 cases, not 6, and the increase over Agentless (82 to 115) is about 40.2%, not 36.6%.", "claim": "Honeycomb resolves 115 cases, which is 40.2% more than Agentless's 82 cases and only 2.5% fewer than MarsCode Agent's 118 cases.", "label": true }, { "paperid": "2410.19523v1", "paper_path": "./SciVer/papers/2410.19523v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.19523v1-Table5-1.png", "request_id": 139, "origin_statement": "From u=4 to u=5 in the heuristic table (w^+1), the sixth row’s value rises by 3 (from 9 to 12), whereas the fifth row’s value rises by 2 (from 6 to 8), indicating a steeper ascent in row 6.", "perturbed_statement": "From u=4 to u=5 in the heuristic table (w^+1), the sixth row’s value rises by 4 (from 9 to 13), whereas the fifth row’s value rises by 2 (from 6 to 8), indicating a steeper ascent in row 6.", "perturbed_explanation": "This is incorrect because in the heuristic table, the sixth row’s values at u=4 and u=5 are actually 9 and 12, so the rise is 3 (9 → 12), not 4, and the value at u=5 is 12, not 13.", "claim": "From u=4 to u=5 in the heuristic table (w^+1), the sixth row’s value rises by 3 (from 9 to 12), whereas the fifth row’s value rises by 2 (from 6 to 8), indicating a steeper ascent in row 6.", "label": true }, { "paperid": "2410.18529v2", "paper_path": "./SciVer/papers/2410.18529v2.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.18529v2_figure_3.png", "request_id": 140, "origin_statement": "For the keyword “natural language instruction(s)”, Google Scholar indexed about 200 publications, exceeding the combined IEEE (~40) and DBLP (~125) counts of approximately 165 papers.", "perturbed_statement": "For the keyword “natural language instruction(s)”, Google Scholar indexed about 150 publications, which is lower than the combined IEEE (~40) and DBLP (~125) counts of around 165 papers.", "perturbed_explanation": "The bar chart shows Google Scholar retrieved about 200 papers for “natural language instruction(s)”, not 150, and 200 exceeds the combined IEEE (~40) and DBLP (~125) total of about 165. The perturbed statement’s 150 figure contradicts the actual ~200 value on the chart.", "claim": "For the keyword “natural language instruction(s)”, Google Scholar indexed about 200 publications, exceeding the combined IEEE (~40) and DBLP (~125) counts of approximately 165 papers.", "label": true }, { "paperid": "2411.01289v1", "paper_path": "./SciVer/papers/2411.01289v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.01289v1-Table2-1.png", "request_id": 143, "origin_statement": "ML_CP achieves an accuracy of 0.98 in multi-level classification, outperforming NPM (0.72) by 0.26 and IPM (0.74) by 0.24.", "perturbed_statement": "ML_CP achieves an accuracy of 0.95 in multi-level classification, outperforming NPM (0.72) by 0.23 and IPM (0.74) by 0.21.", "perturbed_explanation": "The perturbed statement misreports ML_CP's accuracy as 0.95, but Table 2 shows its accuracy is 0.98. 
Therefore, the claimed performance differences of 0.23 and 0.21 are incorrect; the actual gaps are 0.26 and 0.24.", "claim": "ML_CP achieves an accuracy of 0.98 in multi-level classification, outperforming NPM (0.72) by 0.26 and IPM (0.74) by 0.24.", "label": true }, { "paperid": "2411.00429v1", "paper_path": "./SciVer/papers/2411.00429v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "7.3" ], "image_path": "./SciVer/images/2411.00429v1_figure_3.png", "request_id": 144, "origin_statement": "In the five categories panel, the Gower distance has a median alienation coefficient around 0.42, which is roughly 0.17 higher than the unbiased dependent variant’s median of about 0.25.", "perturbed_statement": "In the five categories panel, the Gower distance has a median alienation coefficient around 0.30, which is roughly 0.05 higher than the unbiased dependent variant’s median of about 0.25.", "perturbed_explanation": "The perturbation is wrong because in the five-category boxplot, the median for Gower (G) is actually about 0.42, not 0.30, so the difference from Udep’s median (~0.25) is around 0.17, not 0.05.", "claim": "In the five categories panel, the Gower distance has a median alienation coefficient around 0.42, which is roughly 0.17 higher than the unbiased dependent variant’s median of about 0.25.", "label": true }, { "paperid": "2411.15413v1", "paper_path": "./SciVer/papers/2411.15413v1.json", "claim_type": "direct", "type": "table", "item": "8", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.15413v1-Table8-1.png", "request_id": 145, "origin_statement": "In Table 8, applying both λ_c and λ_h penalties yields an fwIoU of 80.69, which is 2.57 points higher than the 78.12 achieved without penalty, and also improves PSNR from 16.58 to 17.41.", "perturbed_statement": "In Table 8, applying both λ_c and λ_h penalties yields an fwIoU of 81.69, which is 3.57 points higher than the 78.12 achieved without penalty, and also improves PSNR from 16.58 to 17.41.", "perturbed_explanation": "The perturbed statement is incorrect because the table reports an fwIoU of 80.69—not 81.69—when both λ_c and λ_h penalties are applied. 
Hence the improvement over 78.12 is actually 2.57 points, not 3.57.", "claim": "In Table 8, applying both λ_c and λ_h penalties yields an fwIoU of 80.69, which is 2.57 points higher than the 78.12 achieved without penalty, and also improves PSNR from 16.58 to 17.41.", "label": true }, { "paperid": "2410.15910v2", "paper_path": "./SciVer/papers/2410.15910v2.json", "claim_type": "direct", "type": "chart", "item": "1(d)", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.15910v2_figure_1(d).png", "request_id": 146, "origin_statement": "After 75 steps, the red trajectory’s circular loop has a radius roughly twice that of the orange loop.", "perturbed_statement": "After 75 steps, the red trajectory’s circular loop has a radius roughly half that of the orange loop.", "perturbed_explanation": "This is incorrect because, in the figure, the red loop spans approximately from x=1.2 to x=1.8 (radius ≈0.3), while the orange loop spans from x=1.35 to x=1.65 (radius ≈0.15), so the red radius is about twice the orange radius, not half.", "claim": "After 75 steps, the red trajectory’s circular loop has a radius roughly twice that of the orange loop.", "label": true }, { "paperid": "2410.23844v1", "paper_path": "./SciVer/papers/2410.23844v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "3.2.2" ], "image_path": "./SciVer/images/2410.23844v1_figure_3.png", "request_id": 150, "origin_statement": "At p(d)=0, the mean probability of commonsense knowledge in the Attention layers (~0.155) exceeds that in the MLP layers (~0.135) by about 0.02.", "perturbed_statement": "At p(d)=0, the mean probability of commonsense knowledge in the Attention layers (~0.125) is about 0.02 lower than in the MLP layers (~0.145).", "perturbed_explanation": "The Attention layers actually show a mean probability around 0.155 (not 0.125) and the MLP layers around 0.135 (not 0.145) at p(d)=0, so the perturbed statement incorrectly swaps both the values and the comparison.", "claim": "At p(d)=0, the mean probability of commonsense knowledge in the Attention layers (~0.155) exceeds that in the MLP layers (~0.135) by about 0.02.", "label": true }, { "paperid": "2409.17791v1", "paper_path": "./SciVer/papers/2409.17791v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.5" ], "image_path": "./SciVer/images/2409.17791v1_figure_5.png", "request_id": 152, "origin_statement": "At an SPO weight of 0.1, LLaMA-7B with DPO+SPO achieves a peak win rate of about 84.7%, exceeding its DPO-only baseline of 81% by approximately 3.7 percentage points.", "perturbed_statement": "At an SPO weight of 0.2, LLaMA-7B with DPO+SPO achieves a peak win rate of about 86%, exceeding its DPO-only baseline of 81% by approximately 5 percentage points.", "perturbed_explanation": "The figure shows that the highest win rate for LLaMA-7B with DPO+SPO occurs at weight 0.1 (around 84.7%), not at weight 0.2, and the difference from the 81% DPO baseline is about 3.7 points, not 5.", "claim": "At an SPO weight of 0.1, LLaMA-7B with DPO+SPO achieves a peak win rate of about 84.7%, exceeding its DPO-only baseline of 81% by approximately 3.7 percentage points.", "label": true }, { "paperid": "2410.08821v1", "paper_path": "./SciVer/papers/2410.08821v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2.1" ], "image_path": "./SciVer/images/2410.08821v1-Table2-1.png", "request_id": 155, "origin_statement": "Ours outperforms Ours w/o IIC&AMR on MuSiQue F1 by 7.4 points, from 16.8% to 24.2%.", 
"perturbed_statement": "Ours outperforms Ours w/o IIC&AMR on MuSiQue F1 by 6.4 points, from 16.8% to 24.2%.", "perturbed_explanation": "This is incorrect because the MuSiQue F1 score increases from 16.8% to 24.2%, which is a 7.4-point gain, not 6.4 points.", "claim": "Ours outperforms Ours w/o IIC&AMR on MuSiQue F1 by 7.4 points, from 16.8% to 24.2%.", "label": true }, { "paperid": "2409.14335v1", "paper_path": "./SciVer/papers/2409.14335v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.14335v1-Table1-1.png", "request_id": 159, "origin_statement": "Among the four methods in Table 1, MQM-APE is the only one that satisfies all three evaluated features—interpretability, training-free operation, and high-quality error annotations.", "perturbed_statement": "EAPrompt is the only approach that satisfies all three evaluated features—interpretability, training-free operation, and high-quality error annotations.", "perturbed_explanation": "This is incorrect because Table 1 shows EAPrompt has only one of the three features (interpretability) and lacks training-free operation and high-quality error annotations.", "claim": "Among the four methods in Table 1, MQM-APE is the only one that satisfies all three evaluated features—interpretability, training-free operation, and high-quality error annotations.", "label": true }, { "paperid": "2411.15865v1", "paper_path": "./SciVer/papers/2411.15865v1.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "4.2.2" ], "image_path": "./SciVer/images/2411.15865v1_figure_8.png", "request_id": 160, "origin_statement": "The correct detection rate for target state |1> is 99.628 ± 0.489%, which is 0.108% higher than the 99.520 ± 0.681% observed for state |0>.", "perturbed_statement": "The correct detection rate for target state |1> is 99.520 ± 0.681%, which is 0.108% higher than the 99.628 ± 0.489% observed for state |0>.", "perturbed_explanation": "This statement is wrong because it swaps the measured percentages and uncertainties: the actual detection rate for |1> is 99.628 ± 0.489%, and for |0> it is 99.520 ± 0.681%, not the other way around.", "claim": "The correct detection rate for target state |1> is 99.628 ± 0.489%, which is 0.108% higher than the 99.520 ± 0.681% observed for state |0>.", "label": true }, { "paperid": "2409.00049v2", "paper_path": "./SciVer/papers/2409.00049v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.00049v2-Table3-1.png", "request_id": 161, "origin_statement": "The lab-based thermal probe reduces measurement uncertainty by 8 percentage points compared to the in situ probe, at an additional cost of £1,613, implying about £202 per percentage point reduction.", "perturbed_statement": "The lab-based thermal probe reduces measurement uncertainty by 10 percentage points compared to the in situ probe, at an additional cost of £1,613, implying about £161 per percentage point reduction.", "perturbed_explanation": "The table shows the in situ probe uncertainty is 25% and the lab test is 17%, a reduction of 8 percentage points, not 10. 
Therefore, the cost per percentage point is roughly £202 rather than £161, making the perturbed details incorrect.", "claim": "The lab-based thermal probe reduces measurement uncertainty by 8 percentage points compared to the in situ probe, at an additional cost of £1,613, implying about £202 per percentage point reduction.", "label": true }, { "paperid": "2411.07050v1", "paper_path": "./SciVer/papers/2411.07050v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.07050v1_figure_4.png", "request_id": 162, "origin_statement": "FedProx attains a macro F1 score of 44.28%, which is 1.05 percentage points above FedAvg’s 43.23%.", "perturbed_statement": "FedProx attains a macro F1 score of 44.28%, which is 2.05 percentage points above FedAvg’s 43.23%.", "perturbed_explanation": "The claimed difference is incorrect: 44.28% minus 43.23% equals 1.05%, not 2.05%, so the stated 2.05 percentage‐point gap contradicts the actual values shown.", "claim": "FedProx attains a macro F1 score of 44.28%, which is 1.05 percentage points above FedAvg’s 43.23%.", "label": true }, { "paperid": "2411.14711v1", "paper_path": "./SciVer/papers/2411.14711v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "8.3" ], "image_path": "./SciVer/images/2411.14711v1_figure_6.png", "request_id": 164, "origin_statement": "On ogbl-citation2, encoding SPD and AA heuristics via HE yields an MRR of 81.42, which is 5.39 points higher than the best single heuristic RA at 76.03.", "perturbed_statement": "On ogbl-citation2, encoding SPD and AA via HE yields an MRR of 85.42, outperforming the single heuristic RA’s 70.03 by 15.39 points.", "perturbed_explanation": "This statement is incorrect because the figure shows HE(SPD+AA) has an MRR of 81.42 (not 85.42), and RA’s single-heuristic MRR is 76.03 (not 70.03), so both the HE(SPD+AA) value and RA’s baseline are misstated.", "claim": "On ogbl-citation2, encoding SPD and AA heuristics via HE yields an MRR of 81.42, which is 5.39 points higher than the best single heuristic RA at 76.03.", "label": true }, { "paperid": "2409.11887v1", "paper_path": "./SciVer/papers/2409.11887v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.11887v1_figure_1.png", "request_id": 168, "origin_statement": "At an input length of 2560, DocMamba uses about 8.6 times less GPU memory than LayoutLMv3, requiring approximately 5.0 GB compared to LayoutLMv3’s 42.8 GB.", "perturbed_statement": "At an input length of 2560, DocMamba uses only about 2.3 GB of GPU memory compared to LayoutLMv3’s 42.8 GB, an 18-fold reduction.", "perturbed_explanation": "This is incorrect because the chart shows DocMamba actually uses around 5.0 GB at input length 2560 (not 2.3 GB), resulting in an 8.6× reduction rather than 18×.", "claim": "At an input length of 2560, DocMamba uses about 8.6 times less GPU memory than LayoutLMv3, requiring approximately 5.0 GB compared to LayoutLMv3’s 42.8 GB.", "label": true }, { "paperid": "2409.07107v1", "paper_path": "./SciVer/papers/2409.07107v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.07107v1-Table1-1.png", "request_id": 169, "origin_statement": "The Atlas robot's 3,399 collision pairs exceed H1's 255 collision pairs by over 13-fold.", "perturbed_statement": "The Atlas robot's 3,399 collision pairs exceed H1's 494 collision pairs by over 6-fold.", 
"perturbed_explanation": "Table 1 shows H1 has 255 collision pairs, not 494. Therefore comparing Atlas’s 3,399 pairs to an incorrect H1 count of 494 is factually wrong.", "claim": "The Atlas robot's 3,399 collision pairs exceed H1's 494 collision pairs by over 6-fold.", "label": false }, { "paperid": "2410.22543v1", "paper_path": "./SciVer/papers/2410.22543v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "2.4" ], "image_path": "./SciVer/images/2410.22543v1_figure_2(b).png", "request_id": 170, "origin_statement": "Between argument of perihelion angles of 0° and 270°, the aphelion distance Ra of asteroid 2001 YB5 rises from about 0.9 AU to around 1.8 AU, an increase of approximately 0.9 AU.", "perturbed_statement": "Between argument of perihelion angles of 0° and 280°, the aphelion distance Ra of asteroid 2001 YB5 rises from about 1.1 AU to around 2.0 AU, an increase of approximately 0.9 AU.", "perturbed_explanation": "This statement is incorrect because the graph shows Ra at 0° is about 0.9 AU, not 1.1 AU, and its maximum Ra near 270° is around 1.8 AU, not 2.0 AU; the argument range is also 270°, not 280°.", "claim": "Between argument of perihelion angles of 0° and 280°, the aphelion distance Ra of asteroid 2001 YB5 rises from about 1.1 AU to around 2.0 AU, an increase of approximately 0.9 AU.", "label": false }, { "paperid": "2409.16057v2", "paper_path": "./SciVer/papers/2409.16057v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.16057v2-Table1-1.png", "request_id": 171, "origin_statement": "Mask R-CNN has the largest increase, 0.21, which is greater than the increases of 0.12 for Faster R-CNN, 0.14 for Faster R-CNN FPN, and 0.15 for Double-Head R-CNN.", "perturbed_statement": "Mask R-CNN has the largest increase, 0.19, which is greater than the increases of 0.12 for Faster R-CNN, 0.14 for Faster R-CNN FPN, and 0.15 for Double-Head R-CNN.", "perturbed_explanation": "The table shows Mask R-CNN’s inconsistency rising from 0.51 (clean) to 0.72 (poisoned), an increase of 0.21, not 0.19 as stated.", "claim": "Mask R-CNN has the largest increase, 0.19, which is greater than the increases of 0.12 for Faster R-CNN, 0.14 for Faster R-CNN FPN, and 0.15 for Double-Head R-CNN.", "label": false }, { "paperid": "2409.14878v1", "paper_path": "./SciVer/papers/2409.14878v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "4.5.2" ], "image_path": "./SciVer/images/2409.14878v1-Table6-1.png", "request_id": 173, "origin_statement": "The cross-validation shows fold 4 achieves the highest F1 score of 0.946, which is 0.023 higher than fold 1's 0.923.", "perturbed_statement": "The cross-validation shows fold 3 achieves the highest F1 score of 0.946, which is 0.023 higher than fold 1's 0.923.", "perturbed_explanation": "This is incorrect because in the table fold 3’s F1 score is 0.927, not 0.946 as claimed.", "claim": "The cross-validation shows fold 3 achieves the highest F1 score of 0.946, which is 0.023 higher than fold 1's 0.923.", "label": false }, { "paperid": "2409.06280v1", "paper_path": "./SciVer/papers/2409.06280v1.json", "claim_type": "direct", "type": "chart", "item": "10", "section": [ "6" ], "image_path": "./SciVer/images/2409.06280v1_figure_10.png", "request_id": 174, "origin_statement": "At marking ratios of 0.01% and 0.005%, TPR remains at 100% across all FPR values, whereas at a marking ratio of 0.0025%, TPR starts at about 78% at FPR=0.001 and rises to nearly 100% as FPR 
increases.", "perturbed_statement": "At marking ratios of 0.01% and 0.005%, TPR remains at 98% across all FPR values, whereas at a marking ratio of 0.0025%, TPR starts at about 85% at FPR=0.001 and rises to nearly 100% as FPR increases.", "perturbed_explanation": "This statement is incorrect because in the figure both the 0.01% (green) and 0.005% (yellow) curves maintain a TPR of 100% (not 98%) across all FPR values, and the 0.0025% (blue) curve begins at approximately 78% TPR at FPR=0.001 (not 85%).", "claim": "At marking ratios of 0.01% and 0.005%, TPR remains at 98% across all FPR values, whereas at a marking ratio of 0.0025%, TPR starts at about 85% at FPR=0.001 and rises to nearly 100% as FPR increases.", "label": false }, { "paperid": "2410.11378v1", "paper_path": "./SciVer/papers/2410.11378v1.json", "claim_type": "direct", "type": "chart", "item": "5(b)", "section": [ "4.8" ], "image_path": "./SciVer/images/2410.11378v1_figure_5(b).png", "request_id": 176, "origin_statement": "In the 40% adversary scenario, WPFed’s (Ours) average accuracy rises from around 87% at iteration 0 to about 94% at iteration 200, while ProxyFL’s accuracy fluctuates around 82% with periodic drops below 50%.", "perturbed_statement": "In the 40% adversary scenario, WPFed’s (Ours) average accuracy starts at around 80% and climbs to about 98% by iteration 200, while ProxyFL’s accuracy remains steadily above 90% without major fluctuations.", "perturbed_explanation": "The perturbed claim is incorrect because the blue \"Ours\" line actually begins near 87% (not 80%) and only reaches about 94% (not 98%) by iteration 200. Additionally, the red \"ProxyFL\" curve fluctuates around 82% with several dips below 50%, rather than staying steadily above 90%.", "claim": "In the 40% adversary scenario, WPFed’s (Ours) average accuracy starts at around 80% and climbs to about 98% by iteration 200, while ProxyFL’s accuracy remains steadily above 90% without major fluctuations.", "label": false }, { "paperid": "2409.07124v1", "paper_path": "./SciVer/papers/2409.07124v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.2", "3.3", "4.1" ], "image_path": "./SciVer/images/2409.07124v1-Table3-1.png", "request_id": 179, "origin_statement": "Trumpler 10 has experienced 4 supernovae, which is twice the combined supernova count of Sco-Cen UCL (1) and Sco-Cen LCC (1).", "perturbed_statement": "Trumpler 10 has experienced 3 supernovae, which is twice the combined supernova count of Sco-Cen UCL (1) and Sco-Cen LCC (1).", "perturbed_explanation": "According to the table, Trumpler 10’s SNe column is 4, not 3. 
Therefore stating it has experienced 3 supernovae contradicts the provided data.", "claim": "Trumpler 10 has experienced 3 supernovae, which is twice the combined supernova count of Sco-Cen UCL (1) and Sco-Cen LCC (1).", "label": false }, { "paperid": "2411.09556v1", "paper_path": "./SciVer/papers/2411.09556v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.2", "3.3" ], "image_path": "./SciVer/images/2411.09556v1_figure_4.png", "request_id": 180, "origin_statement": "In the snapshot at 27860 r_g/c (right panels), the electron-to-overall heating ratio δ_e at the jet–disk interface reaches about 0.29, roughly 20% higher than the time-averaged value of around 0.24.", "perturbed_statement": "In the snapshot at 27860 r_g/c (right panels), the electron-to-overall heating ratio δ_e at the jet–disk interface reaches about 0.31, roughly 30% higher than the time-averaged value of around 0.24.", "perturbed_explanation": "The color scale for δ_e in the snapshot (right top-right panel) peaks at approximately 0.29, not 0.31. Thus stating δ_e reaches 0.31 contradicts the plotted maximum value shown on the bar, making the perturbed claim incorrect.", "claim": "In the snapshot at 27860 r_g/c (right panels), the electron-to-overall heating ratio δ_e at the jet–disk interface reaches about 0.31, roughly 30% higher than the time-averaged value of around 0.24.", "label": false }, { "paperid": "2411.15583v1", "paper_path": "./SciVer/papers/2411.15583v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.15583v1_figure_3.png", "request_id": 182, "origin_statement": "Modality 5 (with an agency) achieved the highest overall effect size, Hedge’s g = 0.58, compared to near-zero effects in Modality 4 (g = 0.06) and Modality 1 (g = −0.14).", "perturbed_statement": "Modality 5 achieved the highest overall effect size, Hedge’s g = 0.08, compared to near-zero effects in Modality 4 (g = −0.20) and Modality 1 (g = −0.14).", "perturbed_explanation": "The perturbed statement is incorrect because the forest plot shows Modality 5’s overall effect size is 0.58, not 0.08, and Modality 4’s overall effect size is 0.06, not −0.20.", "claim": "Modality 5 achieved the highest overall effect size, Hedge’s g = 0.08, compared to near-zero effects in Modality 4 (g = −0.20) and Modality 1 (g = −0.14).", "label": false }, { "paperid": "2410.16843v1", "paper_path": "./SciVer/papers/2410.16843v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.16843v1-Table1-1.png", "request_id": 183, "origin_statement": "On ConflictQA-StrategyQA, Trustworthy-Alignment yields an accuracy of 77.06% on Vicuna-7b-v1.5, surpassing the SFT accuracy of 71.73% by 5.33 percentage points while reducing MR from 29.97% to 24.19%.", "perturbed_statement": "On ConflictQA-StrategyQA, Trustworthy-Alignment yields an accuracy of 79.06% on Vicuna-7b-v1.5, surpassing the SFT accuracy of 71.73% by 7.33 percentage points while reducing MR from 29.97% to 24.19%.", "perturbed_explanation": "The perturbed claim incorrectly states the Trustworthy-Alignment accuracy as 79.06%. 
According to Table 1, the actual accuracy on ConflictQA-StrategyQA for Vicuna-7b-v1.5 using Trustworthy-Alignment is 77.06%, not 79.06%.", "claim": "On ConflictQA-StrategyQA, Trustworthy-Alignment yields an accuracy of 79.06% on Vicuna-7b-v1.5, surpassing the SFT accuracy of 71.73% by 7.33 percentage points while reducing MR from 29.97% to 24.19%.", "label": false }, { "paperid": "2409.15317v1", "paper_path": "./SciVer/papers/2409.15317v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.15317v1_figure_2.png", "request_id": 184, "origin_statement": "At 100 candidate goals, IDA achieves about 9 targets per minute, roughly 1 target per minute higher than the laggy pilot’s ∼8, whereas the copilot remains at zero.", "perturbed_statement": "At 100 candidate goals, IDA achieves about 7 targets per minute, roughly 1 target per minute lower than the laggy pilot’s ∼8, while the copilot remains at zero.", "perturbed_explanation": "The Reacher experiments plot shows that at 100 goals IDA’s hit rate is around 9 targets/min versus the laggy pilot’s approximately 8. The perturbed statement incorrectly states IDA’s rate as 7 and that it underperforms the pilot, which contradicts the visual data.", "claim": "At 100 candidate goals, IDA achieves about 7 targets per minute, roughly 1 target per minute lower than the laggy pilot’s ∼8, while the copilot remains at zero.", "label": false }, { "paperid": "2410.02409v1", "paper_path": "./SciVer/papers/2410.02409v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.02409v1_figure_4.png", "request_id": 186, "origin_statement": "Over the first 150 positions, the abelian complexity varies between 3 and 8, while the additive complexity remains constant at 3.", "perturbed_statement": "Over the first 150 positions, the abelian complexity varies between 4 and 9, while the additive complexity remains constant at 4.", "perturbed_explanation": "This is incorrect because the figure shows the abelian complexity ranges from 3 (not 4) up to 8 (not 9), and the additive complexity stays constant at 3 (not 4).", "claim": "Over the first 150 positions, the abelian complexity varies between 4 and 9, while the additive complexity remains constant at 4.", "label": false }, { "paperid": "2410.09123v2", "paper_path": "./SciVer/papers/2410.09123v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5.4" ], "image_path": "./SciVer/images/2410.09123v2-Table4-1.png", "request_id": 187, "origin_statement": "Our adapter uses 5,125 parameters on each dataset, which corresponds to only 0.002% of MetaR’s parameters on WIKI, 0.311% on FB15K-237, and 2.187% on UMLS, highlighting increasing relative overhead as MetaR’s size decreases.", "perturbed_statement": "Our adapter uses 5,125 parameters on each dataset, which corresponds to only 0.015% of MetaR’s parameters on WIKI, 0.131% on FB15K-237, and 2.187% on UMLS, highlighting increasing relative overhead as MetaR’s size decreases.", "perturbed_explanation": "The perturbed claim incorrectly states that the adapter represents 0.015% of MetaR’s parameters on WIKI instead of the actual 0.002%, and 0.131% on FB15K-237 instead of the actual 0.311% as shown in Table 4.", "claim": "Our adapter uses 5,125 parameters on each dataset, which corresponds to only 0.015% of MetaR’s parameters on WIKI, 0.131% on FB15K-237, and 2.187% on UMLS, highlighting increasing relative overhead as MetaR’s size 
decreases.", "label": false }, { "paperid": "2409.08598v1", "paper_path": "./SciVer/papers/2409.08598v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.08598v1_figure_3.png", "request_id": 192, "origin_statement": "In the Ours t-SNE plot, the centroids of the neutral (pink) and sadness (blue) clusters are about one cluster diameter farther apart than in the Baseline visualization, indicating improved inter-category separation.", "perturbed_statement": "In the Ours t-SNE plot, the centroids of the neutral (pink) and happiness (green) clusters are about two cluster diameters farther apart than in the Baseline visualization, indicating improved inter-category separation.", "perturbed_explanation": "This is incorrect because the original visualization shows the increased separation occurs between the neutral (pink) and sadness (blue) clusters, not between neutral and happiness (green). Additionally, the actual distance increase is roughly one cluster diameter, not two.", "claim": "In the Ours t-SNE plot, the centroids of the neutral (pink) and happiness (green) clusters are about two cluster diameters farther apart than in the Baseline visualization, indicating improved inter-category separation.", "label": false }, { "paperid": "2410.04199v3", "paper_path": "./SciVer/papers/2410.04199v3.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2", "5.3" ], "image_path": "./SciVer/images/2410.04199v3-Table2-1.png", "request_id": 193, "origin_statement": "In the LongGenBench-GSM8K task, GPT-3.5-Turbo, GPT-4o, and Gemini-1.5-Flash use K=35, which is 5 more questions per query than the K=30 used by all open-source models.", "perturbed_statement": "In the LongGenBench-GSM8K task, GPT-3.5-Turbo, GPT-4o, and Gemini-1.5-Flash use K=30, which is equal to the number of questions per query used by all open-source models.", "perturbed_explanation": "The table shows that GPT-3.5-Turbo, GPT-4o, and Gemini-1.5-Flash actually have K=35 for GSM8K, not K=30, so stating they use K=30 contradicts the provided values.", "claim": "In the LongGenBench-GSM8K task, GPT-3.5-Turbo, GPT-4o, and Gemini-1.5-Flash use K=30, which is equal to the number of questions per query used by all open-source models.", "label": false }, { "paperid": "2411.03697v1", "paper_path": "./SciVer/papers/2411.03697v1.json", "claim_type": "direct", "type": "chart", "item": "15", "section": [ "6.4" ], "image_path": "./SciVer/images/2411.03697v1_figure_15.png", "request_id": 194, "origin_statement": "The BERT GELU function reaches about 80 GFLOPS, roughly 25 GFLOPS more throughput than the ChatGLM SiLU function’s approximate 55 GFLOPS.", "perturbed_statement": "The BERT GELU function reaches about 55 GFLOPS, roughly 5 GFLOPS less throughput than the ChatGLM SiLU function’s approximate 60 GFLOPS.", "perturbed_explanation": "Figure 15 shows the BERT GELU bar at around 80 GFLOPS, not 55 GFLOPS, and it exceeds the ChatGLM SiLU’s ~55 GFLOPS rather than falling below it.", "claim": "The BERT GELU function reaches about 55 GFLOPS, roughly 5 GFLOPS less throughput than the ChatGLM SiLU function’s approximate 60 GFLOPS.", "label": false }, { "paperid": "2411.07976v5", "paper_path": "./SciVer/papers/2411.07976v5.json", "claim_type": "direct", "type": "chart", "item": "6(a)", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.07976v5_figure_6(a).png", "request_id": 196, "origin_statement": "The DINO-LG model correctly identified 318 out of 357 actual positive 
slices, achieving 89.1% recall on the positive class.", "perturbed_statement": "The DINO-LG model correctly identified 300 out of 357 actual positive slices, achieving 89.1% recall on the positive class.", "perturbed_explanation": "The confusion matrix shows the model made 318 true positive predictions, not 300, so the stated count of 300 is incorrect.", "claim": "The DINO-LG model correctly identified 300 out of 357 actual positive slices, achieving 89.1% recall on the positive class.", "label": false }, { "paperid": "2411.05521v2", "paper_path": "./SciVer/papers/2411.05521v2.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "4", "5.2", "6" ], "image_path": "./SciVer/images/2411.05521v2_figure_3(a).png", "request_id": 198, "origin_statement": "In the dev set, 'allergies' questions count is 240, 26 more than 'imaging_studies' (214), while in the test set 'imaging_studies' peaks at 234, just one above 'allergies' (233).", "perturbed_statement": "In the dev set, 'allergies' questions count is 240, 30 more than 'imaging_studies' (214), while in the test set 'imaging_studies' peaks at 234, just one above 'allergies' (233).", "perturbed_explanation": "The difference between 240 and 214 is actually 26, not 30, making the stated 30-question gap incorrect based on the provided counts.", "claim": "In the dev set, 'allergies' questions count is 240, 30 more than 'imaging_studies' (214), while in the test set 'imaging_studies' peaks at 234, just one above 'allergies' (233).", "label": false }, { "paperid": "2410.07484v2", "paper_path": "./SciVer/papers/2410.07484v2.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.07484v2_figure_5.png", "request_id": 200, "origin_statement": "By the second iteration, the cover rate for 'mine' actions rises to about 85%, exceeding the 'craft' actions cover rate of about 75% by roughly 10 percentage points.", "perturbed_statement": "By the second iteration, the cover rate for 'mine' actions rises to about 90%, exceeding the 'craft' actions cover rate of about 75% by roughly 15 percentage points.", "perturbed_explanation": "This statement is incorrect because at iteration 2 the 'mine' cover rate is approximately 85% (not 90%), so it only exceeds the 'craft' cover rate (about 75%) by roughly 10 percentage points, not 15.", "claim": "By the second iteration, the cover rate for 'mine' actions rises to about 90%, exceeding the 'craft' actions cover rate of about 75% by roughly 15 percentage points.", "label": false }, { "paperid": "2411.01370v1", "paper_path": "./SciVer/papers/2411.01370v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "5.2.2" ], "image_path": "./SciVer/images/2411.01370v1-Table6-1.png", "request_id": 201, "origin_statement": "In Pattern IV, the multistage model achieves a maintenance cost of $2,049,671K, which is $563,362K lower than the two-stage model’s $2,613,033K.", "perturbed_statement": "In Pattern IV, the multistage model achieves a maintenance cost of $2,049,671K, which is $600,000K lower than the two-stage model’s $2,613,033K.", "perturbed_explanation": "The perturbed statement misstates the cost difference. 
The actual maintenance costs are $2,613,033K (two-stage) and $2,049,671K (multistage), yielding a difference of $563,362K, not $600,000K.", "claim": "In Pattern IV, the multistage model achieves a maintenance cost of $2,049,671K, which is $600,000K lower than the two-stage model’s $2,613,033K.", "label": false }, { "paperid": "2411.04093v1", "paper_path": "./SciVer/papers/2411.04093v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.04093v1_figure_5.png", "request_id": 202, "origin_statement": "BART’s extraction coverage climbs from about 0.36 at the 10th percentile of arousing term density to a peak near 0.44 at the 50th percentile, then falls to roughly 0.38 at the 100th percentile.", "perturbed_statement": "BART’s extraction coverage climbs from about 0.36 at the 10th percentile of arousing term density to a peak near 0.44 at the 60th percentile, then falls to roughly 0.42 at the 100th percentile.", "perturbed_explanation": "The perturbed claim is false because, in the figure, BART’s extraction coverage actually peaks around the 50th percentile of arousing term density (not the 60th) and decreases to about 0.40 (not 0.42) at the 100th percentile.", "claim": "BART’s extraction coverage climbs from about 0.36 at the 10th percentile of arousing term density to a peak near 0.44 at the 60th percentile, then falls to roughly 0.42 at the 100th percentile.", "label": false }, { "paperid": "2410.05053v1", "paper_path": "./SciVer/papers/2410.05053v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.2", "3.5" ], "image_path": "./SciVer/images/2410.05053v1-Table2-1.png", "request_id": 203, "origin_statement": "Between 250K and 400K, the monolayer's nematic order parameter decreases by about 33.7% (from 0.7248 to 0.4804), while the bilayer's decreases by about 25.0% (from 0.3992 to 0.2996).", "perturbed_statement": "Between 250K and 400K, the monolayer's nematic order parameter decreases by about 33.7% (from 0.7248 to 0.4804), while the bilayer's decreases by about 15.0% (from 0.3992 to 0.2996).", "perturbed_explanation": "According to the table, the bilayer value falls from 0.3992 at 250K to 0.2996 at 400K, which is a drop of approximately 25%, not 15% as claimed.", "claim": "Between 250K and 400K, the monolayer's nematic order parameter decreases by about 33.7% (from 0.7248 to 0.4804), while the bilayer's decreases by about 15.0% (from 0.3992 to 0.2996).", "label": false }, { "paperid": "2409.05305v1", "paper_path": "./SciVer/papers/2409.05305v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.05305v1_figure_2.png", "request_id": 206, "origin_statement": "In the Anharmonic Oscillator: Exponential Energy plot, as Energy increases from 3 to 9, the latent encoding rises from about 280 to about 800, reflecting a non-linear increase of approximately 520 units over a 6-unit energy range.", "perturbed_statement": "In the Anharmonic Oscillator: Exponential Energy plot, as Energy increases from 3 to 9, the latent encoding rises from about 300 to about 700, reflecting an increase of approximately 400 units over a 6-unit energy range.", "perturbed_explanation": "The perturbation is incorrect because the plot shows the latent value at energy ≈3 is closer to 280 (not 300) and at energy ≈9 is around 800 (not 700), so the actual increase is about 520 units rather than 400.", "claim": "In the Anharmonic Oscillator: Exponential Energy plot, as Energy 
increases from 3 to 9, the latent encoding rises from about 300 to about 700, reflecting an increase of approximately 400 units over a 6-unit energy range.", "label": false }, { "paperid": "2410.20063v2", "paper_path": "./SciVer/papers/2410.20063v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3" ], "image_path": "./SciVer/images/2410.20063v2-Table1-1.png", "request_id": 207, "origin_statement": "The ST yield for the K+π−π− tag mode (2,164,074±1,571) is about 3.14 times larger than that for the K+π−π−π0 mode (689,042±1,172).", "perturbed_statement": "The ST yield for the K+π−π− tag mode (2,164,074±1,571) is about 2.5 times larger than that for the K+π−π−π0 mode (689,042±1,172).", "perturbed_explanation": "This is incorrect because 2,164,074 divided by 689,042 equals approximately 3.14, not 2.5, so the ratio in the perturbed statement contradicts the values given in the table.", "claim": "The ST yield for the K+π−π− tag mode (2,164,074±1,571) is about 2.5 times larger than that for the K+π−π−π0 mode (689,042±1,172).", "label": false }, { "paperid": "2411.15799v1", "paper_path": "./SciVer/papers/2411.15799v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.15799v1_figure_4.png", "request_id": 210, "origin_statement": "The automated method's recall of 0.945 on general severity level estimation is 0.424 higher than Expert1's recall of 0.521.", "perturbed_statement": "The automated method's recall of 0.845 on general severity level estimation is 0.324 higher than Expert1's recall of 0.521.", "perturbed_explanation": "The perturbed statement is incorrect because the confusion matrix shows the method's recall is 0.945, not 0.845, making the true difference from Expert1's 0.521 recall equal to 0.424 rather than 0.324.", "claim": "The automated method's recall of 0.845 on general severity level estimation is 0.324 higher than Expert1's recall of 0.521.", "label": false }, { "paperid": "2409.15155v1", "paper_path": "./SciVer/papers/2409.15155v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.1.3", "4" ], "image_path": "./SciVer/images/2409.15155v1-Table2-1.png", "request_id": 213, "origin_statement": "On the artifact-only dataset D_Art, under the L1+SSIM+MS-SSIM loss combination, MAR-DTN achieved a PSNR of 27.46 dB, outperforming custom-pix2pix’s 27.06 dB by 0.40 dB.", "perturbed_statement": "On the artifact-only dataset D_Art, under the L1+SSIM+MS-SSIM loss combination, MAR-DTN achieved a PSNR of 26.46 dB, outperforming custom-pix2pix’s 27.06 dB by 0.60 dB.", "perturbed_explanation": "The perturbed statement misreports MAR-DTN’s PSNR as 26.46 dB instead of the correct 27.46 dB and miscalculates the margin. 
Table 2 shows MAR-DTN’s PSNR is 27.46 dB (not 26.46) and exceeds custom-pix2pix’s 27.06 dB by 0.40 dB, not 0.60 dB.", "claim": "On the artifact-only dataset D_Art, under the L1+SSIM+MS-SSIM loss combination, MAR-DTN achieved a PSNR of 26.46 dB, outperforming custom-pix2pix’s 27.06 dB by 0.60 dB.", "label": false }, { "paperid": "2409.15440v1", "paper_path": "./SciVer/papers/2409.15440v1.json", "claim_type": "direct", "type": "chart", "item": "6(a)", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.15440v1_figure_6(a).png", "request_id": 214, "origin_statement": "The central region peaks at a spectral index of about −0.6, which is roughly 1.2 units flatter than the very steep outer lobes with α ≲ −1.8.", "perturbed_statement": "The central region peaks at a spectral index of about −0.6, which is roughly 0.6 units flatter than the very steep outer lobes with α ≲ −1.2.", "perturbed_explanation": "This statement is incorrect because the outer lobes have a spectral index of α ≲ −1.8, not −1.2, as indicated in the caption and text.", "claim": "The central region peaks at a spectral index of about −0.6, which is roughly 0.6 units flatter than the very steep outer lobes with α ≲ −1.2.", "label": false }, { "paperid": "2411.16393v1", "paper_path": "./SciVer/papers/2411.16393v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.16393v1-Table4-1.png", "request_id": 215, "origin_statement": "The baseline uniaxial hydrostatic strength is eight times larger than the baseline uniaxial tensile strength (8000 MPa vs 1000 MPa).", "perturbed_statement": "The baseline uniaxial hydrostatic strength is ten times larger than the baseline uniaxial tensile strength (10000 MPa vs 1000 MPa).", "perturbed_explanation": "The table lists the baseline uniaxial hydrostatic strength as 8000 MPa and the tensile strength as 1000 MPa, so the hydrostatic strength is eight times larger—not ten—and there is no 10000 MPa entry.", "claim": "The baseline uniaxial hydrostatic strength is ten times larger than the baseline uniaxial tensile strength (10000 MPa vs 1000 MPa).", "label": false }, { "paperid": "2409.09641v2", "paper_path": "./SciVer/papers/2409.09641v2.json", "claim_type": "direct", "type": "chart", "item": "9(b)", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.09641v2_figure_9(b).png", "request_id": 218, "origin_statement": "Turn-taking EMMeans climbed from approximately 3.0 on day 1 to about 3.3 on day 14, an increase of roughly 0.3 points over the two weeks.", "perturbed_statement": "Turn-taking EMMeans climbed from approximately 3.1 on day 1 to about 3.6 on day 14, an increase of roughly 0.5 points.", "perturbed_explanation": "The chart shows estimated marginal means of about 3.0 on day 1 and around 3.3 on day 14, not 3.1 and 3.6, so both the start and end values and the claimed 0.5-point increase contradict the visual data.", "claim": "Turn-taking EMMeans climbed from approximately 3.1 on day 1 to about 3.6 on day 14, an increase of roughly 0.5 points.", "label": false }, { "paperid": "2410.22517v1", "paper_path": "./SciVer/papers/2410.22517v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "6.1" ], "image_path": "./SciVer/images/2410.22517v1-Table1-1.png", "request_id": 219, "origin_statement": "Atlas yields a 0.317-point increase in the Race/Ethnicity EBS for GPT-J on the BBQ dataset, which is 0.052 points higher than the 0.265-point increase observed for GPT-2 XL.", "perturbed_statement": "Atlas 
yields a 0.327-point increase in the Race/Ethnicity EBS for GPT-J on the BBQ dataset, which is 0.062 points higher than the 0.265-point increase observed for GPT-2 XL.", "perturbed_explanation": "The perturbed statement is incorrect because the actual GPT-J EBS increase for Race/Ethnicity on the BBQ dataset is 0.740–0.423=0.317 points (not 0.327), making the difference versus GPT-2 XL’s 0.265-point increase equal to 0.052 (not 0.062).", "claim": "Atlas yields a 0.327-point increase in the Race/Ethnicity EBS for GPT-J on the BBQ dataset, which is 0.062 points higher than the 0.265-point increase observed for GPT-2 XL.", "label": false }, { "paperid": "2411.07504v1", "paper_path": "./SciVer/papers/2411.07504v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4.4.4" ], "image_path": "./SciVer/images/2411.07504v1_figure_6.png", "request_id": 224, "origin_statement": "AdaS&S-R achieves the lowest inference FLOPs at 0.83G, which is about 35.5% lower than UES-32's 1.29G in Figure 6(b).", "perturbed_statement": "AdaS&S-R achieves the lowest inference FLOPs at 0.75G, which is about 42% lower than UES-32's 1.29G in Figure 6(b).", "perturbed_explanation": "Figure 6(b) shows AdaS&S-R at 0.83G FLOPs, not 0.75G. Because the actual value is 0.83G, the claimed 42% reduction from 1.29G is incorrect.", "claim": "AdaS&S-R achieves the lowest inference FLOPs at 0.75G, which is about 42% lower than UES-32's 1.29G in Figure 6(b).", "label": false }, { "paperid": "2411.12845v1", "paper_path": "./SciVer/papers/2411.12845v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.12845v1-Table2-1.png", "request_id": 225, "origin_statement": "In the Inflation Normalization sample (January 2023–December 2023), the two-regime model (M2) achieves the smallest month-over-month root mean squared difference of 0.000, versus other models whose M-o-M RMSD ranges from 0.002 (M1 and M3) to 0.019 (M4).", "perturbed_statement": "In the Inflation Normalization sample (January 2023–December 2023), the four-regime model (M4) achieves the smallest month-over-month root mean squared difference of 0.000, while other models’ M-o-M RMSD ranges from 0.002 (M1 and M3) to 0.019 (M2).", "perturbed_explanation": "Table 2 shows that in the inflation normalization sample, M4’s month-over-month RMS difference is actually 0.019 (not 0.000), and it is M2 that records the 0.000 RMSD. 
The perturbed claim misattributes the 0.000 value to M4 instead of M2.", "claim": "In the Inflation Normalization sample (January 2023–December 2023), the four-regime model (M4) achieves the smallest month-over-month root mean squared difference of 0.000, while other models’ M-o-M RMSD ranges from 0.002 (M1 and M3) to 0.019 (M2).", "label": false }, { "paperid": "2410.04784v1", "paper_path": "./SciVer/papers/2410.04784v1.json", "claim_type": "direct", "type": "chart", "item": "4(b)", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.04784v1_figure_4(b).png", "request_id": 226, "origin_statement": "When the consistency ratio shifts from 5:5 to 9:1, the preference score for Birth Date climbs from about 52.5% to around 90.1%, an increase of roughly 37.6 points, whereas University only increases by around 3.6 points from about 50.0% to 53.6%.", "perturbed_statement": "When the consistency ratio shifts from 5:5 to 9:1, the preference score for Birth Date climbs from about 52.5% to around 80.1%, an increase of roughly 27.6 points, whereas University only increases by around 4.6 points from about 50.0% to 54.6%.", "perturbed_explanation": "The chart shows the preference score for Birth Date at about 90.1% under a 9:1 ratio, not 80.1%, and the University score is approximately 53.6%, not 54.6%, so both the final values and the claimed increases are incorrect.", "claim": "When the consistency ratio shifts from 5:5 to 9:1, the preference score for Birth Date climbs from about 52.5% to around 80.1%, an increase of roughly 27.6 points, whereas University only increases by around 4.6 points from about 50.0% to 54.6%.", "label": false }, { "paperid": "2409.07135v1", "paper_path": "./SciVer/papers/2409.07135v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.07135v1-Table3-1.png", "request_id": 227, "origin_statement": "In four of the six models—KMeans, DBSCAN, GMM, and LOF—the AEA learning rate is at least twice the AER learning rate.", "perturbed_statement": "In five of the six models—KMeans, DBSCAN, GMM, nuSVM, and LOF—the AEA learning rate is at least twice the AER learning rate.", "perturbed_explanation": "This is incorrect because nuSVM’s AEA learning rate (0.09) is lower than its AER rate (0.10), and IForest’s AEA rate (0.02) is lower than its AER rate (0.03), so only four models meet the doubling condition.", "claim": "In five of the six models—KMeans, DBSCAN, GMM, nuSVM, and LOF—the AEA learning rate is at least twice the AER learning rate.", "label": false }, { "paperid": "2410.01485v1", "paper_path": "./SciVer/papers/2410.01485v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.01485v1-Table6-1.png", "request_id": 231, "origin_statement": "Using all full layers achieves an average performance of 0.29, which is 0.02 higher than the 0.27 average of 1/3 full layers and 0.03 higher than the 0.26 average of 1/5 full layers.", "perturbed_statement": "Using all full layers achieves an average performance of 0.31, which is 0.04 higher than the 0.27 average of 1/3 full layers and 0.05 higher than the 0.26 average of 1/5 full layers.", "perturbed_explanation": "The perturbed statement is incorrect because the table shows that using all full layers yields an average of 0.29, not 0.31. 
The actual differences are 0.02 and 0.03, not 0.04 and 0.05.", "claim": "Using all full layers achieves an average performance of 0.31, which is 0.04 higher than the 0.27 average of 1/3 full layers and 0.05 higher than the 0.26 average of 1/5 full layers.", "label": false }, { "paperid": "2410.01964v1", "paper_path": "./SciVer/papers/2410.01964v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4.1.1" ], "image_path": "./SciVer/images/2410.01964v1_figure_6.png", "request_id": 232, "origin_statement": "The film dosimetry average beam offset is approximately (1.4 mm, –0.4 mm), which is about 0.4 mm to the right and 1.2 mm above the BTV average at (1.0 mm, –1.6 mm).", "perturbed_statement": "The film dosimetry average beam offset sits about 0.8 mm to the left and 0.8 mm below the BTV average offset.", "perturbed_explanation": "This statement is wrong because the plot shows the film dosimetry average (black ✖) at roughly x=1.4 mm, y=–0.4 mm and the BTV average (black ✖) at x=1.0 mm, y=–1.6 mm. The film offset is actually 0.4 mm to the right and 1.2 mm above the BTV average, not to the left or below.", "claim": "The film dosimetry average beam offset sits about 0.8 mm to the left and 0.8 mm below the BTV average offset.", "label": false }, { "paperid": "2410.06541v2", "paper_path": "./SciVer/papers/2410.06541v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.06541v2-Table1-1.png", "request_id": 233, "origin_statement": "On Baichuan2-13B, CT(Max) at ~50% pruning achieves an average score of 74.37, which is 16.97 points higher than the ShortGPT method's 57.40 average.", "perturbed_statement": "On Baichuan2-13B, CT(Max) at ~35% pruning achieves an average score of 74.37, which is 12.97 points higher than the ShortGPT method's 57.40 average.", "perturbed_explanation": "The perturbation is incorrect because Table 1 shows CT(Max) for Baichuan2-13B uses a pruning ratio of ~50%, not ~35%, and the actual score gap between CT(Max) (74.37) and ShortGPT (57.40) is 16.97 points, not 12.97.", "claim": "On Baichuan2-13B, CT(Max) at ~35% pruning achieves an average score of 74.37, which is 12.97 points higher than the ShortGPT method's 57.40 average.", "label": false }, { "paperid": "2411.16342v1", "paper_path": "./SciVer/papers/2411.16342v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "2.3" ], "image_path": "./SciVer/images/2411.16342v1_figure_5.png", "request_id": 234, "origin_statement": "For GNNBenchmark.TSP, the (pp,c) configuration is optimal in 62% of cases, nearly doubling (pp,e)’s 33% share and exceeding (pp,h)’s 3% frequency by over 20 times.", "perturbed_statement": "For GNNBenchmark.TSP, the (pp,c) configuration is optimal in 45% of cases, nearly doubling (pp,e)’s 33% share and exceeding (pp,h)’s 3% frequency by over 15 times.", "perturbed_explanation": "The perturbed statement is incorrect because the heatmap shows (pp,c) occurs in 62% of cases for GNNBenchmark.TSP, not 45%, and compared to (pp,h)’s 3% it exceeds by over 20 times, not 15.", "claim": "For GNNBenchmark.TSP, the (pp,c) configuration is optimal in 45% of cases, nearly doubling (pp,e)’s 33% share and exceeding (pp,h)’s 3% frequency by over 15 times.", "label": false }, { "paperid": "2409.05048v1", "paper_path": "./SciVer/papers/2409.05048v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5" ], "image_path": "./SciVer/images/2409.05048v1-Table1-1.png", "request_id": 235, 
"origin_statement": "Gamma1 increases by 0.0457 from 0.9507 at l=8 to 0.9964 at l=32.", "perturbed_statement": "Gamma1 increases by 0.0493 from 0.9507 at l=8 to 1.0000 at l=32.", "perturbed_explanation": "This is incorrect because Table 1 shows gamma1 at l=32 is 0.9964, not 1.0000, and the actual increase from 0.9507 to 0.9964 is 0.0457, not 0.0493.", "claim": "Gamma1 increases by 0.0493 from 0.9507 at l=8 to 1.0000 at l=32.", "label": false }, { "paperid": "2409.19611v1", "paper_path": "./SciVer/papers/2409.19611v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.6" ], "image_path": "./SciVer/images/2409.19611v1_figure_3.png", "request_id": 236, "origin_statement": "In dbpedia Order2, after training four tasks, the exact match score for the model without LoRA-Master falls to about 73%, while the LoRA-Master model retains near 98%, a difference of roughly 25.5 points.", "perturbed_statement": "In dbpedia Order2, after training four tasks, the exact match score for the model without LoRA-Master falls to about 73%, while the LoRA-Master model retains near 100%, a difference of roughly 27 points.", "perturbed_explanation": "The perturbed claim is incorrect because, according to Figure 3 (dbpedia Order2 at task 4), the LoRA-Master model preserves about 97–98% exact match—not 100%—and the actual gap is approximately 25.5 points (not 27).", "claim": "In dbpedia Order2, after training four tasks, the exact match score for the model without LoRA-Master falls to about 73%, while the LoRA-Master model retains near 100%, a difference of roughly 27 points.", "label": false }, { "paperid": "2409.10046v1", "paper_path": "./SciVer/papers/2409.10046v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "2.1" ], "image_path": "./SciVer/images/2409.10046v1_figure_1.png", "request_id": 238, "origin_statement": "The Pearson correlation between the Build Up Index (bui) and Drought Code (duff) is 0.99, indicating a near-perfect linear relationship, the highest among all source feature pairs.", "perturbed_statement": "The Pearson correlation between the Build Up Index (bui) and Drought Code (duff) is 0.85, indicating a moderate linear relationship, the highest among all source feature pairs.", "perturbed_explanation": "The correlation value is misreported: the matrix shows a correlation of 0.99 between bui and duff, not 0.85.", "claim": "The Pearson correlation between the Build Up Index (bui) and Drought Code (duff) is 0.85, indicating a moderate linear relationship, the highest among all source feature pairs.", "label": false }, { "paperid": "2409.14085v1", "paper_path": "./SciVer/papers/2409.14085v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "3.2.3" ], "image_path": "./SciVer/images/2409.14085v1-Table5-1.png", "request_id": 239, "origin_statement": "STOI's correlation with ASR-WER (−0.807) has an absolute strength 0.219 greater than that of PESQ with ASR-WER (−0.588), indicating speech intelligibility influences ASR performance more strongly than perceived quality.", "perturbed_statement": "STOI's correlation with ASR-WER (−0.807) has an absolute strength 0.319 greater than that of PESQ with ASR-WER (−0.588), indicating speech intelligibility influences ASR performance more strongly than perceived quality.", "perturbed_explanation": "The statement claims a difference of 0.319 between the absolute values of −0.807 and −0.588, but the actual difference is |−0.807|−|−0.588|=0.219, not 0.319, making the perturbed detail incorrect.", 
"claim": "STOI's correlation with ASR-WER (−0.807) has an absolute strength 0.319 greater than that of PESQ with ASR-WER (−0.588), indicating speech intelligibility influences ASR performance more strongly than perceived quality.", "label": false }, { "paperid": "2409.03074v1", "paper_path": "./SciVer/papers/2409.03074v1.json", "claim_type": "direct", "type": "chart", "item": "1(b)", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.03074v1_figure_1(b).png", "request_id": 240, "origin_statement": "At exponent n=8, the EFT n–n phase shift is about 48° for Λ=200 MeV but roughly 61° for Λ=300 MeV (and higher), yielding a ~13° difference between the lowest and higher cutoffs.", "perturbed_statement": "At exponent n=8, the EFT n–n phase shift is about 58° for Λ=200 MeV but roughly 61° for Λ=300 MeV (and higher), yielding only a 3° difference between the lowest and higher cutoffs.", "perturbed_explanation": "The perturbed statement is incorrect because the actual phase shift at n=8 for Λ=200 MeV is about 48°, not 58°, so the difference from ~61° at Λ=300 MeV is ~13°, not 3°.", "claim": "At exponent n=8, the EFT n–n phase shift is about 58° for Λ=200 MeV but roughly 61° for Λ=300 MeV (and higher), yielding only a 3° difference between the lowest and higher cutoffs.", "label": false }, { "paperid": "2411.05266v1", "paper_path": "./SciVer/papers/2411.05266v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.1", "3" ], "image_path": "./SciVer/images/2411.05266v1-Table1-1.png", "request_id": 243, "origin_statement": "The distance between the 3D CGR coordinates for ATCAGGCAG and TGTAGGCAG decreases from 0.66667 at nucleotide 1 to 0.00007 at nucleotide 9, showing an over 99.99% reduction across the nine positions.", "perturbed_statement": "The distance between the 3D CGR coordinates for ATCAGGCAG and TGTAGGCAG decreases from 0.55555 at nucleotide 1 to 0.00007 at nucleotide 9, representing an over 99.99% reduction across the nine positions.", "perturbed_explanation": "The perturbed statement is incorrect because the table shows the distance at nucleotide 1 is actually 0.66667, not 0.55555, so the starting value contradicts the provided data.", "claim": "The distance between the 3D CGR coordinates for ATCAGGCAG and TGTAGGCAG decreases from 0.55555 at nucleotide 1 to 0.00007 at nucleotide 9, representing an over 99.99% reduction across the nine positions.", "label": false }, { "paperid": "2410.12057v2", "paper_path": "./SciVer/papers/2410.12057v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "7" ], "image_path": "./SciVer/images/2410.12057v2-Table3-1.png", "request_id": 245, "origin_statement": "The clustering model trained on shared human and LM responses achieves a CMI of .600 on human-only data, which is 0.095 higher than the .505 score of the LM-only model on the same human data.", "perturbed_statement": "The clustering model trained on shared human and LM responses achieves a CMI of .490 on human-only data, which is 0.015 lower than the .505 score of the LM-only model on the same human data.", "perturbed_explanation": "The perturbed claim is incorrect because the actual CMI for the shared-response model on human-only data is .600 (not .490), and its difference from the LM-only model’s .505 score is .095 (not .015). 
These mismatches contradict the table values.", "claim": "The clustering model trained on shared human and LM responses achieves a CMI of .490 on human-only data, which is 0.015 lower than the .505 score of the LM-only model on the same human data.", "label": false }, { "paperid": "2409.11176v1", "paper_path": "./SciVer/papers/2409.11176v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "2" ], "image_path": "./SciVer/images/2409.11176v1_figure_2.png", "request_id": 246, "origin_statement": "The blue-shifted [Ne II] emission extends to about +2.0″ in RA offset, while the red-shifted emission extends to about –2.5″, indicating the red wing stretches roughly 0.5″ farther than the blue wing.", "perturbed_statement": "The blue-shifted [Ne II] emission extends to about –2.5″ in RA offset, while the red-shifted emission extends to about +2.0″, indicating the blue wing stretches roughly 0.5″ farther than the red wing.", "perturbed_explanation": "This is incorrect because in the image the blue-shifted contours lie on the positive RA side (east) up to +2.0″, not at –2.5″, and the red-shifted contours lie on the negative side (west) up to –2.5″, so the roles and distances were swapped.", "claim": "The blue-shifted [Ne II] emission extends to about –2.5″ in RA offset, while the red-shifted emission extends to about +2.0″, indicating the blue wing stretches roughly 0.5″ farther than the red wing.", "label": false }, { "paperid": "2410.14875v1", "paper_path": "./SciVer/papers/2410.14875v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "2.1" ], "image_path": "./SciVer/images/2410.14875v1-Table2-1.png", "request_id": 247, "origin_statement": "The WP domain’s training set contains 24,803 AI-generated texts, which is 4,415 more than the CMV domain’s 20,388 AI-generated texts.", "perturbed_statement": "The WP domain’s training set contains 26,000 AI-generated texts, which is 5,612 more than the CMV domain’s 20,388 AI-generated texts.", "perturbed_explanation": "The table shows that the WP training set actually has 24,803 AI-generated texts (not 26,000), and the difference from CMV’s 20,388 AI-generated texts is 4,415 (not 5,612).", "claim": "The WP domain’s training set contains 26,000 AI-generated texts, which is 5,612 more than the CMV domain’s 20,388 AI-generated texts.", "label": false }, { "paperid": "2409.10951v1", "paper_path": "./SciVer/papers/2409.10951v1.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "5.4" ], "image_path": "./SciVer/images/2409.10951v1_figure_3(a).png", "request_id": 248, "origin_statement": "The recall@1200 of FairAD-C (~0.71) exceeds that of FairAD-N (~0.59) by approximately 0.12, indicating a roughly 20% relative increase.", "perturbed_statement": "The recall@1200 of FairAD-N (~0.59) exceeds that of FairAD-R (~0.63) by about 0.04, representing a 6% relative increase.", "perturbed_explanation": "In the figure, FairAD-N has a recall@1200 of about 0.59, which is actually lower than FairAD-R’s recall@1200 of about 0.63. 
Therefore, FairAD-N does not exceed FairAD-R, making the perturbed claim incorrect.", "claim": "The recall@1200 of FairAD-N (~0.59) exceeds that of FairAD-R (~0.63) by about 0.04, representing a 6% relative increase.", "label": false }, { "paperid": "2411.13677v1", "paper_path": "./SciVer/papers/2411.13677v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "8" ], "image_path": "./SciVer/images/2411.13677v1-Table6-1.png", "request_id": 249, "origin_statement": "The two xArm 6 arms cost $18,000 in total, accounting for about 57.7% of the $31,200 overall bill of materials.", "perturbed_statement": "The two xArm 6 arms cost $16,000 in total, accounting for about 57.7% of the $31,200 overall bill of materials.", "perturbed_explanation": "The perturbed statement is incorrect because the table specifies that two xArm 6 units total $18,000, not $16,000 as claimed.", "claim": "The two xArm 6 arms cost $16,000 in total, accounting for about 57.7% of the $31,200 overall bill of materials.", "label": false }, { "paperid": "2411.00254v1", "paper_path": "./SciVer/papers/2411.00254v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.00254v1_figure_7.png", "request_id": 250, "origin_statement": "The number of correctly classified benign images increased from 53 before augmentation to 18,145 after augmentation, an improvement of 18,092 cases.", "perturbed_statement": "The number of correctly classified benign images increased from 53 before augmentation to 18,150 after augmentation, an improvement of 18,097 cases.", "perturbed_explanation": "The perturbed statement is incorrect because the confusion matrix after augmentation shows 18,145 correctly classified benign cases (not 18,150), and the actual increase is 18,092 cases rather than 18,097.", "claim": "The number of correctly classified benign images increased from 53 before augmentation to 18,150 after augmentation, an improvement of 18,097 cases.", "label": false }, { "paperid": "2409.13441v1", "paper_path": "./SciVer/papers/2409.13441v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4" ], "image_path": "./SciVer/images/2409.13441v1-Table2-1.png", "request_id": 251, "origin_statement": "The e-folding time τ_main in the sfhdelayed module spans eight values from 0.1 to 5 Gyrs, making its maximum (5 Gyrs) fifty times larger than the maximum burst e-folding time τ_burst (0.1 Gyrs).", "perturbed_statement": "The e-folding time τ_main in the sfhdelayed module spans nine values from 0.1 to 6 Gyrs, making its maximum (6 Gyrs) sixty times larger than the maximum burst e-folding time τ_burst (0.1 Gyrs).", "perturbed_explanation": "The table actually lists only eight τ_main values (0.1, 0.4, 0.8, 1, 2, 3, 4, 5 Gyrs), not nine, and the maximum τ_main is 5 Gyrs, not 6 Gyrs, so the stated span and ratio are incorrect.", "claim": "The e-folding time τ_main in the sfhdelayed module spans nine values from 0.1 to 6 Gyrs, making its maximum (6 Gyrs) sixty times larger than the maximum burst e-folding time τ_burst (0.1 Gyrs).", "label": false }, { "paperid": "2411.01711v1", "paper_path": "./SciVer/papers/2411.01711v1.json", "claim_type": "direct", "type": "chart", "item": "2(a)", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.01711v1_figure_2(a).png", "request_id": 252, "origin_statement": "For a=1 and R=3, increasing P from 1 to 5 raises the payoff Δ^i_{22} by 2 (from 2 to 4) according to the plane in Figure 2(a).", "perturbed_statement": 
"For a=1 and R=3, increasing P from 1 to 5 raises the payoff Δ^i_{22} by 3 (from 2 to 5).", "perturbed_explanation": "This is incorrect because the graph shows Δ at (P=1,R=3) is 2 and at (P=5,R=3) is 4, so the increase is 2 (from 2 to 4), not 3 (to 5).", "claim": "For a=1 and R=3, increasing P from 1 to 5 raises the payoff Δ^i_{22} by 3 (from 2 to 5).", "label": false }, { "paperid": "2411.15835v1", "paper_path": "./SciVer/papers/2411.15835v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.15835v1-Table3-1.png", "request_id": 253, "origin_statement": "The TSC method successfully converted all 11 TPC-H benchmark queries (Q2, Q3, Q5, Q7, Q8, Q9, Q10, Q11, Q15, Q18, Q21), achieving a 100% conversion success rate.", "perturbed_statement": "The TSC method successfully converted only 10 of the 11 TPC-H benchmark queries, achieving a 90% conversion success rate.", "perturbed_explanation": "The table lists 11 queries (Q2, Q3, Q5, Q7, Q8, Q9, Q10, Q11, Q15, Q18, Q21) all marked with ✓, indicating all 11 conversions succeeded and a 100% success rate, not 10 queries or 90%.", "claim": "The TSC method successfully converted only 10 of the 11 TPC-H benchmark queries, achieving a 90% conversion success rate.", "label": false }, { "paperid": "2410.13000v1", "paper_path": "./SciVer/papers/2410.13000v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.4", "5" ], "image_path": "./SciVer/images/2410.13000v1_figure_3.png", "request_id": 256, "origin_statement": "At ρ=2, as N increases to 3000, the nnGP method's posterior probability error reaches approximately 0.155, which is about six times larger than the rational method's error of about 0.025.", "perturbed_statement": "At ρ=2, as N increases to 3000, the nnGP method's posterior probability error reaches approximately 0.12, which is about twice the rational method's error of about 0.06.", "perturbed_explanation": "The nnGP error at ρ=2 and N=3000 is actually about 0.155 (not 0.12), and the rational method's error is about 0.025 (not 0.06), so both reported error values and the implied ratio contradict the figure.", "claim": "At ρ=2, as N increases to 3000, the nnGP method's posterior probability error reaches approximately 0.12, which is about twice the rational method's error of about 0.06.", "label": false }, { "paperid": "2410.17196v2", "paper_path": "./SciVer/papers/2410.17196v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.17196v2-Table1-1.png", "request_id": 261, "origin_statement": "IFEval’s average word count per sample is 31.08, which is about 4.47 times higher than SD-QA’s 6.96.", "perturbed_statement": "IFEval’s average word count per sample is 31.08, which is about 3.2 times higher than SD-QA’s 6.96.", "perturbed_explanation": "The actual ratio of IFEval’s 31.08 words to SD-QA’s 6.96 words is approximately 4.47, not 3.2, so the stated multiplier is incorrect.", "claim": "IFEval’s average word count per sample is 31.08, which is about 3.2 times higher than SD-QA’s 6.96.", "label": false }, { "paperid": "2411.11129v1", "paper_path": "./SciVer/papers/2411.11129v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.11129v1-Table5-1.png", "request_id": 263, "origin_statement": "The error value E1 for ghiara mortar is about 53% greater than the error value E1 for azolo mortar.", "perturbed_statement": "The error value 
E1 for ghiara mortar is about 30% greater than the error value E1 for azolo mortar.", "perturbed_explanation": "The perturbed claim understates the difference: the table shows E1 for ghiara mortar is 2.20e-4 and for azolo mortar is 1.44e-4, which is an increase of approximately 53%, not 30%.", "claim": "The error value E1 for ghiara mortar is about 30% greater than the error value E1 for azolo mortar.", "label": false }, { "paperid": "2411.01030v3", "paper_path": "./SciVer/papers/2411.01030v3.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "4.3" ], "image_path": "./SciVer/images/2411.01030v3_figure_1.png", "request_id": 264, "origin_statement": "When retrieving 32 phone numbers, Hawk trained with Birdie achieves around 81% digit accuracy, whereas Hawk trained with Next Token Prediction remains near 10%.", "perturbed_statement": "When retrieving 32 phone numbers, Hawk trained with Birdie achieves around 92% digit accuracy, whereas Hawk trained with Next Token Prediction remains near 15%.", "perturbed_explanation": "The perturbed claim is incorrect because the actual accuracy for Hawk trained with Birdie at 32 numbers is about 81%, not 92%, and the Hawk Next Token Prediction accuracy is about 10%, not 15%.", "claim": "When retrieving 32 phone numbers, Hawk trained with Birdie achieves around 92% digit accuracy, whereas Hawk trained with Next Token Prediction remains near 15%.", "label": false }, { "paperid": "2409.01971v1", "paper_path": "./SciVer/papers/2409.01971v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.01971v1_figure_7.png", "request_id": 268, "origin_statement": "When increasing batch size from 512 to 2048, the inference time increases fourfold from about 25ms to about 100ms, while GPU memory utilization climbs from roughly 0.8GB to nearly 1.7GB.", "perturbed_statement": "When increasing batch size from 512 to 2048, the inference time increases fourfold from about 25ms to about 200ms, while GPU memory utilization climbs from roughly 0.8GB to nearly 1.7GB.", "perturbed_explanation": "Figure 7 shows the inference time at batch size 2048 is approximately 100 ms, not 200 ms as stated, making the perturbed claim incorrect.", "claim": "When increasing batch size from 512 to 2048, the inference time increases fourfold from about 25ms to about 200ms, while GPU memory utilization climbs from roughly 0.8GB to nearly 1.7GB.", "label": false }, { "paperid": "2409.11531v1", "paper_path": "./SciVer/papers/2409.11531v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "5.2.1", "5.2.2", "5.2.3", "5.2.4" ], "image_path": "./SciVer/images/2409.11531v1_figure_6.png", "request_id": 272, "origin_statement": "In Scenario 1 valence, the ESV condition’s average rating rose by approximately 1.8 points from about 2.7 pre-intervention to about 4.5 post-intervention, exceeding the Control group’s increase of roughly 1.6 points (from about 3.1 to about 4.7).", "perturbed_statement": "In Scenario 1, the Control condition’s valence increased by about 2.2 points (from 3.1 to 5.3), which is higher than the ESV condition’s increase of around 1.6 points (from 2.7 to 4.3).", "perturbed_explanation": "The bar chart shows the Control group’s post-intervention valence at around 4.7 (not 5.3) and the ESV group’s post-intervention valence at about 4.5 (not 4.3), so both the stated end values and calculated increases are incorrect.", "claim": "In Scenario 1, the Control condition’s valence increased 
by about 2.2 points (from 3.1 to 5.3), which is higher than the ESV condition’s increase of around 1.6 points (from 2.7 to 4.3).", "label": false }, { "paperid": "2409.19136v1", "paper_path": "./SciVer/papers/2409.19136v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "5.1.1" ], "image_path": "./SciVer/images/2409.19136v1_figure_3.png", "request_id": 274, "origin_statement": "User 128’s max speeds span approximately 15 to 35 units, compared to User 68 whose max speeds range from about 10 to 90 units, indicating User 128’s max speed is far more consistent than User 68’s.", "perturbed_statement": "User 128’s max speeds span approximately 10 to 40 units, compared to User 68 whose max speeds range from about 20 to 80 units, indicating User 128’s max speed is far more consistent than User 68’s.", "perturbed_explanation": "The perturbed statement is incorrect because in the actual plot, User 128’s max speeds lie between about 15 and 35 units (not 10 to 40), and User 68’s max speeds range from roughly 5 to 90 units (not 20 to 80). These specific ranges contradict the visual data.", "claim": "User 128’s max speeds span approximately 10 to 40 units, compared to User 68 whose max speeds range from about 20 to 80 units, indicating User 128’s max speed is far more consistent than User 68’s.", "label": false }, { "paperid": "2411.02712v1", "paper_path": "./SciVer/papers/2411.02712v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "5.3" ], "image_path": "./SciVer/images/2411.02712v1-Table5-1.png", "request_id": 277, "origin_statement": "In the RLHF-V scenario, applying normalization reduces CHAIR from 5.6 to 5.5, a decrease of 0.1, whereas on the Synthetic Augmented Data, normalization lowers CHAIR from 6.6 to 6.2, a larger decrease of 0.4.", "perturbed_statement": "In the RLHF-V scenario, applying normalization reduces CHAIR from 5.6 to 5.3, a decrease of 0.3, whereas on the Synthetic Augmented Data, normalization lowers CHAIR from 6.6 to 6.2, a larger decrease of 0.4.", "perturbed_explanation": "The perturbed statement claims RLHF-V CHAIR drops from 5.6 to 5.3 (a 0.3 decrease), but the table shows normalization yields 5.5, not 5.3. The actual decrease is 0.1, not 0.3.", "claim": "In the RLHF-V scenario, applying normalization reduces CHAIR from 5.6 to 5.3, a decrease of 0.3, whereas on the Synthetic Augmented Data, normalization lowers CHAIR from 6.6 to 6.2, a larger decrease of 0.4.", "label": false }, { "paperid": "2410.21329v1", "paper_path": "./SciVer/papers/2410.21329v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.21329v1-Table3-1.png", "request_id": 279, "origin_statement": "The overcast category accounts for more than four times the data proportion of the partly cloudy category (0.49 vs. 0.12 density).", "perturbed_statement": "The overcast category accounts for more than four times the data proportion of the partly cloudy category (0.34 vs. 0.12 density).", "perturbed_explanation": "The table lists the overcast category’s density as 0.49, not 0.34, so the perturbed density value is incorrect and the ratio no longer exceeds four.", "claim": "The overcast category accounts for more than four times the data proportion of the partly cloudy category (0.34 vs. 
0.12 density).", "label": false }, { "paperid": "2411.03401v1", "paper_path": "./SciVer/papers/2411.03401v1.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.03401v1_figure_8.png", "request_id": 280, "origin_statement": "In 4PB specimens, pore sphericity decreases from about 0.68 at 10 µm to around 0.60 at 20 µm, while in axial specimens sphericity drops from approximately 0.62 at 20 µm to about 0.48 at 30 µm.", "perturbed_statement": "In 4PB specimens, pore sphericity decreases from about 0.75 at 10 µm to around 0.70 at 20 µm, while in axial specimens sphericity drops from approximately 0.58 at 20 µm to about 0.38 at 30 µm.", "perturbed_explanation": "The perturbation is incorrect because the plot shows 4PB sphericity values around 0.68 at 10 µm and 0.60 at 20 µm, not the inflated 0.75–0.70 range. Likewise, axial sphericity is near 0.62 at 20 µm and 0.48 at 30 µm, not as low as 0.58–0.38.", "claim": "In 4PB specimens, pore sphericity decreases from about 0.75 at 10 µm to around 0.70 at 20 µm, while in axial specimens sphericity drops from approximately 0.58 at 20 µm to about 0.38 at 30 µm.", "label": false }, { "paperid": "2409.19572v1", "paper_path": "./SciVer/papers/2409.19572v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.19572v1-Table2-1.png", "request_id": 285, "origin_statement": "On the Wizard-of-Internet dataset, Model-wholeseq achieves a Sum score of 130.86, surpassing T5-base's 119.71 by 11.15 points.", "perturbed_statement": "On the Wizard-of-Internet dataset, Model-wholeseq achieves a Sum score of 132.86, surpassing T5-base's 119.71 by 11.15 points.", "perturbed_explanation": "The table shows Model-wholeseq’s Sum score on Wizard-of-Internet is 130.86, not 132.86, so the perturbed Sum score is incorrect.", "claim": "On the Wizard-of-Internet dataset, Model-wholeseq achieves a Sum score of 132.86, surpassing T5-base's 119.71 by 11.15 points.", "label": false }, { "paperid": "2409.20058v1", "paper_path": "./SciVer/papers/2409.20058v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1", "4.3" ], "image_path": "./SciVer/images/2409.20058v1-Table1-1.png", "request_id": 287, "origin_statement": "In the 80–200 keV band, the Galactic Bulge region's flux (~102.2 mCrab) is about 3.5 times higher than that of the L–20 spiral arm (~29.3 mCrab).", "perturbed_statement": "In the 80–200 keV band, the Galactic Bulge region's flux (~95.2 mCrab) is about 3.5 times higher than that of the L–20 spiral arm (~29.3 mCrab).", "perturbed_explanation": "This statement is incorrect because Table 1 reports the Galactic Bulge flux in the 80–200 keV band as 102.2 ±13.3 mCrab, not 95.2 mCrab.", "claim": "In the 80–200 keV band, the Galactic Bulge region's flux (~95.2 mCrab) is about 3.5 times higher than that of the L–20 spiral arm (~29.3 mCrab).", "label": false }, { "paperid": "2410.18069v1", "paper_path": "./SciVer/papers/2410.18069v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.1.2" ], "image_path": "./SciVer/images/2410.18069v1_figure_4.png", "request_id": 288, "origin_statement": "At r≈600, three energy levels converge very close to the three-breather threshold (ratio≈3), two converge near the two-breather threshold (ratio≈2), one at the one-breather threshold (ratio≈1), and the vacuum remains at zero.", "perturbed_statement": "At r≈600, four energy levels converge near the two-breather threshold 
(ratio≈2), one converges near the three-breather threshold (ratio≈3), one at the one-breather threshold (ratio≈1), and the vacuum remains at zero.", "perturbed_explanation": "This is incorrect because the plot at r≈600 shows only two energy curves approaching the green dashed line at ratio≈2, while three curves approach the red dashed line at ratio≈3, not one.", "claim": "At r≈600, four energy levels converge near the two-breather threshold (ratio≈2), one converges near the three-breather threshold (ratio≈3), one at the one-breather threshold (ratio≈1), and the vacuum remains at zero.", "label": false }, { "paperid": "2410.04068v1", "paper_path": "./SciVer/papers/2410.04068v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "3.2.3" ], "image_path": "./SciVer/images/2410.04068v1-Table5-1.png", "request_id": 289, "origin_statement": "MiniCheck-D’s conflict detection accuracy spans from 89.0% at low intensity to 96.1% at high intensity, with a standard deviation of 3.7%, making it the most stable among factual consistency models.", "perturbed_statement": "MiniCheck-D’s conflict detection accuracy spans from 85.0% at low intensity to 95.0% at high intensity, with a standard deviation of 2.5%, making it the most stable among factual consistency models.", "perturbed_explanation": "The table shows that MiniCheck-D’s accuracy is actually 89.0% at low intensity (not 85.0%), 96.1% at high intensity (not 95.0%), and the standard deviation under conflict is 3.7 (not 2.5). These values contradict the perturbed statement.", "claim": "MiniCheck-D’s conflict detection accuracy spans from 85.0% at low intensity to 95.0% at high intensity, with a standard deviation of 2.5%, making it the most stable among factual consistency models.", "label": false }, { "paperid": "2410.07679v2", "paper_path": "./SciVer/papers/2410.07679v2.json", "claim_type": "direct", "type": "chart", "item": "9(a)", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.07679v2_figure_9(a).png", "request_id": 292, "origin_statement": "At α = 0.8, the IS_P2P method achieves a FID of about 3.735, roughly 0.065 lower than the RCFD baseline of 3.800.", "perturbed_statement": "At α = 0.8, the IS_P2P method achieves a FID of about 3.680, roughly 0.120 lower than the RCFD baseline of 3.800.", "perturbed_explanation": "The chart shows the FID at α = 0.8 is approximately 3.735, not 3.680, so the perturbed FID value is incorrect.", "claim": "At α = 0.8, the IS_P2P method achieves a FID of about 3.680, roughly 0.120 lower than the RCFD baseline of 3.800.", "label": false }, { "paperid": "2410.24169v1", "paper_path": "./SciVer/papers/2410.24169v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.24169v1-Table3-1.png", "request_id": 293, "origin_statement": "EScAIP’s mean absolute error (MAE) of 38 eV/atom is 4 eV/atom lower than EquiformerV2’s MAE of 42 eV/atom.", "perturbed_statement": "EScAIP’s MAE of 38 eV/atom is 5 eV/atom lower than EquiformerV2’s MAE of 42 eV/atom.", "perturbed_explanation": "This is incorrect because the table shows EScAIP’s MAE is 38 eV/atom and EquiformerV2’s is 42 eV/atom, a difference of 4 eV/atom, not 5 eV/atom.", "claim": "EScAIP’s MAE of 38 eV/atom is 5 eV/atom lower than EquiformerV2’s MAE of 42 eV/atom.", "label": false }, { "paperid": "2410.02320v2", "paper_path": "./SciVer/papers/2410.02320v2.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "6.1" ], "image_path": 
"./SciVer/images/2410.02320v2_figure_3.png", "request_id": 294, "origin_statement": "Under dCPO, En→Ru post-edits show a median log-probability increase of approximately 0.20, while MT shows about 0.08.", "perturbed_statement": "Under dCPO, En→Ru post-edits show a median log-probability increase of approximately 0.05, while MT shows about 0.30.", "perturbed_explanation": "The perturbation is incorrect because the violin plot for dCPO shows the median increase for post-edits at around 0.20 (not 0.05) and for machine translations at around 0.08 (not 0.30).", "claim": "Under dCPO, En→Ru post-edits show a median log-probability increase of approximately 0.05, while MT shows about 0.30.", "label": false }, { "paperid": "2409.04846v1", "paper_path": "./SciVer/papers/2409.04846v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2409.04846v1_figure_1.png", "request_id": 298, "origin_statement": "The N662 (Hα) filter is centered at about 657 nm with a peak transmission of ~100%, whereas the N673 ([S II]) filter peaks at ≈672 nm with ~100% transmission—15 nm redward—and the DES r′ filter spans 575–725 nm at ~90% transmission.", "perturbed_statement": "The N662 (Hα) filter is centered at about 657 nm with a peak transmission of ~100%, whereas the N673 ([S II]) filter peaks at ≈680 nm with ~80% transmission—23 nm redward—and the DES r′ filter spans 550–750 nm at ~85% transmission.", "perturbed_explanation": "The perturbed claim is incorrect because the N673 ([S II]) filter actually peaks at ≈672 nm (not 680 nm) and reaches ~100% transmission (not ~80%), and the DES r′ filter covers approximately 575–725 nm at ~90% transmission (not 550–750 nm at ~85%).", "claim": "The N662 (Hα) filter is centered at about 657 nm with a peak transmission of ~100%, whereas the N673 ([S II]) filter peaks at ≈680 nm with ~80% transmission—23 nm redward—and the DES r′ filter spans 550–750 nm at ~85% transmission.", "label": false }, { "paperid": "2411.03445v1", "paper_path": "./SciVer/papers/2411.03445v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.2.3" ], "image_path": "./SciVer/images/2411.03445v1-Table3-1.png", "request_id": 299, "origin_statement": "Across both triggers, CNN models exhibit a 0.03 higher clean accuracy (0.93 vs 0.90) compared to FC models, and both architectures reach 100% ASR with either the watermark or checkerboard trigger.", "perturbed_statement": "Across both triggers, CNN models exhibit a 0.05 higher clean accuracy (0.93 vs 0.90) compared to FC models, and both architectures reach 100% ASR with either the watermark or checkerboard trigger.", "perturbed_explanation": "The table shows CNN clean accuracy is 0.93 and FC clean accuracy is 0.90, a difference of 0.03, not 0.05 as stated in the perturbed claim.", "claim": "Across both triggers, CNN models exhibit a 0.05 higher clean accuracy (0.93 vs 0.90) compared to FC models, and both architectures reach 100% ASR with either the watermark or checkerboard trigger.", "label": false }, { "paperid": "2411.06171v1", "paper_path": "./SciVer/papers/2411.06171v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2411.06171v1_figure_1.png", "request_id": 300, "origin_statement": "On the 20Minuten task, grafting attention weights onto the final model reduces perplexity from about 1.49 to 1.23, a drop of approximately 0.26.", "perturbed_statement": "On the 20Minuten task, grafting attention weights onto the 
final model reduces perplexity from about 1.49 to 1.15, a drop of approximately 0.34.", "perturbed_explanation": "The perturbed statement claims the grafted model achieves a perplexity of 1.15, but the chart shows the actual perplexity with grafted attention is about 1.23, not 1.15, making the stated reduction of 0.34 incorrect.", "claim": "On the 20Minuten task, grafting attention weights onto the final model reduces perplexity from about 1.49 to 1.15, a drop of approximately 0.34.", "label": false }, { "paperid": "2411.16198v1", "paper_path": "./SciVer/papers/2411.16198v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.4.1" ], "image_path": "./SciVer/images/2411.16198v1_figure_5.png", "request_id": 302, "origin_statement": "The classification confidence for the searched D shirt region (0.91) exceeds its object score (0.84) by 0.07, an 8.3% increase.", "perturbed_statement": "The classification confidence for the searched D shirt region (0.88) exceeds its object score (0.84) by 0.04, a 4.8% increase.", "perturbed_explanation": "The image shows the classification confidence for the D shirt region is actually 0.91 (not 0.88), so the perturbed statement’s confidence value is incorrect.", "claim": "The classification confidence for the searched D shirt region (0.88) exceeds its object score (0.84) by 0.04, a 4.8% increase.", "label": false }, { "paperid": "2410.05468v2", "paper_path": "./SciVer/papers/2410.05468v2.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.05468v2_figure_5.png", "request_id": 304, "origin_statement": "In the T&T unbounded scenario, the average Spearman correlation for 3DGS increases from approximately 0.32 at 16 training views to about 0.41 at 256 views.", "perturbed_statement": "In the T&T unbounded scenario, the average Spearman correlation for 3DGS increases from approximately 0.28 at 16 training views to about 0.48 at 256 views.", "perturbed_explanation": "The perturbed statement is incorrect because the actual average Spearman correlation for 3DGS at 16 views is about 0.32 (not 0.28) and at 256 views is about 0.41 (not 0.48), as shown in the third plot of Figure 5.", "claim": "In the T&T unbounded scenario, the average Spearman correlation for 3DGS increases from approximately 0.28 at 16 training views to about 0.48 at 256 views.", "label": false }, { "paperid": "2410.23910v1", "paper_path": "./SciVer/papers/2410.23910v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.23910v1-Table2-1.png", "request_id": 305, "origin_statement": "Our method's average ROC-AUC of 0.6235 exceeds the second-best BatchE's average ROC-AUC of 0.5754 by 0.0481.", "perturbed_statement": "Our method's average ROC-AUC of 0.6235 exceeds the second-best MaskE's average ROC-AUC of 0.5673 by 0.0562.", "perturbed_explanation": "This is incorrect because the table shows BatchE, not MaskE, has the second-highest average ROC-AUC at 0.5754. 
MaskE’s average ROC-AUC is 0.5673, so it is not the runner-up and cannot serve as the second-best method.", "claim": "Our method's average ROC-AUC of 0.6235 exceeds the second-best MaskE's average ROC-AUC of 0.5673 by 0.0562.", "label": false }, { "paperid": "2409.04766v1", "paper_path": "./SciVer/papers/2409.04766v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.7" ], "image_path": "./SciVer/images/2409.04766v1_figure_5.png", "request_id": 306, "origin_statement": "The epistemic uncertainty for EyeDiap (3.29×10⁻⁴) is over twice that of MPIIFaceGaze (1.52×10⁻⁴), while Gaze360 (2.87×10⁻⁴) exceeds ETH-Gaze (2.22×10⁻⁴) by 0.65×10⁻⁴.", "perturbed_statement": "The epistemic uncertainty for EyeDiap (2.29×10⁻⁴) is over twice that of MPIIFaceGaze (1.52×10⁻⁴), while Gaze360 (2.87×10⁻⁴) exceeds ETH-Gaze (2.22×10⁻⁴) by 0.65×10⁻⁴.", "perturbed_explanation": "The chart indicates EyeDiap’s uncertainty is 3.29×10⁻⁴, not 2.29×10⁻⁴, and 2.29×10⁻⁴ is not more than twice MPIIFaceGaze’s 1.52×10⁻⁴, making the comparison incorrect.", "claim": "The epistemic uncertainty for EyeDiap (2.29×10⁻⁴) is over twice that of MPIIFaceGaze (1.52×10⁻⁴), while Gaze360 (2.87×10⁻⁴) exceeds ETH-Gaze (2.22×10⁻⁴) by 0.65×10⁻⁴.", "label": false }, { "paperid": "2411.04649v1", "paper_path": "./SciVer/papers/2411.04649v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.3.1" ], "image_path": "./SciVer/images/2411.04649v1_figure_5.png", "request_id": 308, "origin_statement": "For the MultiRC dataset, all three models achieved a perfect Fleiss’ kappa of 1.0; in contrast, on the Movie dataset only BERT had a positive agreement of 0.52, while LSTM and S-BERT scored negative kappas of approximately -0.05 and -0.14 respectively.", "perturbed_statement": "For the MultiRC dataset, all three models achieved a perfect Fleiss’ kappa of 0.9; in contrast, on the Movie dataset BERT had a positive agreement of 0.75, while LSTM and S-BERT scored negative kappas of approximately -0.05 and -0.14 respectively.", "perturbed_explanation": "The perturbed statement misreports the MultiRC kappas: all three models actually scored 1.0, not 0.9. 
It also incorrectly inflates BERT’s Movie agreement, which is 0.52 rather than 0.75.", "claim": "For the MultiRC dataset, all three models achieved a perfect Fleiss’ kappa of 0.9; in contrast, on the Movie dataset BERT had a positive agreement of 0.75, while LSTM and S-BERT scored negative kappas of approximately -0.05 and -0.14 respectively.", "label": false }, { "paperid": "2410.19483v1", "paper_path": "./SciVer/papers/2410.19483v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.19483v1-Table4-1.png", "request_id": 309, "origin_statement": "In the Synthetic-NeRF MDL scenario, NGP-A-CAQ lowers FQR to 7.76—a 1.84-point drop versus NGP-LSQ+’s 9.60—while its PSNR of 32.00 dB is only 0.11 dB below the 32.11 dB of NGP-LSQ+.", "perturbed_statement": "In the Synthetic-NeRF MDL scenario, NGP-A-CAQ lowers FQR to 7.76—a 2.00-point drop versus NGP-LSQ+’s 9.60—while its PSNR of 32.00 dB is only 0.50 dB below the 32.11 dB of NGP-LSQ+.", "perturbed_explanation": "This is incorrect because the table shows FQR drops from 9.60 to 7.76, a 1.84-point reduction (not 2.00), and PSNR decreases from 32.11 dB to 32.00 dB, a 0.11 dB difference (not 0.50 dB).", "claim": "In the Synthetic-NeRF MDL scenario, NGP-A-CAQ lowers FQR to 7.76—a 2.00-point drop versus NGP-LSQ+’s 9.60—while its PSNR of 32.00 dB is only 0.50 dB below the 32.11 dB of NGP-LSQ+.", "label": false }, { "paperid": "2409.02184v1", "paper_path": "./SciVer/papers/2409.02184v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.02184v1_figure_5.png", "request_id": 310, "origin_statement": "The BCG+ICL light fraction measured within 150 kpc rises from about 0.47 at M14≈0.7 to about 0.73 at M14≈2.2, indicating an approximate 55% increase between the lowest and highest M14 bins.", "perturbed_statement": "The BCG+ICL light fraction measured within 150 kpc rises from about 0.47 at M14≈0.7 to about 0.68 at M14≈2.2, indicating an approximate 45% increase between the lowest and highest M14 bins.", "perturbed_explanation": "Figure 5 (middle panel) shows the BCG+ICL fraction at M14≈2.2 is roughly 0.73, not 0.68. This incorrect endpoint value makes the stated increase inaccurate.", "claim": "The BCG+ICL light fraction measured within 150 kpc rises from about 0.47 at M14≈0.7 to about 0.68 at M14≈2.2, indicating an approximate 45% increase between the lowest and highest M14 bins.", "label": false }, { "paperid": "2411.16787v1", "paper_path": "./SciVer/papers/2411.16787v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "4.6" ], "image_path": "./SciVer/images/2411.16787v1-Table5-1.png", "request_id": 319, "origin_statement": "The range width of γ_k (11 − 5 = 6) is ten times larger than the range width of the ρ parameters (0.9 − 0.3 = 0.6).", "perturbed_statement": "The range width of γ_k (11 − 4 = 7) is ten times larger than the range width of the ρ parameters (0.9 − 0.3 = 0.6).", "perturbed_explanation": "This is incorrect because γ_k actually ranges from 5 to 11, giving a width of 6 (11 − 5), not 7. 
Thus the claimed γ_k width of 7 is unsupported by the table.", "claim": "The range width of γ_k (11 − 4 = 7) is ten times larger than the range width of the ρ parameters (0.9 − 0.3 = 0.6).", "label": false }, { "paperid": "2411.13584v1", "paper_path": "./SciVer/papers/2411.13584v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.4" ], "image_path": "./SciVer/images/2411.13584v1-Table2-1.png", "request_id": 321, "origin_statement": "AddrLLM(ours) achieved a Geocoding Acc@station of 94.3%, surpassing QWen-7B’s 79.1% by 15.2 points.", "perturbed_statement": "AddrLLM(ours) achieved a Geocoding Acc@station of 94.3%, surpassing QWen-7B’s 83.1% by 15.2 points.", "perturbed_explanation": "This statement is incorrect because Table 2 shows QWen-7B’s Geocoding Acc@station is 79.1%, not 83.1%. Consequently, the claimed comparison against 83.1% is not supported by the table.", "claim": "AddrLLM(ours) achieved a Geocoding Acc@station of 94.3%, surpassing QWen-7B’s 83.1% by 15.2 points.", "label": false }, { "paperid": "2410.10289v1", "paper_path": "./SciVer/papers/2410.10289v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.10289v1_figure_5.png", "request_id": 322, "origin_statement": "On industrial datasets, increasing the prompt lengths from (2,1) to (5,2) raises I-AUROC from ~90% to ~91.5%, I-AP from ~92.5% to ~93%, P-AUROC from ~84% to ~85%, and P-PRO from ~64% to ~65%.", "perturbed_statement": "On industrial datasets, increasing the prompt lengths from (2,1) to (5,2) raises I-AUROC from ~90% to ~92%, I-AP from ~92.5% to ~93%, P-AUROC from ~84% to ~85%, and P-PRO from ~64% to ~67%.", "perturbed_explanation": "The perturbation is wrong because the actual I-AUROC for (5,2) is approximately 91.5%, not 92%, and the actual P-PRO is about 65%, not 67%, as shown by the blue bar heights in Figure 5.", "claim": "On industrial datasets, increasing the prompt lengths from (2,1) to (5,2) raises I-AUROC from ~90% to ~92%, I-AP from ~92.5% to ~93%, P-AUROC from ~84% to ~85%, and P-PRO from ~64% to ~67%.", "label": false }, { "paperid": "2409.14001v1", "paper_path": "./SciVer/papers/2409.14001v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.3.1" ], "image_path": "./SciVer/images/2409.14001v1-Table2-1.png", "request_id": 323, "origin_statement": "On Cora, the proposed BPGNN model achieves an accuracy of 87.10%, which is 0.47 percentage points higher than the second-best model, dDGM-EHH at 86.63%.", "perturbed_statement": "On Cora, the proposed BPGNN model achieves an accuracy of 88.10%, which is 1.47 percentage points higher than the second-best model, dDGM-EHH at 86.63%.", "perturbed_explanation": "The table shows BPGNN’s accuracy on Cora is 87.10%, not 88.10%, and the actual difference to dDGM-EHH’s 86.63% is 0.47 percentage points, not 1.47.", "claim": "On Cora, the proposed BPGNN model achieves an accuracy of 88.10%, which is 1.47 percentage points higher than the second-best model, dDGM-EHH at 86.63%.", "label": false }, { "paperid": "2409.17137v3", "paper_path": "./SciVer/papers/2409.17137v3.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.17137v3_figure_5.png", "request_id": 324, "origin_statement": "As the regularization strength λ rises from 1e-3 to 5e4 in Figure 5, the average gradient norm of the +PACE model decreases from about 0.32 to 0.11 (a drop of ~0.21), while the baseline stays roughly constant around 
0.33, highlighting PACE’s consistent gradient reduction.", "perturbed_statement": "As the regularization strength λ rises from 1e-3 to 5e4 in Figure 5, the average gradient norm of the +PACE model decreases from about 0.40 to 0.10 (a drop of 0.30), while the baseline stays roughly constant around 0.45, highlighting PACE’s consistent gradient reduction.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 5 shows the +PACE line falling from approximately 0.32 to 0.11 (not from 0.40 to 0.10), and the baseline line remains near 0.33 (not 0.45).", "claim": "As the regularization strength λ rises from 1e-3 to 5e4 in Figure 5, the average gradient norm of the +PACE model decreases from about 0.40 to 0.10 (a drop of 0.30), while the baseline stays roughly constant around 0.45, highlighting PACE’s consistent gradient reduction.", "label": false }, { "paperid": "2409.11357v1", "paper_path": "./SciVer/papers/2409.11357v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.11357v1-Table3-1.png", "request_id": 325, "origin_statement": "The mean number of notifications called in the OS condition (7.15) is approximately 3.07 higher than in the O condition (4.08), and in the DS condition (6.58) it is about 2.46 higher than in the D condition (4.12).", "perturbed_statement": "The mean number of notifications called in the OS condition (7.15) is approximately 1.5 higher than in the O condition (4.08), and in the DS condition (6.58) it is about 4.5 higher than in the D condition (4.12).", "perturbed_explanation": "The perturbed statement misstates the differences: the actual increase from O (4.08) to OS (7.15) is 3.07, not 1.5, and from D (4.12) to DS (6.58) is 2.46, not 4.5, as shown in the table’s mean values.", "claim": "The mean number of notifications called in the OS condition (7.15) is approximately 1.5 higher than in the O condition (4.08), and in the DS condition (6.58) it is about 4.5 higher than in the D condition (4.12).", "label": false }, { "paperid": "2410.10177v1", "paper_path": "./SciVer/papers/2410.10177v1.json", "claim_type": "direct", "type": "chart", "item": "3(b)", "section": [ "6.3" ], "image_path": "./SciVer/images/2410.10177v1_figure_3(b).png", "request_id": 326, "origin_statement": "With eight query images, LDM achieves an AUC of 0.90, 0.10 higher than DDPM's 0.80 and 0.13 higher than DDIM's 0.77.", "perturbed_statement": "With eight query images, LDM achieves an AUC of 0.92, 0.12 higher than DDPM's 0.80 and 0.15 higher than DDIM's 0.77.", "perturbed_explanation": "The chart shows that the LDM model’s AUC at eight query images is 0.90, not 0.92. 
Therefore the stated 0.92 value (and the derived differences) contradicts the visual data.", "claim": "With eight query images, LDM achieves an AUC of 0.92, 0.12 higher than DDPM's 0.80 and 0.15 higher than DDIM's 0.77.", "label": false }, { "paperid": "2409.00700v1", "paper_path": "./SciVer/papers/2409.00700v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2409.00700v1-Table1-1.png", "request_id": 327, "origin_statement": "Of the six methods in Table 1, only one method (ID-FaceVC) supports controllability and uniquely accepts both audio and text inputs, while the other five each accept a single modality and lack controllability.", "perturbed_statement": "Of the six methods in Table 1, two methods (ID-FaceVC and SP-FaceVC) support controllability and accept both audio and text inputs.", "perturbed_explanation": "This is incorrect because SP-FaceVC only accepts audio as its input modality and has no controllability mark (✗) in Table 1, so it neither supports controllability nor accepts text.", "claim": "Of the six methods in Table 1, two methods (ID-FaceVC and SP-FaceVC) support controllability and accept both audio and text inputs.", "label": false }, { "paperid": "2410.21769v1", "paper_path": "./SciVer/papers/2410.21769v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "2.5" ], "image_path": "./SciVer/images/2410.21769v1-Table5-1.png", "request_id": 335, "origin_statement": "The Allen-Dynes predicted critical temperature Tc(AD) for TiSH increases from 15.31 K in the 2H phase to 23.11 K in the 1T phase, a rise of 7.8 K, while for TiSeH it increases from 11.51 K to 14.36 K, a rise of 2.85 K.", "perturbed_statement": "The Allen-Dynes predicted critical temperature Tc(AD) for TiSH increases from 15.31 K in the 2H phase to 24.11 K in the 1T phase, a rise of 8.8 K, while for TiSeH it increases from 11.51 K to 16.36 K, a rise of 4.85 K.", "perturbed_explanation": "The perturbed statement misreports the 1T-phase Tc(AD) values: Table 5 lists 23.11 K for 1T-TiSH (not 24.11 K) and 14.36 K for 1T-TiSeH (not 16.36 K), so both claimed temperatures and the calculated rises are incorrect.", "claim": "The Allen-Dynes predicted critical temperature Tc(AD) for TiSH increases from 15.31 K in the 2H phase to 24.11 K in the 1T phase, a rise of 8.8 K, while for TiSeH it increases from 11.51 K to 16.36 K, a rise of 4.85 K.", "label": false }, { "paperid": "2410.22015v1", "paper_path": "./SciVer/papers/2410.22015v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.1.3" ], "image_path": "./SciVer/images/2410.22015v1-Table3-1.png", "request_id": 337, "origin_statement": "Sant Andreu and Nou Barris record the lowest NO2 concentrations, 38.7 μg/m³ and 38.9 μg/m³ respectively, and have the two smallest pedestrian exposure levels (20,094 and 19,750 μg/m³·ped./h), indicating these clusters have minimal long-term exposure.", "perturbed_statement": "Sant Andreu and Nou Barris record the highest NO2 concentrations, 38.7 μg/m³ and 38.9 μg/m³ respectively, and also exhibit the two largest pedestrian exposure levels (20,094 and 19,750 μg/m³·ped./h), indicating these clusters face the greatest long-term exposure.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 shows Sant Andreu and Nou Barris actually have the lowest NO2 concentrations (38.7 and 38.9 μg/m³) and the two smallest pedestrian exposure values (20,094 and 19,750 μg/m³·ped./h), not the highest.", "claim": "Sant Andreu 
and Nou Barris record the highest NO2 concentrations, 38.7 μg/m³ and 38.9 μg/m³ respectively, and also exhibit the two largest pedestrian exposure levels (20,094 and 19,750 μg/m³·ped./h), indicating these clusters face the greatest long-term exposure.", "label": false }, { "paperid": "2411.03878v1", "paper_path": "./SciVer/papers/2411.03878v1.json", "claim_type": "direct", "type": "table", "item": "7", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.03878v1-Table7-1.png", "request_id": 339, "origin_statement": "Under simulation with the L-BFGS-B optimiser, the QLOQ ansatz yielded a mean GSEE of -7.86112, about 0.00852 hartree lower than the qubit ansatz with CZ Cascade (mean GSEE -7.8526), indicating improved energy accuracy.", "perturbed_statement": "Under simulation with the L-BFGS-B optimiser, the QLOQ ansatz yielded a mean GSEE of -7.86112, about 0.009 hartree lower than the qubit ansatz with CZ Cascade (mean GSEE -7.8535), indicating improved energy accuracy.", "perturbed_explanation": "This is incorrect because the table shows the qubit ansatz with CZ Cascade has a mean GSEE of -7.8526, not -7.8535, and the actual difference to -7.86112 is 0.00852 hartree, not 0.009.", "claim": "Under simulation with the L-BFGS-B optimiser, the QLOQ ansatz yielded a mean GSEE of -7.86112, about 0.009 hartree lower than the qubit ansatz with CZ Cascade (mean GSEE -7.8535), indicating improved energy accuracy.", "label": false }, { "paperid": "2411.02091v1", "paper_path": "./SciVer/papers/2411.02091v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.4", "4.5" ], "image_path": "./SciVer/images/2411.02091v1-Table3-1.png", "request_id": 345, "origin_statement": "The LOCADD coefficient (0.18) is six times larger than the MODFILE coefficient (0.03), and it is the only metric significant at p<0.01; MODFILE’s p-value is 0.36 and LOCDEL’s is 0.21, both above 0.05.", "perturbed_statement": "The LOCADD coefficient (0.12) is four times larger than the MODFILE coefficient (0.03), and both LOCADD and MODFILE are significant at p<0.05, while LOCDEL is not significant.", "perturbed_explanation": "This statement is incorrect because the LOCADD coefficient is actually 0.18 (not 0.12), creating a sixfold difference compared to MODFILE’s 0.03 rather than fourfold. 
Additionally, MODFILE’s p-value is 0.36, which exceeds 0.05, so MODFILE is not statistically significant.", "claim": "The LOCADD coefficient (0.12) is four times larger than the MODFILE coefficient (0.03), and both LOCADD and MODFILE are significant at p<0.05, while LOCDEL is not significant.", "label": false }, { "paperid": "2409.13171v1", "paper_path": "./SciVer/papers/2409.13171v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.13171v1-Table3-1.png", "request_id": 347, "origin_statement": "On Dataset A, the HR|SR configuration yields a PSNR of 27.12 dB, 17.72 dB higher than the 9.40 dB in the HR|LR configuration, representing a 188% increase.", "perturbed_statement": "On Dataset A, the HR|SR configuration yields a PSNR of 25.12 dB, 15.72 dB higher than the 9.40 dB in the HR|LR configuration, representing a 150% increase.", "perturbed_explanation": "The table reports a PSNR of 27.12 dB for HR|SR (not 25.12 dB), the difference from 9.40 dB is 17.72 dB (not 15.72 dB), and the relative increase is about 188% (not 150%).", "claim": "On Dataset A, the HR|SR configuration yields a PSNR of 25.12 dB, 15.72 dB higher than the 9.40 dB in the HR|LR configuration, representing a 150% increase.", "label": false }, { "paperid": "2411.12785v1", "paper_path": "./SciVer/papers/2411.12785v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "1", "3.1", "3.2" ], "image_path": "./SciVer/images/2411.12785v1_figure_2.png", "request_id": 348, "origin_statement": "In the image embeddings, skin tone attributes exhibit greater separation between light and dark clusters across concepts than gender or age attributes, indicating stronger skin tone bias in image modality.", "perturbed_statement": "In the image embeddings, age attributes exhibit greater separation between young, middle-aged, and old clusters across concepts than skin tone or gender attributes, indicating stronger age bias in image modality.", "perturbed_explanation": "This claim is wrong because Figure 2’s top-left panel shows skin tone points (Y-shaped for light vs pentagon for dark) are more widely dispersed than age points (X, diamond, star). 
Age-related clusters are more intermixed, so they do not exhibit the greatest separation.", "claim": "In the image embeddings, age attributes exhibit greater separation between young, middle-aged, and old clusters across concepts than skin tone or gender attributes, indicating stronger age bias in image modality.", "label": false }, { "paperid": "2409.14940v1", "paper_path": "./SciVer/papers/2409.14940v1.json", "claim_type": "direct", "type": "chart", "item": "5(a)", "section": [ "4.5" ], "image_path": "./SciVer/images/2409.14940v1_figure_5(a).png", "request_id": 350, "origin_statement": "The Threshold confidence rises from approximately 62% at testing index 64 to about 84% at index 2468, representing an increase of roughly 22 percentage points.", "perturbed_statement": "The Threshold confidence rises from approximately 60% at testing index 64 to about 82% at index 2468, representing an increase of roughly 22 percentage points.", "perturbed_explanation": "This is incorrect because the figure shows the Threshold confidence actually starts around 62% (not 60%) at index 64 and ends near 84% (not 82%) at index 2468, so both values are misstated.", "claim": "The Threshold confidence rises from approximately 60% at testing index 64 to about 82% at index 2468, representing an increase of roughly 22 percentage points.", "label": false }, { "paperid": "2409.08361v1", "paper_path": "./SciVer/papers/2409.08361v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2", "4.1", "4.2" ], "image_path": "./SciVer/images/2409.08361v1-Table1-1.png", "request_id": 351, "origin_statement": "WASP-47 has the highest stellar iron abundance ([Fe/H] = +0.36 dex) in the sample and an Equivalent CMF of 0.31, which is 0.10 higher than the lowest CMF (0.21) measured for TOI-561.", "perturbed_statement": "WASP-47 has the highest stellar iron abundance ([Fe/H] = +0.36 dex) in the sample and an Equivalent CMF of 0.35, which is 0.15 higher than the lowest CMF (0.21) measured for TOI-561.", "perturbed_explanation": "The perturbed statement is incorrect because the table lists WASP-47’s Equivalent CMF as 0.31, not 0.35. 
Therefore, the difference from TOI-561’s CMF of 0.21 is 0.10, not 0.15.", "claim": "WASP-47 has the highest stellar iron abundance ([Fe/H] = +0.36 dex) in the sample and an Equivalent CMF of 0.35, which is 0.15 higher than the lowest CMF (0.21) measured for TOI-561.", "label": false }, { "paperid": "2409.12993v1", "paper_path": "./SciVer/papers/2409.12993v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.12993v1-Table6-1.png", "request_id": 353, "origin_statement": "SDG-CC-Repair attains an 81.9% machine pass@1 rate, exceeding SDG's 75.2% by 6.7 points, and a 68.0% human pass@1 rate, outperforming SDG-CC's 62.0% by 6.0 points.", "perturbed_statement": "SDG-CC-Repair attains a 79.1% machine pass@1 rate, exceeding SDG's 75.2% by 3.9 points, and a 68.0% human pass@1 rate, outperforming SDG-CC's 62.0% by 6.0 points.", "perturbed_explanation": "The table lists the SDG-CC-Repair model’s machine pass@1 as 81.9%, not 79.1%, so the claimed 3.9-point increase over SDG’s 75.2% is incorrect; the actual improvement is 6.7 points.", "claim": "SDG-CC-Repair attains a 79.1% machine pass@1 rate, exceeding SDG's 75.2% by 3.9 points, and a 68.0% human pass@1 rate, outperforming SDG-CC's 62.0% by 6.0 points.", "label": false }, { "paperid": "2411.09534v1", "paper_path": "./SciVer/papers/2411.09534v1.json", "claim_type": "direct", "type": "chart", "item": "6(a)", "section": [ "5.5" ], "image_path": "./SciVer/images/2411.09534v1_figure_6(a).png", "request_id": 354, "origin_statement": "On HeartBeatClass, Apollo 3 consumes 0.360 mJ total, which is over 110 times less energy than Raspberry Pi Pico-S’s total of 39.698 mJ.", "perturbed_statement": "On HeartBeatClass, Apollo 3 consumes 0.360 mJ total, which is over 500 times less energy than Raspberry Pi Pico-S’s total of 39.698 mJ.", "perturbed_explanation": "The perturbed statement claims a 500× difference, but the actual values (39.698 mJ ÷ 0.360 mJ) yield approximately a 110× difference. 
Therefore, stating ‘over 500 times less’ contradicts the figure’s numbers.", "claim": "On HeartBeatClass, Apollo 3 consumes 0.360 mJ total, which is over 500 times less energy than Raspberry Pi Pico-S’s total of 39.698 mJ.", "label": false }, { "paperid": "2411.05010v1", "paper_path": "./SciVer/papers/2411.05010v1.json", "claim_type": "direct", "type": "table", "item": "8", "section": [ "4.6" ], "image_path": "./SciVer/images/2411.05010v1-Table8-1.png", "request_id": 355, "origin_statement": "The pass@any performance peaks at 90.2% with three tests, surpassing both no tests and all tests by 1.2 percentage points.", "perturbed_statement": "The pass@any performance peaks at 91.2% with three tests, surpassing both no tests and all tests by 2.2 percentage points.", "perturbed_explanation": "This is incorrect because the table shows a pass@any rate of 90.2% (not 91.2%) for three tests, and the difference from the 89.0% achieved with no tests or all tests is 1.2 points, not 2.2 points.", "claim": "The pass@any performance peaks at 91.2% with three tests, surpassing both no tests and all tests by 2.2 percentage points.", "label": false }, { "paperid": "2411.09458v1", "paper_path": "./SciVer/papers/2411.09458v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.1.2" ], "image_path": "./SciVer/images/2411.09458v1_figure_4.png", "request_id": 356, "origin_statement": "According to the full likelihood (blue curves), the peak of ℬ(\\bar{B}_s→D_s^*π) is ≈0.0027, about 65% of the peak for ℬ(\\bar{B}_s→D_sπ) at ≈0.0042.", "perturbed_statement": "According to the full likelihood (blue curves), the peak of ℬ(\\bar{B}_s→D_s^*π) is ≈0.0038, about 90% of the peak for ℬ(\\bar{B}_s→D_sπ) at ≈0.0042.", "perturbed_explanation": "The actual full-likelihood peak for ℬ(\\bar{B}_s→D_s^*π) shown in the figure is around 0.0027, not 0.0038, so both the stated peak value and the computed 90% ratio contradict the plotted distribution.", "claim": "According to the full likelihood (blue curves), the peak of ℬ(\\bar{B}_s→D_s^*π) is ≈0.0038, about 90% of the peak for ℬ(\\bar{B}_s→D_sπ) at ≈0.0042.", "label": false }, { "paperid": "2411.15743v1", "paper_path": "./SciVer/papers/2411.15743v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.15743v1-Table2-1.png", "request_id": 357, "origin_statement": "In the known sampling rate scenario, Freq-Synth's average MSE (0.407) is approximately 12.6% lower than TimesFM's average MSE (0.466).", "perturbed_statement": "In the known sampling rate scenario, Freq-Synth's average MSE (0.407) is approximately 15% lower than TimesFM's average MSE (0.466).", "perturbed_explanation": "The perturbed statement is incorrect because the actual reduction from 0.466 to 0.407 is about (0.466−0.407)/0.466≈12.6%, not 15% as claimed.", "claim": "In the known sampling rate scenario, Freq-Synth's average MSE (0.407) is approximately 15% lower than TimesFM's average MSE (0.466).", "label": false }, { "paperid": "2409.02120v1", "paper_path": "./SciVer/papers/2409.02120v1.json", "claim_type": "direct", "type": "chart", "item": "10", "section": [ "6.2" ], "image_path": "./SciVer/images/2409.02120v1_figure_10.png", "request_id": 358, "origin_statement": "The test RMSE decreases from 0.458 K at a 25 m buffer to 0.414 K at a 50 m buffer, but then increases to 0.756 K with a 75 m buffer.", "perturbed_statement": "The test RMSE decreases from 0.458 K at a 25 m buffer to 0.284 K at a 50 m buffer, but then increases to 0.756 
K with a 75 m buffer.", "perturbed_explanation": "The perturbed statement incorrectly lists the test RMSE at 50 m as 0.284 K; the chart shows the actual test RMSE at 50 m is 0.414 K, while 0.284 K is the test MAE for the 25 m buffer, not an RMSE value.", "claim": "The test RMSE decreases from 0.458 K at a 25 m buffer to 0.284 K at a 50 m buffer, but then increases to 0.756 K with a 75 m buffer.", "label": false }, { "paperid": "2409.11704v1", "paper_path": "./SciVer/papers/2409.11704v1.json", "claim_type": "direct", "type": "chart", "item": "1(a)", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.11704v1_figure_1(a).png", "request_id": 360, "origin_statement": "At 128 candidates, the reward model trained on both bold and list pattern datasets produces approximately a 0.512 bold-pattern sample ratio, about 0.027 higher than the baseline’s 0.485.", "perturbed_statement": "At 128 candidates, the reward model trained on both bold and list pattern datasets produces approximately a 0.532 bold-pattern sample ratio, about 0.047 higher than the baseline’s 0.485.", "perturbed_explanation": "The perturbed statement is incorrect because, in Figure 1, the ratio for the +Bold and List model at 128 candidates is about 0.512, not 0.532, and the difference from the baseline’s 0.485 is 0.027 rather than 0.047.", "claim": "At 128 candidates, the reward model trained on both bold and list pattern datasets produces approximately a 0.532 bold-pattern sample ratio, about 0.047 higher than the baseline’s 0.485.", "label": false }, { "paperid": "2410.02346v2", "paper_path": "./SciVer/papers/2410.02346v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "2.2", "3.2" ], "image_path": "./SciVer/images/2410.02346v2-Table3-1.png", "request_id": 361, "origin_statement": "Table 3 shows that the reduced confidence parameter κ_red for Model M1/6COM in the 150–200 K interval is 0.474, which is 0.071 higher than Model M0COM’s κ_red of 0.403.", "perturbed_statement": "Table 3 shows that the reduced confidence parameter κ_red for Model M1/6COM in the 150–200 K interval is 0.403, which is 0.071 lower than Model M0COM’s κ_red of 0.474.", "perturbed_explanation": "The perturbed statement reverses the values: Table 3 actually lists κ_red = 0.474 for M1/6COM and κ_red = 0.403 for M0COM, not the other way around.", "claim": "Table 3 shows that the reduced confidence parameter κ_red for Model M1/6COM in the 150–200 K interval is 0.403, which is 0.071 lower than Model M0COM’s κ_red of 0.474.", "label": false }, { "paperid": "2411.06529v1", "paper_path": "./SciVer/papers/2411.06529v1.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "3.4" ], "image_path": "./SciVer/images/2411.06529v1_figure_8.png", "request_id": 364, "origin_statement": "At iteration 3, TherINO and Mod. FNO-DEQ show nearly identical L2 strain errors, about 0.40%, which is approximately half of FNO-DEQ’s roughly 0.80% error.", "perturbed_statement": "At iteration 3, TherINO and Mod. FNO-DEQ show nearly identical L2 strain errors, about 0.60%, which is roughly the same as FNO-DEQ’s approximately 0.90% error.", "perturbed_explanation": "The perturbed statement misreports the L2 strain errors at iteration 3: TherINO and Mod. FNO-DEQ actually have errors of about 0.40%, not 0.60%, and FNO-DEQ’s error is about 0.80%, not 0.90%.", "claim": "At iteration 3, TherINO and Mod. 
FNO-DEQ show nearly identical L2 strain errors, about 0.60%, which is roughly the same as FNO-DEQ’s approximately 0.90% error.", "label": false }, { "paperid": "2410.03090v1", "paper_path": "./SciVer/papers/2410.03090v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.03090v1_figure_4.png", "request_id": 366, "origin_statement": "Across NarrativeQA, 2WikiMQA, and Musique, layer 8 exhibits an average truncation matrix entropy around 0.56, significantly higher than layer 28’s average of approximately 0.44.", "perturbed_statement": "Across NarrativeQA, 2WikiMQA, and Musique, layer 15 exhibits an average truncation matrix entropy around 0.60, significantly higher than layer 28’s average of approximately 0.44.", "perturbed_explanation": "This statement is incorrect because Figure 4 shows that the highest average entropy (around 0.56) occurs at layer 8 across all three datasets. Layer 15’s entropy values are closer to 0.50, not 0.60, so it does not exhibit the claimed higher entropy.", "claim": "Across NarrativeQA, 2WikiMQA, and Musique, layer 15 exhibits an average truncation matrix entropy around 0.60, significantly higher than layer 28’s average of approximately 0.44.", "label": false }, { "paperid": "2410.14731v1", "paper_path": "./SciVer/papers/2410.14731v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2", "5.1" ], "image_path": "./SciVer/images/2410.14731v1-Table1-1.png", "request_id": 367, "origin_statement": "At 50.0% KV cache budget for LLaMA2-7B-base, MatryoshkaKV achieves an average accuracy of 59.11%, which is 16.04 percentage points higher than PCA’s 43.07%.", "perturbed_statement": "At 50.0% KV cache budget for LLaMA2-7B-base, MatryoshkaKV achieves an average accuracy of 61.11%, which is 18.04 percentage points higher than PCA’s 43.07%.", "perturbed_explanation": "The perturbed statement wrongly reports MatryoshkaKV’s average accuracy as 61.11% and an 18.04-point gain. 
Table 1 shows that at 50.0% budget, MKV’s average is 59.11% and PCA’s is 43.07%, yielding a 16.04-point difference, not the inflated values.", "claim": "At 50.0% KV cache budget for LLaMA2-7B-base, MatryoshkaKV achieves an average accuracy of 61.11%, which is 18.04 percentage points higher than PCA’s 43.07%.", "label": false }, { "paperid": "2410.21205v1", "paper_path": "./SciVer/papers/2410.21205v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.21205v1-Table1-1.png", "request_id": 369, "origin_statement": "The AIC value decreases by over 1500 points from iteration 1 (1110.34) to iteration 3 (-406.71), before rising by 797.63 points in iteration 4 to 390.92, indicating iteration 3 as the optimal model.", "perturbed_statement": "The AIC value decreases by over 1500 points from iteration 1 (1110.34) to iteration 3 (-406.71), before rising by 797.63 points in iteration 4 to 500, indicating iteration 3 as the optimal model.", "perturbed_explanation": "The perturbed claim is incorrect because, according to Table 1, the AIC value in iteration 4 is 390.92, not 500 as stated.", "claim": "The AIC value decreases by over 1500 points from iteration 1 (1110.34) to iteration 3 (-406.71), before rising by 797.63 points in iteration 4 to 500, indicating iteration 3 as the optimal model.", "label": false }, { "paperid": "2410.13371v2", "paper_path": "./SciVer/papers/2410.13371v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.13371v2-Table3-1.png", "request_id": 371, "origin_statement": "Mult-8 yields an MRE of 0.055 pixels, 0.011 pixels lower than Mult-4 and 0.016 pixels lower than Star-16, making it the most accurate pattern.", "perturbed_statement": "Mult-8 yields an MRE of 0.065 pixels, 0.011 pixels lower than Mult-4 and 0.016 pixels lower than Star-16, making it the most accurate pattern.", "perturbed_explanation": "This statement is incorrect because Table 3 reports an MRE of 0.055 pixels for Mult-8, not 0.065 pixels. Consequently, the claimed difference from Mult-4 (0.066 pixels) of 0.011 pixels is also invalid.", "claim": "Mult-8 yields an MRE of 0.065 pixels, 0.011 pixels lower than Mult-4 and 0.016 pixels lower than Star-16, making it the most accurate pattern.", "label": false }, { "paperid": "2409.06224v1", "paper_path": "./SciVer/papers/2409.06224v1.json", "claim_type": "direct", "type": "chart", "item": "4(a)", "section": [ "3" ], "image_path": "./SciVer/images/2409.06224v1_figure_4(a).png", "request_id": 374, "origin_statement": "Three images (IDs 2, 8, and 13) have human agreement below 60%, while five images (IDs 1, 3, 4, 5, and 9) exceed 94% accuracy; the average accuracy across all 15 images is 80%.", "perturbed_statement": "Three images (IDs 2, 8, and 13) have human agreement below 60%, while six images (IDs 1, 3, 4, 5, 7, and 9) exceed 94% accuracy; the average accuracy across all 15 images is 78%.", "perturbed_explanation": "This is incorrect because only five images (IDs 1, 3, 4, 5, and 9) actually exceed 94% accuracy; ID 7 reaches only about 92%. 
In addition, the red dashed line shows the average accuracy is 80%, not 78%.", "claim": "Three images (IDs 2, 8, and 13) have human agreement below 60%, while six images (IDs 1, 3, 4, 5, 7, and 9) exceed 94% accuracy; the average accuracy across all 15 images is 78%.", "label": false }, { "paperid": "2411.01423v1", "paper_path": "./SciVer/papers/2411.01423v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.2.1" ], "image_path": "./SciVer/images/2411.01423v1-Table2-1.png", "request_id": 383, "origin_statement": "CLaSMO matches PGFS and GP-MOLFORMER with a top QED of 0.948 using a dataset of only 18,706 instances, which is roughly 59,000 times smaller than GP-MOLFORMER’s 1.1 billion-sample training set.", "perturbed_statement": "CLaSMO matches PGFS and GP-MOLFORMER with a top QED of 0.948 using a dataset of only 18,706 instances, which is roughly 5,000 times smaller than GP-MOLFORMER’s 1.1 billion-sample training set.", "perturbed_explanation": "The perturbed claim understates the dataset size difference. The actual ratio is approximately 1.1 billion divided by 18,706 (about 59,000), not 5,000, making the \"5,000 times smaller\" figure incorrect.", "claim": "CLaSMO matches PGFS and GP-MOLFORMER with a top QED of 0.948 using a dataset of only 18,706 instances, which is roughly 5,000 times smaller than GP-MOLFORMER’s 1.1 billion-sample training set.", "label": false }, { "paperid": "2411.00387v1", "paper_path": "./SciVer/papers/2411.00387v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.00387v1-Table3-1.png", "request_id": 387, "origin_statement": "Llama2-13B's overall accuracy improves by 9.1 percentage points when context length increases from one sentence (36.8%) to full manuscript (45.9%), while GPT-4o's gains over the same range are only 3.6 points (64.9% to 68.5%).", "perturbed_statement": "Llama2-13B's overall accuracy improves by 7.5 percentage points when context length increases from one sentence (36.8%) to full manuscript (45.9%), while GPT-4o's gains over the same range are 4.5 points (64.9% to 68.5%).", "perturbed_explanation": "The perturbed statement is incorrect because Llama2-13B's actual improvement is 45.9% - 36.8% = 9.1 percentage points (not 7.5), and GPT-4o's actual gain is 68.5% - 64.9% = 3.6 points (not 4.5).", "claim": "Llama2-13B's overall accuracy improves by 7.5 percentage points when context length increases from one sentence (36.8%) to full manuscript (45.9%), while GPT-4o's gains over the same range are 4.5 points (64.9% to 68.5%).", "label": false }, { "paperid": "2411.14164v1", "paper_path": "./SciVer/papers/2411.14164v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.14164v1-Table1-1.png", "request_id": 389, "origin_statement": "At 25% retention, LLaVA-NeXT-8B cuts TTFT from 94 ms to 52 ms, achieving a 1.83× speedup, and reduces GPU usage by 0.9 GB to 16.98 GB.", "perturbed_statement": "At 25% retention, LLaVA-NeXT-8B cuts TTFT from 94 ms to 57 ms, achieving a 1.47× speedup, and reduces GPU usage by 0.9 GB to 16.98 GB.", "perturbed_explanation": "The TTFT at 25% retention for LLaVA-NeXT-8B is actually 52 ms (not 57 ms), and the speedup is 1.83× (not 1.47×) according to Table 1.", "claim": "At 25% retention, LLaVA-NeXT-8B cuts TTFT from 94 ms to 57 ms, achieving a 1.47× speedup, and reduces GPU usage by 0.9 GB to 16.98 GB.", "label": false }, { "paperid": "2411.00997v1", "paper_path": 
"./SciVer/papers/2411.00997v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.4" ], "image_path": "./SciVer/images/2411.00997v1-Table3-1.png", "request_id": 395, "origin_statement": "Captions with 'CEO' are the most male-biased, with male pronouns in 74.2% compared to 25.8% female pronouns, a gap of 48.4 percentage points.", "perturbed_statement": "Captions with 'CEO' have male pronouns in 65.0% and female pronouns in 35.0%, indicating a 30 percentage point gap.", "perturbed_explanation": "This statement is incorrect. Table 3 reports that 'CEO' captions actually contain male pronouns in 74.2% of cases and female pronouns in 25.8%, not 65.0% and 35.0%.", "claim": "Captions with 'CEO' have male pronouns in 65.0% and female pronouns in 35.0%, indicating a 30 percentage point gap.", "label": false }, { "paperid": "2409.17476v1", "paper_path": "./SciVer/papers/2409.17476v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5.2.1" ], "image_path": "./SciVer/images/2409.17476v1-Table4-1.png", "request_id": 401, "origin_statement": "In adaptive DP attacks, VAT achieves an HR@20 of 13.461%, which is 0.509 percentage points higher than the TopBaseline’s HR@20 of 12.952%.", "perturbed_statement": "In adaptive DP attacks, VAT achieves an HR@20 of 13.961%, which is 1.009 percentage points higher than the TopBaseline’s HR@20 of 12.952%.", "perturbed_explanation": "The perturbed statement is wrong because the table shows VAT’s HR@20 is 13.461%, not 13.961%, and the actual difference from TopBaseline’s 12.952% is 0.509 percentage points, not 1.009.", "claim": "In adaptive DP attacks, VAT achieves an HR@20 of 13.961%, which is 1.009 percentage points higher than the TopBaseline’s HR@20 of 12.952%.", "label": false }, { "paperid": "2409.20553v2", "paper_path": "./SciVer/papers/2409.20553v2.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.20553v2-Table5-1.png", "request_id": 403, "origin_statement": "The model uses 12 backbone blocks, which is six times the number of attention blocks (2).", "perturbed_statement": "The model uses 12 backbone blocks, which is four times the number of attention blocks (2).", "perturbed_explanation": "This is incorrect because the table shows 12 backbone blocks and 2 attention blocks, so the actual ratio is 12\u00182 = 6, not 4.", "claim": "The model uses 12 backbone blocks, which is four times the number of attention blocks (2).", "label": false }, { "paperid": "2411.17674v1", "paper_path": "./SciVer/papers/2411.17674v1.json", "claim_type": "direct", "type": "chart", "item": "6(a)", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.17674v1_figure_6(a).png", "request_id": 406, "origin_statement": "In the precision-normalized confusion matrix, predicted Sad has the highest precision at about 75%, exceeding predicted Excited (~70%) and predicted Neutral (~62%).", "perturbed_statement": "In the precision-normalized confusion matrix, predicted Excited has the highest precision at about 75%, exceeding predicted Sad (~70%) and predicted Neutral (~62%).", "perturbed_explanation": "This is wrong because the confusion matrix’s predicted Sad column shows roughly 75% precision, while the predicted Excited column is only about 70%, not the other way around.", "claim": "In the precision-normalized confusion matrix, predicted Excited has the highest precision at about 75%, exceeding predicted Sad (~70%) and predicted Neutral (~62%).", "label": false }, { 
"paperid": "2409.10756v1", "paper_path": "./SciVer/papers/2409.10756v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.1.1" ], "image_path": "./SciVer/images/2409.10756v1-Table4-1.png", "request_id": 407, "origin_statement": "Llama3-70b’s Top-5 CVE accuracy (5.21%) represents approximately a 702% increase over Llama3-8b’s 0.65%.", "perturbed_statement": "Llama3-70b’s Top-5 CVE accuracy (5.21%) represents only a 300% increase over Llama3-8b’s 0.65%.", "perturbed_explanation": "This is incorrect because the actual percentage increase is (5.21%−0.65%)/0.65% ≈ 701.5%, not 300%, so the true increase is about 702%.", "claim": "Llama3-70b’s Top-5 CVE accuracy (5.21%) represents only a 300% increase over Llama3-8b’s 0.65%.", "label": false }, { "paperid": "2410.22269v1", "paper_path": "./SciVer/papers/2410.22269v1.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "6" ], "image_path": "./SciVer/images/2410.22269v1_figure_9.png", "request_id": 408, "origin_statement": "On the Tourism Monthly dataset forecast, the linear head’s next-token distribution peaks at about 0.019 probability mass, whereas the Fourier head’s distribution peaks at about 0.011, indicating the linear head concentrates nearly twice the probability on a single token.", "perturbed_statement": "On the Tourism Monthly dataset forecast, the linear head’s next-token distribution peaks at about 0.019 probability mass, whereas the Fourier head’s distribution peaks at about 0.014, indicating the linear head concentrates only about 40% more probability on a single token.", "perturbed_explanation": "The perturbation is incorrect because the Fourier head’s actual peak probability mass is around 0.011, not 0.014 as stated, contradicting the visual data in the figure.", "claim": "On the Tourism Monthly dataset forecast, the linear head’s next-token distribution peaks at about 0.019 probability mass, whereas the Fourier head’s distribution peaks at about 0.014, indicating the linear head concentrates only about 40% more probability on a single token.", "label": false }, { "paperid": "2409.12680v2", "paper_path": "./SciVer/papers/2409.12680v2.json", "claim_type": "direct", "type": "chart", "item": "6(a)", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.12680v2_figure_6(a).png", "request_id": 410, "origin_statement": "On the Cityscapes dataset with 1/8 labeled data, increasing the memory bank size from 0 to 256 raises the mIoU from 73.96% to 75.14%, a gain of 1.18 percentage points; further increasing to 512 reduces performance to 74.82%.", "perturbed_statement": "On the Cityscapes dataset with 1/8 labeled data, increasing the memory bank size from 0 to 512 raises the mIoU from 73.96% to 75.14%, a gain of 1.18 percentage points; the performance peaks at N=512.", "perturbed_explanation": "The perturbed statement incorrectly assigns the peak mIoU of 75.14% to a memory bank size of 512. According to Fig. 
6(a), the model reaches 75.14% only at N=256, while at N=512 the mIoU falls to 74.82%.", "claim": "On the Cityscapes dataset with 1/8 labeled data, increasing the memory bank size from 0 to 512 raises the mIoU from 73.96% to 75.14%, a gain of 1.18 percentage points; the performance peaks at N=512.", "label": false }, { "paperid": "2410.03415v1", "paper_path": "./SciVer/papers/2410.03415v1.json", "claim_type": "direct", "type": "chart", "item": "4(b)", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.03415v1_figure_4(b).png", "request_id": 414, "origin_statement": "The true refusal vector ablation produces a peak refusal score change of approximately -10 at token position -5 in layer 12, while the most negative change for false refusal vectors is around -8, making the true vectors cause about a 2-point greater impact.", "perturbed_statement": "The true refusal vector ablation produces a peak refusal score change of approximately -12 at token position -4 in layer 14, while the most negative change for false refusal vectors is about -9, making the true vectors cause a 3-point greater impact.", "perturbed_explanation": "The perturbation is incorrect because the heatmap actually shows the largest negative change for true refusal vectors is about -10 at token position -5 in layer 12 (not -4 in layer 14), and the magnitude is around -10 rather than -12. Additionally, false refusal vectors only reach about -8, not -9.", "claim": "The true refusal vector ablation produces a peak refusal score change of approximately -12 at token position -4 in layer 14, while the most negative change for false refusal vectors is about -9, making the true vectors cause a 3-point greater impact.", "label": false }, { "paperid": "2409.16718v2", "paper_path": "./SciVer/papers/2409.16718v2.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.16718v2-Table2-1.png", "request_id": 415, "origin_statement": "CLIPFit’s lead over the second-best method shrinks from 1.70 percentage points at 1-shot (72.32 vs. 70.62) to just 0.12 points at 16-shot (81.27 vs. 81.15), showing its relative advantage diminishes as more samples are available.", "perturbed_statement": "CLIPFit’s lead over the second-best method increases from 1.70 percentage points at 1-shot (72.32 vs. 70.62) to 0.52 points at 16-shot (81.27 vs. 81.15), showing its relative advantage grows as more samples are available.", "perturbed_explanation": "The perturbed statement is incorrect because, at 16-shot, CLIPFit’s margin over the runner-up is 81.27 − 81.15 = 0.12 percentage points, not 0.52 points as claimed.", "claim": "CLIPFit’s lead over the second-best method increases from 1.70 percentage points at 1-shot (72.32 vs. 70.62) to 0.52 points at 16-shot (81.27 vs. 81.15), showing its relative advantage grows as more samples are available.", "label": false }, { "paperid": "2409.13844v1", "paper_path": "./SciVer/papers/2409.13844v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.13844v1-Table4-1.png", "request_id": 417, "origin_statement": "SW optimization reduces the absolute error in C12 relative to DFT from 19.62 GPa to 2.18 GPa, achieving an 88.9% error reduction.", "perturbed_statement": "SW optimization reduces the absolute error in C12 relative to DFT from 19.62 GPa to 2.18 GPa, achieving a 95.2% error reduction.", "perturbed_explanation": "The stated 95.2% error reduction is incorrect.
The actual reduction is (19.62−2.18)/19.62≈0.889, i.e. 88.9%, not 95.2%.", "claim": "SW optimization reduces the absolute error in C12 relative to DFT from 19.62 GPa to 2.18 GPa, achieving a 95.2% error reduction.", "label": false }, { "paperid": "2411.01088v1", "paper_path": "./SciVer/papers/2411.01088v1.json", "claim_type": "direct", "type": "chart", "item": "2(c)", "section": [ "7.2" ], "image_path": "./SciVer/images/2411.01088v1_figure_2(c).png", "request_id": 420, "origin_statement": "CRONOS reaches about 90% validation accuracy in roughly 0.5 seconds, while tuned AdamW takes about 2 seconds to reach the same level, making CRONOS about four times faster in convergence.", "perturbed_statement": "CRONOS takes about 3 seconds to reach 90% validation accuracy, while tuned AdamW reaches 90% in 1 second, making AdamW three times faster than CRONOS.", "perturbed_explanation": "The perturbed statement is wrong because the plot shows CRONOS actually hits around 90% validation accuracy by approximately 0.5 seconds (not 3 seconds), and tuned AdamW reaches 90% at around 2 seconds (not 1 second).", "claim": "CRONOS takes about 3 seconds to reach 90% validation accuracy, while tuned AdamW reaches 90% in 1 second, making AdamW three times faster than CRONOS.", "label": false }, { "paperid": "2410.08695v2", "paper_path": "./SciVer/papers/2410.08695v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.08695v2-Table1-1.png", "request_id": 421, "origin_statement": "On SEEDBench, under the Add New Objects (v1) bootstrapping, GPT-4o’s accuracy declines by 6.68%, which is the largest drop among all models for that condition.", "perturbed_statement": "On SEEDBench, under the Add New Objects (v1) bootstrapping, InternVL-2’s accuracy declines by 6.68%, which is the largest drop among all models for that condition.", "perturbed_explanation": "The table shows that InternVL-2’s accuracy under v1 on SEEDBench drops by 6.34%, not 6.68%, and GPT-4o actually has the largest decrease of 6.68%, so attributing the 6.68% drop to InternVL-2 is incorrect.", "claim": "On SEEDBench, under the Add New Objects (v1) bootstrapping, InternVL-2’s accuracy declines by 6.68%, which is the largest drop among all models for that condition.", "label": false }, { "paperid": "2411.07565v1", "paper_path": "./SciVer/papers/2411.07565v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.07565v1_figure_2.png", "request_id": 422, "origin_statement": "At 32 threads, the total runtime under cc-pVQZ (~1.5×10^3 seconds) is about 15 times that under cc-pVDZ (~1.0×10^2 seconds), highlighting the basis size's impact on compute time.", "perturbed_statement": "At 32 threads, the total runtime under cc-pVQZ (~1.5×10^3 seconds) is about 5 times that under cc-pVDZ (~1.0×10^2 seconds), highlighting the basis size's impact on compute time.", "perturbed_explanation": "The perturbation misstates the speedup factor: the figure shows cc-pVQZ at 32 threads takes roughly 1.5×10^3 s versus cc-pVDZ’s 1.0×10^2 s, a ~15× difference, not ~5× as claimed.", "claim": "At 32 threads, the total runtime under cc-pVQZ (~1.5×10^3 seconds) is about 5 times that under cc-pVDZ (~1.0×10^2 seconds), highlighting the basis size's impact on compute time.", "label": false }, { "paperid": "2410.18462v1", "paper_path": "./SciVer/papers/2410.18462v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5.1" ], "image_path": 
"./SciVer/images/2410.18462v1-Table4-1.png", "request_id": 423, "origin_statement": "The encoder contains four stride-2 convolutional blocks whose output channels are 64, 128, 256, and 512, which sum to 960 channels.", "perturbed_statement": "The encoder contains four stride-2 convolutional blocks whose output channels are 64, 128, 256, and 512, which sum to 1024 channels.", "perturbed_explanation": "The sum given is wrong. The table lists four stride-2 blocks with outputs of 64, 128, 256, and 512 channels, which actually total 960 channels, not 1024.", "claim": "The encoder contains four stride-2 convolutional blocks whose output channels are 64, 128, 256, and 512, which sum to 1024 channels.", "label": false }, { "paperid": "2410.06971v2", "paper_path": "./SciVer/papers/2410.06971v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.1" ], "image_path": "./SciVer/images/2410.06971v2-Table4-1.png", "request_id": 425, "origin_statement": "Including city and year fixed effects reduces the estimated coefficient on industry complexity from 1.311 to 1.018, a decline of 0.293 (22%), highlighting the importance of controlling for location and time effects.", "perturbed_statement": "Including city and year fixed effects reduces the estimated coefficient on industry complexity from 1.311 to 1.200, a decline of 0.111 (8.5%), highlighting the importance of controlling for location and time effects.", "perturbed_explanation": "The regression table shows that with both city and year fixed effects the coefficient on industry complexity is 1.018 (column 3), not 1.200, and the drop from 1.311 is 0.293 (approximately 22%), not 0.111 (8.5%).", "claim": "Including city and year fixed effects reduces the estimated coefficient on industry complexity from 1.311 to 1.200, a decline of 0.111 (8.5%), highlighting the importance of controlling for location and time effects.", "label": false }, { "paperid": "2410.19779v1", "paper_path": "./SciVer/papers/2410.19779v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.19779v1-Table4-1.png", "request_id": 427, "origin_statement": "On the MW task, the AR model with ℓ₂ loss achieves 67.2% accuracy, 3.9 percentage points higher than the MAE model with the same loss (63.3%).", "perturbed_statement": "On the MW task, the AR model with ℓ₂ loss achieves 64.8% accuracy, 1.5 percentage points higher than the MAE model with the same loss (63.3%).", "perturbed_explanation": "This statement is wrong because the AR model with ℓ₂ loss actually achieves 67.2% on the MW task—not 64.8%—as shown in the table, so the reported accuracy and difference are incorrect.", "claim": "On the MW task, the AR model with ℓ₂ loss achieves 64.8% accuracy, 1.5 percentage points higher than the MAE model with the same loss (63.3%).", "label": false }, { "paperid": "2410.21100v1", "paper_path": "./SciVer/papers/2410.21100v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.21100v1_figure_2(b).png", "request_id": 428, "origin_statement": "At ν = 0.5%, mSSRM-PGA (m=20) achieves cumulative wealth of about 104, which is roughly three times higher than MAXER's wealth of about 35 at the same cost rate.", "perturbed_statement": "At ν = 0.5%, mSSRM-PGA (m=20) achieves cumulative wealth of about 120, which is roughly three times higher than MAXER's wealth of about 35 at the same cost rate.", "perturbed_explanation": "The plot shows that at a 0.5% 
transaction cost rate, mSSRM-PGA(m=20) ends with approximately 104 in cumulative wealth rather than 120, making the perturbed wealth figure incorrect.", "claim": "At ν = 0.5%, mSSRM-PGA (m=20) achieves cumulative wealth of about 120, which is roughly three times higher than MAXER's wealth of about 35 at the same cost rate.", "label": false }, { "paperid": "2411.06966v1", "paper_path": "./SciVer/papers/2411.06966v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.2" ], "image_path": "./SciVer/images/2411.06966v1-Table3-1.png", "request_id": 429, "origin_statement": "On CIFAR-10 to STL-10, E2E-FT + VRF raises OOD accuracy to 97.3%, exceeding E2E-FT + WSE's 95.7% by 1.6 percentage points.", "perturbed_statement": "On CIFAR-10 to STL-10, E2E-FT + VRF raises OOD accuracy to 96.3%, exceeding E2E-FT + WSE's 95.7% by 0.6 percentage points.", "perturbed_explanation": "The table reports E2E-FT + VRF achieving 97.3% OOD accuracy, not 96.3%, and thus the improvement over WSE's 95.7% is 1.6 points, not 0.6.", "claim": "On CIFAR-10 to STL-10, E2E-FT + VRF raises OOD accuracy to 96.3%, exceeding E2E-FT + WSE's 95.7% by 0.6 percentage points.", "label": false }, { "paperid": "2409.17561v1", "paper_path": "./SciVer/papers/2409.17561v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "5.1", "5.1.1" ], "image_path": "./SciVer/images/2409.17561v1_figure_6.png", "request_id": 430, "origin_statement": "GPT-3.5’s success rate (25.1%) is more than double that of CodeLlama (9.8%), highlighting its superior ability to generate passing test cases.", "perturbed_statement": "GPT-3.5’s success rate (18.0%) is more than double that of CodeLlama (9.8%), highlighting its superior ability to generate passing test cases.", "perturbed_explanation": "The perturbed statement misreports GPT-3.5’s success rate as 18.0%, but Figure 6 shows it is actually 25.1%. Furthermore, 18.0% is less than twice CodeLlama’s 9.8%, so the ‘more than double’ claim is incorrect.", "claim": "GPT-3.5’s success rate (18.0%) is more than double that of CodeLlama (9.8%), highlighting its superior ability to generate passing test cases.", "label": false }, { "paperid": "2410.08387v1", "paper_path": "./SciVer/papers/2410.08387v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.08387v1_figure_4.png", "request_id": 434, "origin_statement": "The four RUBIES DSFGs have F444W magnitudes of 22.2–23.0 AB and F277W–F444W colors of 1.0–1.4 mag, roughly 1.0 mag redder than the median PRIMER-UDS catalog galaxies at similar magnitudes (median color ∼0.3 mag).", "perturbed_statement": "The four RUBIES DSFGs have F444W magnitudes of 24.2–25.0 AB and F277W–F444W colors of 2.0–2.4 mag, roughly 1.5 mag redder than the median PRIMER-UDS catalog galaxies at similar magnitudes (median color ∼0.5 mag).", "perturbed_explanation": "Figure 4 (top) shows the four RUBIES DSFGs at F444W ≃22.2–23.0 AB (not 24.2–25.0 AB) and F277W–F444W ≃1.0–1.4 mag (not 2.0–2.4 mag). 
The PRIMER-UDS median color at those magnitudes is ∼0.3 mag, not ∼0.5 mag, so the perturbed ranges contradict the plotted data.", "claim": "The four RUBIES DSFGs have F444W magnitudes of 24.2–25.0 AB and F277W–F444W colors of 2.0–2.4 mag, roughly 1.5 mag redder than the median PRIMER-UDS catalog galaxies at similar magnitudes (median color ∼0.5 mag).", "label": false }, { "paperid": "2410.22023v3", "paper_path": "./SciVer/papers/2410.22023v3.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.4" ], "image_path": "./SciVer/images/2410.22023v3-Table2-1.png", "request_id": 437, "origin_statement": "Our model achieves a WAR of 81.16% on the MELD dataset, which is 2.66 percentage points higher than MAP’s WAR of 78.50%, making it the top WAR performer among all compared models.", "perturbed_statement": "Our model achieves a WAR of 81.16% on the MELD dataset, which is 3.50 percentage points higher than MAP’s WAR of 78.50%, making it the top WAR performer among all compared models.", "perturbed_explanation": "The perturbed statement claims a 3.50 percentage point advantage over MAP, but Table 2 shows our WAR is 81.16% and MAP’s WAR is 78.50%, a difference of 2.66 percentage points, not 3.50.", "claim": "Our model achieves a WAR of 81.16% on the MELD dataset, which is 3.50 percentage points higher than MAP’s WAR of 78.50%, making it the top WAR performer among all compared models.", "label": false }, { "paperid": "2410.05341v2", "paper_path": "./SciVer/papers/2410.05341v2.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.3" ], "image_path": "./SciVer/images/2410.05341v2_figure_4.png", "request_id": 438, "origin_statement": "The model’s predictions achieve a mean Pearson correlation coefficient of 0.635 for the cuneus ROI, which is 0.202 higher than the 0.433 correlation observed in the thalamus ROI.", "perturbed_statement": "The model’s predictions achieve a mean Pearson correlation coefficient of 0.735 for the cuneus ROI, which is 0.302 higher than the 0.433 correlation observed in the thalamus ROI.", "perturbed_explanation": "This statement is wrong because Figure 4B reports a cuneus correlation of 0.635 rather than 0.735, and the actual difference between 0.635 and 0.433 is 0.202, not 0.302.", "claim": "The model’s predictions achieve a mean Pearson correlation coefficient of 0.735 for the cuneus ROI, which is 0.302 higher than the 0.433 correlation observed in the thalamus ROI.", "label": false }, { "paperid": "2410.08642v1", "paper_path": "./SciVer/papers/2410.08642v1.json", "claim_type": "direct", "type": "chart", "item": "10", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.08642v1_figure_10.png", "request_id": 440, "origin_statement": "In 1948, Zionist forces expelled 750,000 Palestinians, capturing 78% of historic Palestine and leaving Palestinians with only 22% of the territory.", "perturbed_statement": "In 1948, Zionist forces expelled 1,000,000 Palestinians, capturing 85% of historic Palestine and leaving Palestinians with only 15% of the territory.", "perturbed_explanation": "The perturbed claim is incorrect because the infographic specifies 750,000 Palestinians were expelled (not 1,000,000), and Zionist forces captured 78% of historic Palestine (not 85%), leaving 22% of the land for Palestinians (not 15%).", "claim": "In 1948, Zionist forces expelled 1,000,000 Palestinians, capturing 85% of historic Palestine and leaving Palestinians with only 15% of the territory.", "label": false }, { "paperid": "2410.23426v1", 
"paper_path": "./SciVer/papers/2410.23426v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "4" ], "image_path": "./SciVer/images/2410.23426v1-Table6-1.png", "request_id": 441, "origin_statement": "Among open-weight models, Qwen-2.5-Instruct has the most parameters at 72B, 2B more than the 70B Llama-3.1-Instruct.", "perturbed_statement": "Among open-weight models, Qwen-2.5-Instruct has the most parameters at 74B, 4B more than the 70B Llama-3.1-Instruct.", "perturbed_explanation": "The table shows Qwen-2.5-Instruct has 72B parameters, not 74B, so its lead over the 70B Llama-3.1-Instruct is 2B rather than 4B.", "claim": "Among open-weight models, Qwen-2.5-Instruct has the most parameters at 74B, 4B more than the 70B Llama-3.1-Instruct.", "label": false }, { "paperid": "2411.09127v1", "paper_path": "./SciVer/papers/2411.09127v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "6.3" ], "image_path": "./SciVer/images/2411.09127v1-Table4-1.png", "request_id": 443, "origin_statement": "The configuration with log γ=8e−3, β=1, α=0 prunes 67% of FLOPS, which is 28 percentage points more than layer pruning [16]-A's 39% FLOPS pruning, while leaving 34 layers intact.", "perturbed_statement": "The configuration with log γ=8e−3, β=1, α=0 prunes 70% of FLOPS, which is 31 percentage points more than layer pruning [16]-A's 39% FLOPS pruning, while still leaving 34 layers intact.", "perturbed_explanation": "The perturbed statement is incorrect because the actual FLOPS pruning ratio for the setting log γ=8e−3, β=1, α=0 is 67%, not 70%, and the true difference from layer pruning [16]-A's 39% is 28 percentage points, not 31.", "claim": "The configuration with log γ=8e−3, β=1, α=0 prunes 70% of FLOPS, which is 31 percentage points more than layer pruning [16]-A's 39% FLOPS pruning, while still leaving 34 layers intact.", "label": false }, { "paperid": "2411.02790v1", "paper_path": "./SciVer/papers/2411.02790v1.json", "claim_type": "direct", "type": "chart", "item": "3(c)", "section": [ "6.2", "6.2.1", "6.2.2" ], "image_path": "./SciVer/images/2411.02790v1_figure_3(c).png", "request_id": 446, "origin_statement": "In the calibrated model (blue stars), as the mixing score goes from approximately 0.30 to 0.60, average NDCG@10 increases from about 0.24 to 0.40; by contrast, the uncalibrated model (pink pentagons) only sees NDCG@10 rise marginally from 0.31 at a score of 0.46 to 0.32 at 0.50.", "perturbed_statement": "In the calibrated model (blue stars), as the mixing score goes from approximately 0.30 to 0.80, average NDCG@10 increases from about 0.30 to 0.50; by contrast, the uncalibrated model sees NDCG@10 decrease from 0.31 at 0.46 to 0.30 at 0.50.", "perturbed_explanation": "The perturbed claim is incorrect because the figure’s calibrated scores only span up to a mixing score of about 0.60 (not 0.80) and the maximum average NDCG@10 for the calibrated model is around 0.40 (not 0.50). 
Additionally, the starting NDCG@10 at score 0.30 is about 0.24, not 0.30.", "claim": "In the calibrated model (blue stars), as the mixing score goes from approximately 0.30 to 0.80, average NDCG@10 increases from about 0.30 to 0.50; by contrast, the uncalibrated model sees NDCG@10 decrease from 0.31 at 0.46 to 0.30 at 0.50.", "label": false }, { "paperid": "2411.14254v1", "paper_path": "./SciVer/papers/2411.14254v1.json", "claim_type": "direct", "type": "table", "item": "7", "section": [ "4.5.1" ], "image_path": "./SciVer/images/2411.14254v1-Table7-1.png", "request_id": 449, "origin_statement": "The misclassification rate for Score 0→Score 3 is 2.800%, which is fourteen times higher than the 0.200% rate for Score 0→Score 2.", "perturbed_statement": "The misclassification rate for Score 0→Score 3 is 2.800%, which is twenty times higher than the 0.200% rate for Score 0→Score 2.", "perturbed_explanation": "Dividing the actual rates (2.800% ÷ 0.200%) yields 14, not 20; thus describing the rate as twenty times higher is incorrect according to the table.", "claim": "The misclassification rate for Score 0→Score 3 is 2.800%, which is twenty times higher than the 0.200% rate for Score 0→Score 2.", "label": false }, { "paperid": "2409.11143v1", "paper_path": "./SciVer/papers/2409.11143v1.json", "claim_type": "direct", "type": "chart", "item": "7(b)", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.11143v1_figure_7(b).png", "request_id": 450, "origin_statement": "At two in-context examples, Pause (gray) achieves about 57.8% accuracy, approximately 10.3 percentage points higher than Pause without planning tokens at 47.5%.", "perturbed_statement": "At two in-context examples, Pause achieves 60.2% accuracy, which is only 5 percentage points more than the 55.2% accuracy of Pause without planning tokens.", "perturbed_explanation": "This is incorrect because, in the chart, the Pause model with planning tokens (gray) is about 57.8% at two examples (not 60.2%), and Pause without tokens (orange) is about 47.5% (not 55.2%). 
Both reported values contradict the actual bar heights.", "claim": "At two in-context examples, Pause achieves 60.2% accuracy, which is only 5 percentage points more than the 55.2% accuracy of Pause without planning tokens.", "label": false }, { "paperid": "2411.17251v1", "paper_path": "./SciVer/papers/2411.17251v1.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "4.4.5" ], "image_path": "./SciVer/images/2411.17251v1_figure_9.png", "request_id": 452, "origin_statement": "Over the 40 epochs, mAP@0.5 increased by 0.216 from 0.5 to 0.716, while mAP@0.5:0.95 rose by 0.175 from 0.4 to 0.575 by the final epoch.", "perturbed_statement": "Over the 40 epochs, mAP@0.5 increased by 0.216 from 0.5 to 0.716, while mAP@0.5:0.95 rose by 0.225 from 0.4 to 0.625 by the final epoch.", "perturbed_explanation": "The perturbed statement is incorrect because the actual mAP@0.5:0.95 increased by 0.175 to reach 0.575 at epoch 40, not by 0.225 to 0.625 as stated.", "claim": "Over the 40 epochs, mAP@0.5 increased by 0.216 from 0.5 to 0.716, while mAP@0.5:0.95 rose by 0.225 from 0.4 to 0.625 by the final epoch.", "label": false }, { "paperid": "2411.10060v1", "paper_path": "./SciVer/papers/2411.10060v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "5.8" ], "image_path": "./SciVer/images/2411.10060v1_figure_4.png", "request_id": 454, "origin_statement": "Between dimension size 100 and 500, IEMOCAP's W-F1 increases by 2.64 points, whereas MELD's W-F1 increases by 0.97 points.", "perturbed_statement": "Between dimension size 100 and 500, IEMOCAP's W-F1 increases by 3.64 points, whereas MELD's W-F1 increases by 1.97 points.", "perturbed_explanation": "The perturbed gains are incorrect. The figure shows IEMOCAP's W-F1 rises from 71.32 at dimension 100 to 73.96 at 500 (an increase of 2.64 points, not 3.64), and MELD's W-F1 rises from 65.41 to 66.38 (an increase of 0.97 points, not 1.97).", "claim": "Between dimension size 100 and 500, IEMOCAP's W-F1 increases by 3.64 points, whereas MELD's W-F1 increases by 1.97 points.", "label": false }, { "paperid": "2409.14780v2", "paper_path": "./SciVer/papers/2409.14780v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2.1", "3.2" ], "image_path": "./SciVer/images/2409.14780v2-Table1-1.png", "request_id": 457, "origin_statement": "The zoom-in simulation reduces dark matter particle mass by about 230× (from 5.68×10^8 to 2.44×10^6 h^-1 M⊙) and improves force resolution 20-fold (10 h^-1 kpc to 0.5 h^-1 kpc) compared to the parent simulation.", "perturbed_statement": "The zoom-in simulation reduces dark matter particle mass by about 300× (from 5.68×10^8 to 2.44×10^6 h^-1 M⊙) and improves force resolution 10-fold (10 h^-1 kpc to 0.5 h^-1 kpc) compared to the parent simulation.", "perturbed_explanation": "The actual dark matter mass ratio is approximately 5.68×10^8 / 2.44×10^6 ≈ 233×, not 300×, and the force resolution improves by a factor of 20 (10 h^-1 kpc to 0.5 h^-1 kpc), not 10-fold.", "claim": "The zoom-in simulation reduces dark matter particle mass by about 300× (from 5.68×10^8 to 2.44×10^6 h^-1 M⊙) and improves force resolution 10-fold (10 h^-1 kpc to 0.5 h^-1 kpc) compared to the parent simulation.", "label": false }, { "paperid": "2411.16659v1", "paper_path": "./SciVer/papers/2411.16659v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.4" ], "image_path": "./SciVer/images/2411.16659v1_figure_4.png", "request_id": 458, "origin_statement": "In the s=3 plot, the R_ξ curve 
for L=96 rises from about 0.57 at β=0.64 to roughly 0.62 at β=0.70, an increase of ~0.05, whereas the L=16 curve increases only from ~0.63 to ~0.64, a change of ~0.01 over the same interval.", "perturbed_statement": "In the s=3 plot, the R_ξ curve for L=96 rises from about 0.57 at β=0.64 to roughly 0.59 at β=0.70, an increase of ~0.02, whereas the L=16 curve increases only from ~0.63 to ~0.64, a change of ~0.01 over the same interval.", "perturbed_explanation": "The perturbation understates the L=96 increase: the actual R_ξ at β=0.70 is about 0.62 (not 0.59), so the true rise is ~0.05, not ~0.02 as claimed.", "claim": "In the s=3 plot, the R_ξ curve for L=96 rises from about 0.57 at β=0.64 to roughly 0.59 at β=0.70, an increase of ~0.02, whereas the L=16 curve increases only from ~0.63 to ~0.64, a change of ~0.01 over the same interval.", "label": false }, { "paperid": "2409.16217v1", "paper_path": "./SciVer/papers/2409.16217v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4" ], "image_path": "./SciVer/images/2409.16217v1-Table3-1.png", "request_id": 461, "origin_statement": "Table 3 lists seven KPMs, two of which—sum_requested_prbs and sum_granted_prbs—aggregate PRB usage over a 250ms logging window, while the other five metrics represent instantaneous PHY/MAC-layer measurements.", "perturbed_statement": "Table 3 lists eight KPMs, two of which—sum_requested_prbs and sum_granted_prbs—aggregate PRB usage over a 100ms logging window, while the other six metrics represent instantaneous PHY/MAC-layer measurements.", "perturbed_explanation": "This is incorrect because Table 3 actually contains seven KPMs, not eight, and the PRB aggregation window is 250 ms, not 100 ms; additionally, there are five instantaneous metrics, not six.", "claim": "Table 3 lists eight KPMs, two of which—sum_requested_prbs and sum_granted_prbs—aggregate PRB usage over a 100ms logging window, while the other six metrics represent instantaneous PHY/MAC-layer measurements.", "label": false }, { "paperid": "2411.15668v1", "paper_path": "./SciVer/papers/2411.15668v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.15668v1-Table3-1.png", "request_id": 463, "origin_statement": "In the first grid layer (n=1), the ratio of the magnitudes of the enthalpy source term S_h to the mass source term S_m equals the specific internal energy E.", "perturbed_statement": "In the first grid layer (n=1), the ratio of the magnitudes of the enthalpy source term S_h to the mass source term S_m equals 1/E.", "perturbed_explanation": "This is incorrect because Table 3 shows S_h = -ṁ_c E/δ_g and S_m = -ṁ_c/δ_g, so S_h/S_m = E, not 1/E.", "claim": "In the first grid layer (n=1), the ratio of the magnitudes of the enthalpy source term S_h to the mass source term S_m equals 1/E.", "label": false }, { "paperid": "2411.12892v1", "paper_path": "./SciVer/papers/2411.12892v1.json", "claim_type": "direct", "type": "chart", "item": "3(d)", "section": [ "1", "2", "4.1", "6" ], "image_path": "./SciVer/images/2411.12892v1_figure_3(d).png", "request_id": 468, "origin_statement": "Selective Self-Attention achieves a cross-entropy loss of 0.009 versus vanilla attention’s 0.0126 (a 28.6% reduction), and its ℓ1 approximation error is 0.358 compared to 0.543 for vanilla attention (about 34% lower).", "perturbed_statement": "Selective Self-Attention achieves a cross-entropy loss of 0.011 versus vanilla attention’s 0.0126 (about a 13% reduction), and its ℓ1 approximation 
error is 0.358 compared to 0.543 for vanilla attention (around 45% lower).", "perturbed_explanation": "This is incorrect because SSA’s true cross-entropy loss is 0.009 (not 0.011) as stated in the context, and the ℓ1 error reduction from 0.543 to 0.358 is approximately 34%, not 45%.", "claim": "Selective Self-Attention achieves a cross-entropy loss of 0.011 versus vanilla attention’s 0.0126 (about a 13% reduction), and its ℓ1 approximation error is 0.358 compared to 0.543 for vanilla attention (around 45% lower).", "label": false }, { "paperid": "2411.03743v1", "paper_path": "./SciVer/papers/2411.03743v1.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "2.2.2" ], "image_path": "./SciVer/images/2411.03743v1-Table6-1.png", "request_id": 469, "origin_statement": "All three columns have an identical minimum value of 0 in the Min row, indicating uniform lower bounds with no negative or smaller values in any category.", "perturbed_statement": "The first column has a minimum of 0, the second column has a minimum of 1, and the third column has a minimum of 0, indicating varied lower bounds across categories.", "perturbed_explanation": "This is incorrect because the Min row in the image shows 0 for the second column as well; the second column’s minimum is actually 0, not 1.", "claim": "The first column has a minimum of 0, the second column has a minimum of 1, and the third column has a minimum of 0, indicating varied lower bounds across categories.", "label": false }, { "paperid": "2411.10746v1", "paper_path": "./SciVer/papers/2411.10746v1.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.10746v1_figure_3(a).png", "request_id": 470, "origin_statement": "Support Devices achieves the highest AUC of 0.94, which is 0.28 greater than Pneumonia’s AUC of 0.66.", "perturbed_statement": "Support Devices achieves the highest AUC of 0.94, which is 0.32 greater than Pneumonia’s AUC of 0.66.", "perturbed_explanation": "The actual difference between Support Devices (0.94) and Pneumonia (0.66) is 0.28, not 0.32, making the perturbed claim inaccurate.", "claim": "Support Devices achieves the highest AUC of 0.94, which is 0.32 greater than Pneumonia’s AUC of 0.66.", "label": false }, { "paperid": "2410.20597v1", "paper_path": "./SciVer/papers/2410.20597v1.json", "claim_type": "direct", "type": "chart", "item": "5(a)", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.20597v1_figure_5(a).png", "request_id": 474, "origin_statement": "When trading costs increase from 0 to 2 basis points, the GAT_industries strategy's Sharpe ratio declines by approximately 2.2 points, dropping from about 2.2 to approximately 0.", "perturbed_statement": "When trading costs increase from 0 to 2 basis points, the GAT_industries strategy's Sharpe ratio declines by approximately 1.2 points, dropping from about 2.2 to approximately 1.0.", "perturbed_explanation": "This statement is incorrect because the chart shows that the GAT_industries strategy's Sharpe ratio at 2 basis points is around 0, not around 1.0, so the actual decline is about 2.2 points rather than the claimed 1.2 points.", "claim": "When trading costs increase from 0 to 2 basis points, the GAT_industries strategy's Sharpe ratio declines by approximately 1.2 points, dropping from about 2.2 to approximately 1.0.", "label": false }, { "paperid": "2410.12851v3", "paper_path": "./SciVer/papers/2410.12851v3.json", "claim_type": "direct", "type": "table", "item": "14", "section": [ 
"6.2" ], "image_path": "./SciVer/images/2410.12851v3-Table14-1.png", "request_id": 475, "origin_statement": "Among the five vibes, Mathematical Notation Use has the highest MM coefficient (3.337) and an MM p-value of 0.000008845, indicating a strongly significant positive association.", "perturbed_statement": "Among the five vibes, Mathematical Notation Use has the highest MM coefficient (3.337) and an MM p-value of 0.00088, indicating a strongly significant positive association.", "perturbed_explanation": "The perturbed statement is wrong because the table reports the MM p-value for Mathematical Notation Use as 0.000008845, not 0.00088.", "claim": "Among the five vibes, Mathematical Notation Use has the highest MM coefficient (3.337) and an MM p-value of 0.00088, indicating a strongly significant positive association.", "label": false }, { "paperid": "2409.04941v1", "paper_path": "./SciVer/papers/2409.04941v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "3.2", "4" ], "image_path": "./SciVer/images/2409.04941v1_figure_2(b).png", "request_id": 478, "origin_statement": "The cuda-tensor implementation spends about 52% of its instructions on vector_memory operations, which is over ten times matrix-mul's approximately 5% share.", "perturbed_statement": "The cuda-tensor implementation spends about 32% of its instructions on vector_memory operations, which is over twice matrix-mul's approximately 15% share.", "perturbed_explanation": "This is incorrect because the histogram shows cuda-tensor’s vector_memory proportion is about 52%, not 32%, and matrix-mul’s is about 5%, not 15%.", "claim": "The cuda-tensor implementation spends about 32% of its instructions on vector_memory operations, which is over twice matrix-mul's approximately 15% share.", "label": false }, { "paperid": "2409.17090v1", "paper_path": "./SciVer/papers/2409.17090v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.17090v1-Table2-1.png", "request_id": 479, "origin_statement": "On the CMU Multi-PIE dataset Session 2, SRSG achieves an NMI of 0.9048, which is 0.1131 higher than LR-ℓ1-graph's NMI of 0.7917.", "perturbed_statement": "On the CMU Multi-PIE dataset Session 3, SRSG achieves an NMI of 0.9048, which is 0.1131 higher than LR-ℓ1-graph's NMI of 0.7917.", "perturbed_explanation": "This is incorrect because for Session 3 the table lists SRSG's NMI as 0.8963 (not 0.9048) and LR-ℓ1-graph's NMI as 0.7837 (not 0.7917).", "claim": "On the CMU Multi-PIE dataset Session 3, SRSG achieves an NMI of 0.9048, which is 0.1131 higher than LR-ℓ1-graph's NMI of 0.7917.", "label": false }, { "paperid": "2411.02099v2", "paper_path": "./SciVer/papers/2411.02099v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5.1.1" ], "image_path": "./SciVer/images/2411.02099v2-Table4-1.png", "request_id": 483, "origin_statement": "The IDG-DP model reduces total attack accuracy to 40.00%, which is 24 percentage points lower than the baseline’s 64.00% attack accuracy, indicating stronger defense against black-box membership inference attacks.", "perturbed_statement": "The IDG-DP model reduces total attack accuracy to 35.00%, which is 29 percentage points lower than the baseline’s 62.00% attack accuracy, indicating stronger defense against black-box membership inference attacks.", "perturbed_explanation": "This statement is wrong because Table 4 lists IDG-DP’s total attack accuracy as 40.00%, not 35.00%, and the baseline’s total attack 
accuracy as 64.00%, not 62.00%.", "claim": "The IDG-DP model reduces total attack accuracy to 35.00%, which is 29 percentage points lower than the baseline’s 62.00% attack accuracy, indicating stronger defense against black-box membership inference attacks.", "label": false }, { "paperid": "2409.01295v2", "paper_path": "./SciVer/papers/2409.01295v2.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "3.2.2" ], "image_path": "./SciVer/images/2409.01295v2_figure_7.png", "request_id": 484, "origin_statement": "The regression in (b) shows that for each 1 cm increase in petal width, petal length increases by about 1.8 cm, whereas in (a) each 1 cm increase in sepal length yields roughly a 0.6 cm increase in petal length, making the width–length slope about three times steeper.", "perturbed_statement": "The regression in (b) shows that for each 1 cm increase in petal width, petal length increases by about 1.2 cm, whereas in (a) each 1 cm increase in sepal length yields roughly a 0.6 cm increase in petal length, making the width–length slope about two times steeper.", "perturbed_explanation": "This is incorrect because panel (b)’s regression slope is around 1.8 cm per 1 cm width (not 1.2 cm), and dividing that by the ~0.6 slope in (a) gives about a threefold difference, not twofold.", "claim": "The regression in (b) shows that for each 1 cm increase in petal width, petal length increases by about 1.2 cm, whereas in (a) each 1 cm increase in sepal length yields roughly a 0.6 cm increase in petal length, making the width–length slope about two times steeper.", "label": false }, { "paperid": "2411.02278v1", "paper_path": "./SciVer/papers/2411.02278v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.02278v1-Table1-1.png", "request_id": 485, "origin_statement": "While most complexity metrics like Lines of Code, Max Nesting Depth, and Halstead Complexity span from 0 to infinity, Cyclomatic Complexity uniquely starts at 1 and the structural metrics (AST Node Quantity Compare and AST Tree Edit Distance) are bounded between 0 and 1.", "perturbed_statement": "While most complexity metrics like Lines of Code, Max Nesting Depth, and Halstead Complexity span from 0 to infinity, Cyclomatic Complexity also starts at 0 and the structural metrics (AST Node Quantity Compare and AST Tree Edit Distance) are bounded between 0 and 1.", "perturbed_explanation": "The perturbed statement is incorrect because according to the table, Cyclomatic Complexity has a range of [1, ∞), meaning its minimum value is 1, not 0.", "claim": "While most complexity metrics like Lines of Code, Max Nesting Depth, and Halstead Complexity span from 0 to infinity, Cyclomatic Complexity also starts at 0 and the structural metrics (AST Node Quantity Compare and AST Tree Edit Distance) are bounded between 0 and 1.", "label": false }, { "paperid": "2410.07571v2", "paper_path": "./SciVer/papers/2410.07571v2.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5.2" ], "image_path": "./SciVer/images/2410.07571v2_figure_5.png", "request_id": 486, "origin_statement": "At layer 32, the Chatty variant maintains about 0.976 cosine similarity, whereas the SL variant drops to approximately 0.74, illustrating a much steeper decline for the safety-tuned model.", "perturbed_statement": "At layer 32, the Chatty variant drops to about 0.95, whereas the SL variant only decreases to approximately 0.90, suggesting a smaller performance gap between the 
two fine-tuned models.", "perturbed_explanation": "This statement is incorrect because, according to the figure, at layer 32 the Chatty variant actually maintains around 0.976 similarity (not 0.95), and the SL variant falls to about 0.74 (not 0.90), so the performance gap is larger than stated.", "claim": "At layer 32, the Chatty variant drops to about 0.95, whereas the SL variant only decreases to approximately 0.90, suggesting a smaller performance gap between the two fine-tuned models.", "label": false }, { "paperid": "2410.09247v1", "paper_path": "./SciVer/papers/2410.09247v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.1" ], "image_path": "./SciVer/images/2410.09247v1-Table2-1.png", "request_id": 491, "origin_statement": "Retro’s semantic similarity score of 93.48 ± 1.85% is about fourteen times higher than the target dataset’s 6.67 ± 1.86%, indicating much closer alignment.", "perturbed_statement": "Retro’s semantic similarity score of 83.48 ± 1.85% is about fourteen times higher than the target dataset’s 6.67 ± 1.86%, indicating much closer alignment.", "perturbed_explanation": "This is incorrect because the table reports Retro’s semantic similarity as 93.48 ± 1.85%, not 83.48 ± 1.85%.", "claim": "Retro’s semantic similarity score of 83.48 ± 1.85% is about fourteen times higher than the target dataset’s 6.67 ± 1.86%, indicating much closer alignment.", "label": false }, { "paperid": "2409.11460v1", "paper_path": "./SciVer/papers/2409.11460v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.11460v1_figure_3.png", "request_id": 496, "origin_statement": "Our CSFG targets exhibit C IV λ1550/He II ratios between ∼4 and ∼14 and O III λ1663/He II ratios between ∼3 and ∼20, placing them well within the star-forming galaxy model region of Gutkin et al. (2016).", "perturbed_statement": "Our CSFG targets exhibit C IV λ1550/He II ratios between ∼4 and ∼14 and O III λ1663/He II ratios between ∼0.5 and ∼2, placing them well within the star-forming galaxy model region of Gutkin et al. (2016).", "perturbed_explanation": "The perturbed statement claims O III λ1663/He II ratios of 0.5–2, but in the top-left panel the red star markers for our CSFGs clearly lie at O III λ1663/He II values from about 3 up to 20. Thus the quoted 0.5–2 range contradicts the actual plotted values.", "claim": "Our CSFG targets exhibit C IV λ1550/He II ratios between ∼4 and ∼14 and O III λ1663/He II ratios between ∼0.5 and ∼2, placing them well within the star-forming galaxy model region of Gutkin et al. 
(2016).", "label": false }, { "paperid": "2409.19764v2", "paper_path": "./SciVer/papers/2409.19764v2.json", "claim_type": "direct", "type": "table", "item": "6", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.19764v2-Table6-1.png", "request_id": 499, "origin_statement": "Spikformer consumes 21.48 mJ, which is roughly twelve times less energy than ViT-B/16’s 254.84 mJ, demonstrating significant efficiency gains.", "perturbed_statement": "Spikformer consumes 21.48 mJ, which is roughly five times less energy than ViT-B/16’s 254.84 mJ, demonstrating significant efficiency gains.", "perturbed_explanation": "The claim is wrong because the table lists 254.84 mJ for ViT-B/16 and 21.48 mJ for Spikformer, giving a reduction factor of about 254.84/21.48 ≈ 11.9, not five.", "claim": "Spikformer consumes 21.48 mJ, which is roughly five times less energy than ViT-B/16’s 254.84 mJ, demonstrating significant efficiency gains.", "label": false }, { "paperid": "2411.02614v1", "paper_path": "./SciVer/papers/2411.02614v1.json", "claim_type": "direct", "type": "chart", "item": "8", "section": [ "7" ], "image_path": "./SciVer/images/2411.02614v1_figure_8.png", "request_id": 500, "origin_statement": "The largest KL divergence in the heatmap is between deepdr and rldr at 4.9341, while the smallest divergence occurs between idrid and ddr at 1.3415, indicating a span of approximately 3.6 units.", "perturbed_statement": "The largest KL divergence in the heatmap is between deepdr and rldr at 4.9341, while the smallest divergence occurs between idrid and aptos at 1.4560, indicating a span of approximately 3.5 units.", "perturbed_explanation": "The perturbed statement misidentifies the smallest divergence pair. The heatmap shows the minimum non-zero KL divergence is actually between idrid and ddr at 1.3415, not between idrid and aptos at 1.4560, making the stated span incorrect.", "claim": "The largest KL divergence in the heatmap is between deepdr and rldr at 4.9341, while the smallest divergence occurs between idrid and aptos at 1.4560, indicating a span of approximately 3.5 units.", "label": false }, { "paperid": "2410.21562v1", "paper_path": "./SciVer/papers/2410.21562v1.json", "claim_type": "direct", "type": "chart", "item": "2(k)", "section": [ "2.3.2" ], "image_path": "./SciVer/images/2410.21562v1_figure_2(k).png", "request_id": 504, "origin_statement": "The two red dashed lines in the figure are positioned at the two local minima between the three main peaks of the blue curve, effectively segmenting the distribution at its lowest valleys.", "perturbed_statement": "The two red dashed lines in the figure are positioned at the two local maxima between the three main peaks of the blue curve, effectively segmenting the distribution at its highest points.", "perturbed_explanation": "This is incorrect because the red lines are clearly placed at the lowest valleys (local minima) between the peaks, not at the highest points (local maxima), as shown in the image.", "claim": "The two red dashed lines in the figure are positioned at the two local maxima between the three main peaks of the blue curve, effectively segmenting the distribution at its highest points.", "label": false }, { "paperid": "2410.18766v2", "paper_path": "./SciVer/papers/2410.18766v2.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.18766v2-Table3-1.png", "request_id": 505, "origin_statement": "Among all ablation variants, removing Module (b) results in the highest average 
RMSE of 5.46×10^-2, which is 0.95×10^-2 greater than the full model’s average RMSE of 4.51×10^-2, indicating adjacency graph fusion has the largest impact on prediction accuracy.", "perturbed_statement": "Among all ablation variants, removing Module (c) results in the highest average RMSE of 5.20×10^-2, which is 0.69×10^-2 greater than the full model’s average RMSE of 4.51×10^-2, indicating temporal feature fusion has the largest impact on prediction accuracy.", "perturbed_explanation": "This statement is incorrect because Table 3 shows that removing Module (b), not Module (c), yields the highest average RMSE of 5.46×10^-2. Removing Module (c) actually produces an average RMSE of 5.20×10^-2, which is lower than the 5.46×10^-2 for Module (b).", "claim": "Among all ablation variants, removing Module (c) results in the highest average RMSE of 5.20×10^-2, which is 0.69×10^-2 greater than the full model’s average RMSE of 4.51×10^-2, indicating temporal feature fusion has the largest impact on prediction accuracy.", "label": false }, { "paperid": "2409.12587v1", "paper_path": "./SciVer/papers/2409.12587v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.12587v1-Table4-1.png", "request_id": 509, "origin_statement": "On UTKFace, switching from uniform 6-TTA (Mixup + CutMix) to optimized 6-VB-TTA reduces the mean absolute error from 7.409 to 6.405, a decrease of 1.004.", "perturbed_statement": "On UTKFace, switching from uniform 6-TTA (Mixup + CutMix) to optimized 6-VB-TTA reduces the mean absolute error from 7.409 to 5.405, a decrease of 2.004.", "perturbed_explanation": "The table shows that 6-VB-TTA (Mixup + CutMix) achieves an MAE of 6.405, not 5.405, so the actual reduction from 7.409 is 1.004, not 2.004.", "claim": "On UTKFace, switching from uniform 6-TTA (Mixup + CutMix) to optimized 6-VB-TTA reduces the mean absolute error from 7.409 to 5.405, a decrease of 2.004.", "label": false }, { "paperid": "2409.16239v1", "paper_path": "./SciVer/papers/2409.16239v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.16239v1_figure_3.png", "request_id": 514, "origin_statement": "At 10×10^15 FLOPs on the AST benchmark, LADD achieves approximately 60% accuracy, outperforming baseline++ (~50%) by about 10 percentage points and baseline (~49%) by around 11 points.", "perturbed_statement": "At 10×10^15 FLOPs on the AST benchmark, LADD achieves only 55% accuracy, outperforming baseline++ (~50%) by about 5 percentage points and baseline (~49%) by around 6 points.", "perturbed_explanation": "The perturbed statement is incorrect because in the FLOPs-Accuracy plot for AST, LADD reaches about 60% accuracy at 10×10^15 FLOPs rather than 55%. 
Consequently, its advantage over baseline++ and baseline is approximately 10 and 11 percentage points, not 5 and 6 points as stated.", "claim": "At 10×10^15 FLOPs on the AST benchmark, LADD achieves only 55% accuracy, outperforming baseline++ (~50%) by about 5 percentage points and baseline (~49%) by around 6 points.", "label": false }, { "paperid": "2410.20708v1", "paper_path": "./SciVer/papers/2410.20708v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "5.1" ], "image_path": "./SciVer/images/2410.20708v1-Table2-1.png", "request_id": 517, "origin_statement": "As dx decreases from 0.01 to 0.0005, numerical solver time increases by about 388× (from 8.221 ms to 3.191 s), while NO time increases only by about 6% (from 4.38 ms to 4.63 ms).", "perturbed_statement": "As dx decreases from 0.01 to 0.0005, numerical solver time increases by about 100× (from 8.221 ms to 3.191 s), while NO time increases only by about 6% (from 4.38 ms to 4.63 ms).", "perturbed_explanation": "The numerical solver time actually grows from 8.221 ms to 3.191 s, which is roughly a 388× increase, not 100× as stated. The claim of 100× contradicts the table’s values.", "claim": "As dx decreases from 0.01 to 0.0005, numerical solver time increases by about 100× (from 8.221 ms to 3.191 s), while NO time increases only by about 6% (from 4.38 ms to 4.63 ms).", "label": false }, { "paperid": "2411.14608v1", "paper_path": "./SciVer/papers/2411.14608v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.14608v1_figure_4.png", "request_id": 518, "origin_statement": "At the X high-symmetry point, the top optical phonon branch reaches about 18.5 THz with HIP-NN and 18.3 THz with ANI, both within 0.3 THz of the experimental 18.6 THz.", "perturbed_statement": "At the X high-symmetry point, the top optical phonon branch reaches about 17.5 THz with HIP-NN and 18.3 THz with ANI, both within 0.3 THz of the experimental 18.6 THz.", "perturbed_explanation": "The HIP-NN frequency at the X point is incorrectly given as 17.5 THz; the figure shows it is approximately 18.5 THz, so this detail contradicts the visual data.", "claim": "At the X high-symmetry point, the top optical phonon branch reaches about 17.5 THz with HIP-NN and 18.3 THz with ANI, both within 0.3 THz of the experimental 18.6 THz.", "label": false }, { "paperid": "2409.07770v1", "paper_path": "./SciVer/papers/2409.07770v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "4.4", "5.2" ], "image_path": "./SciVer/images/2409.07770v1-Table4-1.png", "request_id": 521, "origin_statement": "On VoxCeleb1, the Uni. Pool model with WavLM features uses 2.9M parameters and achieves an EER of 1.90%, which is 0.25% lower than the 2.15% EER of the ECAPA-TDNN using WavLM, despite having over 5.8M fewer parameters.", "perturbed_statement": "On VoxCeleb1, the Uni. Pool model with WavLM features uses 3.2M parameters and achieves an EER of 1.90%, which is 0.30% lower than the 2.15% EER of the ECAPA-TDNN using WavLM, despite having over 5.8M fewer parameters.", "perturbed_explanation": "The table indicates that Uni. Pool with WavLM actually uses 2.9M parameters, not 3.2M, and the EER difference between 1.90% and 2.15% is 0.25%, not 0.30% as stated.", "claim": "On VoxCeleb1, the Uni. 
Pool model with WavLM features uses 3.2M parameters and achieves an EER of 1.90%, which is 0.30% lower than the 2.15% EER of the ECAPA-TDNN using WavLM, despite having over 5.8M fewer parameters.", "label": false }, { "paperid": "2410.18555v1", "paper_path": "./SciVer/papers/2410.18555v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.1.2" ], "image_path": "./SciVer/images/2410.18555v1-Table1-1.png", "request_id": 523, "origin_statement": "The model comprises 19 module instances in total, with 13 of them (approximately 68%) implemented as MLP-based modules.", "perturbed_statement": "The model comprises 17 module instances in total, with 11 of them (approximately 65%) implemented as MLP-based modules.", "perturbed_explanation": "This is incorrect because Table 1 lists 1 Node Embedding + 1 Edge Embedding + 5 EGAT layers + 1 Node Readout + 1 Edge Readout + 5 Aux. Node Readouts + 5 Aux. Edge Readouts = 19 modules, and 13 of those (Edge Embedding, Node Readout, Edge Readout, Aux. Readouts) use MLP, not 17 and 11.", "claim": "The model comprises 17 module instances in total, with 11 of them (approximately 65%) implemented as MLP-based modules.", "label": false }, { "paperid": "2410.06992v2", "paper_path": "./SciVer/papers/2410.06992v2.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "3" ], "image_path": "./SciVer/images/2410.06992v2_figure_7.png", "request_id": 524, "origin_statement": "For the sympy project, the original SWE-Bench dataset contains about six times more issues than the SWE-Bench+ dataset (463 vs. 77).", "perturbed_statement": "For the sympy project, the original SWE-Bench dataset contains about five times more issues than the SWE-Bench+ dataset (463 vs. 77).", "perturbed_explanation": "The perturbed statement understates the ratio. 463 divided by 77 is approximately 6.0, not 5. Therefore, SWE-Bench originally had about six times more sympy issues, not five times.", "claim": "For the sympy project, the original SWE-Bench dataset contains about five times more issues than the SWE-Bench+ dataset (463 vs. 77).", "label": false }, { "paperid": "2409.01722v2", "paper_path": "./SciVer/papers/2409.01722v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "2" ], "image_path": "./SciVer/images/2409.01722v2-Table1-1.png", "request_id": 525, "origin_statement": "Table 1 lists 4 notations pertaining to key generation and distribution (param, SK_i, PK_i, PK_all), while only 3 notations relate to masking (m_i,j, w_i^masked, W_aggregated^masked).", "perturbed_statement": "Table 1 lists 5 notations pertaining to key generation and distribution (param, SK_i, PK_i, PK_all, PRG), while only 3 notations relate to masking (m_i,j, w_i^masked, W_aggregated^masked).", "perturbed_explanation": "PRG is defined as a Pseudo Random Generator function, not a notation for key generation and distribution. 
Table 1 actually includes only four key-related notations: param, SK_i, PK_i, and PK_all.", "claim": "Table 1 lists 5 notations pertaining to key generation and distribution (param, SK_i, PK_i, PK_all, PRG), while only 3 notations relate to masking (m_i,j, w_i^masked, W_aggregated^masked).", "label": false }, { "paperid": "2409.11267v1", "paper_path": "./SciVer/papers/2409.11267v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.11267v1_figure_7.png", "request_id": 528, "origin_statement": "At prediction horizon Nₚ=48, the SL approach reduces computation time by about 78%, which is 22 percentage points more than the RL approach’s 56% reduction.", "perturbed_statement": "At prediction horizon Nₚ=48, the SL approach reduces computation time by about 85%, which is 24 percentage points more than the RL approach’s 61% reduction.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 7 shows the SL reduction at Nₚ=48 is approximately 78%, not 85%, and the RL reduction is approximately 56%, not 61%.", "claim": "At prediction horizon Nₚ=48, the SL approach reduces computation time by about 85%, which is 24 percentage points more than the RL approach’s 61% reduction.", "label": false }, { "paperid": "2409.14838v1", "paper_path": "./SciVer/papers/2409.14838v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.2.1" ], "image_path": "./SciVer/images/2409.14838v1-Table2-1.png", "request_id": 531, "origin_statement": "For ResNet18 on ImageNet, LSQ’s energy efficiency of 18.57 TOPS/W is approximately 4.5 times higher than WAGE’s 4.08 TOPS/W.", "perturbed_statement": "For ResNet18 on ImageNet, LSQ’s energy efficiency of 17.57 TOPS/W is approximately 4.5 times higher than WAGE’s 4.08 TOPS/W.", "perturbed_explanation": "The table lists LSQ’s energy efficiency as 18.57 TOPS/W, not 17.57 TOPS/W, so the perturbed value contradicts the source data.", "claim": "For ResNet18 on ImageNet, LSQ’s energy efficiency of 17.57 TOPS/W is approximately 4.5 times higher than WAGE’s 4.08 TOPS/W.", "label": false }, { "paperid": "2410.21259v2", "paper_path": "./SciVer/papers/2410.21259v2.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4", "4.3" ], "image_path": "./SciVer/images/2410.21259v2_figure_6.png", "request_id": 532, "origin_statement": "GPT-4o's basic-level performance drops by 21 percentage points from 90% at the easy level to 69% at the hard level.", "perturbed_statement": "GPT-4o's basic-level performance drops by 28 percentage points from 90% at the easy level to 62% at the hard level.", "perturbed_explanation": "In the figure, GPT-4o’s hard-level basic score is shown around 69%, not 62%, so the claimed drop of 28 points contradicts the actual decrease from 90% to 69% (21 points).", "claim": "GPT-4o's basic-level performance drops by 28 percentage points from 90% at the easy level to 62% at the hard level.", "label": false }, { "paperid": "2409.01696v1", "paper_path": "./SciVer/papers/2409.01696v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.01696v1-Table3-1.png", "request_id": 535, "origin_statement": "The CelebA dataset is tested with three MI attacks (KEDMI, LOMMA, and PLG-MI), whereas Stanford Dogs uses only one attack (PPA).", "perturbed_statement": "The CelebA dataset is tested with three MI attacks (KEDMI, LOMMA, and PLG-MI), whereas Stanford Dogs uses two attacks (PPA and KEDMI).", 
"perturbed_explanation": "This statement is incorrect because Table 3 shows that Stanford Dogs is evaluated only with the PPA attack; KEDMI is not applied to Stanford Dogs.", "claim": "The CelebA dataset is tested with three MI attacks (KEDMI, LOMMA, and PLG-MI), whereas Stanford Dogs uses two attacks (PPA and KEDMI).", "label": false }, { "paperid": "2409.15683v1", "paper_path": "./SciVer/papers/2409.15683v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.3", "3.4" ], "image_path": "./SciVer/images/2409.15683v1_figure_4.png", "request_id": 536, "origin_statement": "In the Burgers’ equation example (Fig. 4B), the maximum absolute error reaches 0.06, which is 50% higher than the 0.04 maximum error observed in the advection example (Fig. 4A).", "perturbed_statement": "In the Burgers’ equation example (Fig. 4B), the maximum absolute error reaches 0.08, which is twice the 0.04 maximum error observed in the advection example (Fig. 4A).", "perturbed_explanation": "The perturbed claim is incorrect because the error colorbar in Fig. 4B shows a maximum value of 0.06, not 0.08, and thus the Burgers’ error is 1.5 times (50% higher) the advection error, not twice as large.", "claim": "In the Burgers’ equation example (Fig. 4B), the maximum absolute error reaches 0.08, which is twice the 0.04 maximum error observed in the advection example (Fig. 4A).", "label": false }, { "paperid": "2411.01759v1", "paper_path": "./SciVer/papers/2411.01759v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.01759v1-Table2-1.png", "request_id": 537, "origin_statement": "On the FEMNIST dataset, our structured pruning model achieves 74.22% accuracy, outperforming the next-best method, FedPara (65.52%), by approximately 8.7 percentage points.", "perturbed_statement": "On the FEMNIST dataset, our structured pruning model achieves 74.22% accuracy, outperforming the next-best method, FedPara (65.52%), by approximately 12.7 percentage points.", "perturbed_explanation": "The stated gap of 12.7 percentage points is incorrect. 
The actual difference between 74.22% and 65.52% is 8.7 percentage points, not 12.7.", "claim": "On the FEMNIST dataset, our structured pruning model achieves 74.22% accuracy, outperforming the next-best method, FedPara (65.52%), by approximately 12.7 percentage points.", "label": false }, { "paperid": "2411.08284v1", "paper_path": "./SciVer/papers/2411.08284v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "1" ], "image_path": "./SciVer/images/2411.08284v1-Table1-1.png", "request_id": 543, "origin_statement": "Five out of the fifteen abbreviations listed include the word \"thresholding\" in their full name, representing one third of the entries.", "perturbed_statement": "Six out of the fifteen abbreviations listed include the word \"thresholding\" in their full name, representing two fifths of the entries.", "perturbed_explanation": "This statement is incorrect because only five abbreviations (DTAM, NTP, PGROTP, ROTP, and ROTPω) include the word \"thresholding\" in their full names; there are not six such entries.", "claim": "Six out of the fifteen abbreviations listed include the word \"thresholding\" in their full name, representing two fifths of the entries.", "label": false }, { "paperid": "2409.11839v1", "paper_path": "./SciVer/papers/2409.11839v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.11839v1_figure_3.png", "request_id": 544, "origin_statement": "By 24 months after SCA submission, local black smoke concentrations had fallen by roughly 28 mcg/m³ relative to pre-submission levels, and this reduction persisted through 60 months post-submission.", "perturbed_statement": "By 24 months after SCA submission, local sulphur dioxide concentrations had fallen by roughly 28 mcg/m³ relative to pre-submission levels, and this reduction persisted through 60 months post-submission.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 3 (right panel) shows no visible decline in sulphur dioxide levels after SCA submission; the estimates hover around zero rather than dropping by 28 mcg/m³.", "claim": "By 24 months after SCA submission, local sulphur dioxide concentrations had fallen by roughly 28 mcg/m³ relative to pre-submission levels, and this reduction persisted through 60 months post-submission.", "label": false }, { "paperid": "2409.06748v1", "paper_path": "./SciVer/papers/2409.06748v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.06748v1-Table1-1.png", "request_id": 545, "origin_statement": "On the Crime dataset, EasyST achieves an MAE of 0.2281, which is 0.0056 lower than STID’s 0.2337, signifying about a 2.4% improvement.", "perturbed_statement": "On the Crime dataset, EasyST achieves an MAE of 0.2381, which is 0.0044 higher than STID’s 0.2337, signifying about a 1.9% decrease in performance.", "perturbed_explanation": "This is incorrect because Table 1 shows EasyST’s Crime MAE is 0.2281 (not 0.2381) and that value is 0.0056 lower than STID’s 0.2337, not 0.0044 higher.", "claim": "On the Crime dataset, EasyST achieves an MAE of 0.2381, which is 0.0044 higher than STID’s 0.2337, signifying about a 1.9% decrease in performance.", "label": false }, { "paperid": "2410.15698v1", "paper_path": "./SciVer/papers/2410.15698v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.4" ], "image_path": "./SciVer/images/2410.15698v1-Table1-1.png", "request_id": 547, 
"origin_statement": "VQ-CD's mean return of 558.22 is approximately 80 points higher than CoD's 478.19 in the Ant-dir task sequence.", "perturbed_statement": "VQ-CD's mean return of 558.22 is approximately 180 points higher than CoD's 478.19 in the Ant-dir task sequence.", "perturbed_explanation": "The perturbed statement is incorrect because the actual difference between 558.22 and 478.19 is about 80.03 points, not 180 points, as shown by the values in Table 1.", "claim": "VQ-CD's mean return of 558.22 is approximately 180 points higher than CoD's 478.19 in the Ant-dir task sequence.", "label": false }, { "paperid": "2410.22364v2", "paper_path": "./SciVer/papers/2410.22364v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "6.4" ], "image_path": "./SciVer/images/2410.22364v2-Table4-1.png", "request_id": 549, "origin_statement": "Accelerating MoCo on ImageNet-1k reduces training iterations by 75% (from 6168M to 1542M) while causing only a 0.1% drop in LP accuracy (75.9% vs 76.0%), demonstrating near-identical performance at a quarter of the budget.", "perturbed_statement": "Accelerating MoCo on ImageNet-1k reduces training iterations by 75% (from 6168M to 1542M) while causing only a 0.5% drop in LP accuracy (75.9% vs 76.0%), demonstrating near-identical performance at a quarter of the budget.", "perturbed_explanation": "The statement incorrectly claims a 0.5% drop in LP accuracy; the table shows the LP accuracy changes from 76.0% (non-accelerated) to 75.9% (accelerated), a 0.1% drop, not 0.5%.", "claim": "Accelerating MoCo on ImageNet-1k reduces training iterations by 75% (from 6168M to 1542M) while causing only a 0.5% drop in LP accuracy (75.9% vs 76.0%), demonstrating near-identical performance at a quarter of the budget.", "label": false }, { "paperid": "2409.11395v1", "paper_path": "./SciVer/papers/2409.11395v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.3", "3.4" ], "image_path": "./SciVer/images/2409.11395v1_figure_4.png", "request_id": 550, "origin_statement": "At 9 μm, the linear slope parameter for Eclipse 1 is approximately -0.0055, whereas for Eclipse 2 it is approximately +0.002.", "perturbed_statement": "At 9 μm, the linear slope parameter for Eclipse 1 is approximately +0.0055, whereas for Eclipse 2 it is approximately -0.002.", "perturbed_explanation": "The perturbed claim is incorrect because, according to the bottom-left panel, Eclipse 1’s linear slope at 9 μm is around -0.0055 (negative), and Eclipse 2’s slope is around +0.002 (positive), not the opposite as stated.", "claim": "At 9 μm, the linear slope parameter for Eclipse 1 is approximately +0.0055, whereas for Eclipse 2 it is approximately -0.002.", "label": false }, { "paperid": "2411.07795v2", "paper_path": "./SciVer/papers/2411.07795v2.json", "claim_type": "direct", "type": "chart", "item": "3(a)", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.07795v2_figure_3(a).png", "request_id": 552, "origin_statement": "In the PSNR distribution shown, the bin centered at approximately 53 dB contains about 140 images, which is more than twice the count in the 46 dB bin (approximately 60 images), indicating most images have PSNR near 53 dB.", "perturbed_statement": "In the PSNR distribution shown, the bin centered at approximately 53 dB contains about 140 images, which is only slightly higher than the 46 dB bin (approximately 120 images), indicating a relatively uniform distribution.", "perturbed_explanation": "The statement is incorrect because the 46 dB bin 
actually contains around 60 images, not 120. Therefore it is not nearly equal to the 53 dB bin’s count of about 140 images, and the distribution is not uniform as claimed.", "claim": "In the PSNR distribution shown, the bin centered at approximately 53 dB contains about 140 images, which is only slightly higher than the 46 dB bin (approximately 120 images), indicating a relatively uniform distribution.", "label": false }, { "paperid": "2410.01631v1", "paper_path": "./SciVer/papers/2410.01631v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "2.2", "2.4", "3.1" ], "image_path": "./SciVer/images/2410.01631v1_figure_1.png", "request_id": 560, "origin_statement": "The frequency separation between the m=−1 and m=+1 NMR triplet peaks in Fig.1(b) increases linearly from about 4 MHz at 0 T to about 5 MHz at μ₀H = 1.6 T, corresponding to roughly 0.6 MHz per tesla.", "perturbed_statement": "The frequency separation between the m=−1 and m=+1 NMR triplet peaks in Fig.1(b) increases linearly from about 4 MHz at 0 T to about 6 MHz at μ₀H = 1.6 T, corresponding to roughly 1.2 MHz per tesla.", "perturbed_explanation": "This is incorrect because at μ₀H = 1.6 T the m=−1 and m=+1 peaks lie at approximately 61 MHz and 56 MHz, giving a separation of about 5 MHz (not 6 MHz), which corresponds to roughly 0.6 MHz/T rather than 1.2 MHz/T.", "claim": "The frequency separation between the m=−1 and m=+1 NMR triplet peaks in Fig.1(b) increases linearly from about 4 MHz at 0 T to about 6 MHz at μ₀H = 1.6 T, corresponding to roughly 1.2 MHz per tesla.", "label": false }, { "paperid": "2411.09118v1", "paper_path": "./SciVer/papers/2411.09118v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "4.4" ], "image_path": "./SciVer/images/2411.09118v1_figure_4.png", "request_id": 564, "origin_statement": "Under Gaussian noise perturbation (middle column), FxTS-Net displays ten distinct class clusters, whereas Neural ODE shows only approximately six clearly separated clusters with the other four largely overlapping.", "perturbed_statement": "Under Gaussian noise perturbation (middle column), FxTS-Net displays ten distinct class clusters, whereas Neural ODE shows nine clearly separated clusters with only one cluster overlapping.", "perturbed_explanation": "The perturbed statement incorrectly reports that Neural ODE under Gaussian noise has nine clearly separated clusters. 
The visualization shows only about six well-defined clusters for Neural ODE, with the remaining four classes overlapping, directly contradicting the claimed nine separate clusters.", "claim": "Under Gaussian noise perturbation (middle column), FxTS-Net displays ten distinct class clusters, whereas Neural ODE shows nine clearly separated clusters with only one cluster overlapping.", "label": false }, { "paperid": "2409.03769v1", "paper_path": "./SciVer/papers/2409.03769v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.03769v1-Table1-1.png", "request_id": 565, "origin_statement": "The MKG dataset includes approximately 31 times more 'connectedTo' triplets (50,251) than 'similarTo' triplets (1,613), indicating a much higher prevalence of component connections over substitutes.", "perturbed_statement": "The MKG dataset includes approximately 20 times more 'connectedTo' triplets (50,251) than 'similarTo' triplets (1,613), indicating a much higher prevalence of component connections over substitutes.", "perturbed_explanation": "The statement is incorrect because Table 1 shows 50,251 connectedTo triplets and 1,613 similarTo triplets, which yields an actual ratio of about 31:1, not 20:1 as claimed.", "claim": "The MKG dataset includes approximately 20 times more 'connectedTo' triplets (50,251) than 'similarTo' triplets (1,613), indicating a much higher prevalence of component connections over substitutes.", "label": false }, { "paperid": "2410.21815v1", "paper_path": "./SciVer/papers/2410.21815v1.json", "claim_type": "direct", "type": "table", "item": "7", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.21815v1-Table7-1.png", "request_id": 571, "origin_statement": "RISE’s insertion score is 0.9581±0.0333, which is 0.0113 higher than the second-highest insertion score achieved by Leave-one-out (0.9468±0.0666).", "perturbed_statement": "RISE’s insertion score is 0.9481±0.0333, which is 0.0017 higher than the second-highest insertion score achieved by Leave-one-out (0.9468±0.0666).", "perturbed_explanation": "The table shows RISE’s insertion score as 0.9581±0.0333, not 0.9481±0.0333, and the true gap over Leave-one-out’s 0.9468±0.0666 is 0.0113, not 0.0017.", "claim": "RISE’s insertion score is 0.9481±0.0333, which is 0.0017 higher than the second-highest insertion score achieved by Leave-one-out (0.9468±0.0666).", "label": false }, { "paperid": "2409.01274v1", "paper_path": "./SciVer/papers/2409.01274v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.3" ], "image_path": "./SciVer/images/2409.01274v1-Table3-1.png", "request_id": 577, "origin_statement": "The Concat+Conv module’s PSNR increases from 24.37 to 24.85 when GSS is applied, a gain of 0.48 dB, which is the largest improvement among the listed modules.", "perturbed_statement": "The Concat+Conv module’s PSNR increases from 24.37 to 24.65 when GSS is applied, a gain of 0.28 dB, which is the largest improvement among the listed modules.", "perturbed_explanation": "The perturbed statement is incorrect because the table reports that Concat+Conv with GSS achieves a PSNR of 24.85, not 24.65, yielding a gain of 0.48 dB rather than 0.28 dB.", "claim": "The Concat+Conv module’s PSNR increases from 24.37 to 24.65 when GSS is applied, a gain of 0.28 dB, which is the largest improvement among the listed modules.", "label": false }, { "paperid": "2409.11874v1", "paper_path": "./SciVer/papers/2409.11874v1.json", "claim_type": "direct", "type": 
"chart", "item": "8", "section": [ "6.3" ], "image_path": "./SciVer/images/2409.11874v1_figure_8.png", "request_id": 578, "origin_statement": "Using y = 0.89x + 0.04, an ABHINAW score increase from 0.4 to 0.8 corresponds to a predicted human score rise of about 0.36, from approximately 0.396 to 0.752.", "perturbed_statement": "Using y = 0.89x + 0.04, an ABHINAW score increase from 0.4 to 0.8 corresponds to a predicted human score rise of about 0.40, from approximately 0.40 to 0.80.", "perturbed_explanation": "The regression line actually yields y=0.89·0.4+0.04=0.396 and y=0.89·0.8+0.04=0.752, a rise of 0.356, not 0.40, and the predicted scores are ~0.396 to ~0.752, not 0.40 to 0.80.", "claim": "Using y = 0.89x + 0.04, an ABHINAW score increase from 0.4 to 0.8 corresponds to a predicted human score rise of about 0.40, from approximately 0.40 to 0.80.", "label": false }, { "paperid": "2409.03247v1", "paper_path": "./SciVer/papers/2409.03247v1.json", "claim_type": "direct", "type": "chart", "item": "6(d)", "section": [ "4.1.1" ], "image_path": "./SciVer/images/2409.03247v1_figure_6(d).png", "request_id": 582, "origin_statement": "By minute 10, the Prompt System achieved an F1 score approximately 0.60, surpassing the Rule System's ∼0.45 by about 0.15 points.", "perturbed_statement": "By minute 10, the Prompt System achieved an F1 score approximately 0.70, surpassing the Rule System's ∼0.45 by about 0.25 points.", "perturbed_explanation": "This statement is incorrect because the graph shows the Prompt System's F1 score at minute 10 is around 0.60, not 0.70. Therefore, the actual difference from the Rule System's ∼0.45 is about 0.15, not 0.25.", "claim": "By minute 10, the Prompt System achieved an F1 score approximately 0.70, surpassing the Rule System's ∼0.45 by about 0.25 points.", "label": false }, { "paperid": "2409.11927v1", "paper_path": "./SciVer/papers/2409.11927v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "3", "4" ], "image_path": "./SciVer/images/2409.11927v1-Table3-1.png", "request_id": 583, "origin_statement": "In the M1B model, the accretion rate falls from 262 to 212 (a 19% decline) between Data Set 1 and 2, closely matching the 17% decrease observed under M1A (14.4 to 11.9).", "perturbed_statement": "In the M1B model, the accretion rate falls from 262 to 200 (a 24% decline) between Data Set 1 and 2, closely matching the 17% decrease observed under M1A (14.4 to 11.9).", "perturbed_explanation": "The table lists the M1B accretion rate in Data Set 2 as 212 (not 200), so the decline from 262 is about 19%, not 24% as stated in the perturbed claim.", "claim": "In the M1B model, the accretion rate falls from 262 to 200 (a 24% decline) between Data Set 1 and 2, closely matching the 17% decrease observed under M1A (14.4 to 11.9).", "label": false }, { "paperid": "2411.14215v1", "paper_path": "./SciVer/papers/2411.14215v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "2.4" ], "image_path": "./SciVer/images/2411.14215v1_figure_3.png", "request_id": 586, "origin_statement": "GPT-3 WHL accuracy decreases from approximately 0.7 at zero generalizations to roughly 0.24 at three generalizations, a drop of about 0.46.", "perturbed_statement": "GPT-3 WHL accuracy decreases from approximately 0.7 at zero generalizations to roughly 0.34 at three generalizations, a drop of about 0.36.", "perturbed_explanation": "The perturb statement incorrectly lists the accuracy at three generalizations as 0.34 instead of the observed ~0.24; the chart shows 
the GPT-3 WHL accuracy at three generalizations is around 0.24, not 0.34, so the stated drop of 0.36 is inaccurate.", "claim": "GPT-3 WHL accuracy decreases from approximately 0.7 at zero generalizations to roughly 0.34 at three generalizations, a drop of about 0.36.", "label": false }, { "paperid": "2411.02653v1", "paper_path": "./SciVer/papers/2411.02653v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.2" ], "image_path": "./SciVer/images/2411.02653v1-Table1-1.png", "request_id": 589, "origin_statement": "The number of epochs (30,000) is 50 times the batch size (600), indicating extensive iterations per training batch.", "perturbed_statement": "The number of epochs (30,000) is exactly 60 times the batch size (500), indicating extensive iterations per training batch.", "perturbed_explanation": "This statement is wrong because Table 1 lists the batch size as 600 (not 500), so the actual ratio is 30,000/600 = 50, not 60.", "claim": "The number of epochs (30,000) is exactly 60 times the batch size (500), indicating extensive iterations per training batch.", "label": false }, { "paperid": "2409.04723v1", "paper_path": "./SciVer/papers/2409.04723v1.json", "claim_type": "direct", "type": "chart", "item": "4(c)", "section": [ "5" ], "image_path": "./SciVer/images/2409.04723v1_figure_4(c).png", "request_id": 592, "origin_statement": "Adding sleep-related measures increases the F1 score for depression classification by 0.20 when using ECG, from 0.50 to 0.70.", "perturbed_statement": "Adding sleep-related measures increases the F1 score for depression classification by 0.15 when using ECG, from 0.55 to 0.70.", "perturbed_explanation": "The ECG-only F1 score for depression is actually about 0.50 (not 0.55) and the boost after adding sleep is 0.20 (from 0.50 to 0.70), not 0.15.", "claim": "Adding sleep-related measures increases the F1 score for depression classification by 0.15 when using ECG, from 0.55 to 0.70.", "label": false }, { "paperid": "2410.02475v1", "paper_path": "./SciVer/papers/2410.02475v1.json", "claim_type": "direct", "type": "chart", "item": "2(b)", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.02475v1_figure_2(b).png", "request_id": 596, "origin_statement": "The geometry-unaware policy achieves a 66.4% success rate, over three times the 21.2% success of the full-observation policy on unseen objects.", "perturbed_statement": "The geometry-unaware policy achieves a 70.4% success rate, over four times the 15.2% success of the full-observation policy on unseen objects.", "perturbed_explanation": "The chart actually reports a 66.4% success rate for the geometry-unaware policy (not 70.4%) and a 21.2% success rate for the full-observation policy (not 15.2%), so both figures in the perturbed statement conflict with the source.", "claim": "The geometry-unaware policy achieves a 70.4% success rate, over four times the 15.2% success of the full-observation policy on unseen objects.", "label": false }, { "paperid": "2409.04477v1", "paper_path": "./SciVer/papers/2409.04477v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.3" ], "image_path": "./SciVer/images/2409.04477v1_figure_4.png", "request_id": 604, "origin_statement": "By iteration 6, the approximation ratio increases to about 0.56 while the distance to solution falls to about 0.16, compared to iteration 0’s AR of 0.30 and DS of 0.43.", "perturbed_statement": "By iteration 6, the approximation ratio increases to about 0.65 while the distance to solution falls to about 
0.10, compared to iteration 0’s AR of 0.30 and DS of 0.43.", "perturbed_explanation": "The perturbed statement is incorrect because the plot shows the approximation ratio at iteration 6 is approximately 0.56 (not 0.65) and the distance to solution at iteration 6 is approximately 0.16 (not 0.10).", "claim": "By iteration 6, the approximation ratio increases to about 0.65 while the distance to solution falls to about 0.10, compared to iteration 0’s AR of 0.30 and DS of 0.43.", "label": false }, { "paperid": "2410.21357v1", "paper_path": "./SciVer/papers/2410.21357v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "5.2", "5.3" ], "image_path": "./SciVer/images/2410.21357v1-Table3-1.png", "request_id": 607, "origin_statement": "As timesteps increase from 256 to 2048, EDLM-AR’s Llama2 perplexity decreases from 34.7 to 14.6, a reduction of 20.1 points.", "perturbed_statement": "As timesteps increase from 256 to 2048, EDLM-AR’s Llama2 perplexity decreases from 32.7 to 11.6, a reduction of 21.1 points.", "perturbed_explanation": "The perturbation is incorrect because Table 3 reports EDLM-AR’s Llama2 perplexity as 34.7 at 256 timesteps (not 32.7) and 14.6 at 2048 timesteps (not 11.6), making both values and the calculated reduction inconsistent with the source.", "claim": "As timesteps increase from 256 to 2048, EDLM-AR’s Llama2 perplexity decreases from 32.7 to 11.6, a reduction of 21.1 points.", "label": false }, { "paperid": "2411.10545v1", "paper_path": "./SciVer/papers/2411.10545v1.json", "claim_type": "direct", "type": "chart", "item": "2(c)", "section": [ "4" ], "image_path": "./SciVer/images/2411.10545v1_figure_2(c).png", "request_id": 608, "origin_statement": "On the Anthropic HH Golden dataset, the ISA sampling strategy achieves a 25.23% win rate, representing a 3.36 percentage-point improvement over the density-based method (21.87%) and coming within 1.64 points of the full dataset’s performance (26.87%).", "perturbed_statement": "On the Anthropic HH Golden dataset, the ISA sampling strategy achieves a 26.23% win rate, representing a 4.36 percentage-point improvement over the density-based method (21.87%) and coming within 0.64 points of the full dataset’s performance (26.87%).", "perturbed_explanation": "The chart shows the ISA win rate is actually 25.23%, not 26.23%. 
Thus the real improvement over the density-based method (21.87%) is 3.36 points and the gap to full-data performance (26.87%) is 1.64 points, not 4.36 or 0.64.", "claim": "On the Anthropic HH Golden dataset, the ISA sampling strategy achieves a 26.23% win rate, representing a 4.36 percentage-point improvement over the density-based method (21.87%) and coming within 0.64 points of the full dataset’s performance (26.87%).", "label": false }, { "paperid": "2409.13499v2", "paper_path": "./SciVer/papers/2409.13499v2.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "4.1" ], "image_path": "./SciVer/images/2409.13499v2_figure_2.png", "request_id": 612, "origin_statement": "In Spanish, offline models without supervised data reduce WER from approximately 20.3% at 39M parameters to about 10.3% at 244M parameters, representing a ~10-point decrease.", "perturbed_statement": "In Spanish, offline models without supervised data reduce WER from approximately 20.3% at 39M parameters to about 12.3% at 244M parameters, representing an 8-point decrease.", "perturbed_explanation": "The figure shows the WER at 244M parameters is roughly 10.3%, not 12.3%, so the actual decrease from 20.3% to 10.3% is about 10 points, not 8.", "claim": "In Spanish, offline models without supervised data reduce WER from approximately 20.3% at 39M parameters to about 12.3% at 244M parameters, representing an 8-point decrease.", "label": false }, { "paperid": "2410.09300v2", "paper_path": "./SciVer/papers/2410.09300v2.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "2.1" ], "image_path": "./SciVer/images/2410.09300v2_figure_3.png", "request_id": 614, "origin_statement": "At a base top-1 probability threshold of 0.5, the alignment-related recall exceeds 90% while the proportion of positions below this threshold remains under 15%.", "perturbed_statement": "At a base top-1 probability threshold of 0.4, the alignment-related recall exceeds 90% while the proportion of positions below this threshold remains under 15%.", "perturbed_explanation": "This is incorrect because at a 0.4 threshold the dotted recall curve on the chart is only around 70%, not over 90% as claimed.", "claim": "At a base top-1 probability threshold of 0.4, the alignment-related recall exceeds 90% while the proportion of positions below this threshold remains under 15%.", "label": false }, { "paperid": "2410.10880v1", "paper_path": "./SciVer/papers/2410.10880v1.json", "claim_type": "direct", "type": "chart", "item": "3(d)", "section": [ "3.2" ], "image_path": "./SciVer/images/2410.10880v1_figure_3(d).png", "request_id": 622, "origin_statement": "Under FSD with Min-k%, the mean Min-k% score for non-members is approximately 3.2, while for members it’s about -0.8, creating a gap of around 4 percentage points.", "perturbed_statement": "Under FSD with Min-k%, the mean Min-k% score for non-members is approximately -0.8, while for members it’s about 3.2, creating a gap of around 4 percentage points.", "perturbed_explanation": "This is incorrect because the figure shows the blue dashed line (non-members) at around 3.2, not -0.8, and the green dashed line (members) at around -0.8, not 3.2.", "claim": "Under FSD with Min-k%, the mean Min-k% score for non-members is approximately -0.8, while for members it’s about 3.2, creating a gap of around 4 percentage points.", "label": false }, { "paperid": "2410.22373v1", "paper_path": "./SciVer/papers/2410.22373v1.json", "claim_type": "direct", "type": "chart", "item": "3",
"section": [ "3.3" ], "image_path": "./SciVer/images/2410.22373v1_figure_3.png", "request_id": 630, "origin_statement": "Only in scenario (iv) does the leader's highest-probability bar exceed the follower’s highest bar by a visibly large margin, indicating a gap above the threshold Δ; scenarios (i), (ii), and (iii) all display minor gaps below Δ.", "perturbed_statement": "Only in scenario (ii) does the leader's highest-probability bar exceed the follower’s highest bar by a visibly large margin, indicating a gap above the threshold Δ; scenarios (i), (iii), and (iv) all display minor gaps below Δ.", "perturbed_explanation": "This is incorrect because in the figure only scenario (iv) (outlined in green) shows a large gap between the leader’s and follower’s peak bars that exceeds Δ. Scenario (ii) is outlined in red and has a small gap below Δ, so it does not meet the threshold.", "claim": "Only in scenario (ii) does the leader's highest-probability bar exceed the follower’s highest bar by a visibly large margin, indicating a gap above the threshold Δ; scenarios (i), (iii), and (iv) all display minor gaps below Δ.", "label": false }, { "paperid": "2411.15375v1", "paper_path": "./SciVer/papers/2411.15375v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "3" ], "image_path": "./SciVer/images/2411.15375v1_figure_1.png", "request_id": 636, "origin_statement": "Figure 1 shows AdamZ applies three learning rate increases during early stagnation phases and seven learning rate reductions in response to overshooting later in training.", "perturbed_statement": "Figure 1 shows AdamZ applies four learning rate increases during early stagnation phases and six learning rate reductions in response to overshooting events.", "perturbed_explanation": "The perturbed statement miscounts the adjustment events: Figure 1 actually depicts three learning rate increases (not four) and seven reductions (not six) as marked by the green-stagnation and red-overshooting annotations.", "claim": "Figure 1 shows AdamZ applies four learning rate increases during early stagnation phases and six learning rate reductions in response to overshooting events.", "label": false }, { "paperid": "2411.01023v1", "paper_path": "./SciVer/papers/2411.01023v1.json", "claim_type": "direct", "type": "chart", "item": "12", "section": [ "6.2" ], "image_path": "./SciVer/images/2411.01023v1_figure_12.png", "request_id": 638, "origin_statement": "At embedding dimension 32, the learning rate of 0.001 achieves a Hits@3 of approximately 0.48, about 0.02 higher than the 0.01 rate’s ∼0.46, while the 0.0001 rate remains nearly at zero.", "perturbed_statement": "At embedding dimension 32, the learning rate of 0.001 achieves a Hits@3 of approximately 0.46, which is 0.03 lower than the 0.01 rate’s ∼0.49, and the 0.0001 rate yields around 0.02.", "perturbed_explanation": "The chart shows that at dimension 32 the 0.001 curve (green) actually reaches about 0.48 (not 0.46), the 0.01 curve (orange) is around 0.46 (not 0.49), and the 0.0001 curve (red) stays near zero (not ~0.02).", "claim": "At embedding dimension 32, the learning rate of 0.001 achieves a Hits@3 of approximately 0.46, which is 0.03 lower than the 0.01 rate’s ∼0.49, and the 0.0001 rate yields around 0.02.", "label": false }, { "paperid": "2410.24145v1", "paper_path": "./SciVer/papers/2410.24145v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.4" ], "image_path": "./SciVer/images/2410.24145v1_figure_3.png", "request_id": 642, "origin_statement": 
"In the training sample circular histogram, the bar at π/2 is approximately three times taller than the bar at π, indicating winds from the north occur about three times more often than those from the west.", "perturbed_statement": "In the training sample circular histogram, the bar at π/2 is approximately five times taller than the bar at π, indicating winds from the north occur about five times more often than those from the west.", "perturbed_explanation": "The bar at π/2 in the histogram is only about three times the height of the bar at π, not five times. This discrepancy shows the perturbed claim exaggerates the relative frequency of northerly winds compared to westerly winds.", "claim": "In the training sample circular histogram, the bar at π/2 is approximately five times taller than the bar at π, indicating winds from the north occur about five times more often than those from the west.", "label": false }, { "paperid": "2409.08056v1", "paper_path": "./SciVer/papers/2409.08056v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "4.5" ], "image_path": "./SciVer/images/2409.08056v1_figure_7.png", "request_id": 644, "origin_statement": "Increasing VRAM from 7 GB to 17 GB raises PSNR from ~32.2 dB to ~32.53 dB, a gain of just 0.33 dB, indicating diminishing returns.", "perturbed_statement": "Increasing VRAM from 7 GB to 17 GB raises PSNR from ~32.2 dB to ~33.0 dB, a gain of 0.8 dB, indicating diminishing returns.", "perturbed_explanation": "The actual PSNR at 17 GB VRAM is about 32.53 dB (not 33.0 dB), so the gain from 32.2 dB is only ~0.33 dB, not 0.8 dB, contradicting the figure.", "claim": "Increasing VRAM from 7 GB to 17 GB raises PSNR from ~32.2 dB to ~33.0 dB, a gain of 0.8 dB, indicating diminishing returns.", "label": false }, { "paperid": "2411.09899v1", "paper_path": "./SciVer/papers/2411.09899v1.json", "claim_type": "direct", "type": "chart", "item": "1(b)", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.09899v1_figure_1(b).png", "request_id": 648, "origin_statement": "The estimated Merton ratio for logarithmic utility declines from about 0.30 at t=0 to approximately 0.26 by t=8 days, then remains nearly constant at around 0.26 up to t=365 days.", "perturbed_statement": "The estimated Merton ratio for logarithmic utility declines from about 0.30 at t=0 to approximately 0.26 by t=8 days, then remains nearly constant at around 0.30 up to t=365 days.", "perturbed_explanation": "This statement is incorrect because, as shown by the flat black line after day 8 in the figure, the Merton ratio stabilizes at about 0.26, not 0.30.", "claim": "The estimated Merton ratio for logarithmic utility declines from about 0.30 at t=0 to approximately 0.26 by t=8 days, then remains nearly constant at around 0.30 up to t=365 days.", "label": false }, { "paperid": "2410.23537v1", "paper_path": "./SciVer/papers/2410.23537v1.json", "claim_type": "direct", "type": "chart", "item": "9", "section": [ "4.3" ], "image_path": "./SciVer/images/2410.23537v1_figure_9.png", "request_id": 652, "origin_statement": "The mean response latency for ALISE (≈9s) is about half that of FCFS (≈18s), and the maximum FCFS latency peaks around 53s compared to ALISE’s peak near 27s.", "perturbed_statement": "The mean response latency for ALISE (≈18s) is about double that of FCFS (≈9s), and the maximum FCFS latency peaks around 53s compared to ALISE’s peak near 27s.", "perturbed_explanation": "The perturbed statement is wrong because the chart shows ALISE’s mean latency is approximately 9 
seconds (blue dashed line), not 18 seconds, and FCFS’s mean latency is about 18 seconds (red dashed line), not 9 seconds.", "claim": "The mean response latency for ALISE (≈18s) is about double that of FCFS (≈9s), and the maximum FCFS latency peaks around 53s compared to ALISE’s peak near 27s.", "label": false }, { "paperid": "2411.12704v1", "paper_path": "./SciVer/papers/2411.12704v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.12704v1-Table2-1.png", "request_id": 659, "origin_statement": "Three hours before flare onset, X-class flares exhibit a 131 Å standard deviation ratio of 4.86, more than twice the C-class ratio at that time (2.38), and exceeding the M-class ratio (3.41) by approximately 1.45.", "perturbed_statement": "Three hours before flare onset, X-class flares exhibit a 131 Å standard deviation ratio of 5.10, more than twice the C-class ratio at that time (2.38), and exceeding the M-class ratio (3.41) by approximately 1.69.", "perturbed_explanation": "The table shows the X-class 131 Å ratio at 3 hours before flare onset is 4.86, not 5.10. It also indicates the difference between the X-class (4.86) and M-class (3.41) ratios is 1.45, not 1.69.", "claim": "Three hours before flare onset, X-class flares exhibit a 131 Å standard deviation ratio of 5.10, more than twice the C-class ratio at that time (2.38), and exceeding the M-class ratio (3.41) by approximately 1.69.", "label": false }, { "paperid": "2411.06214v1", "paper_path": "./SciVer/papers/2411.06214v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "3.5" ], "image_path": "./SciVer/images/2411.06214v1_figure_3.png", "request_id": 664, "origin_statement": "Principal component 1 accounts for 74.56% of the variance, approximately 4.7 times more than principal component 2 (15.85%) and about 14 times more than principal component 3 (5.29%).", "perturbed_statement": "Principal component 1 accounts for 74.56% of the variance, approximately 4.7 times more than principal component 2 (25.85%) and about 14 times more than principal component 3 (5.29%).", "perturbed_explanation": "The perturbed statement incorrectly lists principal component 2’s contribution as 25.85%, whereas the table and accompanying text specify it is actually 15.85%.", "claim": "Principal component 1 accounts for 74.56% of the variance, approximately 4.7 times more than principal component 2 (25.85%) and about 14 times more than principal component 3 (5.29%).", "label": false }, { "paperid": "2409.11724v2", "paper_path": "./SciVer/papers/2409.11724v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "3.4" ], "image_path": "./SciVer/images/2409.11724v2-Table4-1.png", "request_id": 667, "origin_statement": "Three of the five datasets in Table 4 are for Table Fact Checking, and the remaining two are for TableQA.", "perturbed_statement": "Four of the five datasets in Table 4 are for Table Fact Checking, and the remaining one is for TableQA.", "perturbed_explanation": "The table lists only three Table Fact Checking datasets (PubHealthTab, TabFact, SCITAB) and two TableQA datasets (TabMWP, FinQA), so it is incorrect to claim that four datasets are for Table Fact Checking.", "claim": "Four of the five datasets in Table 4 are for Table Fact Checking, and the remaining one is for TableQA.", "label": false }, { "paperid": "2410.23168v1", "paper_path": "./SciVer/papers/2410.23168v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ 
"1" ], "image_path": "./SciVer/images/2410.23168v1_figure_1.png", "request_id": 668, "origin_statement": "To achieve a perplexity of about 11.7, training a Transformer from scratch requires roughly 12,000 TPU hours, whereas Tokenformer reaches the same perplexity with around 4,000 TPU hours, cutting training cost by approximately two thirds.", "perturbed_statement": "To achieve a perplexity of about 11.7, training a Transformer from scratch requires roughly 6,000 TPU hours, whereas Tokenformer reaches the same perplexity with around 4,000 TPU hours, cutting training cost by approximately one third.", "perturbed_explanation": "The perturbed statement is wrong because the Transformer-from-scratch curve shows a cost of about 12,000 TPU hours at perplexity 11.7, not 6,000 TPU hours. Consequently, the reduction is about two thirds, not one third.", "claim": "To achieve a perplexity of about 11.7, training a Transformer from scratch requires roughly 6,000 TPU hours, whereas Tokenformer reaches the same perplexity with around 4,000 TPU hours, cutting training cost by approximately one third.", "label": false }, { "paperid": "2409.12892v1", "paper_path": "./SciVer/papers/2409.12892v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.12892v1_figure_6.png", "request_id": 670, "origin_statement": "From iteration 1 to 10, total runtime falls from ~26.4s to ~21.5s (–18.6%), largely due to PCG time dropping by 3.7s from 13.5s to 9.8s.", "perturbed_statement": "From iteration 1 to 10, total runtime falls from ~26.4s to ~21.5s (–18.6%), largely due to PCG time dropping by 4.7s from 13.5s to 9.8s.", "perturbed_explanation": "The statement is incorrect because PCG time actually decreases by 13.5s – 9.8s = 3.7s, not 4.7s as claimed.", "claim": "From iteration 1 to 10, total runtime falls from ~26.4s to ~21.5s (–18.6%), largely due to PCG time dropping by 4.7s from 13.5s to 9.8s.", "label": false }, { "paperid": "2409.12314v1", "paper_path": "./SciVer/papers/2409.12314v1.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "5.2" ], "image_path": "./SciVer/images/2409.12314v1_figure_6.png", "request_id": 672, "origin_statement": "For dirty-label poisoning, increasing the number of poisoned concepts from 0 to 500 raises AD from about 0.45 to 0.59 while model utility falls from about 0.90 to 0.35.", "perturbed_statement": "For dirty-label poisoning, increasing the number of poisoned concepts from 0 to 500 raises AD from about 0.48 to 0.62 while model utility falls from about 0.92 to 0.30.", "perturbed_explanation": "The figure shows AD rising from approximately 0.45 (not 0.48) to 0.59 (not 0.62), and model utility dropping from about 0.90 (not 0.92) to 0.35 (not 0.30) for dirty-label poisoning.", "claim": "For dirty-label poisoning, increasing the number of poisoned concepts from 0 to 500 raises AD from about 0.48 to 0.62 while model utility falls from about 0.92 to 0.30.", "label": false }, { "paperid": "2410.16597v1", "paper_path": "./SciVer/papers/2410.16597v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "7.1" ], "image_path": "./SciVer/images/2410.16597v1_figure_3.png", "request_id": 676, "origin_statement": "Across document lengths from 100 to 1200 words, SynthKG holds steady at roughly 12–13 triplets per 100 words, whereas document-level extraction falls from about 12 triplets/100 words to under 5 triplets/100 words.", "perturbed_statement": "Across document lengths from 100 to 1200 words, 
SynthKG holds steady at roughly 14–15 triplets per 100 words, whereas document-level extraction falls from about 12 triplets/100 words to under 5 triplets/100 words.", "perturbed_explanation": "The perturbed statement incorrectly states that SynthKG maintains 14–15 triplets per 100 words. The chart shows SynthKG’s density stays around 12–13 triplets per 100 words throughout, never reaching 14–15.", "claim": "Across document lengths from 100 to 1200 words, SynthKG holds steady at roughly 14–15 triplets per 100 words, whereas document-level extraction falls from about 12 triplets/100 words to under 5 triplets/100 words.", "label": false }, { "paperid": "2411.17987v1", "paper_path": "./SciVer/papers/2411.17987v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5.2.2" ], "image_path": "./SciVer/images/2411.17987v1_figure_7.png", "request_id": 678, "origin_statement": "At a 20 Gbit/s offered load, the SmartNIC maintains a throughput of approximately 19.5 Gbit/s across all six scenarios, indicating about a 2.5% drop from the offered load.", "perturbed_statement": "At a 20 Gbit/s offered load, the SmartNIC throughput across all six scenarios is about 18.5 Gbit/s, indicating a roughly 7.5% performance drop.", "perturbed_explanation": "This statement is incorrect because Figure 7 shows that at a 20 Gbit/s offered load, throughput for all configurations is approximately 19.5 Gbit/s, not 18.5 Gbit/s. The claimed 7.5% drop and 18.5 Gbit/s value directly contradict the plotted data.", "claim": "At a 20 Gbit/s offered load, the SmartNIC throughput across all six scenarios is about 18.5 Gbit/s, indicating a roughly 7.5% performance drop.", "label": false }, { "paperid": "2410.17226v2", "paper_path": "./SciVer/papers/2410.17226v2.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "5.3" ], "image_path": "./SciVer/images/2410.17226v2_figure_6.png", "request_id": 686, "origin_statement": "For the UK dataset, increasing memory budget per vertex from 256 to 4096 bytes decreases the distortion of C-BFS-based LL with w=64 from approximately 7.8% to 2.3%.", "perturbed_statement": "For the UK dataset, increasing memory budget per vertex from 256 to 4096 bytes decreases the distortion of C-BFS-based LL with w=64 from approximately 8.8% to 1.2%.", "perturbed_explanation": "The perturbed values are incorrect: the figure shows a distortion of about 7.8% (not 8.8%) at 256 bytes and about 2.3% (not 1.2%) at 4096 bytes for w=64.", "claim": "For the UK dataset, increasing memory budget per vertex from 256 to 4096 bytes decreases the distortion of C-BFS-based LL with w=64 from approximately 8.8% to 1.2%.", "label": false }, { "paperid": "2411.04451v1", "paper_path": "./SciVer/papers/2411.04451v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "2" ], "image_path": "./SciVer/images/2411.04451v1_figure_2.png", "request_id": 692, "origin_statement": "The mean HCN fractional abundance relative to H2 increases from about 1×10^{-9} in the SMC (metallicity 0.2) to roughly 2×10^{-8} in the Milky Way (metallicity 1.0), showing an order-of-magnitude rise with metallicity.", "perturbed_statement": "The mean HCN fractional abundance relative to H2 decreases from about 2×10^{-8} in the SMC (metallicity 0.2) to roughly 1×10^{-9} in the Milky Way (metallicity 1.0).", "perturbed_explanation": "This statement is
incorrect because Figure 2(a) shows the mean HCN/H2 actually increases from ~1×10^{-9} at metallicity 0.2 (SMC) to ~2×10^{-8} at metallicity 1.0 (MW), not a decrease; the values for SMC and MW have been swapped and inverted.", "claim": "The mean HCN fractional abundance relative to H2 decreases from about 2×10^{-8} in the SMC (metallicity 0.2) to roughly 1×10^{-9} in the Milky Way (metallicity 1.0).", "label": false }, { "paperid": "2411.12248v2", "paper_path": "./SciVer/papers/2411.12248v2.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "3.4" ], "image_path": "./SciVer/images/2411.12248v2-Table1-1.png", "request_id": 695, "origin_statement": "EEG-3D is the only dataset offering resting-state, static, and dynamic brain activity recordings while also providing images, videos, 3D shape, 3D color, and text annotations.", "perturbed_statement": "EEG-3D and Mind-3D both offer resting-state, static, and dynamic brain activity recordings and include images, videos, 3D shape, 3D color, and text annotations.", "perturbed_explanation": "The perturbed statement is incorrect because Mind-3D lacks 3D color attributes. In the table, Mind-3D has a cross under the 3D (C) column, indicating it does not include color annotations.", "claim": "EEG-3D and Mind-3D both offer resting-state, static, and dynamic brain activity recordings and include images, videos, 3D shape, 3D color, and text annotations.", "label": false }, { "paperid": "2411.13900v1", "paper_path": "./SciVer/papers/2411.13900v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.3" ], "image_path": "./SciVer/images/2411.13900v1-Table2-1.png", "request_id": 697, "origin_statement": "Apple Firestorm’s indirect branch counter code (0xc6) is exactly one hexadecimal unit higher than its conditional branch counter code (0xc5), reflecting a sequential register assignment for these branch misprediction counters.", "perturbed_statement": "Apple Firestorm’s indirect branch counter code (0xc6) is two hexadecimal units higher than its conditional branch counter code (0xc5), reflecting a sequential register assignment for these branch misprediction counters.", "perturbed_explanation": "The perturbed statement claims a difference of two hexadecimal units, but the table shows indirect=0xc6 and conditional=0xc5, which differ by only one hexadecimal unit, not two.", "claim": "Apple Firestorm’s indirect branch counter code (0xc6) is two hexadecimal units higher than its conditional branch counter code (0xc5), reflecting a sequential register assignment for these branch misprediction counters.", "label": false }, { "paperid": "2409.02292v1", "paper_path": "./SciVer/papers/2409.02292v1.json", "claim_type": "direct", "type": "chart", "item": "7", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.02292v1_figure_7.png", "request_id": 702, "origin_statement": "In Figure 7, the t=250 ms (A) trace peaks at around 0 dB while the t=50 ms (C) trace peaks at about –3 dB, indicating approximately a 3 dB higher maximum SNR for the slower bit time.", "perturbed_statement": "In Figure 7, the t=50 ms (C) trace peaks at around 1 dB while the t=250 ms (A) trace peaks at about –2 dB, indicating approximately a 3 dB higher maximum SNR for the faster bit time.", "perturbed_explanation": "This is incorrect because in the actual figure the t=250 ms (A) trace peaks at around 0 dB and the t=50 ms (C) trace peaks at about –3 dB, so the slower bit time
(A) has the higher peak SNR, not the faster one (C).", "claim": "In Figure 7, the t=50 ms (C) trace peaks at around 1 dB while the t=250 ms (A) trace peaks at about –2 dB, indicating approximately a 3 dB higher maximum SNR for the faster bit time.", "label": false }, { "paperid": "2409.16060v1", "paper_path": "./SciVer/papers/2409.16060v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "5" ], "image_path": "./SciVer/images/2409.16060v1_figure_5.png", "request_id": 704, "origin_statement": "In the SALT spectrum (MJD 59616), the peak flux of the [O III] λ5007 line (~8×10⁻¹⁶ erg cm⁻² s⁻¹ Å⁻¹) is almost double that of the broad H β component (~4.2×10⁻¹⁶ erg cm⁻² s⁻¹ Å⁻¹).", "perturbed_statement": "In the SALT spectrum (MJD 59616), the peak flux of the [O III] λ5007 line (~4×10⁻¹⁶ erg cm⁻² s⁻¹ Å⁻¹) is nearly the same as that of the broad H β component (~4.2×10⁻¹⁶ erg cm⁻² s⁻¹ Å⁻¹), indicating parity.", "perturbed_explanation": "The SALT panel in Fig. 5 shows the [O III] λ5007 peak at about 8×10⁻¹⁶, not 4×10⁻¹⁶, so it is roughly double rather than equal to the broad H β flux.", "claim": "In the SALT spectrum (MJD 59616), the peak flux of the [O III] λ5007 line (~4×10⁻¹⁶ erg cm⁻² s⁻¹ Å⁻¹) is nearly the same as that of the broad H β component (~4.2×10⁻¹⁶ erg cm⁻² s⁻¹ Å⁻¹), indicating parity.", "label": false }, { "paperid": "2410.07242v1", "paper_path": "./SciVer/papers/2410.07242v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "4.2" ], "image_path": "./SciVer/images/2410.07242v1-Table2-1.png", "request_id": 705, "origin_statement": "In Scenario 1 at n_c_max=150, adaptive SPx reduces mean control arm size to 160.3 from 200 for fixed SPx, while its power to detect a 20% effect only drops from 78.9% to 77.4%.", "perturbed_statement": "In Scenario 1 at n_c_max=150, adaptive SPx reduces mean control arm size to 150.3 from 200 for fixed SPx, while its power to detect a 20% effect drops from 78.9% to 75.4%.", "perturbed_explanation": "This statement is incorrect because Table 2 shows the adaptive SPx design’s mean control size is 160.3 (not 150.3) and its power to detect a 20% effect is 77.4% (not 75.4%).", "claim": "In Scenario 1 at n_c_max=150, adaptive SPx reduces mean control arm size to 150.3 from 200 for fixed SPx, while its power to detect a 20% effect drops from 78.9% to 75.4%.", "label": false }, { "paperid": "2411.03304v1", "paper_path": "./SciVer/papers/2411.03304v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4" ], "image_path": "./SciVer/images/2411.03304v1_figure_5.png", "request_id": 710, "origin_statement": "The estimated BFDR for 'age' is about 1.20, roughly 60 times greater than the BFDR of approximately 0.02 for 'lcavol', indicating much higher false discovery risk for age than lcavol.", "perturbed_statement": "The estimated BFDR for 'age' is about 1.50, roughly 75 times greater than the BFDR of approximately 0.02 for 'lcavol', indicating much higher false discovery risk for age than lcavol.", "perturbed_explanation": "This statement is incorrect because the BFDR value for 'age' in Figure 5 is approximately 1.20, not 1.50, making the ratio of its BFDR to that of 'lcavol' (0.02) about 60 times, not 75 times as claimed.", "claim": "The estimated BFDR for 'age' is about 1.50, roughly 75 times greater than the BFDR of approximately 0.02 for 'lcavol', indicating much higher false discovery risk for age than lcavol.", "label": false }, { "paperid": "2410.15632v1", "paper_path": 
"./SciVer/papers/2410.15632v1.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "6" ], "image_path": "./SciVer/images/2410.15632v1_figure_2.png", "request_id": 712, "origin_statement": "The spin-aligned G_↑↑ correlator at k_d=1 first reaches single-state isolation at t_s=18, six time slices earlier than the k_d=0 correlator which isolates at t_s=24.", "perturbed_statement": "The spin-aligned G_↑↑ correlator at k_d=1 first reaches single-state isolation at t_s=20, four time slices earlier than the k_d=0 correlator which isolates at t_s=24.", "perturbed_explanation": "The heatmap shows the first green (single-state isolation) highlight for k_d=1 at t_s=18, not t_s=20, so the stated isolation time and the four-slice difference are incorrect.", "claim": "The spin-aligned G_↑↑ correlator at k_d=1 first reaches single-state isolation at t_s=20, four time slices earlier than the k_d=0 correlator which isolates at t_s=24.", "label": false }, { "paperid": "2409.13671v1", "paper_path": "./SciVer/papers/2409.13671v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.13671v1_figure_5.png", "request_id": 716, "origin_statement": "The Contextual Bandit (CB) method achieves a maximum accuracy of 0.97 at iteration 18, exceeding ε-Greedy's peak accuracy (0.90 at iteration 7) by 0.07.", "perturbed_statement": "The Contextual Bandit (CB) method achieves a maximum accuracy of 0.95 at iteration 18, exceeding ε-Greedy's peak accuracy (0.90 at iteration 7) by 0.05.", "perturbed_explanation": "This is incorrect because the figure’s legend shows CB’s actual maximum accuracy is 0.97 (not 0.95) at iteration 18, so the claimed 0.95 and the 0.05 difference are wrong.", "claim": "The Contextual Bandit (CB) method achieves a maximum accuracy of 0.95 at iteration 18, exceeding ε-Greedy's peak accuracy (0.90 at iteration 7) by 0.05.", "label": false }, { "paperid": "2411.12914v1", "paper_path": "./SciVer/papers/2411.12914v1.json", "claim_type": "direct", "type": "chart", "item": "3(b)", "section": [ "4.1" ], "image_path": "./SciVer/images/2411.12914v1_figure_3(b).png", "request_id": 718, "origin_statement": "At epoch 350, the benign model’s classifier weights NC2 angular metric is about 0.0075, approximately 0.002 lower than the 10% trojaned model’s value of about 0.0095.", "perturbed_statement": "At epoch 350, the 10% trojaned model’s classifier weights NC2 angular metric is about 0.005, roughly 0.002 lower than the benign model’s value of about 0.0075.", "perturbed_explanation": "The classifier weights plot shows the 10% trojaned model’s NC2 angle at epoch 350 is around 0.0095, not 0.005, and this value is higher (not lower) than the benign model’s ~0.0075.", "claim": "At epoch 350, the 10% trojaned model’s classifier weights NC2 angular metric is about 0.005, roughly 0.002 lower than the benign model’s value of about 0.0075.", "label": false }, { "paperid": "2409.13221v2", "paper_path": "./SciVer/papers/2409.13221v2.json", "claim_type": "direct", "type": "chart", "item": "2", "section": [ "2.2" ], "image_path": "./SciVer/images/2409.13221v2_figure_2.png", "request_id": 726, "origin_statement": "Salamander-70B exhibits the largest P99.9 output length at approximately 820 tokens, which is about 280 tokens more than GPT2-xl’s P99.9 length of around 540 tokens.", "perturbed_statement": "Salamander-70B’s P99.9 output length is approximately 900 tokens, roughly 400 tokens more than GPT2-xl’s P99.9 length of about 500 tokens.", 
"perturbed_explanation": "The perturbed statement is incorrect because, according to Figure 2 (left), Salamander-70B’s 99.9th percentile length is around 820 tokens (not 900), and GPT2-xl’s is around 540 tokens (not 500). These specific values contradict the actual P99.9 lengths shown.", "claim": "Salamander-70B’s P99.9 output length is approximately 900 tokens, roughly 400 tokens more than GPT2-xl’s P99.9 length of about 500 tokens.", "label": false }, { "paperid": "2409.07192v1", "paper_path": "./SciVer/papers/2409.07192v1.json", "claim_type": "direct", "type": "chart", "item": "10", "section": [ "4.4" ], "image_path": "./SciVer/images/2409.07192v1_figure_10.png", "request_id": 728, "origin_statement": "New modeling practices (55) are over six times as many as existing modeling practices (9), indicating a strong shift towards model innovations.", "perturbed_statement": "New modeling practices (95) are over six times as many as existing modeling practices (9), indicating a strong shift towards model innovations.", "perturbed_explanation": "The statement is incorrect because the bar chart shows 55 new modeling practices, not 95.", "claim": "New modeling practices (95) are over six times as many as existing modeling practices (9), indicating a strong shift towards model innovations.", "label": false }, { "paperid": "2411.11786v1", "paper_path": "./SciVer/papers/2411.11786v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.11786v1_figure_3.png", "request_id": 730, "origin_statement": "In the two-component mixture (first two panels), the tempered distribution Qᵅ fills the gap between the original two peaks of X, showing nonzero density across roughly -1.3 to 1.3, whereas X has near-zero density between its two modes around ±1.", "perturbed_statement": "In the two-component mixture (first two panels), the tempered distribution Qᵅ shows two distinct sharp peaks at around ±1, matching the original X which is nearly uniform across the interval from -1.3 to 1.3.", "perturbed_explanation": "This is wrong because Qᵅ’s histogram in the second panel does not show two sharp peaks at ±1 but rather a continuous spread with no distinct modes. Conversely, X in the first panel is clearly bimodal with two peaks around -1 and 1, not uniform.", "claim": "In the two-component mixture (first two panels), the tempered distribution Qᵅ shows two distinct sharp peaks at around ±1, matching the original X which is nearly uniform across the interval from -1.3 to 1.3.", "label": false }, { "paperid": "2411.16095v1", "paper_path": "./SciVer/papers/2411.16095v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": [ "4.2" ], "image_path": "./SciVer/images/2411.16095v1-Table1-1.png", "request_id": 731, "origin_statement": "The notation table defines 12 symbols, and exactly three of them (l_i, r_i, m_i) specify the left boundary, right boundary, and cut-off point, comprising 25% of all entries.", "perturbed_statement": "The notation table defines 10 symbols, and exactly four of them (l_i, r_i, m_i, n_i) specify the left boundary, right boundary, and cut-off point, comprising 40% of all entries.", "perturbed_explanation": "This statement is incorrect because the table actually lists 12 notations, not 10. 
Furthermore, only l_i, r_i, and m_i are boundary-related; n_i denotes a node and does not specify any boundary.", "claim": "The notation table defines 10 symbols, and exactly four of them (l_i, r_i, m_i, n_i) specify the left boundary, right boundary, and cut-off point, comprising 40% of all entries.", "label": false }, { "paperid": "2409.12720v1", "paper_path": "./SciVer/papers/2409.12720v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.3" ], "image_path": "./SciVer/images/2409.12720v1_figure_5.png", "request_id": 732, "origin_statement": "ConvNext-V2-P achieves the lowest latency at about 12 ms with AR_BOP of approximately 65%, while ConvNext-V2-T has around 20 ms latency and AR_BOP near 68%, indicating that the latter offers about 3 percentage points higher AR at the cost of ~8 ms additional latency.", "perturbed_statement": "ConvNext-V2-P achieves the lowest latency at about 12 ms with AR_BOP of approximately 68%, while ConvNext-V2-T has around 20 ms latency and AR_BOP near 65%, indicating that the latter offers about 3 percentage points lower AR at the cost of ~8 ms additional latency.", "perturbed_explanation": "The perturbed claim is incorrect because the chart shows ConvNext-V2-P at roughly 65% AR_BOP (not 68%) and ConvNext-V2-T at roughly 68% AR_BOP (not 65%). These inverted AR values contradict the source data.", "claim": "ConvNext-V2-P achieves the lowest latency at about 12 ms with AR_BOP of approximately 68%, while ConvNext-V2-T has around 20 ms latency and AR_BOP near 65%, indicating that the latter offers about 3 percentage points lower AR at the cost of ~8 ms additional latency.", "label": false }, { "paperid": "2410.04465v2", "paper_path": "./SciVer/papers/2410.04465v2.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "7" ], "image_path": "./SciVer/images/2410.04465v2-Table4-1.png", "request_id": 733, "origin_statement": "Among the listed Higgs decays, H→ZZ* exhibits the largest statistical uncertainty at ±9.81%, which is about 17 times greater than the ±0.58% uncertainty for H→bb.", "perturbed_statement": "Among the listed Higgs decays, H→cc exhibits the largest statistical uncertainty at ±9.81%, which is about 17 times greater than the ±0.58% uncertainty for H→bb.", "perturbed_explanation": "The perturbed statement is incorrect because H→cc actually has a statistical uncertainty of ±8.41%, not ±9.81%, and the ±9.81% uncertainty applies to the H→ZZ* decay channel rather than H→cc.", "claim": "Among the listed Higgs decays, H→cc exhibits the largest statistical uncertainty at ±9.81%, which is about 17 times greater than the ±0.58% uncertainty for H→bb.", "label": false }, { "paperid": "2409.12207v1", "paper_path": "./SciVer/papers/2409.12207v1.json", "claim_type": "direct", "type": "chart", "item": "21", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.12207v1_figure_21.png", "request_id": 734, "origin_statement": "When doubling the number of segments from 80 to 160, the co-analysis computation time increases from about 1.7 s to about 3.0 s, a roughly 76% increase.", "perturbed_statement": "When doubling the number of segments from 80 to 160, the co-analysis computation time increases from about 1.7 s to about 2.5 s, a roughly 47% increase.", "perturbed_explanation": "The perturbation is incorrect because the actual computation time at 160 segments, as shown in the figure, is approximately 3.0 s—not 2.5 s—so the stated increase and percentage are underestimated.", "claim": "When doubling the number of segments 
from 80 to 160, the co-analysis computation time increases from about 1.7 s to about 2.5 s, a roughly 47% increase.", "label": false }, { "paperid": "2409.02756v2", "paper_path": "./SciVer/papers/2409.02756v2.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "3" ], "image_path": "./SciVer/images/2409.02756v2_figure_1.png", "request_id": 736, "origin_statement": "The matching parameter Xi_AB at (2,0) is nearly the same (~0.48) for all three lattices, but at (4,2) it increases to approximately 0.73 for 16^4, 0.74 for 24^3×48, and 0.75 for 32^4, indicating convergence as d increases.", "perturbed_statement": "The matching parameter Xi_AB at (2,0) is nearly the same (~0.55) for all three lattices, but at (4,2) it increases to approximately 0.73 for 16^4, 0.74 for 24^3×48, and 0.78 for 32^4, indicating convergence as d increases.", "perturbed_explanation": "The perturbed statement incorrectly reports Xi_AB at (2,0) as ~0.55 and for the 32^4 lattice at (4,2) as 0.78. The figure shows these values are about 0.48 at (2,0) for all lattices and around 0.75 at (4,2) for 32^4, not 0.55 or 0.78.", "claim": "The matching parameter Xi_AB at (2,0) is nearly the same (~0.55) for all three lattices, but at (4,2) it increases to approximately 0.73 for 16^4, 0.74 for 24^3×48, and 0.78 for 32^4, indicating convergence as d increases.", "label": false }, { "paperid": "2409.16316v1", "paper_path": "./SciVer/papers/2409.16316v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "5.1" ], "image_path": "./SciVer/images/2409.16316v1_figure_3.png", "request_id": 738, "origin_statement": "The emulator retrievals have a mean bias error of +10.2 W/m² and a root-mean-square error of 49.1 W/m² compared to SARAH-3 SSI, indicating most retrieval errors lie within ±50 W/m² of the reference.", "perturbed_statement": "The emulator retrievals have a mean bias error of -10.2 W/m² and a root-mean-square error of 49.1 W/m² compared to SARAH-3 SSI, indicating most retrieval errors lie within ±50 W/m² of the reference.", "perturbed_explanation": "The figure explicitly shows an MBE of +10.2 W/m² (not -10.2 W/m²), so stating a negative bias error contradicts the ‘MBE = 10.2 W/m²’ label on the scatter plot.", "claim": "The emulator retrievals have a mean bias error of -10.2 W/m² and a root-mean-square error of 49.1 W/m² compared to SARAH-3 SSI, indicating most retrieval errors lie within ±50 W/m² of the reference.", "label": false }, { "paperid": "2409.18124v4", "paper_path": "./SciVer/papers/2409.18124v4.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4", "5.3" ], "image_path": "./SciVer/images/2409.18124v4-Table3-1.png", "request_id": 739, "origin_statement": "Lotus-G (Ours) achieves an average rank of 1.0, improving over Lotus-D's rank of 1.6, DSINE's 1.8, and other methods whose ranks range from 2.0 to 7.0.", "perturbed_statement": "Lotus-G (Ours) achieves an average rank of 2.0, improving over Lotus-D's rank of 1.6 and DSINE's 1.6, and other methods whose ranks range from 2.0 to 7.0.", "perturbed_explanation": "This is incorrect because Table 3 lists Lotus-G’s average rank as 1.0 (not 2.0) and DSINE’s average rank as 1.8 (not 1.6).", "claim": "Lotus-G (Ours) achieves an average rank of 2.0, improving over Lotus-D's rank of 1.6 and DSINE's 1.6, and other methods whose ranks range from 2.0 to 7.0.", "label": false }, { "paperid": "2410.07728v1", "paper_path": "./SciVer/papers/2410.07728v1.json", "claim_type": "direct", "type": "table", "item": "1", "section": 
[ "3.3", "3.5" ], "image_path": "./SciVer/images/2410.07728v1-Table1-1.png", "request_id": 741, "origin_statement": "Three concepts (perceived autonomy, perceived meaningfulness, and task load) have two questionnaire items each, while task motivation and task satisfaction have one item each.", "perturbed_statement": "Perceived autonomy, perceived meaningfulness, and task satisfaction each have two questionnaire items, whereas task motivation and task load each have one item.", "perturbed_explanation": "The perturbed statement incorrectly claims that task satisfaction has two items and task load has one. According to the table, task satisfaction is measured with one item and task load is measured with two items.", "claim": "Perceived autonomy, perceived meaningfulness, and task satisfaction each have two questionnaire items, whereas task motivation and task load each have one item.", "label": false }, { "paperid": "2411.18239v1", "paper_path": "./SciVer/papers/2411.18239v1.json", "claim_type": "direct", "type": "table", "item": "2", "section": [ "3.1" ], "image_path": "./SciVer/images/2411.18239v1-Table2-1.png", "request_id": 749, "origin_statement": "The fittest individual has a fitness of 0.493 after 6 evaluations, which is 0.022 higher than the second best fitness of 0.471 obtained with 7 evaluations.", "perturbed_statement": "The fittest individual has a fitness of 0.493 after 6 evaluations, which is 0.032 higher than the second best fitness of 0.471 obtained with 7 evaluations.", "perturbed_explanation": "The perturbation is incorrect because the actual difference between 0.493 and 0.471 is 0.022, not 0.032, as shown in Table 2.", "claim": "The fittest individual has a fitness of 0.493 after 6 evaluations, which is 0.032 higher than the second best fitness of 0.471 obtained with 7 evaluations.", "label": false }, { "paperid": "2410.10723v1", "paper_path": "./SciVer/papers/2410.10723v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.4" ], "image_path": "./SciVer/images/2410.10723v1_figure_4.png", "request_id": 752, "origin_statement": "The log-normal distribution's median AIC is roughly 1250, which is over 100 points lower than the median AIC of both the Weibull (~1430) and piecewise exponential (~1380) distributions.", "perturbed_statement": "The log-logistic distribution's median AIC is roughly 1250, which is over 100 points lower than the median AIC of both the Weibull (~1430) and piecewise exponential (~1380) distributions.", "perturbed_explanation": "This is incorrect because the plot shows the log-logistic distribution’s median AIC is around 1300, not 1250, whereas the log-normal distribution actually has the lower median AIC of approximately 1250.", "claim": "The log-logistic distribution's median AIC is roughly 1250, which is over 100 points lower than the median AIC of both the Weibull (~1430) and piecewise exponential (~1380) distributions.", "label": false }, { "paperid": "2411.07529v1", "paper_path": "./SciVer/papers/2411.07529v1.json", "claim_type": "direct", "type": "table", "item": "4", "section": [ "5" ], "image_path": "./SciVer/images/2411.07529v1-Table4-1.png", "request_id": 753, "origin_statement": "ChatGPT produced functional solutions in Python and Java but generated zero solutions in Elixir, Erlang, and Racket, resulting in a 100% failure rate in those three languages versus successful completions in the former two.", "perturbed_statement": "ChatGPT produced functional solutions in Python and C++ but generated zero solutions in Elixir, 
Erlang, and Racket, resulting in a 100% failure rate in those three languages versus successful completions in the former two.", "perturbed_explanation": "The perturbed statement is incorrect because the table reports stronger performance in Java (not C++). ChatGPT actually generated solutions in Java, whereas it faced more challenges in C++, so claiming functional solutions in C++ contradicts the table’s comparison.", "claim": "ChatGPT produced functional solutions in Python and C++ but generated zero solutions in Elixir, Erlang, and Racket, resulting in a 100% failure rate in those three languages versus successful completions in the former two.", "label": false }, { "paperid": "2411.01747v1", "paper_path": "./SciVer/papers/2411.01747v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.3.2" ], "image_path": "./SciVer/images/2411.01747v1_figure_3.png", "request_id": 754, "origin_statement": "At Level 2, approximately 85% of agent A’s failures were due to insufficient action implementation, compared to roughly 60% at Level 3.", "perturbed_statement": "At Level 2, approximately 60% of agent A’s failures were due to insufficient action implementation, compared to roughly 90% at Level 3.", "perturbed_explanation": "The perturbed claim is incorrect because the chart shows that at Level 2 the blue segment spans about 85% (not 60%) and at Level 3 the blue segment spans about 60% (not 90%).", "claim": "At Level 2, approximately 60% of agent A’s failures were due to insufficient action implementation, compared to roughly 90% at Level 3.", "label": false }, { "paperid": "2409.17504v1", "paper_path": "./SciVer/papers/2409.17504v1.json", "claim_type": "direct", "type": "chart", "item": "3", "section": [ "4.3", "4.4", "5" ], "image_path": "./SciVer/images/2409.17504v1_figure_3.png", "request_id": 756, "origin_statement": "As the subspace dimension k increases from 1 to 6, AUROC on TruthfulQA improves from about 64.4% to 78.8%, rising by over 14 points, then declines to roughly 72% for k values 7–10.", "perturbed_statement": "As k increases from 1 to 6, AUROC on TruthfulQA improves from about 64.4% to 75.2%, rising by around 10.8 points, then declines to roughly 68% for k values 7–10.", "perturbed_explanation": "The bar chart shows that at k=6 the AUROC is actually 78.8% (not 75.2%), and for k values 7–10 the AUROC remains around 72% (not 68%). 
These discrepancies make the perturbed statement incorrect.", "claim": "As k increases from 1 to 6, AUROC on TruthfulQA improves from about 64.4% to 75.2%, rising by around 10.8 points, then declines to roughly 68% for k values 7–10.", "label": false }, { "paperid": "2409.06337v1", "paper_path": "./SciVer/papers/2409.06337v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3" ], "image_path": "./SciVer/images/2409.06337v1_figure_4.png", "request_id": 760, "origin_statement": "In the low-temperature C/T vs T² plot (10–15 K), the linear fit intercepts the y-axis at a Sommerfeld coefficient γ ≈ 11.64 mJ·K⁻²·mol⁻¹.", "perturbed_statement": "In the low-temperature C/T vs T² plot (10–15 K), the linear fit intercepts the y-axis at a Sommerfeld coefficient γ ≈ 15.00 mJ·K⁻²·mol⁻¹.", "perturbed_explanation": "The perturbation is incorrect because the actual linear fit intercept in the C/T vs T² data is 11.64 mJ·K⁻²·mol⁻¹, not 15.00 mJ·K⁻²·mol⁻¹, as shown by the fitted red dashed line in Figure 4(b).", "claim": "In the low-temperature C/T vs T² plot (10–15 K), the linear fit intercepts the y-axis at a Sommerfeld coefficient γ ≈ 15.00 mJ·K⁻²·mol⁻¹.", "label": false }, { "paperid": "2409.01435v1", "paper_path": "./SciVer/papers/2409.01435v1.json", "claim_type": "direct", "type": "chart", "item": "1", "section": [ "5" ], "image_path": "./SciVer/images/2409.01435v1_figure_1.png", "request_id": 766, "origin_statement": "Under a 25% ByzMean attack on the Shakespeare dataset, LASA’s FPR remains near 0% across 50 rounds, while SignGuard’s FPR averages around 4%, meaning LASA misclassifies about 4% fewer benign updates.", "perturbed_statement": "Under a 25% ByzMean attack on the Shakespeare dataset, LASA’s FPR averages around 4% across 50 rounds, while SignGuard’s FPR remains near 0%, meaning LASA misclassifies about 4% more benign updates.", "perturbed_explanation": "The perturbed claim is incorrect because the figure shows LASA’s FPR is consistently near 0%, not around 4%, and SignGuard’s FPR hovers around 4%, not near 0%, exactly contradicting the swapped rates.", "claim": "Under a 25% ByzMean attack on the Shakespeare dataset, LASA’s FPR averages around 4% across 50 rounds, while SignGuard’s FPR remains near 0%, meaning LASA misclassifies about 4% more benign updates.", "label": false }, { "paperid": "2409.17424v1", "paper_path": "./SciVer/papers/2409.17424v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4.2" ], "image_path": "./SciVer/images/2409.17424v1-Table3-1.png", "request_id": 767, "origin_statement": "MysteryANN achieved 22492 QPS, which is 196 QPS higher than PyANNS’s 22296, representing roughly a 0.9% performance improvement.", "perturbed_statement": "MysteryANN achieved 250 QPS higher than PyANNS’s 22296, representing roughly a 1.1% performance improvement.", "perturbed_explanation": "The perturbed statement is incorrect because MysteryANN’s actual lead over PyANNS is 22492 − 22296 = 196 QPS, not 250 QPS, and the percentage improvement is about 0.88%, not 1.1%.", "claim": "MysteryANN achieved 250 QPS higher than PyANNS’s 22296, representing roughly a 1.1% performance improvement.", "label": false }, { "paperid": "2409.07335v1", "paper_path": "./SciVer/papers/2409.07335v1.json", "claim_type": "direct", "type": "table", "item": "3", "section": [ "4", "4.3.1" ], "image_path": "./SciVer/images/2409.07335v1-Table3-1.png", "request_id": 775, "origin_statement": "Bootstrapping's PGR p-value (0.002) is five times lower than Auxiliary Confidence's 
PGR p-value (0.01).", "perturbed_statement": "Bootstrapping's PGR p-value (0.005) is five times lower than Auxiliary Confidence's PGR p-value (0.01).", "perturbed_explanation": "The table lists Bootstrapping’s PGR p-value as 0.002, not 0.005, so the perturbed statement’s numerical detail contradicts the actual value in the provided data.", "claim": "Bootstrapping's PGR p-value (0.005) is five times lower than Auxiliary Confidence's PGR p-value (0.01).", "label": false }, { "paperid": "2409.07038v1", "paper_path": "./SciVer/papers/2409.07038v1.json", "claim_type": "direct", "type": "chart", "item": "4", "section": [ "3.2" ], "image_path": "./SciVer/images/2409.07038v1_figure_4.png", "request_id": 776, "origin_statement": "In panel (b) of Fig. 4, at a neutron star mass of 1.4 M⊙, the DDVT EOS yields a tidal deformability of about 150, roughly half the Lambda of the DD-LZ1 EOS (~300).", "perturbed_statement": "In panel (b) of Fig. 4, at a neutron star mass of 1.4 M⊙, the DDVT EOS yields a tidal deformability of about 260, roughly half the Lambda of the DD-LZ1 EOS (~300).", "perturbed_explanation": "The DDVT curve in panel (b) actually passes through a Lambda of around 150 at M=1.4 M⊙, not 260; the purple DDVT line lies well below the 10^2.4 mark on the tidal deformability axis, so 260 is inconsistent with the plotted data.", "claim": "In panel (b) of Fig. 4, at a neutron star mass of 1.4 M⊙, the DDVT EOS yields a tidal deformability of about 260, roughly half the Lambda of the DD-LZ1 EOS (~300).", "label": false }, { "paperid": "2411.08788v1", "paper_path": "./SciVer/papers/2411.08788v1.json", "claim_type": "direct", "type": "chart", "item": "5", "section": [ "4.4.1", "4.4.2" ], "image_path": "./SciVer/images/2411.08788v1_figure_5.png", "request_id": 778, "origin_statement": "At 60 cm downstream, the imposed head boundary condition shows a local minimum of approximately −0.02 cm, while the optimized boundary condition remains close to 0 cm, indicating a 0.02 cm underestimation of the imposed trough by the optimization.", "perturbed_statement": "At 60 cm downstream, the imposed head boundary condition shows a local maximum of approximately +0.02 cm, while the optimized boundary condition falls to around −0.02 cm, indicating a 0.04 cm inversion by the optimization.", "perturbed_explanation": "This statement is incorrect because the figure shows that at 60 cm downstream the imposed head (orange line) exhibits a local minimum of about −0.02 cm, not a maximum of +0.02 cm, and the optimized head (blue line) is close to 0 cm, not −0.02 cm as claimed.", "claim": "At 60 cm downstream, the imposed head boundary condition shows a local maximum of approximately +0.02 cm, while the optimized boundary condition falls to around −0.02 cm, indicating a 0.04 cm inversion by the optimization.", "label": false }, { "paperid": "2411.15453v1", "paper_path": "./SciVer/papers/2411.15453v1.json", "claim_type": "direct", "type": "chart", "item": "2(c)", "section": [ "3" ], "image_path": "./SciVer/images/2411.15453v1_figure_2(c).png", "request_id": 780, "origin_statement": "At a 1/4 spatial downsampling ratio, LLaVA-1.5 13B achieves about 58.2% ACC, exceeding the 7B model’s roughly 56.7% ACC by approximately 1.5 percentage points.", "perturbed_statement": "At a 1/4 spatial downsampling ratio, LLaVA-1.5 13B achieves about 58.2% ACC, exceeding the 7B model’s roughly 53.7% ACC by approximately 4.5 percentage points.", "perturbed_explanation": "This statement is incorrect because, according to the figure, the 7B model’s 
ACC at the 1/4 downsampling ratio is actually about 56.7%, not 53.7%, and the true difference is roughly 1.5 percentage points, not 4.5.", "claim": "At a 1/4 spatial downsampling ratio, LLaVA-1.5 13B achieves about 58.2% ACC, exceeding the 7B model’s roughly 53.7% ACC by approximately 4.5 percentage points.", "label": false }, { "paperid": "2411.12858v2", "paper_path": "./SciVer/papers/2411.12858v2.json", "claim_type": "direct", "type": "chart", "item": "6", "section": [ "5.1" ], "image_path": "./SciVer/images/2411.12858v2_figure_6.png", "request_id": 786, "origin_statement": "For the model after 1kk training steps, approximately 120 samples in P are needed to reduce the p-value below 0.05, whereas the model at 100k steps requires around 2000 samples to reach the same significance threshold.", "perturbed_statement": "For the model after 100k training steps, only about 500 samples in P are needed to reduce the p-value below 0.05, whereas the model at 1kk steps requires around 5000 samples to reach the same significance threshold.", "perturbed_explanation": "In the figure, the 100k model’s curve does not fall below the 0.05 line at 500 samples but around 2000 samples. Conversely, the 1kk model’s curve crosses the 0.05 threshold at about 120 samples, not 5000. Thus the stated sample counts contradict the plotted intersection points.", "claim": "For the model after 100k training steps, only about 500 samples in P are needed to reduce the p-value below 0.05, whereas the model at 1kk steps requires around 5000 samples to reach the same significance threshold.", "label": false }, { "paperid": "2411.14588v1", "paper_path": "./SciVer/papers/2411.14588v1.json", "claim_type": "direct", "type": "table", "item": "5", "section": [ "3.3.2" ], "image_path": "./SciVer/images/2411.14588v1-Table5-1.png", "request_id": 787, "origin_statement": "Under neutral selection and identical switching probabilities in the U-B approximation (s_y=s_r=1 and p_y=p_r=p), the total weighted first moment remains constant over time, equal to its initial value M^(1)(0).", "perturbed_statement": "Under neutral selection and identical switching probabilities in the U-B approximation, the total weighted first moment decays over time as M^(1)(t)=M^(1)(0)e^{-2pt}.", "perturbed_explanation": "The table’s “Total” row shows M^(1)(t)=M^(1)(0) with no exponential factor, so it does not decay as e^{-2pt}. 
The perturbation introduces an exponential term not present in the source.", "claim": "Under neutral selection and identical switching probabilities in the U-B approximation, the total weighted first moment decays over time as M^(1)(t)=M^(1)(0)e^{-2pt}.", "label": false }, { "paperid": "2411.07200v1", "paper_path": "./SciVer/papers/2411.07200v1.json", "claim_type": "parallel", "item1": "3", "item2": "5(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07200v1_figure_3.png", "item2_path": "./SciVer/images/2411.07200v1_figure_5(a).png", "section": [ "4.2" ], "request_id": 69, "origin_statement": "Grid-World cluster 2 ‘Falling into lava’ exhibits this behavior in over 90% of trajectories, while in Seaquest cluster 2 the submarine fills its oxygen tank on average 0.30 times, about twice Seaquest cluster 1’s 0.25.", "perturbed_statement": "Grid-World cluster 2 ‘Falling into lava’ exhibits this behavior in over 90% of trajectories, while in Seaquest cluster 2 the submarine fills its oxygen tank on average 0.20 times, about twice Seaquest cluster 1’s 0.25.", "perturbed_explanation": "The Seaquest chart shows cluster 2 averaging about 0.30 oxygen fills per episode, not 0.20 as stated, so the perturbed fill rate contradicts the displayed bar height.", "claim": "Grid-World cluster 2 ‘Falling into lava’ exhibits this behavior in over 90% of trajectories, while in Seaquest cluster 2 the submarine fills its oxygen tank on average 0.30 times, about twice Seaquest cluster 1’s 0.25.", "label": true }, { "paperid": "2410.02052v3", "paper_path": "./SciVer/papers/2410.02052v3.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02052v3_figure_1(a).png", "item2_path": "./SciVer/images/2410.02052v3_figure_1(b).png", "section": [ "4.3" ], "request_id": 105, "origin_statement": "At 10× tokens, R-MCTS achieves a 43.6% success rate on 234 tasks, over triple the 12.6% success rate Exploratory Learning achieves with 10 actions on 169 unseen tasks.", "perturbed_statement": "At 10× tokens, R-MCTS achieves a 43.6% success rate on 234 tasks, over triple the 14.0% success rate Exploratory Learning achieves with 10 actions on 169 unseen tasks.", "perturbed_explanation": "The perturbed statement wrongly claims Exploratory Learning hits 14.0% success at 10 actions. 
In Figure 1b, its actual success rate at 10 actions is 12.6%, not 14.0%.", "claim": "At 10× tokens, R-MCTS achieves a 43.6% success rate on 234 tasks, over triple the 12.6% success rate Exploratory Learning achieves with 10 actions on 169 unseen tasks.", "label": true }, { "paperid": "2411.00429v1", "paper_path": "./SciVer/papers/2411.00429v1.json", "claim_type": "parallel", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00429v1_figure_2.png", "item2_path": "./SciVer/images/2411.00429v1-Table1-1.png", "section": [ "7.2" ], "request_id": 160, "origin_statement": "In Figure 2, the Hennig-Liao variant has an alienation coefficient of approximately 0.25 when omitting the 9-category variable, which is nearly three times the 0.09 range scaling reported for a sample size of 500 under a skewed distribution in Table 1.", "perturbed_statement": "In Figure 2, the Hennig-Liao variant has an alienation coefficient of approximately 0.25 when omitting the 9-category variable, which is nearly three times the 0.19 range scaling reported for a sample size of 500 under a skewed distribution in Table 1.", "perturbed_explanation": "Table 1 states that for a skewed distribution with sample size 500, the range scaling is 0.09, not 0.19. The perturbed statement misreports the table value, making it inconsistent with the actual data.", "claim": "In Figure 2, the Hennig-Liao variant has an alienation coefficient of approximately 0.25 when omitting the 9-category variable, which is nearly three times the 0.09 range scaling reported for a sample size of 500 under a skewed distribution in Table 1.", "label": true }, { "paperid": "2411.03025v1", "paper_path": "./SciVer/papers/2411.03025v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03025v1_figure_5.png", "item2_path": "./SciVer/images/2411.03025v1-Table6-1.png", "section": [ "5.8" ], "request_id": 163, "origin_statement": "At λ=1×10^-3, PROTEINS accuracy peaks at 80.2% (Fig. 5), matching the sparse experts’ 80.27% in Table 6 and outperforming the dense experts’ 77.65% by about 2.62%.", "perturbed_statement": "At λ=1×10^-3, PROTEINS accuracy peaks at 80.2% (Fig. 5), matching the sparse experts’ 81.27% in Table 6 and outperforming the dense experts’ 77.65% by about 2.62%.", "perturbed_explanation": "The perturbed statement claims sparse experts achieve 81.27% on PROTEINS, but Table 6 shows they actually achieve 80.27%, so the 81.27% figure contradicts the table.", "claim": "At λ=1×10^-3, PROTEINS accuracy peaks at 80.2% (Fig. 
5), matching the sparse experts’ 80.27% in Table 6 and outperforming the dense experts’ 77.65% by about 2.62%.", "label": true }, { "paperid": "2410.16092v1", "paper_path": "./SciVer/papers/2410.16092v1.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.16092v1_figure_5.png", "item2_path": "./SciVer/images/2410.16092v1-Table3-1.png", "section": [ "3.2.1" ], "request_id": 257, "origin_statement": "ChangeGuard produces 46 inconclusive predictions out of 224 code changes (≈21%), and these inconclusive cases occur in functions with a median cyclomatic complexity of about 9, versus a median of 5 for changes that are successfully reached.", "perturbed_statement": "ChangeGuard produces 56 inconclusive predictions out of 224 code changes (≈25%), and these inconclusive cases occur in functions with a median cyclomatic complexity of about 7, versus a median of 5 for changes that are successfully reached.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 reports only 46 inconclusive predictions, not 56, and Figure 5 shows the median cyclomatic complexity for unreached (inconclusive) functions is about 9, not 7.", "claim": "ChangeGuard produces 46 inconclusive predictions out of 224 code changes (≈21%), and these inconclusive cases occur in functions with a median cyclomatic complexity of about 9, versus a median of 5 for changes that are successfully reached.", "label": true }, { "paperid": "2409.14085v1", "paper_path": "./SciVer/papers/2409.14085v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14085v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.14085v1-Table4-1.png", "section": [ "3.2" ], "request_id": 275, "origin_statement": "On the open set, FunCodec achieves a WER of approximately 3.1% at 8 kbps, outperforming Encodec’s 3.36% at the same bitrate, while on the hidden set, the best ASR result is 5.39% at 7.5 kbps by D3, indicating nearly double the error rate under hidden conditions.", "perturbed_statement": "On the open set, FunCodec achieves a WER of approximately 3.6% at 8 kbps, outperforming Encodec’s 3.36% at the same bitrate, while on the hidden set, the best ASR result is 6.39% at 7.5 kbps by D3, indicating nearly double the error rate under hidden conditions.", "perturbed_explanation": "The WER values have been altered incorrectly. 
The plot shows FunCodec at about 3.1% WER (not 3.6%) at 8 kbps, and Table 4 reports D3’s WER as 5.39% (not 6.39%) at 7.5 kbps.", "claim": "On the open set, FunCodec achieves a WER of approximately 3.1% at 8 kbps, outperforming Encodec’s 3.36% at the same bitrate, while on the hidden set, the best ASR result is 5.39% at 7.5 kbps by D3, indicating nearly double the error rate under hidden conditions.", "label": true }, { "paperid": "2409.14381v1", "paper_path": "./SciVer/papers/2409.14381v1.json", "claim_type": "parallel", "item1": "2", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14381v1_figure_2.png", "item2_path": "./SciVer/images/2409.14381v1_figure_6.png", "section": [ "6.2" ], "request_id": 291, "origin_statement": "For Llama3-70B on BoolQ, the top FFN layer (layer 3) accounts for 19.9% of total Shapley value, and this layer also exhibits the highest individual Shapley value peak of approximately 0.67 in the corresponding layer-wise plot.", "perturbed_statement": "For Llama3-70B on BoolQ, the top FFN layer (layer 3) accounts for 19.9% of total Shapley value, and this layer also exhibits the highest individual Shapley value peak of approximately 0.45 in the corresponding layer-wise plot.", "perturbed_explanation": "The perturbed statement is incorrect because the layer-wise bar plot for Llama3-70B on BoolQ shows a peak Shapley value near 0.67 for layer 3, not 0.45, contradicting the actual plotted data.", "claim": "For Llama3-70B on BoolQ, the top FFN layer (layer 3) accounts for 19.9% of total Shapley value, and this layer also exhibits the highest individual Shapley value peak of approximately 0.67 in the corresponding layer-wise plot.", "label": true }, { "paperid": "2410.17196v2", "paper_path": "./SciVer/papers/2410.17196v2.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17196v2_figure_2.png", "item2_path": "./SciVer/images/2410.17196v2_figure_3.png", "section": [ "4.2" ], "request_id": 305, "origin_statement": "Mini-Omni’s response score declines to approximately 1.1 at 2.5× speaking speed, mirroring its poor 14% accuracy on US accents, highlighting its vulnerability to extreme speed and common accents.", "perturbed_statement": "Mini-Omni’s response score declines to approximately 2.1 at 2.5× speaking speed, mirroring its poor 14% accuracy on US accents, highlighting its vulnerability to extreme speed and common accents.", "perturbed_explanation": "The perturbed statement incorrectly reports Mini-Omni’s response score at 2.5× speed as 2.1. In Fig. 
2, the actual response score for Mini-Omni at 2.5× speaking speed is about 1.1, not 2.1.", "claim": "Mini-Omni’s response score declines to approximately 1.1 at 2.5× speaking speed, mirroring its poor 14% accuracy on US accents, highlighting its vulnerability to extreme speed and common accents.", "label": true }, { "paperid": "2410.14059v2", "paper_path": "./SciVer/papers/2410.14059v2.json", "claim_type": "parallel", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.14059v2_figure_6.png", "item2_path": "./SciVer/images/2410.14059v2-Table2-1.png", "section": [ "4.3" ], "request_id": 321, "origin_statement": "CFGPT2-7B exhibits the highest average dialogue rounds at approximately 3.6, and the UCFE benchmark comprises 13 few-shot tasks sourced from analyst and regulatory reports.", "perturbed_statement": "GPT-4o-mini exhibits the highest average dialogue rounds at approximately 4.2, and the UCFE benchmark comprises 15 few-shot tasks sourced from analyst and regulatory reports.", "perturbed_explanation": "Figure 6 shows CFGPT2-7B—not GPT-4o-mini—has the highest average dialogue rounds at about 3.6, while GPT-4o-mini only reaches around 3.3 rounds. Table 2 lists exactly 13 few-shot tasks, not 15, so both the model ranking and task count are incorrect.", "claim": "CFGPT2-7B exhibits the highest average dialogue rounds at approximately 3.6, and the UCFE benchmark comprises 13 few-shot tasks sourced from analyst and regulatory reports.", "label": true }, { "paperid": "2409.19942v2", "paper_path": "./SciVer/papers/2409.19942v2.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19942v2_figure_3.png", "item2_path": "./SciVer/images/2409.19942v2_figure_4.png", "section": [ "3.6" ], "request_id": 342, "origin_statement": "1,160 video clips last between 2 and 4 seconds, and the maximum collision frequency of 152 occurs when cyclists move left and objects forward.", "perturbed_statement": "1,160 video clips last between 4 and 6 seconds, and the maximum collision frequency of 152 occurs when cyclists move left and objects forward.", "perturbed_explanation": "The perturbation mislabels the duration bin: the 1,160 clips actually fall in the 2–4 sec range, not 4–6 sec (the 4–6 sec bin contains around 760 clips). 
Thus stating they last 4–6 sec contradicts the histogram.", "claim": "1,160 video clips last between 2 and 4 seconds, and the maximum collision frequency of 152 occurs when cyclists move left and objects forward.", "label": true }, { "paperid": "2409.04766v1", "paper_path": "./SciVer/papers/2409.04766v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04766v1_figure_5.png", "item2_path": "./SciVer/images/2409.04766v1_figure_6.png", "section": [ "4.7" ], "request_id": 356, "origin_statement": "EyeDiap exhibits the highest epistemic uncertainty (3.29×10^-4), 77% greater than MPIIFaceGaze (1.52×10^-4), whereas in ETH-Gaze, subject 27 shows the highest aleatoric uncertainty among all subjects.", "perturbed_statement": "Gaze360 exhibits the highest epistemic uncertainty (3.29×10^-4), 77% greater than MPIIFaceGaze (1.52×10^-4), whereas in ETH-Gaze, subject 60 shows the highest aleatoric uncertainty among all subjects.", "perturbed_explanation": "This is incorrect because Figure 5 shows EyeDiap—not Gaze360—has the highest epistemic uncertainty at 3.29×10⁻⁴ (Gaze360 is 2.87×10⁻⁴). Additionally, Figure 6 indicates subject 27, not subject 60, has the maximum aleatoric uncertainty in ETH-Gaze.", "claim": "EyeDiap exhibits the highest epistemic uncertainty (3.29×10^-4), 77% greater than MPIIFaceGaze (1.52×10^-4), whereas in ETH-Gaze, subject 27 shows the highest aleatoric uncertainty among all subjects.", "label": true }, { "paperid": "2411.00154v1", "paper_path": "./SciVer/papers/2411.00154v1.json", "claim_type": "parallel", "item1": "3", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00154v1_figure_3.png", "item2_path": "./SciVer/images/2411.00154v1_figure_7.png", "section": [ "5.1" ], "request_id": 432, "origin_statement": "For a 500-document arXiv collection, aggregating MIA scores with the 6.9B model yields an AUROC of about 0.93, yet Figure 7 shows that even when the known partition size varies from 10 to 1000, the collection-level MIA AUROC remains near 0.95 for that collection size.", "perturbed_statement": "For a 500-document arXiv collection, aggregating MIA scores with the 6.9B model yields an AUROC of about 0.80, and collection-level MIA performance increases from 0.80 to 0.90 as the known partition size grows from 10 to 1000.", "perturbed_explanation": "This is incorrect because Figure 3 shows the 6.9B model actually achieves around 0.93 AUROC at 500 documents (not 0.80), and Figure 7 demonstrates that collection-level MIA AUROC stays roughly constant near 0.95 across known partition sizes, rather than rising from 0.80 to 0.90.", "claim": "For a 500-document arXiv collection, aggregating MIA scores with the 6.9B model yields an AUROC of about 0.93, yet Figure 7 shows that even when the known partition size varies from 10 to 1000, the collection-level MIA AUROC remains near 0.95 for that collection size.", "label": true }, { "paperid": "2409.03054v1", "paper_path": "./SciVer/papers/2409.03054v1.json", "claim_type": "parallel", "item1": "7", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03054v1_figure_7.png", "item2_path": "./SciVer/images/2409.03054v1_figure_9.png", "section": [ "6.2" ], "request_id": 434, "origin_statement": "Context-aware descriptions achieved a mean quality rating of approximately 4.3 versus 3.2 for context-free (a 1.1-point increase) and were selected in 16 of 24 Task 2 images 
(67%), compared to just 5 context-free picks.", "perturbed_statement": "Context-aware descriptions achieved a mean quality rating of approximately 4.3 versus 3.2 for context-free (a 1.1-point increase) and were selected in 18 of 24 Task 2 images (75%), compared to just 6 context-free picks.", "perturbed_explanation": "The perturbed statement incorrectly reports that context-aware descriptions were chosen in 18 of 24 images with 6 context-free picks. Figure 9 shows they were actually selected in 16 of 24 images (67%) with only 5 context-free selections.", "claim": "Context-aware descriptions achieved a mean quality rating of approximately 4.3 versus 3.2 for context-free (a 1.1-point increase) and were selected in 16 of 24 Task 2 images (67%), compared to just 5 context-free picks.", "label": true }, { "paperid": "2410.05782v1", "paper_path": "./SciVer/papers/2410.05782v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.05782v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.05782v1_figure_3(c).png", "section": [ "5.3" ], "request_id": 440, "origin_statement": "Raising highway noise from 1% to 50% increases ICoPro’s crash rate at 150×10k steps from ~10% to ~30%, while in Boxing, using rank-4 DiffLabeler cuts its average raw reward at 100 iterations to ~20 from ~65 under default labelling.", "perturbed_statement": "Raising highway noise from 1% to 50% increases ICoPro’s crash rate at 150×10k steps from ~10% to ~50%, while in Boxing, using rank-4 DiffLabeler cuts its average raw reward at 100 iterations to ~20 from ~65 under default labelling.", "perturbed_explanation": "The crash rate under 50% random corrective actions on the highway converges to about 30%, not 50%, as shown by the yellow curve in the %Crash plot of Figure 3(a).", "claim": "Raising highway noise from 1% to 50% increases ICoPro’s crash rate at 150×10k steps from ~10% to ~30%, while in Boxing, using rank-4 DiffLabeler cuts its average raw reward at 100 iterations to ~20 from ~65 under default labelling.", "label": true }, { "paperid": "2411.14164v1", "paper_path": "./SciVer/papers/2411.14164v1.json", "claim_type": "parallel", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.14164v1_figure_5.png", "item2_path": "./SciVer/images/2411.14164v1-Table1-1.png", "section": [ "5.2" ], "request_id": 449, "origin_statement": "At 25% token retention, LLaVA-1.6-13B retains 87.78% POPE accuracy (Figure 5) with only a 0.06% drop from full retention, while achieving a 2.52× TTFT inference speedup (Table 1).", "perturbed_statement": "At 25% token retention, LLaVA-1.6-13B retains 87.78% POPE accuracy (Figure 5) with only a 0.06% drop from full retention, while achieving a 1.52× TTFT inference speedup (Table 1).", "perturbed_explanation": "The perturbed statement misreports the TTFT speedup: Table 1 shows a 2.52× speedup at 25% retention for LLaVA-1.6-13B, not 1.52×.", "claim": "At 25% token retention, LLaVA-1.6-13B retains 87.78% POPE accuracy (Figure 5) with only a 0.06% drop from full retention, while achieving a 2.52× TTFT inference speedup (Table 1).", "label": true }, { "paperid": "2409.10756v1", "paper_path": "./SciVer/papers/2409.10756v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10756v1_figure_5.png", "item2_path": "./SciVer/images/2409.10756v1_figure_6.png", "section": [ 
"4.3.2" ], "request_id": 475, "origin_statement": "Mixtral-8*7b’s average LLM-generated patch length is 14 lines compared to 91 lines for original patches, yet its SVD6 accuracy peaks at 0.86 for 100-line vulnerable blocks.", "perturbed_statement": "Mixtral-8*7b’s average LLM-generated patch length is 38 lines compared to 91 lines for original patches, yet its SVD6 accuracy peaks at 0.86 for 100-line vulnerable blocks.", "perturbed_explanation": "The perturbed statement incorrectly reports the average LLM-generated patch length as 38 lines. Figure 6 shows that Mixtral-8*7b’s LLM-generated patches average only 14 lines, while 38 lines refers to the few-shot variant, not the base LLM output.", "claim": "Mixtral-8*7b’s average LLM-generated patch length is 14 lines compared to 91 lines for original patches, yet its SVD6 accuracy peaks at 0.86 for 100-line vulnerable blocks.", "label": true }, { "paperid": "2410.08642v1", "paper_path": "./SciVer/papers/2410.08642v1.json", "claim_type": "parallel", "item1": "10", "item2": "11(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.08642v1_figure_10.png", "item2_path": "./SciVer/images/2410.08642v1_figure_11(b).png", "section": [ "5.3" ], "request_id": 518, "origin_statement": "Figure 10 shows Gaza’s size tripled in 2020, while Figure 11 features a German-translated meme accusing two groups—Hamas and IS—of sharing identical founders, juxtaposing quantified territorial change with dual conspiratorial narratives in multimodal discourse.", "perturbed_statement": "Figure 10 shows Gaza’s size doubled in 2020, while Figure 11 features a German-translated meme accusing two groups—Hamas and IS—of sharing identical founders, juxtaposing quantified territorial change with dual conspiratorial narratives in multimodal discourse.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 10’s caption explicitly notes that Gaza’s size tripled in 2020, not doubled.", "claim": "Figure 10 shows Gaza’s size tripled in 2020, while Figure 11 features a German-translated meme accusing two groups—Hamas and IS—of sharing identical founders, juxtaposing quantified territorial change with dual conspiratorial narratives in multimodal discourse.", "label": true }, { "paperid": "2410.09247v1", "paper_path": "./SciVer/papers/2410.09247v1.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.09247v1_figure_5.png", "item2_path": "./SciVer/images/2410.09247v1-Table3-1.png", "section": [ "3.2" ], "request_id": 568, "origin_statement": "Mistral-7b-v0.3's benchmark inflation of approximately 28.8 pp in Figure 5 is over 3.4 times larger than the 8.5 pp inflation reported for Mistral 7B in Table 3.", "perturbed_statement": "Mistral-7b-v0.3's benchmark inflation of approximately 18.8 pp in Figure 5 is over 2.2 times larger than the 8.5 pp inflation reported for Mistral 7B in Table 3.", "perturbed_explanation": "Figure 5 actually shows Mistral-7b-v0.3's benchmark inflation at around 28.8 pp, not 18.8 pp, so the perturbed inflation value and resulting ratio contradict the visual data.", "claim": "Mistral-7b-v0.3's benchmark inflation of approximately 28.8 pp in Figure 5 is over 3.4 times larger than the 8.5 pp inflation reported for Mistral 7B in Table 3.", "label": true }, { "paperid": "2410.19093v1", "paper_path": "./SciVer/papers/2410.19093v1.json", "claim_type": "parallel", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": 
"chart", "item1_path": "./SciVer/images/2410.19093v1_figure_8.png", "item2_path": "./SciVer/images/2410.19093v1_figure_9.png", "section": [ "4.2.6" ], "request_id": 570, "origin_statement": "Figure 8 shows the second harmonic (2f1) peak at ∼0.12 mag, about 40% of the ∼0.30 mag fundamental f1 amplitude. In Fig. 9, this produces a sharp rise over ∼0.1 phase, ∼3× faster than the ∼0.3 phase decline.", "perturbed_statement": "Figure 8 shows the second harmonic (2f1) peak at ∼0.12 mag, about 70% of the ∼0.30 mag fundamental f1 amplitude. In Fig. 9, this produces a sharp rise over ∼0.1 phase, ∼3× faster than the ∼0.3 phase decline.", "perturbed_explanation": "The spectrum in Fig. 8 clearly marks the 2f1 peak at ~0.12 mag versus ~0.30 mag for f1, yielding a ratio of ~40%, not 70%. Thus stating 2f1 is ~70% of f1 contradicts the plotted amplitudes.", "claim": "Figure 8 shows the second harmonic (2f1) peak at ∼0.12 mag, about 40% of the ∼0.30 mag fundamental f1 amplitude. In Fig. 9, this produces a sharp rise over ∼0.1 phase, ∼3× faster than the ∼0.3 phase decline.", "label": true }, { "paperid": "2410.21357v1", "paper_path": "./SciVer/papers/2410.21357v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "1(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21357v1_figure_1(b).png", "item2_path": "./SciVer/images/2410.21357v1_figure_1(c).png", "section": [ "5.3" ], "request_id": 707, "origin_statement": "At a sampling window length of 1.0 with size 4, EDLM achieves a Gen PPL of 24.7, and EDLM-NCE reduces entropy to 7.67 in 10 s, compared to MDLM’s entropy of 7.80 at the same time.", "perturbed_statement": "At a sampling window length of 0.8 with size 4, EDLM achieves a Gen PPL of 24.7, and EDLM-NCE reduces entropy to 7.67 in 10 s, compared to MDLM’s entropy of 7.80 at the same time.", "perturbed_explanation": "The perturbation claims a Gen PPL of 24.7 at window length 0.8 and size 4, but the table shows a Gen PPL of 26.0 for window 0.8 and sampling size 4, so the stated 24.7 is incorrect.", "claim": "At a sampling window length of 1.0 with size 4, EDLM achieves a Gen PPL of 24.7, and EDLM-NCE reduces entropy to 7.67 in 10 s, compared to MDLM’s entropy of 7.80 at the same time.", "label": true }, { "paperid": "2411.04451v1", "paper_path": "./SciVer/papers/2411.04451v1.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.04451v1_figure_4.png", "item2_path": "./SciVer/images/2411.04451v1-Table2-1.png", "section": [ "3" ], "request_id": 799, "origin_statement": "In LMC protostars (20 detections at 2–5 μm), the mean CO₂ ice abundance (~22% w.r.t. H₂O) is nearly twice the mean CH₃OH ice abundance (~12%).", "perturbed_statement": "In SMC protostars (12 detections at 2–5 μm), the mean CO₂ ice abundance (~22% w.r.t. H₂O) is nearly twice the mean CH₃OH ice abundance (~12%).", "perturbed_explanation": "Table 2 shows that SMC 2–5 μm observations detect only H₂O and CO₂ ices, not CH₃OH. Moreover, Figure 4 indicates the SMC mean CO₂ abundance is about 15%, not 22%, so both the CH₃OH detection and the CO₂ percentage are incorrect for the SMC.", "claim": "In LMC protostars (20 detections at 2–5 μm), the mean CO₂ ice abundance (~22% w.r.t. 
H₂O) is nearly twice the mean CH₃OH ice abundance (~12%).", "label": true }, { "paperid": "2411.06070v1", "paper_path": "./SciVer/papers/2411.06070v1.json", "claim_type": "parallel", "item1": "4", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.06070v1-Table4-1.png", "item2_path": "./SciVer/images/2411.06070v1-Table6-1.png", "section": [ "4.4" ], "request_id": 816, "origin_statement": "Semantic reconstruction pre-training lifts link accuracy by 43.09 points (90.39% vs. 47.30% without pre-training in Table 4), and introducing a 20-token tree vocabulary in Table 6 further increases link-level performance by 2.32 points (from 88.50% to 90.82%).", "perturbed_statement": "Semantic reconstruction pre-training lifts link accuracy by 43.09 points (90.39% vs. 47.30% without pre-training in Table 4), and introducing a 20-token tree vocabulary in Table 6 further increases link-level performance by 5.32 points (from 85.50% to 90.82%).", "perturbed_explanation": "The perturbed claim incorrectly states a 5.32-point gain from 85.50% to 90.82%. Table 6 actually reports an increase from 88.50% to 90.82%, a 2.32-point improvement. The 85.50% baseline and 5.32-point gain do not match the table.", "claim": "Semantic reconstruction pre-training lifts link accuracy by 43.09 points (90.39% vs. 47.30% without pre-training in Table 4), and introducing a 20-token tree vocabulary in Table 6 further increases link-level performance by 2.32 points (from 88.50% to 90.82%).", "label": true }, { "paperid": "2411.07321v1", "paper_path": "./SciVer/papers/2411.07321v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07321v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.07321v1_figure_2(c).png", "section": [ "4.2" ], "request_id": 839, "origin_statement": "At 16 MPI tasks in the Small test, the CPU gridding step achieves about a 15× speed-up, whereas at 16 nodes in the Intermediate test, the same step only reaches roughly a 4× speed-up, indicating significantly diminished scalability for larger problem sizes.", "perturbed_statement": "At 16 MPI tasks in the Small test, the CPU gridding step achieves about a 15× speed-up, whereas at 16 nodes in the Intermediate test, the same step only reaches roughly an 8× speed-up, indicating significantly diminished scalability for larger problem sizes.", "perturbed_explanation": "The perturbed claim misstates the Intermediate test gridding speed-up at 16 nodes: the actual speed-up is approximately 4× (bottom left panel of Fig. 2), not 8×. 
Therefore the stated 8× value contradicts the plotted data.", "claim": "At 16 MPI tasks in the Small test, the CPU gridding step achieves about a 15× speed-up, whereas at 16 nodes in the Intermediate test, the same step only reaches roughly a 4× speed-up, indicating significantly diminished scalability for larger problem sizes.", "label": true }, { "paperid": "2409.00210v1", "paper_path": "./SciVer/papers/2409.00210v1.json", "claim_type": "parallel", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.00210v1_figure_2.png", "item2_path": "./SciVer/images/2409.00210v1-Table2-1.png", "section": [ "2.2" ], "request_id": 841, "origin_statement": "Dynamic pulling at 0.006 m/s yields a ΔF of ~35 k_B T, roughly three times the experimental standard binding free energy of 12.17 ± 0.69 k_B T measured for N_exp = 274 by Nie et al.", "perturbed_statement": "Dynamic pulling at 0.006 m/s yields a ΔF of ~15 k_B T, matching the experimental standard binding free energy of 12.17 ± 0.69 k_B T reported for N_exp = 274 by Nie et al.", "perturbed_explanation": "The perturbed statement incorrectly lowers the dynamic ΔF at 0.006 m/s to ~15 k_B T; Figure 2c shows it is actually about 35 k_B T at that velocity, so it cannot match the ~12.17 k_B T experimental binding free energy.", "claim": "Dynamic pulling at 0.006 m/s yields a ΔF of ~35 k_B T, roughly three times the experimental standard binding free energy of 12.17 ± 0.69 k_B T measured for N_exp = 274 by Nie et al.", "label": true }, { "paperid": "2409.17424v1", "paper_path": "./SciVer/papers/2409.17424v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17424v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.17424v1_figure_1(b).png", "section": [ "2.1" ], "request_id": 887, "origin_statement": "The Filtered track indexes 10 million CLIP-embedded images with a 200 386-word tag vocabulary to process 100 000 image+tag queries, such as matching the green steam locomotive query image to the yellow tanker train in the database.", "perturbed_statement": "The Filtered track indexes 1 million CLIP-embedded images with a 200 386-word tag vocabulary to process 100 000 image+tag queries, such as matching the green steam locomotive query image to the yellow tanker train in the database.", "perturbed_explanation": "The perturbed statement erroneously claims the dataset contains 1 million images, whereas the context specifies the Filtered track uses 10 million CLIP-embedded images.", "claim": "The Filtered track indexes 10 million CLIP-embedded images with a 200 386-word tag vocabulary to process 100 000 image+tag queries, such as matching the green steam locomotive query image to the yellow tanker train in the database.", "label": true }, { "paperid": "2410.17351v2", "paper_path": "./SciVer/papers/2410.17351v2.json", "claim_type": "parallel", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.17351v2_figure_6.png", "item2_path": "./SciVer/images/2410.17351v2-Table2-1.png", "section": [ "5.3" ], "request_id": 1014, "origin_statement": "By 6×10^7 timesteps, the Investigate sub-policy fine-tuned for Stealthy Red converges around −95 reward, closely matching the from-scratch Stealthy curve; in Table 2, H-MARL Meta with fine-tuning achieves −162.51 ± 57.97 mean reward against Stealthy Red.", "perturbed_statement": "By 6×10^7 timesteps, the Investigate 
sub-policy fine-tuned for Stealthy Red converges around −80 reward, closely matching the from-scratch Stealthy curve; in Table 2, H-MARL Meta with fine-tuning achieves −162.51 ± 57.97 mean reward against Stealthy Red.", "perturbed_explanation": "The figure in Figure 6 shows the fine-tuned curve for Stealthy Red converging near −95, not −80. Thus claiming −80 contradicts the plotted reward values.", "claim": "By 6×10^7 timesteps, the Investigate sub-policy fine-tuned for Stealthy Red converges around −95 reward, closely matching the from-scratch Stealthy curve; in Table 2, H-MARL Meta with fine-tuning achieves −162.51 ± 57.97 mean reward against Stealthy Red.", "label": true }, { "paperid": "2409.16040v2", "paper_path": "./SciVer/papers/2409.16040v2.json", "claim_type": "parallel", "item1": "4", "item2": "7", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.16040v2_figure_4.png", "item2_path": "./SciVer/images/2409.16040v2-Table7-1.png", "section": [ "4.5" ], "request_id": 1048, "origin_statement": "In the ETTm2 benchmark, expert 3 in layer 8 receives a gating score of about 0.5, while increasing Topk from 2 to 4 yields identical average MSE (0.262) but increases inference time from 0.095 to 0.109 s/iter.", "perturbed_statement": "In the ETTm2 benchmark, expert 4 in layer 8 receives a gating score of about 0.5, while increasing Topk from 2 to 4 yields identical average MSE (0.265) but increases inference time from 0.095 to 0.109 s/iter.", "perturbed_explanation": "The perturbation is incorrect because Figure 4 shows the highest gating score at layer 8 resides with expert 3, not expert 4, and Table 7 reports an average MSE of 0.262 (not 0.265) when moving from Top2 to Top4.", "claim": "In the ETTm2 benchmark, expert 3 in layer 8 receives a gating score of about 0.5, while increasing Topk from 2 to 4 yields identical average MSE (0.262) but increases inference time from 0.095 to 0.109 s/iter.", "label": true }, { "paperid": "2410.07754v1", "paper_path": "./SciVer/papers/2410.07754v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.07754v1_figure_5.png", "item2_path": "./SciVer/images/2410.07754v1_figure_6.png", "section": [ "4" ], "request_id": 1062, "origin_statement": "The 90% credible interval of the radius for a 1.4 M⊙ neutron star is constrained to within ±0.35 km, while the combined H₀ posterior peaks at the injected value of 67.4 km s⁻¹ Mpc⁻¹.", "perturbed_statement": "The 90% credible interval of the radius for a 1.4 M⊙ neutron star is constrained to within ±0.7 km, while the combined H₀ posterior peaks at the injected value of 67.4 km s⁻¹ Mpc⁻¹.", "perturbed_explanation": "The perturbation doubles the quoted radius uncertainty. Figure 5 and its caption specify a total 90% credible width of 700 m around the 1.4 M⊙ radius (i.e. 
±0.35 km), so stating ±0.7 km contradicts the actual constraint.", "claim": "The 90% credible interval of the radius for a 1.4 M⊙ neutron star is constrained to within ±0.35 km, while the combined H₀ posterior peaks at the injected value of 67.4 km s⁻¹ Mpc⁻¹.", "label": true }, { "paperid": "2411.18275v1", "paper_path": "./SciVer/papers/2411.18275v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.18275v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.18275v1_figure_5(b).png", "section": [ "5.4" ], "request_id": 1088, "origin_statement": "The Dolphins model’s Final Score decreases from 37% at 5 iteration steps to 33% at 50 steps, while in budget variation its score declines from 38% at a 0.01 perturbation budget to 33% at 0.1, showing a consistent 4% drop across both settings.", "perturbed_statement": "The Dolphins model’s Final Score decreases from 35% at 5 iteration steps to 33% at 50 steps, while in budget variation its score declines from 36% at a 0.01 perturbation budget to 33% at 0.1, showing a consistent 4% drop across both settings.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5(a) shows the Dolphins score at 5 iteration steps is approximately 37%, not 35%, and Figure 5(b) shows its score at a 0.01 perturbation budget is about 38%, not 36%.", "claim": "The Dolphins model’s Final Score decreases from 37% at 5 iteration steps to 33% at 50 steps, while in budget variation its score declines from 38% at a 0.01 perturbation budget to 33% at 0.1, showing a consistent 4% drop across both settings.", "label": true }, { "paperid": "2410.18630v1", "paper_path": "./SciVer/papers/2410.18630v1.json", "claim_type": "parallel", "item1": "12", "item2": "13", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.18630v1_figure_12.png", "item2_path": "./SciVer/images/2410.18630v1_figure_13.png", "section": [ "3.2.2" ], "request_id": 1092, "origin_statement": "The pipeline in Figure 12 transforms the colorized bregma model into a unified white point cloud over four iteration steps (a–d) in Figure 13, fully merging the segmented colored regions with the cranial surface by step (d).", "perturbed_statement": "The pipeline in Figure 12 transforms the colorized bregma model into a unified white point cloud over six iteration steps (a–f) in Figure 13, fully merging the segmented colored regions with the cranial surface by step (f).", "perturbed_explanation": "Figure 13 only contains five panels labeled (a) through (e), so there is no iteration step (f) or a total of six steps; the registration completes by panel (d), not (f).", "claim": "The pipeline in Figure 12 transforms the colorized bregma model into a unified white point cloud over four iteration steps (a–d) in Figure 13, fully merging the segmented colored regions with the cranial surface by step (d).", "label": true }, { "paperid": "2411.14713v1", "paper_path": "./SciVer/papers/2411.14713v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14713v1_figure_1(a).png", "item2_path": "./SciVer/images/2411.14713v1_figure_1(b).png", "section": [ "1" ], "request_id": 1101, "origin_statement": "When history length increases from 20 to 100, DIEN’s AUC rises by ~0.006 on Movielens-100k and ~0.004 on Amazon-books, while DIEN+Llama2’s AUC falls by ~0.01 and ~0.003 respectively.", 
"perturbed_statement": "When history length increases from 20 to 100, DIEN’s AUC rises by ~0.01 on Movielens-100k and ~0.005 on Amazon-books, while DIEN+Llama2’s AUC falls by ~0.01 and ~0.003 respectively.", "perturbed_explanation": "The perturbed statement overstates DIEN’s improvements: on Movielens-100k, DIEN’s AUC actually increases from about 0.793 to 0.799 (≈0.006), not 0.01; on Amazon-books, it goes from about 0.834 to 0.837 (≈0.004), not 0.005.", "claim": "When history length increases from 20 to 100, DIEN’s AUC rises by ~0.006 on Movielens-100k and ~0.004 on Amazon-books, while DIEN+Llama2’s AUC falls by ~0.01 and ~0.003 respectively.", "label": true }, { "paperid": "2411.18317v1", "paper_path": "./SciVer/papers/2411.18317v1.json", "claim_type": "parallel", "item1": "5(b)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.18317v1_figure_5(b).png", "item2_path": "./SciVer/images/2411.18317v1-Table5-1.png", "section": [ "3.2" ], "request_id": 1102, "origin_statement": "Fig.5(b) plots all 125 orbital‐slot configurations (5×5×5); Table 5 shows Model U2 attains the highest average of 450.92 ± 369.92%, 86.6% above Model U1’s 364.35 ± 328.29%.", "perturbed_statement": "Fig.5(b) plots all 150 orbital‐slot configurations (5×5×5); Table 5 shows Model U2 attains the highest average of 560.92 ± 369.92%, 86.6% above Model U1’s 364.35 ± 328.29%.", "perturbed_explanation": "The perturbed statement is incorrect because Fig. 5(b) actually shows 125 slots (5×5×5), not 150, and Table 5 lists Model U2’s average as 450.92 ± 369.92%, not 560.92 ± 369.92%.", "claim": "Fig.5(b) plots all 125 orbital‐slot configurations (5×5×5); Table 5 shows Model U2 attains the highest average of 450.92 ± 369.92%, 86.6% above Model U1’s 364.35 ± 328.29%.", "label": true }, { "paperid": "2409.14595v1", "paper_path": "./SciVer/papers/2409.14595v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14595v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.14595v1_figure_2(b).png", "section": [ "2" ], "request_id": 1121, "origin_statement": "In the 15-layer model, layers 4 and 5 exhibit a cosine similarity of 0.75, while in the 22-layer model layers 2 and 3 reach 0.95, highlighting more homogeneous mid-layer attention patterns in the larger network.", "perturbed_statement": "In the 15-layer model, layers 4 and 5 exhibit a cosine similarity of 0.85, while in the 22-layer model layers 2 and 3 reach 0.97, highlighting more homogeneous mid-layer attention patterns in the larger network.", "perturbed_explanation": "The perturbed values contradict the visual data. Image (a) shows layers 4 and 5 actually have a similarity of 0.75, not 0.85. 
Image (b) shows layers 2 and 3 have a similarity of 0.95–0.96, not 0.97.", "claim": "In the 15-layer model, layers 4 and 5 exhibit a cosine similarity of 0.75, while in the 22-layer model layers 2 and 3 reach 0.95, highlighting more homogeneous mid-layer attention patterns in the larger network.", "label": true }, { "paperid": "2410.06134v1", "paper_path": "./SciVer/papers/2410.06134v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(e)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.06134v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.06134v1_figure_1(e).png", "section": [ "3.3", "1", "4.2" ], "request_id": 1164, "origin_statement": "At baseline, the in-distribution max logit density peaks around 15 while OOD peaks around 9, whereas under ALS10 the ID peak shifts to about 22 and OOD remains near 10, increasing the logit separation between known and unknown samples.", "perturbed_statement": "At baseline, the in-distribution max logit density peaks around 15 while OOD peaks around 9, whereas under ALS10 the ID peak shifts down to about 10 and OOD remains near 10, eliminating the logit separation between known and unknown samples.", "perturbed_explanation": "The perturbation wrongly claims the ALS10 in-distribution peak shifts down to 10; in the ALS10 plot the ID max logit actually peaks around 22, so the separation between ID and OOD still increases rather than vanishing.", "claim": "At baseline, the in-distribution max logit density peaks around 15 while OOD peaks around 9, whereas under ALS10 the ID peak shifts to about 22 and OOD remains near 10, increasing the logit separation between known and unknown samples.", "label": true }, { "paperid": "2411.03250v1", "paper_path": "./SciVer/papers/2411.03250v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03250v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.03250v1_figure_5(b).png", "section": [ "5.2" ], "request_id": 1187, "origin_statement": "On the Beijing dataset, TabSyn and DiffLM both concentrate around DCR ≈2.3 (with ratios over 25% in the 2–2.5 bin), whereas GReaT peaks at 4.0; on the Default dataset, Codi and GReaT center near 12.2, while TabSyn and DiffLM cluster below 0.5.", "perturbed_statement": "On the Beijing dataset, TabSyn and DiffLM both concentrate around DCR ≈2.3 (with ratios over 25% in the 2–2.5 bin), whereas GReaT peaks at 4.0; on the Default dataset, Codi and GReaT center near 8.5, while TabSyn and DiffLM cluster below 0.5.", "perturbed_explanation": "The Default dataset histograms show that Codi and GReaT have their peaks around DCR ≈12.2–12.5, not near 8.5. 
Changing their center to 8.5 contradicts the visual bars at around 12 on the Default plot.", "claim": "On the Beijing dataset, TabSyn and DiffLM both concentrate around DCR ≈2.3 (with ratios over 25% in the 2–2.5 bin), whereas GReaT peaks at 4.0; on the Default dataset, Codi and GReaT center near 12.2, while TabSyn and DiffLM cluster below 0.5.", "label": true }, { "paperid": "2410.03570v1", "paper_path": "./SciVer/papers/2410.03570v1.json", "claim_type": "parallel", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.03570v1_figure_7.png", "item2_path": "./SciVer/images/2410.03570v1_figure_8.png", "section": [ "3.3" ], "request_id": 1194, "origin_statement": "The dual-photopeak reconstruction predicts a TIA for lesion 3 of approximately 4.1 MBq·h, which is only about 2.5% higher than the 4.0 MBq·h measured from its TAC in Fig. 7.", "perturbed_statement": "The dual-photopeak reconstruction predicts a TIA for lesion 3 of approximately 5.1 MBq·h, which is about 27.5% higher than the 4.0 MBq·h measured from its TAC in Fig. 7.", "perturbed_explanation": "The perturbed statement misstates the dual-photopeak prediction: Fig. 8 shows a dual TIA of ~4.1 MBq·h, not 5.1 MBq·h, and the resulting percentage difference from the 4.0 MBq·h TAC value is ~2.5%, not 27.5%.", "claim": "The dual-photopeak reconstruction predicts a TIA for lesion 3 of approximately 4.1 MBq·h, which is only about 2.5% higher than the 4.0 MBq·h measured from its TAC in Fig. 7.", "label": true }, { "paperid": "2410.18302v1", "paper_path": "./SciVer/papers/2410.18302v1.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.18302v1_figure_3.png", "item2_path": "./SciVer/images/2410.18302v1-Table5-1.png", "section": [ "4.2" ], "request_id": 1275, "origin_statement": "For the near-resonance 2λ_bin −5λ_Nix +14λ_Kerberos −11λ_Styx, a 37 km inward shift in Styx’s semi-major axis changes Φ from full 0–360° circulation to approximately 115° of libration centered near 210° over 50 000 days.", "perturbed_statement": "For the near-resonance 3λ_Styx −5λ_Nix +2λ_Hydra, a 37 km inward shift in Styx’s semi-major axis changes Φ from full 0–360° circulation to approximately 115° of libration centered near 210° over 50 000 days.", "perturbed_explanation": "The plotted angle in Figure 3 corresponds to the combination 2λ_bin −5λ_Nix +14λ_Kerberos −11λ_Styx, not 3λ_Styx −5λ_Nix +2λ_Hydra. The perturbed statement misidentifies the resonance shown.", "claim": "For the near-resonance 2λ_bin −5λ_Nix +14λ_Kerberos −11λ_Styx, a 37 km inward shift in Styx’s semi-major axis changes Φ from full 0–360° circulation to approximately 115° of libration centered near 210° over 50 000 days.", "label": true }, { "paperid": "2409.07171v1", "paper_path": "./SciVer/papers/2409.07171v1.json", "claim_type": "parallel", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.07171v1_figure_6.png", "item2_path": "./SciVer/images/2409.07171v1-Table3-1.png", "section": [ "4.4" ], "request_id": 1279, "origin_statement": "At 40 views, AC-IND achieves around 36.07 dB PSNR on the Walnut Slice dataset (Fig. 6) and about 41.97 ± 3.76 dB PSNR on the Ellipse Material dataset (Table 3).", "perturbed_statement": "At 40 views, AC-IND achieves around 36.07 dB PSNR on the Walnut Slice dataset (Fig. 
6) and about 45.23 ± 3.76 dB PSNR on the Ellipse Material dataset (Table 3).", "perturbed_explanation": "The perturbed statement wrongly attributes a 45.23 dB PSNR to AC-IND on the Ellipse Material dataset at 40 views. Table 3 shows that 45.23 dB is the result for AC-IND+; AC-IND’s actual PSNR is 41.97 ± 3.76 dB.", "claim": "At 40 views, AC-IND achieves around 36.07 dB PSNR on the Walnut Slice dataset (Fig. 6) and about 41.97 ± 3.76 dB PSNR on the Ellipse Material dataset (Table 3).", "label": true }, { "paperid": "2411.06145v2", "paper_path": "./SciVer/papers/2411.06145v2.json", "claim_type": "parallel", "item1": "4", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.06145v2_figure_4.png", "item2_path": "./SciVer/images/2411.06145v2-Table4-1.png", "section": [ "5.4" ], "request_id": 1343, "origin_statement": "Figure 4(a) shows Python translations omitting shutil.copyfileobj, reflecting missing required library errors which account for 17.11% of min-dependency translations from Java to Python, a nearly fourfold rise compared to 4.55% under the holistic strategy.", "perturbed_statement": "Figure 4(a) shows Python translations omitting shutil.copyfileobj, reflecting missing required library errors which account for 11.11% of min-dependency translations from Java to Python, a nearly double rise compared to 5.55% under the holistic strategy.", "perturbed_explanation": "The perturbed statement is incorrect because Table 4 reports 17.11% (not 11.11%) of min-dependency translations from Java to Python suffer missing library errors, and the holistic strategy value is 4.55% (not 5.55%). These mismatches contradict the actual table data.", "claim": "Figure 4(a) shows Python translations omitting shutil.copyfileobj, reflecting missing required library errors which account for 17.11% of min-dependency translations from Java to Python, a nearly fourfold rise compared to 4.55% under the holistic strategy.", "label": true }, { "paperid": "2410.02249v2", "paper_path": "./SciVer/papers/2410.02249v2.json", "claim_type": "parallel", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02249v2_figure_6.png", "item2_path": "./SciVer/images/2410.02249v2-Table3-1.png", "section": [ "4.3" ], "request_id": 1390, "origin_statement": "At around time step 800, airplane event density reaches its highest and generates the densest spike splitting among the five classes, while adding the SpikeSlicer increases compute from 56.36 G to 57.09 G (±0.73 G) and boosts tracking performance by 22.4% (51.0 to 62.4).", "perturbed_statement": "At around time step 700, airplane event density reaches its highest and generates the densest spike splitting among the five classes, while adding the SpikeSlicer increases compute from 56.36 G to 57.50 G (+1.14 G) and boosts tracking performance by 15.7% (51.0 to 62.4).", "perturbed_explanation": "The perturbed statement is incorrect because the airplane event density peak and densest splits occur at time step ~800, not 700. 
It also wrongly reports the compute rise as +1.14 G instead of the true +0.73 G, and the performance gain as 15.7% instead of the actual 22.4%.", "claim": "At around time step 800, airplane event density reaches its highest and generates the densest spike splitting among the five classes, while adding the SpikeSlicer increases compute from 56.36 G to 57.09 G (±0.73 G) and boosts tracking performance by 22.4% (51.0 to 62.4).", "label": true }, { "paperid": "2410.21038v2", "paper_path": "./SciVer/papers/2410.21038v2.json", "claim_type": "parallel", "item1": "6(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.21038v2_figure_6(a).png", "item2_path": "./SciVer/images/2410.21038v2-Table2-1.png", "section": [ "3.4" ], "request_id": 1406, "origin_statement": "For PSR J1844-0346 (Ė = 4.25×10^36 erg s^–1), the predicted PWN SED at d = 2.4 kpc peaks near 10^10 Hz with a luminosity of ~10^37 erg s^–1 (Fig. 6), implying radiative output can exceed the current spin-down power.", "perturbed_statement": "For PSR J1844-0346 (Ė = 4.25×10^36 erg s^–1), the predicted PWN SED at d = 2.4 kpc peaks near 10^10 Hz with a luminosity of ~10^35 erg s^–1 (Fig. 6), implying radiative output can exceed the current spin-down power.", "perturbed_explanation": "Figure 6’s right-hand luminosity axis shows the ensemble of SEDs peaking around 10^37 erg s^–1 at ~10^10 Hz for d = 2.4 kpc, not 10^35 erg s^–1 as claimed. The perturbation underestimates the peak luminosity by two orders of magnitude.", "claim": "For PSR J1844-0346 (Ė = 4.25×10^36 erg s^–1), the predicted PWN SED at d = 2.4 kpc peaks near 10^10 Hz with a luminosity of ~10^37 erg s^–1 (Fig. 6), implying radiative output can exceed the current spin-down power.", "label": true }, { "paperid": "2410.06581v1", "paper_path": "./SciVer/papers/2410.06581v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.06581v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.06581v1_figure_3(c).png", "section": [ "4.5" ], "request_id": 1411, "origin_statement": "Both LeCaRD and CAIL2022-LCR peak at 75% augmented positives, where LeCaRD achieves MAP 0.64 (vs. 0.55 at 0%) and CAIL2022-LCR achieves MAP 0.68 (vs. 0.59 at 0%), each gaining 0.09 over their unaugmented baselines.", "perturbed_statement": "Both LeCaRD and CAIL2022-LCR peak at 75% augmented positives, where LeCaRD achieves MAP 0.66 (vs. 0.55 at 0%) and CAIL2022-LCR achieves MAP 0.68 (vs. 0.59 at 0%), each gaining 0.09 over their unaugmented baselines.", "perturbed_explanation": "The perturbed statement misreports the LeCaRD MAP at 75% as 0.66, whereas the chart shows it is actually 0.64, making the claim incorrect.", "claim": "Both LeCaRD and CAIL2022-LCR peak at 75% augmented positives, where LeCaRD achieves MAP 0.64 (vs. 0.55 at 0%) and CAIL2022-LCR achieves MAP 0.68 (vs. 
0.59 at 0%), each gaining 0.09 over their unaugmented baselines.", "label": true }, { "paperid": "2411.00112v1", "paper_path": "./SciVer/papers/2411.00112v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00112v1_figure_4.png", "item2_path": "./SciVer/images/2411.00112v1-Table3-1.png", "section": [ "3.2" ], "request_id": 1438, "origin_statement": "Figure 4’s surface peaks at around 2×10^10 at (−3, 3), and Table 3 shows at σ=0.1 the Cor-CFD-GD algorithm reduces its solution-gap RMSE from 61.73 at 1,000 sample pairs to 39.72 at 10,000 (a 36% reduction).", "perturbed_statement": "Figure 4’s surface peaks at around 2×10^10 at (−3, 3), and Table 3 shows at σ=1 the Cor-CFD-GD algorithm reduces its solution-gap RMSE from 61.73 at 1,000 sample pairs to 39.72 at 10,000 (a 36% reduction).", "perturbed_explanation": "The perturbed claim incorrectly assigns σ=1 to the RMSE values 61.73 and 39.72. In Table 3 those solution-gap RMSE numbers are reported for σ=0.1, not σ=1 (for σ=1 the RMSE values are 77.24 and 50.22).", "claim": "Figure 4’s surface peaks at around 2×10^10 at (−3, 3), and Table 3 shows at σ=0.1 the Cor-CFD-GD algorithm reduces its solution-gap RMSE from 61.73 at 1,000 sample pairs to 39.72 at 10,000 (a 36% reduction).", "label": true }, { "paperid": "2410.09487v1", "paper_path": "./SciVer/papers/2410.09487v1.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09487v1_figure_3.png", "item2_path": "./SciVer/images/2410.09487v1_figure_5.png", "section": [ "3.3" ], "request_id": 1482, "origin_statement": "The 24-hour autocorrelation for Southern Germany house 3 is about 0.5, and its median hourly consumption drops from around 0.52 kWh in January to 0.15 kWh in June, a decrease of roughly 71%.", "perturbed_statement": "The 24-hour autocorrelation for Southern Germany house 3 is about 0.8, and its median hourly consumption drops from around 0.52 kWh in January to 0.15 kWh in June, a decrease of roughly 71%.", "perturbed_explanation": "The perturbed statement overstates the 24-hour autocorrelation: Figure 3 shows this value is approximately 0.5, not 0.8.", "claim": "The 24-hour autocorrelation for Southern Germany house 3 is about 0.5, and its median hourly consumption drops from around 0.52 kWh in January to 0.15 kWh in June, a decrease of roughly 71%.", "label": true }, { "paperid": "2410.22135v2", "paper_path": "./SciVer/papers/2410.22135v2.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.22135v2_figure_2.png", "item2_path": "./SciVer/images/2410.22135v2_figure_3.png", "section": [ "2.3" ], "request_id": 1493, "origin_statement": "On the Chest X-ray dataset, applying the mask lowers the Mean Magnitude of Channels at channel 1 from about 5.0 to 4.2, and the masked phase-difference histogram shows a sharper peak at π radians, reflecting higher inter-channel correlation alongside its improved segmentation performance.", "perturbed_statement": "On the Chest X-ray dataset, applying the mask raises the Mean Magnitude of Channels at channel 1 from about 5.0 to 5.8, and the masked phase-difference histogram shows a sharper peak at π radians, reflecting higher inter-channel correlation alongside its improved segmentation performance.", "perturbed_explanation": "The perturbation is incorrect because Figure 2 shows the masked 
MMC curve on Chest X-ray lies below the baseline, meaning the magnitude at channel 1 actually decreases from ≈5.0 to ≈4.2 rather than increasing to 5.8.", "claim": "On the Chest X-ray dataset, applying the mask lowers the Mean Magnitude of Channels at channel 1 from about 5.0 to 4.2, and the masked phase-difference histogram shows a sharper peak at π radians, reflecting higher inter-channel correlation alongside its improved segmentation performance.", "label": true }, { "paperid": "2409.13694v2", "paper_path": "./SciVer/papers/2409.13694v2.json", "claim_type": "parallel", "item1": "6", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13694v2_figure_6.png", "item2_path": "./SciVer/images/2409.13694v2-Table5-1.png", "section": [ "5.3.2" ], "request_id": 1504, "origin_statement": "At nine noise chunks, PruningRAG yields roughly an 8% reasoning score, which is 5.46 percentage points lower than the 13.46% score obtained using three cross-domain few-shot invalid premise examples.", "perturbed_statement": "At nine noise chunks, PruningRAG yields roughly an 8% reasoning score, which is 7.46 percentage points lower than the 16.54% score obtained using three cross-domain few-shot invalid premise examples.", "perturbed_explanation": "The perturbed statement misreports the cross-domain few-shot score as 16.54%, but Table 5 shows that with three cross-domain invalid premise examples the actual reasoning score is 13.46%, not 16.54%.", "claim": "At nine noise chunks, PruningRAG yields roughly an 8% reasoning score, which is 5.46 percentage points lower than the 13.46% score obtained using three cross-domain few-shot invalid premise examples.", "label": true }, { "paperid": "2410.19743v1", "paper_path": "./SciVer/papers/2410.19743v1.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19743v1_figure_3.png", "item2_path": "./SciVer/images/2410.19743v1-Table2-1.png", "section": [ "3.4" ], "request_id": 1506, "origin_statement": "In the MS sample from Figure 3, there are 2 parallel API calls (Para.=2) and 2 sequential dependencies, compared to the MS dataset averages of 2.2 parallel and 1.2 sequential relationships in Table 2.", "perturbed_statement": "In the MS sample from Figure 3, there are 2 parallel API calls (Para.=2) and 2 sequential dependencies, compared to the MS dataset averages of 3.2 parallel and 1.2 sequential relationships in Table 2.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 actually reports an average of 2.2 parallel relationships for MS, not 3.2.", "claim": "In the MS sample from Figure 3, there are 2 parallel API calls (Para.=2) and 2 sequential dependencies, compared to the MS dataset averages of 2.2 parallel and 1.2 sequential relationships in Table 2.", "label": true }, { "paperid": "2411.09499v1", "paper_path": "./SciVer/papers/2411.09499v1.json", "claim_type": "parallel", "item1": "5", "item2": "6(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09499v1_figure_5.png", "item2_path": "./SciVer/images/2411.09499v1_figure_6(a).png", "section": [ "5.3" ], "request_id": 1520, "origin_statement": "By episode 160, the A2C agent’s episodic reward reached nearly +2400, while the coupled RL (T2=5) optimisation achieved a peak energy absorption of about 1.5×10^6 J, roughly 7% higher than the network inversion peak (~1.4×10^6 J).", "perturbed_statement": "By episode 160, the A2C agent’s 
episodic reward reached nearly +3500, while the coupled RL (T2=5) optimisation achieved a peak energy absorption of about 1.6×10^6 J, roughly 14% higher than the network inversion peak (~1.4×10^6 J).", "perturbed_explanation": "The perturbed rewards value (+3500) conflicts with Figure 5, where the maximum episodic reward never exceeds about +2400. Likewise, the peak energy absorption for the coupled RL curve in Figure 6(a) is around 1.5×10^6 J, not 1.6×10^6 J, so the 1.6×10^6 J and 14% increase are incorrect.", "claim": "By episode 160, the A2C agent’s episodic reward reached nearly +2400, while the coupled RL (T2=5) optimisation achieved a peak energy absorption of about 1.5×10^6 J, roughly 7% higher than the network inversion peak (~1.4×10^6 J).", "label": true }, { "paperid": "2409.11317v1", "paper_path": "./SciVer/papers/2409.11317v1.json", "claim_type": "parallel", "item1": "4", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11317v1_figure_4.png", "item2_path": "./SciVer/images/2409.11317v1-Table4-1.png", "section": [ "3.2" ], "request_id": 1522, "origin_statement": "At chirp mass [0.74,1.31] M☉ for IFAR = 1 yr, the hierarchical search achieves a VT ratio of ~1.3, and the flat search uses 30603 CPU core-hours at Hanford versus ~12322 core-hours for the coarse+fine hierarchical stages.", "perturbed_statement": "At chirp mass [0.74,1.31] M☉ for IFAR = 1 yr, the hierarchical search achieves a VT ratio of ~1.5, and the flat search uses 30603 CPU core-hours at Hanford versus ~12322 core-hours for the coarse+fine hierarchical stages.", "perturbed_explanation": "The VT ratio at chirp mass [0.74,1.31] and IFAR=1 yr is approximately 1.3 according to Figure 4, not 1.5. The rest of the values are unchanged and correct.", "claim": "At chirp mass [0.74,1.31] M☉ for IFAR = 1 yr, the hierarchical search achieves a VT ratio of ~1.3, and the flat search uses 30603 CPU core-hours at Hanford versus ~12322 core-hours for the coarse+fine hierarchical stages.", "label": true }, { "paperid": "2411.15462v1", "paper_path": "./SciVer/papers/2411.15462v1.json", "claim_type": "parallel", "item1": "3", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15462v1_figure_3.png", "item2_path": "./SciVer/images/2411.15462v1_figure_6.png", "section": [ "4.2" ], "request_id": 1535, "origin_statement": "In English top-scored tweets, offensive content constitutes around 20% of the top 0.25% tweets, while political targets represent about 30% of English hate in HateDay but only about 20% in public hate speech datasets.", "perturbed_statement": "In English top-scored tweets, offensive content constitutes around 50% of the top 0.25% tweets, while political targets represent about 30% of English hate in HateDay but only about 20% in public hate speech datasets.", "perturbed_explanation": "Figure 3 shows that offensive content in the top 50 English tweets is roughly 20%, not 50%. 
The claim of 50% offensive content contradicts the actual ~20% value from the image.", "claim": "In English top-scored tweets, offensive content constitutes around 20% of the top 0.25% tweets, while political targets represent about 30% of English hate in HateDay but only about 20% in public hate speech datasets.", "label": true }, { "paperid": "2411.03978v1", "paper_path": "./SciVer/papers/2411.03978v1.json", "claim_type": "parallel", "item1": "3(e)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03978v1_figure_3(e).png", "item2_path": "./SciVer/images/2411.03978v1-Table5-1.png", "section": [ "4.2" ], "request_id": 1539, "origin_statement": "The fruit embedding visualization shows three well-separated clusters for apple (red), banana (yellow), and grapes (purple), and in the CIFAR-10 Type clustering task, using the φ(w_{i}) subspace with concatenated embeddings achieves the highest NMI of 0.5271.", "perturbed_statement": "The fruit embedding visualization shows three well-separated clusters for apple (red), banana (yellow), and grapes (purple), and in the CIFAR-10 Type clustering task, using the φ(w_{i}) subspace with concatenated embeddings achieves an NMI of 0.6127.", "perturbed_explanation": "The perturbation is incorrect because Table 5 reports an NMI of 0.5271 for the φ(w_{i}) subspace with concatenated embeddings on the CIFAR-10 Type task, not 0.6127.", "claim": "The fruit embedding visualization shows three well-separated clusters for apple (red), banana (yellow), and grapes (purple), and in the CIFAR-10 Type clustering task, using the φ(w_{i}) subspace with concatenated embeddings achieves the highest NMI of 0.5271.", "label": true }, { "paperid": "2410.20952v2", "paper_path": "./SciVer/papers/2410.20952v2.json", "claim_type": "parallel", "item1": "6", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20952v2_figure_6.png", "item2_path": "./SciVer/images/2410.20952v2-Table4-1.png", "section": [ "4.2" ], "request_id": 1544, "origin_statement": "At x=0.8, the density f(x) reaches approximately 1.4 in Figure 6, whereas p_2(0.8) = (4/3·0.8² − 1/3·0.8) ≈ 0.85, so f(x) exceeds p_2(x) by about 0.55.", "perturbed_statement": "At x=0.8, the density f(x) reaches approximately 0.9 in Figure 6, whereas p_2(0.8) = (4/3·0.8² − 1/3·0.8) ≈ 0.85, so f(x) exceeds p_2(x) by about 0.05.", "perturbed_explanation": "The perturbation is wrong because Figure 6 shows f(0.8) ≈ 1.4, not 0.9, so the density exceeds p_2(0.8) by roughly 0.55, not 0.05.", "claim": "At x=0.8, the density f(x) reaches approximately 1.4 in Figure 6, whereas p_2(0.8) = (4/3·0.8² − 1/3·0.8) ≈ 0.85, so f(x) exceeds p_2(x) by about 0.55.", "label": true }, { "paperid": "2409.06653v1", "paper_path": "./SciVer/papers/2409.06653v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06653v1_figure_4.png", "item2_path": "./SciVer/images/2409.06653v1-Table3-1.png", "section": [ "4.2.2" ], "request_id": 1574, "origin_statement": "Female participants identified 75 posts as human compared to 69 by male participants, and the logistic regression reports a sex coefficient of 0.2429 (p<0.001), reflecting significantly higher human-identification odds for women.", "perturbed_statement": "Female participants identified 80 posts as human compared to 65 by male participants, and the logistic regression reports a sex coefficient of 0.2429 (p<0.001), reflecting 
significantly higher human-identification odds for women.", "perturbed_explanation": "The perturbed statement misstates the counts from Figure 4, which actually show 75 human identifications by females and 69 by males, not 80 and 65 respectively.", "claim": "Female participants identified 75 posts as human compared to 69 by male participants, and the logistic regression reports a sex coefficient of 0.2429 (p<0.001), reflecting significantly higher human-identification odds for women.", "label": true }, { "paperid": "2410.15168v1", "paper_path": "./SciVer/papers/2410.15168v1.json", "claim_type": "parallel", "item1": "11", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.15168v1_figure_11.png", "item2_path": "./SciVer/images/2410.15168v1-Table1-1.png", "section": [ "3.2" ], "request_id": 1669, "origin_statement": "Figure 11 shows the utilitarian method picks Blue with 100 total utility even though Amber scores 110 and is the Condorcet and majority winner (10 of 12 agents), while Table 1 indicates Range Voting (a utilitarian method) satisfies both majority and Condorcet criteria.", "perturbed_statement": "Figure 11 shows the utilitarian method picks Blue with 100 total utility even though Amber scores 110 and is the Condorcet and majority winner (9 of 12 agents), while Table 1 indicates Range Voting (a utilitarian method) satisfies both majority and Condorcet criteria.", "perturbed_explanation": "The perturbation incorrectly states that Amber is preferred by 9 of 12 agents, but Figure 11 specifies that 10 out of 12 agents prefer Amber, making the ‘9 of 12’ count factually wrong.", "claim": "Figure 11 shows the utilitarian method picks Blue with 100 total utility even though Amber scores 110 and is the Condorcet and majority winner (10 of 12 agents), while Table 1 indicates Range Voting (a utilitarian method) satisfies both majority and Condorcet criteria.", "label": true }, { "paperid": "2410.02426v1", "paper_path": "./SciVer/papers/2410.02426v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02426v1_figure_4.png", "item2_path": "./SciVer/images/2410.02426v1-Table3-1.png", "section": [ "4.5" ], "request_id": 1685, "origin_statement": "At 100,000 examples, OPT-125M instruction fine-tuned on generic data achieved 60% legal moves (Figure 4), whereas OPT-125M fine-tuned on NoGoal-WSM-10M achieved 52% legal moves and only 10% legal check/mate moves (Table 3), indicating NoGoal-WSM-10M yields 8 points fewer legal moves and a 10% check/mate rate.", "perturbed_statement": "At 100,000 examples, OPT-125M instruction fine-tuned on generic data achieved 60% legal moves (Figure 4), whereas OPT-125M fine-tuned on NoGoal-WSM-10M achieved 58% legal moves and only 10% legal check/mate moves (Table 3), indicating NoGoal-WSM-10M yields 2 points fewer legal moves and a 10% check/mate rate.", "perturbed_explanation": "The perturbed statement misreports the percentage of legal moves for the NoGoal-WSM-10M fine-tuned OPT-125M at 100,000 examples. 
Table 3 shows it achieved 52% legal moves, not 58%, so the claimed 2-point gap contradicts the actual 8-point difference.", "claim": "At 100,000 examples, OPT-125M instruction fine-tuned on generic data achieved 60% legal moves (Figure 4), whereas OPT-125M fine-tuned on NoGoal-WSM-10M achieved 52% legal moves and only 10% legal check/mate moves (Table 3), indicating NoGoal-WSM-10M yields 8 points fewer legal moves and a 10% check/mate rate.", "label": true }, { "paperid": "2410.05281v1", "paper_path": "./SciVer/papers/2410.05281v1.json", "claim_type": "parallel", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.05281v1_figure_5.png", "item2_path": "./SciVer/images/2410.05281v1-Table4-1.png", "section": [ "5.3" ], "request_id": 1709, "origin_statement": "At a Young’s modulus ratio of 35, FFT homogenization takes ~3.75×10^3 s versus ~1.4×10^2 s for Micrometer (Fig. 5), a ~27× speedup; Table 4 reports Micrometer completes homogenization in 35 s with just 0.07% error against FFT’s 3001 s.", "perturbed_statement": "At a Young’s modulus ratio of 35, FFT homogenization takes ~3.75×10^3 s versus ~1.4×10^2 s for Micrometer (Fig. 5), a ~27× speedup; Table 4 reports Micrometer completes homogenization in 35 s with just 0.7% error against FFT’s 3001 s.", "perturbed_explanation": "The perturbation incorrectly states the Micrometer relative error as 0.7%, whereas Table 4 lists the homogenization error for Micrometer as 0.07% ± 0.05%, so the 0.7% figure contradicts the table data.", "claim": "At a Young’s modulus ratio of 35, FFT homogenization takes ~3.75×10^3 s versus ~1.4×10^2 s for Micrometer (Fig. 5), a ~27× speedup; Table 4 reports Micrometer completes homogenization in 35 s with just 0.07% error against FFT’s 3001 s.", "label": true }, { "paperid": "2409.03782v1", "paper_path": "./SciVer/papers/2409.03782v1.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.03782v1_figure_3.png", "item2_path": "./SciVer/images/2409.03782v1-Table3-1.png", "section": [ "5.1" ], "request_id": 1740, "origin_statement": "At a 50% dropout rate on origImg, SSD300’s mAP drops to ≈0.65—about 0.33 lower than RetinaNet_v2’s ≈0.98—and Table 3 marks RetinaNet_v2 (M4) along with M1 and M2 as the top performers for origImg.", "perturbed_statement": "At a 50% dropout rate on origImg, SSD300’s mAP drops to ≈0.65—about 0.33 lower than RetinaNet_v2’s ≈0.98—and Table 3 marks only M1 and M2 as the top performers for origImg.", "perturbed_explanation": "The perturbed statement incorrectly omits M4 as a top performer for origImg. 
Table 3 clearly lists models M1, M2, and M4 as significantly best for mAP on the origImg dataset, so saying only M1 and M2 are top performers contradicts the table.", "claim": "At a 50% dropout rate on origImg, SSD300’s mAP drops to ≈0.65—about 0.33 lower than RetinaNet_v2’s ≈0.98—and Table 3 marks RetinaNet_v2 (M4) along with M1 and M2 as the top performers for origImg.", "label": true }, { "paperid": "2410.16503v1", "paper_path": "./SciVer/papers/2410.16503v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.16503v1_figure_1.png", "item2_path": "./SciVer/images/2410.16503v1_figure_2.png", "section": [ "2.2" ], "request_id": 1810, "origin_statement": "By fusing the 18 OpenPose and 33 MediaPipe keypoints, the pipeline yields 51 keypoints per frame and extracts over 112,500 keypoints per minute.", "perturbed_statement": "By fusing the 18 OpenPose and 43 MediaPipe keypoints, the pipeline yields 61 keypoints per frame and extracts over 112,500 keypoints per minute.", "perturbed_explanation": "The perturbed statement incorrectly claims MediaPipe provides 43 keypoints. Figure 2 shows MediaPipe extracts 33 keypoints, so the total per frame is 51, not 61.", "claim": "By fusing the 18 OpenPose and 33 MediaPipe keypoints, the pipeline yields 51 keypoints per frame and extracts over 112,500 keypoints per minute.", "label": true }, { "paperid": "2411.11562v1", "paper_path": "./SciVer/papers/2411.11562v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11562v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.11562v1_figure_4(b).png", "section": [ "3.3", "1" ], "request_id": 1833, "origin_statement": "At ADU of 4000, sensor_01 reaches about 25 dB SNR and ~0.4×10^5 noise variance, while sensor_06 has ~15 dB SNR and ~3.8×10^5 variance.", "perturbed_statement": "At ADU of 4000, sensor_01 reaches about 25 dB SNR and ~0.4×10^5 noise variance, while sensor_06 has ~20 dB SNR and ~3.8×10^5 variance.", "perturbed_explanation": "The perturbed statement claims sensor_06 has ~20 dB SNR at 4000 ADU. 
However, the SNR vs ADU plot shows sensor_06’s curve is around 15 dB at 4000 ADU, so the 20 dB figure contradicts the image.", "claim": "At ADU of 4000, sensor_01 reaches about 25 dB SNR and ~0.4×10^5 noise variance, while sensor_06 has ~15 dB SNR and ~3.8×10^5 variance.", "label": true }, { "paperid": "2410.10995v2", "paper_path": "./SciVer/papers/2410.10995v2.json", "claim_type": "parallel", "item1": "2", "item2": "4(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.10995v2_figure_2.png", "item2_path": "./SciVer/images/2410.10995v2_figure_4(a).png", "section": [ "3.2" ], "request_id": 5, "origin_statement": "For Kiwi 23 XXL, the MT-GenEval ambiguous Spanish feminine-to-masculine QE ratio is around 0.945, while on GeNTE’s ambiguous instances its neutral-to-gendered ratio is about 0.958, showing both responses scored nearly 5% lower than their masculine or gendered alternatives.", "perturbed_statement": "For Kiwi 23 XXL, the MT-GenEval ambiguous Spanish feminine-to-masculine QE ratio is around 0.975, while on GeNTE’s ambiguous instances its neutral-to-gendered ratio is about 1.002, indicating marginally higher scores for feminine or neutral outputs.", "perturbed_explanation": "The perturbation misreports the Spanish feminine-to-masculine ratio as 0.975, but in Figure 2 the Kiwi 23 XXL bar for Spanish is approximately 0.945. It also wrongly claims a neutral-to-gendered ratio of 1.002, whereas Figure 4(a) shows Kiwi 23 XXL around 0.958, both well below 1.", "claim": "For Kiwi 23 XXL, the MT-GenEval ambiguous Spanish feminine-to-masculine QE ratio is around 0.945, while on GeNTE’s ambiguous instances its neutral-to-gendered ratio is about 0.958, showing both responses scored nearly 5% lower than their masculine or gendered alternatives.", "label": true }, { "paperid": "2411.14880v1", "paper_path": "./SciVer/papers/2411.14880v1.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14880v1_figure_3.png", "item2_path": "./SciVer/images/2411.14880v1_figure_5.png", "section": [ "5.2" ], "request_id": 8, "origin_statement": "The PDTB-3 Conjunction prototype has the lowest average cosine distance (about 0.6) among second-level prototypes, and 100% of its top ten nearest neighbors are labeled “Conjunction.”", "perturbed_statement": "The PDTB-3 Conjunction prototype has an average cosine distance of about 0.8, and 100% of its top ten nearest neighbors are labeled “Conjunction.”", "perturbed_explanation": "The perturbed statement is incorrect because Figure 3(b) shows the average cosine distance for the PDTB-3 Conjunction prototype is approximately 0.6, not 0.8.", "claim": "The PDTB-3 Conjunction prototype has the lowest average cosine distance (about 0.6) among second-level prototypes, and 100% of its top ten nearest neighbors are labeled “Conjunction.”", "label": true }, { "paperid": "2409.17437v2", "paper_path": "./SciVer/papers/2409.17437v2.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17437v2_figure_1.png", "item2_path": "./SciVer/images/2409.17437v2-Table2-1.png", "section": [ "2" ], "request_id": 14, "origin_statement": "NGC 2403 lies at 3.0 Mpc with M_K ≈ -21.5 and stellar mass of 7.2×10^9 M⊙, more than twice that of NGC 247’s 3.2×10^9 M⊙ at M_K ≈ -19.5 and 3.7 Mpc.", "perturbed_statement": "NGC 2403 lies at 4.5 Mpc with M_K ≈ -21.5 and stellar mass of 7.2×10^9 M⊙, more than 
twice that of NGC 247’s 3.2×10^9 M⊙ at M_K ≈ -19.5 and 3.7 Mpc.", "perturbed_explanation": "The perturbed statement incorrectly gives NGC 2403’s distance as 4.5 Mpc, whereas Table 2 lists its distance as 3.0 Mpc, contradicting the provided data.", "claim": "NGC 2403 lies at 3.0 Mpc with M_K ≈ -21.5 and stellar mass of 7.2×10^9 M⊙, more than twice that of NGC 247’s 3.2×10^9 M⊙ at M_K ≈ -19.5 and 3.7 Mpc.", "label": true }, { "paperid": "2409.10066v1", "paper_path": "./SciVer/papers/2409.10066v1.json", "claim_type": "parallel", "item1": "6", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.10066v1_figure_6.png", "item2_path": "./SciVer/images/2409.10066v1-Table5-1.png", "section": [ "6.3" ], "request_id": 16, "origin_statement": "For the 6-lane straight road with 3 vehicles and a lane-change collision in Case 5, participants rated parameter range accuracy with a median of about 3, noticeably lower than the median of 4 for the 3-lane straight road rear-end collision in Case 1.", "perturbed_statement": "For the 6-lane straight road with 3 vehicles and a lane-change collision in Case 5, participants rated parameter range accuracy with a median of about 4, noticeably lower than the median of 4 for the 3-lane straight road rear-end collision in Case 1.", "perturbed_explanation": "The perturbation incorrectly raises the median rating for Case 5’s parameter range from about 3 to 4. In Figure 6, the boxplot for Parameter Range in Case 5 clearly shows a median around 3, not 4.", "claim": "For the 6-lane straight road with 3 vehicles and a lane-change collision in Case 5, participants rated parameter range accuracy with a median of about 3, noticeably lower than the median of 4 for the 3-lane straight road rear-end collision in Case 1.", "label": true }, { "paperid": "2411.14321v1", "paper_path": "./SciVer/papers/2411.14321v1.json", "claim_type": "parallel", "item1": "8(b)", "item2": "8(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14321v1_figure_8(b).png", "item2_path": "./SciVer/images/2411.14321v1_figure_8(c).png", "section": [ "4.1" ], "request_id": 25, "origin_statement": "On flat terrain, both the black Unitree-A1 quadruped (first image) and the white Unitree-Go2 quadruped (second image) achieved the maximum average survival time of 200 simulation steps, indicating zero failures within the allotted 200-step test duration.", "perturbed_statement": "On flat terrain, both the black Unitree-A1 quadruped (first image) and the white Unitree-Go2 quadruped (second image) achieved an average survival time of 190 simulation steps, indicating zero failures within the allotted 200-step test duration.", "perturbed_explanation": "The statement is incorrect because Table 3 specifies that both A1 and Go2 reached the 200‐step upper bound on flat terrain; neither robot’s average survival time was 190 steps as claimed.", "claim": "On flat terrain, both the black Unitree-A1 quadruped (first image) and the white Unitree-Go2 quadruped (second image) achieved the maximum average survival time of 200 simulation steps, indicating zero failures within the allotted 200-step test duration.", "label": true }, { "paperid": "2410.07110v1", "paper_path": "./SciVer/papers/2410.07110v1.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.07110v1-Table2-1.png", "item2_path": "./SciVer/images/2410.07110v1-Table4-1.png", "section": [ "4.2" ], "request_id": 
26, "origin_statement": "On Split CIFAR-100 with buffer=500, switching from Reservoir (i.i.d. BWT = -58.22%) to Random updates (i.i.d. BWT = -32.53%) yields a 25.69% improvement in BWT, while the Challenging policy achieves an i.i.d. BWT of -41.78%, identical to the Ours method’s BWT in Table 2.", "perturbed_statement": "On Split CIFAR-100 with buffer=500, switching from Reservoir (i.i.d. BWT = -58.22%) to Random updates (i.i.d. BWT = -32.53%) yields a 30% improvement in BWT, while the Challenging policy achieves an i.i.d. BWT of -45.00%, identical to the Ours method’s BWT in Table 2.", "perturbed_explanation": "The statement is incorrect because the actual improvement from -58.22% to -32.53% is 25.69%, not 30%, and the Challenging policy’s i.i.d. BWT is -41.78%, not -45.00%, as shown in Table 4 and confirmed by the Ours entry in Table 2.", "claim": "On Split CIFAR-100 with buffer=500, switching from Reservoir (i.i.d. BWT = -58.22%) to Random updates (i.i.d. BWT = -32.53%) yields a 25.69% improvement in BWT, while the Challenging policy achieves an i.i.d. BWT of -41.78%, identical to the Ours method’s BWT in Table 2.", "label": true }, { "paperid": "2410.11385v1", "paper_path": "./SciVer/papers/2410.11385v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.11385v1_figure_1.png", "item2_path": "./SciVer/images/2410.11385v1_figure_2.png", "section": [ "4.2" ], "request_id": 32, "origin_statement": "While GPT-4o zero-shot accuracy on BA for 1*5 drops from 56% at I=3 to 13% at I=6 (a 43-point fall), Llama-3.1-405B's one-shot ICL accuracy on CI for 2*5 remains relatively stable, varying only from 73% at I=3 to 70% at I=6 (a 3-point decrease).", "perturbed_statement": "While GPT-4o zero-shot accuracy on BA for 1*5 drops from 56% at I=3 to 27% at I=6 (a 29-point fall), Llama-3.1-405B's one-shot ICL accuracy on CI for 2*5 increases from 73% at I=3 to 78% at I=6.", "perturbed_explanation": "The GPT-4o BA accuracy for 1*5 at I=6 is actually 13%, not 27%. Also, Llama-3.1-405B’s one-shot ICL accuracy on CI for 2*5 at I=6 is 70%, not 78%, so both figures contradict the plotted data.", "claim": "While GPT-4o zero-shot accuracy on BA for 1*5 drops from 56% at I=3 to 13% at I=6 (a 43-point fall), Llama-3.1-405B's one-shot ICL accuracy on CI for 2*5 remains relatively stable, varying only from 73% at I=3 to 70% at I=6 (a 3-point decrease).", "label": true }, { "paperid": "2410.09729v2", "paper_path": "./SciVer/papers/2410.09729v2.json", "claim_type": "parallel", "item1": "5", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09729v2_figure_5.png", "item2_path": "./SciVer/images/2410.09729v2_figure_9.png", "section": [ "3" ], "request_id": 46, "origin_statement": "Using the CLIP vision encoder with 672×672 inputs (Fig. 5), LLaVA produces a maximum predicted medication frequency of 35 for a target frequency of 32, while Idefics2 peaks at 29 for a true frequency of 28 (Fig. 9).", "perturbed_statement": "Using the CLIP vision encoder with 480×480 inputs (Fig. 5), LLaVA produces a maximum predicted medication frequency of 40 for a target frequency of 32, while Idefics2 peaks at 29 for a true frequency of 28 (Fig. 9).", "perturbed_explanation": "The perturbed statement is incorrect because Fig. 5 shows LLaVA’s CLIP vision encoder processes 672×672 images, not 480×480, and Fig. 
9 indicates LLaVA’s maximum prediction frequency is 35, not 40.", "claim": "Using the CLIP vision encoder with 672×672 inputs (Fig. 5), LLaVA produces a maximum predicted medication frequency of 35 for a target frequency of 32, while Idefics2 peaks at 29 for a true frequency of 28 (Fig. 9).", "label": true }, { "paperid": "2411.12007v2", "paper_path": "./SciVer/papers/2411.12007v2.json", "claim_type": "parallel", "item1": "2(c)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.12007v2_figure_2(c).png", "item2_path": "./SciVer/images/2411.12007v2-Table2-1.png", "section": [ "4.1" ], "request_id": 55, "origin_statement": "At μ≈0.01, the best-fit r_sp≈0.04 (red dashed) versus G&S’s ≈0.02 (orange), and this model achieves an RMSE of 2.1×10^−3, over two hundred times lower than G&S’s 7.4×10^−1.", "perturbed_statement": "At μ≈0.01, the best-fit r_sp≈0.04 (red dashed) versus G&S’s ≈0.02 (orange), and this model achieves an RMSE of 3.0×10^−1, over two hundred times lower than G&S’s 7.4×10^−1.", "perturbed_explanation": "The perturbed statement incorrectly lists the RMSE for the best-fit model as 3.0×10^−1, whereas Table 2 reports the best-fit RMSE to be 2.1×10^−3.", "claim": "At μ≈0.01, the best-fit r_sp≈0.04 (red dashed) versus G&S’s ≈0.02 (orange), and this model achieves an RMSE of 2.1×10^−3, over two hundred times lower than G&S’s 7.4×10^−1.", "label": true }, { "paperid": "2410.21603v2", "paper_path": "./SciVer/papers/2410.21603v2.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21603v2_figure_1.png", "item2_path": "./SciVer/images/2410.21603v2_figure_2.png", "section": [ "4.1" ], "request_id": 56, "origin_statement": "At x=0.5, the lognormal model M2 in Figure 1 has density about 0.53, exceeding the exponential M1’s density around 0.40, and in Figure 2a (n=100) ABC-Wass yields a median posterior for M1 of approximately 0.85, higher than ABC-CvM’s median of about 0.78.", "perturbed_statement": "At x=0.5, the lognormal model M2 in Figure 1 has density about 0.53, exceeding the exponential M1’s density around 0.36, and in Figure 2a (n=100) ABC-Wass yields a median posterior for M1 of approximately 0.75, lower than ABC-CvM’s median of about 0.78.", "perturbed_explanation": "This statement is incorrect because in Figure 1 the exponential model M1’s density at x=0.5 is about 0.40, not 0.36, and in Figure 2a the ABC-Wass median posterior is about 0.85, not 0.75, and remains above ABC-CvM’s median (~0.78).", "claim": "At x=0.5, the lognormal model M2 in Figure 1 has density about 0.53, exceeding the exponential M1’s density around 0.40, and in Figure 2a (n=100) ABC-Wass yields a median posterior for M1 of approximately 0.85, higher than ABC-CvM’s median of about 0.78.", "label": true }, { "paperid": "2409.10955v1", "paper_path": "./SciVer/papers/2409.10955v1.json", "claim_type": "parallel", "item1": "3", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10955v1_figure_3.png", "item2_path": "./SciVer/images/2409.10955v1_figure_6.png", "section": [ "4.2.2" ], "request_id": 57, "origin_statement": "Under direct evidence, GPT-4 on the NQ dataset with high memory strength shows a red MA ratio of approximately 0.55 and blue CMA ratio of about 0.40; when requiring direct plus paraphrase evidence, its MA ratio drops to 0.22 while its CMA ratio climbs to 0.78.", "perturbed_statement": "Under direct evidence, GPT-4 on the NQ 
dataset with high memory strength shows a red MA ratio of approximately 0.65 and blue CMA ratio of about 0.40; when requiring direct plus paraphrase evidence, its MA ratio drops to 0.22 while its CMA ratio climbs to 0.78.", "perturbed_explanation": "The perturbation incorrectly states the MA ratio under direct evidence as ~0.65, whereas Figure 3 actually shows it as ~0.55 for GPT-4 on NQ with high memory strength.", "claim": "Under direct evidence, GPT-4 on the NQ dataset with high memory strength shows a red MA ratio of approximately 0.55 and blue CMA ratio of about 0.40; when requiring direct plus paraphrase evidence, its MA ratio drops to 0.22 while its CMA ratio climbs to 0.78.", "label": true }, { "paperid": "2409.15218v1", "paper_path": "./SciVer/papers/2409.15218v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "4(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15218v1_figure_4(b).png", "item2_path": "./SciVer/images/2409.15218v1_figure_4(d).png", "section": [ "3.2" ], "request_id": 59, "origin_statement": "Diagram (2) features two γ insertions on the charged scalars H_{1,2}^± and a φ_i^0–μ Yukawa coupling y_{f1}^{φ_i^0}, whereas diagram (4a) only has one γ insertion on H_1^±, the other being a W_L^± gauge vertex alongside neutrino/neutral fermion ν_i/N_i propagation.", "perturbed_statement": "Diagram (2) features three γ insertions on the charged scalars H_{1,2}^± and a φ_i^0–μ Yukawa coupling y_{f1}^{φ_i^0}, whereas diagram (4a) only has one γ insertion on H_1^±, the other being a W_L^± gauge vertex alongside neutrino/neutral fermion ν_i/N_i propagation.", "perturbed_explanation": "The perturbation is incorrect because diagram (2) clearly shows only two external photon (γ) lines attached to the charged scalars H_{1,2}^±, not three as stated.", "claim": "Diagram (2) features two γ insertions on the charged scalars H_{1,2}^± and a φ_i^0–μ Yukawa coupling y_{f1}^{φ_i^0}, whereas diagram (4a) only has one γ insertion on H_1^±, the other being a W_L^± gauge vertex alongside neutrino/neutral fermion ν_i/N_i propagation.", "label": true }, { "paperid": "2409.16400v1", "paper_path": "./SciVer/papers/2409.16400v1.json", "claim_type": "parallel", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16400v1_figure_7.png", "item2_path": "./SciVer/images/2409.16400v1_figure_8.png", "section": [ "5.6" ], "request_id": 81, "origin_statement": "The Top-2 method boosts recall to 76.69%, which is about 2.45 points higher than the precision of 74.24% achieved using a top-3 window.", "perturbed_statement": "The Top-2 method boosts recall to 78.69%, which is about 2.45 points higher than the precision of 74.24% achieved using a top-3 window.", "perturbed_explanation": "Figure 7 shows the Top-2 recall value is 76.69%, not 78.69%, so the perturbed recall figure contradicts the original data.", "claim": "The Top-2 method boosts recall to 76.69%, which is about 2.45 points higher than the precision of 74.24% achieved using a top-3 window.", "label": true }, { "paperid": "2411.07133v2", "paper_path": "./SciVer/papers/2411.07133v2.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07133v2_figure_3.png", "item2_path": "./SciVer/images/2411.07133v2-Table3-1.png", "section": [ "3.3" ], "request_id": 88, "origin_statement": "With Gemma-2-9b-it sampling, the highest average performance of 14.56 occurs at temperature 
1.2 and top-p 0.9 (Figure 3), which exceeds the 13.92 AP achieved by Llama-3.1-Minitron-4B under Best-of-N reject sampling (Table 3) by 0.64 points.", "perturbed_statement": "With Gemma-2-9b-it sampling, the highest average performance of 14.56 occurs at temperature 1.2 and top-p 0.9, which exceeds the 14.92 AP achieved by Llama-3.1-Minitron-4B under Best-of-N reject sampling by 0.36 points.", "perturbed_explanation": "Table 3 actually reports an AP of 13.92 % for Llama-3.1-Minitron-4B with Best-of-N, not 14.92 %. The perturbed AP value contradicts the table.", "claim": "With Gemma-2-9b-it sampling, the highest average performance of 14.56 occurs at temperature 1.2 and top-p 0.9 (Figure 3), which exceeds the 13.92 AP achieved by Llama-3.1-Minitron-4B under Best-of-N reject sampling (Table 3) by 0.64 points.", "label": true }, { "paperid": "2411.10018v1", "paper_path": "./SciVer/papers/2411.10018v1.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.10018v1_figure_3.png", "item2_path": "./SciVer/images/2411.10018v1-Table3-1.png", "section": [ "3.3" ], "request_id": 89, "origin_statement": "Comedy films average an emotional range of about -10.32, roughly 2.47 points lower than the highest-range dialogue phrase 'All rise.' at -7.85, indicating that even the most flexible lines exceed average genre variability.", "perturbed_statement": "Comedy films average an emotional range of about -9.50, roughly 1.65 points lower than the highest-range dialogue phrase 'All rise.' at -7.85, indicating that even the most flexible lines exceed average genre variability.", "perturbed_explanation": "The perturbation incorrectly states that comedy films average -9.50; Figure 3 shows the actual average emotional range for Comedy is around -10.32, not -9.50.", "claim": "Comedy films average an emotional range of about -10.32, roughly 2.47 points lower than the highest-range dialogue phrase 'All rise.' at -7.85, indicating that even the most flexible lines exceed average genre variability.", "label": true }, { "paperid": "2409.17730v1", "paper_path": "./SciVer/papers/2409.17730v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17730v1_figure_1(b).png", "item2_path": "./SciVer/images/2409.17730v1-Table2-1.png", "section": [ "4.2.2" ], "request_id": 111, "origin_statement": "Beam search with 4 beams yields an NDCG@10 of 0.187 versus Top-K's 0.1885 on ML-20M, while at step 4 in the Steam setting beam search's HitRate is 0.070, 5.4% below greedy's 0.074.", "perturbed_statement": "Beam search with 4 beams yields an NDCG@10 of 0.190 versus Top-K's 0.1885 on ML-20M, while at step 4 in the Steam setting beam search's HitRate is 0.075, 1.35% above greedy's 0.074.", "perturbed_explanation": "The perturbed NDCG@10 value of 0.190 at 4 beams contradicts Figure 1, which shows it is actually 0.187. 
Likewise, Table 2 reports a HitRate of 0.070 for beam search at step 4, not 0.075.", "claim": "Beam search with 4 beams yields an NDCG@10 of 0.187 versus Top-K's 0.1885 on ML-20M, while at step 4 in the Steam setting beam search's HitRate is 0.070, 5.4% below greedy's 0.074.", "label": true }, { "paperid": "2410.04797v1", "paper_path": "./SciVer/papers/2410.04797v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04797v1-Table2-1.png", "item2_path": "./SciVer/images/2410.04797v1-Table3-1.png", "section": [ "4.5" ], "request_id": 116, "origin_statement": "Attentive fusion boosts FEMH-vowel accuracy by 5.81% over Wav2vec2.0 (from 79.71% to 85.52%) and yields a further 1.46% gain over concat fusion (84.06% to 85.52%).", "perturbed_statement": "Attentive fusion boosts FEMH-vowel accuracy by 7.81% over Wav2vec2.0 (from 79.71% to 85.52%) and yields a further 1.46% gain over concat fusion (84.06% to 85.52%).", "perturbed_explanation": "The stated improvement of 7.81% is incorrect because the actual increase from 79.71% to 85.52% is 5.81%, not 7.81%, contradicting the figures in Table 2.", "claim": "Attentive fusion boosts FEMH-vowel accuracy by 5.81% over Wav2vec2.0 (from 79.71% to 85.52%) and yields a further 1.46% gain over concat fusion (84.06% to 85.52%).", "label": true }, { "paperid": "2410.06423v1", "paper_path": "./SciVer/papers/2410.06423v1.json", "claim_type": "parallel", "item1": "3", "item2": "7", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.06423v1-Table3-1.png", "item2_path": "./SciVer/images/2410.06423v1-Table7-1.png", "section": [ "5.3" ], "request_id": 118, "origin_statement": "In the Adult dataset’s 48,842 records (32,650 male vs 16,192 female), FAIREDU reduces SPD by 88.55% (to 0.019 from 0.166), surpassing LTDD’s 83.13% reduction.", "perturbed_statement": "In the Adult dataset’s 50,000 records (32,650 male vs 16,192 female), FAIREDU reduces SPD by 95% (to 0.019 from 0.166), outperforming LTDD’s 83.13% reduction.", "perturbed_explanation": "The perturbed statement incorrectly states the Adult dataset size as 50,000 instead of the actual 48,842 from Table 3, and misreports FAIREDU’s SPD reduction as 95% rather than the 88.55% shown in Table 7.", "claim": "In the Adult dataset’s 48,842 records (32,650 male vs 16,192 female), FAIREDU reduces SPD by 88.55% (to 0.019 from 0.166), surpassing LTDD’s 83.13% reduction.", "label": true }, { "paperid": "2411.02542v1", "paper_path": "./SciVer/papers/2411.02542v1.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.02542v1-Table2-1.png", "item2_path": "./SciVer/images/2411.02542v1-Table4-1.png", "section": [ "6.2" ], "request_id": 121, "origin_statement": "With 1.17 million nodes and a 10.6% positive ratio, California exhibits the highest GCN-CP F1 improvement (+21.61) among states, rising from 24.00 to 45.61.", "perturbed_statement": "With 1.17 million nodes and a 12.6% positive ratio, California exhibits the highest GCN-CP F1 improvement (+21.61) among states, rising from 24.00 to 45.61.", "perturbed_explanation": "Table 2 lists California’s positive ratio as 0.106 (10.6%), not 12.6%, so the perturbed 12.6% contradicts the dataset description.", "claim": "With 1.17 million nodes and a 10.6% positive ratio, California exhibits the highest GCN-CP F1 improvement (+21.61) among states, rising from 24.00 to 45.61.", "label": 
true }, { "paperid": "2410.13762v1", "paper_path": "./SciVer/papers/2410.13762v1.json", "claim_type": "parallel", "item1": "7", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13762v1_figure_7.png", "item2_path": "./SciVer/images/2410.13762v1-Table5-1.png", "section": [ "3.5" ], "request_id": 133, "origin_statement": "In Figure 7, the worst-case velocity prediction (Re=157903) shows local errors up to ~10%, nearly double the overall average Relative L2 error for velocity (5.13%) reported in Table 5.", "perturbed_statement": "In Figure 7, the worst-case velocity prediction (Re=157903) shows local errors up to ~15%, nearly three times the overall average Relative L2 error for velocity (5.13%) reported in Table 5.", "perturbed_explanation": "The perturbation is incorrect because Figure 7’s error colorbar indicates maximum velocity errors around 10%, not 15%, so claiming local errors up to ~15% contradicts the visualized data.", "claim": "In Figure 7, the worst-case velocity prediction (Re=157903) shows local errors up to ~10%, nearly double the overall average Relative L2 error for velocity (5.13%) reported in Table 5.", "label": true }, { "paperid": "2410.21131v1", "paper_path": "./SciVer/papers/2410.21131v1.json", "claim_type": "parallel", "item1": "3(c)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.21131v1_figure_3(c).png", "item2_path": "./SciVer/images/2410.21131v1-Table3-1.png", "section": [ "4.3" ], "request_id": 137, "origin_statement": "After fine-tuning, Llama 3 70B Instruct correctly classifies 90% of true High labels on the metric split and boosts its accuracy from 57% to 85%, a 28-point gain.", "perturbed_statement": "After fine-tuning, Llama 3 70B Instruct correctly classifies 95% of true High labels on the metric split and boosts its accuracy from 57% to 85%, a 30-point gain.", "perturbed_explanation": "The perturbation is incorrect because the confusion matrix shows 90% of true High labels are correctly classified (not 95%), and the accuracy improvement from 57% to 85% is 28 points, not 30.", "claim": "After fine-tuning, Llama 3 70B Instruct correctly classifies 90% of true High labels on the metric split and boosts its accuracy from 57% to 85%, a 28-point gain.", "label": true }, { "paperid": "2409.09622v1", "paper_path": "./SciVer/papers/2409.09622v1.json", "claim_type": "parallel", "item1": "2", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.09622v1_figure_2.png", "item2_path": "./SciVer/images/2409.09622v1_figure_3(b).png", "section": [ "4" ], "request_id": 149, "origin_statement": "The arrangement in Fig 2 of two rotationally symmetric paraboloids and a hyperboloid intersecting a horizontal plane yields 12 regions in R^3, and Fig 3 shows 36 real and 46 complex critical points computed by HypersurfaceRegions.jl.", "perturbed_statement": "The arrangement in Fig 2 of two rotationally symmetric paraboloids and a hyperboloid intersecting a horizontal plane yields 12 regions in R^3, and Fig 3 shows 42 real and 46 complex critical points computed by HypersurfaceRegions.jl.", "perturbed_explanation": "The perturbed statement asserts there are 42 real critical points, but Figure 3 clearly reports only 36 real critical points. 
This contradiction makes the perturbed claim incorrect.", "claim": "The arrangement in Fig 2 of two rotationally symmetric paraboloids and a hyperboloid intersecting a horizontal plane yields 12 regions in R^3, and Fig 3 shows 36 real and 46 complex critical points computed by HypersurfaceRegions.jl.", "label": true }, { "paperid": "2411.15413v1", "paper_path": "./SciVer/papers/2411.15413v1.json", "claim_type": "parallel", "item1": "3(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15413v1_figure_3(b).png", "item2_path": "./SciVer/images/2411.15413v1-Table3-1.png", "section": [ "3.4" ], "request_id": 161, "origin_statement": "About 1,000 heatmaps in the lower left lung region have 50-75% mask coverage, exceeding the 582 images reserved for testing, which constitute 20% of the 2,951-sample dataset.", "perturbed_statement": "About 500 heatmaps in the lower left lung region have 50-75% mask coverage, exceeding the 582 images reserved for testing, which constitute 20% of the 2,951-sample dataset.", "perturbed_explanation": "The chart shows roughly 1,000 heatmaps in the 50-75% bin for the lower left lung, not 500. Furthermore, 500 is less than the 582 test samples, so it does not exceed the number reserved for testing.", "claim": "About 1,000 heatmaps in the lower left lung region have 50-75% mask coverage, exceeding the 582 images reserved for testing, which constitute 20% of the 2,951-sample dataset.", "label": true }, { "paperid": "2409.17791v1", "paper_path": "./SciVer/papers/2409.17791v1.json", "claim_type": "parallel", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17791v1_figure_2.png", "item2_path": "./SciVer/images/2409.17791v1-Table1-1.png", "section": [ "3.2" ], "request_id": 170, "origin_statement": "With SPO, Mistral-7B reaches a 90.2% TL;DR win rate (Table 1), which is 1.6 points higher than the 88.6% SPO win rate of LLaMA-13B shown in Figure 2 and represents a 3.6% lift over its DPO baseline of 86.6%.", "perturbed_statement": "With SPO, Mistral-7B reaches a 91.2% TL;DR win rate (Table 1), which is 2.6 points higher than the 88.6% SPO win rate of LLaMA-13B shown in Figure 2 and represents a 4.6% lift over its DPO baseline of 86.6%.", "perturbed_explanation": "Table 1 reports a 90.2% SPO win rate and a 3.6% improvement for Mistral-7B, not 91.2% or 4.6%. 
Additionally, Figure 2 shows LLaMA-13B’s SPO win rate at 88.6%, making the gap 1.6 points, not 2.6.", "claim": "With SPO, Mistral-7B reaches a 90.2% TL;DR win rate (Table 1), which is 1.6 points higher than the 88.6% SPO win rate of LLaMA-13B shown in Figure 2 and represents a 3.6% lift over its DPO baseline of 86.6%.", "label": true }, { "paperid": "2409.07107v1", "paper_path": "./SciVer/papers/2409.07107v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07107v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.07107v1_figure_4(b).png", "section": [ "4.2" ], "request_id": 192, "origin_statement": "In the cube sliding velocity estimation (Fig3), Implicit+GN drives cost from ~0.1 to ~1e-13 in about 20 iterations, while in the Go1 quadruped inverse dynamics (Fig4), Implicit+GN lowers gradient norm from ~1e-3 to ~1e-14 over 300 iterations.", "perturbed_statement": "In the cube sliding velocity estimation (Fig3), Implicit+GN drives cost from ~0.1 to ~1e-5 in about 50 iterations, while in the Go1 quadruped inverse dynamics (Fig4), Implicit+GN lowers gradient norm from ~1e-3 to ~1e-14 over 300 iterations.", "perturbed_explanation": "The perturbed statement incorrectly reports that the cost only drops to ~1e-5 and requires ~50 iterations, whereas Figure 3 shows the cost falls to ~1e-13 in about 20 iterations under Implicit+GN.", "claim": "In the cube sliding velocity estimation (Fig3), Implicit+GN drives cost from ~0.1 to ~1e-13 in about 20 iterations, while in the Go1 quadruped inverse dynamics (Fig4), Implicit+GN lowers gradient norm from ~1e-3 to ~1e-14 over 300 iterations.", "label": true }, { "paperid": "2409.14878v1", "paper_path": "./SciVer/papers/2409.14878v1.json", "claim_type": "parallel", "item1": "9", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14878v1_figure_9.png", "item2_path": "./SciVer/images/2409.14878v1-Table6-1.png", "section": [ "4.5.2" ], "request_id": 198, "origin_statement": "The supervised referee model achieves an overall F1 of 0.936, which is 18.6 percentage points higher than GPT-4’s F1 on the MMDA dataset (0.75).", "perturbed_statement": "The supervised referee model achieves an overall F1 of 0.936, which is 12.6 percentage points higher than GPT-4’s F1 on the MMDA dataset (0.75).", "perturbed_explanation": "The actual gap between the referee’s overall F1 (0.936) and GPT-4’s F1 on MMDA (0.75) is 0.186, or 18.6 percentage points, not 12.6 as claimed.", "claim": "The supervised referee model achieves an overall F1 of 0.936, which is 18.6 percentage points higher than GPT-4’s F1 on the MMDA dataset (0.75).", "label": true }, { "paperid": "2410.11378v1", "paper_path": "./SciVer/papers/2410.11378v1.json", "claim_type": "parallel", "item1": "5(b)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.11378v1_figure_5(b).png", "item2_path": "./SciVer/images/2410.11378v1_figure_5(c).png", "section": [ "4.8" ], "request_id": 201, "origin_statement": "By iteration 200, WPFed outperforms ProxyFL by about 15% under 40% adversaries and by about 22% under 60% adversaries, demonstrating an increasing performance gap as malicious clients rise.", "perturbed_statement": "By iteration 200, WPFed outperforms ProxyFL by about 15% under 40% adversaries but narrows to just 5% under 60% adversaries, suggesting better recovery when more clients are malicious.", "perturbed_explanation": "The 
perturbation is incorrect because in the 60% adversaries plot, ProxyFL’s accuracy at iteration 200 is around 72% while WPFed’s is about 94%, yielding a gap of roughly 22%, not 5% as stated.", "claim": "By iteration 200, WPFed outperforms ProxyFL by about 15% under 40% adversaries and by about 22% under 60% adversaries, demonstrating an increasing performance gap as malicious clients rise.", "label": true }, { "paperid": "2409.07124v1", "paper_path": "./SciVer/papers/2409.07124v1.json", "claim_type": "parallel", "item1": "1", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07124v1_figure_1.png", "item2_path": "./SciVer/images/2409.07124v1_figure_5.png", "section": [ "2.1" ], "request_id": 204, "origin_statement": "The Scutum–Crux spiral arm contributes 29% of the total N II luminosity of 1.60×10^40 erg/s (≈4.6×10^39 erg/s), while the best-fit model reaches a peak Galactic-plane intensity of ≈1.4×10^−4 erg cm^−2 s^−1 sr^−1 near l≈30°.", "perturbed_statement": "The Scutum–Crux spiral arm contributes 39% of the total N II luminosity of 1.60×10^40 erg/s (≈6.2×10^39 erg/s), while the best-fit model reaches a peak Galactic-plane intensity of ≈1.4×10^−4 erg cm^−2 s^−1 sr^−1 near l≈30°.", "perturbed_explanation": "The perturbed statement incorrectly gives the Scutum–Crux arm a 39% fraction. In Figure 5, the legend shows f_SC=0.29, not 0.39, making the stated fractional luminosity and its product with 1.60×10^40 erg/s inconsistent with the plotted model.", "claim": "The Scutum–Crux spiral arm contributes 29% of the total N II luminosity of 1.60×10^40 erg/s (≈4.6×10^39 erg/s), while the best-fit model reaches a peak Galactic-plane intensity of ≈1.4×10^−4 erg cm^−2 s^−1 sr^−1 near l≈30°.", "label": true }, { "paperid": "2409.15317v1", "paper_path": "./SciVer/papers/2409.15317v1.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15317v1_figure_3.png", "item2_path": "./SciVer/images/2409.15317v1-Table2-1.png", "section": [ "4.3" ], "request_id": 211, "origin_statement": "Figure 3a shows that for the noisy pilot, IDA intervenes roughly four times more during corrupted actions (~20 interventions) than during pilot actions (~5), and Table 2 reports IDA (diffusion) boosts the noisy pilot’s success rate from 21% to 83%.", "perturbed_statement": "Figure 3a shows that for the noisy pilot, IDA intervenes roughly four times more during corrupted actions (~20 interventions) than during pilot actions (~5), and Table 2 reports IDA (diffusion) boosts the noisy pilot’s success rate from 21% to 75%.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 indicates that IDA (diffusion) raises the noisy pilot’s success rate to 83%, not 75%. 
The 75% figure actually corresponds to the standalone Copilot (diffusion), not the interventional IDA.", "claim": "Figure 3a shows that for the noisy pilot, IDA intervenes roughly four times more during corrupted actions (~20 interventions) than during pilot actions (~5), and Table 2 reports IDA (diffusion) boosts the noisy pilot’s success rate from 21% to 83%.", "label": true }, { "paperid": "2410.09123v2", "paper_path": "./SciVer/papers/2410.09123v2.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09123v2_figure_1(a).png", "item2_path": "./SciVer/images/2410.09123v2_figure_1(c).png", "section": [ "5.5", "1" ], "request_id": 214, "origin_statement": "In WIKI, pairwise cosine similarities between relations vary from approximately –0.2 up to nearly 1.0, whereas in UMLS they remain between about 0.1 and 0.9, showing that UMLS relation embeddings are more tightly clustered than those in WIKI.", "perturbed_statement": "In WIKI, pairwise cosine similarities between relations vary from approximately –0.2 up to nearly 1.0, whereas in UMLS they remain between about –0.1 and 0.9, showing that UMLS relation embeddings are more tightly clustered than those in WIKI.", "perturbed_explanation": "The perturbed statement incorrectly claims UMLS similarities reach –0.1, but the UMLS curve in Figure 1(c) has a minimum around 0.1, not a negative value, contradicting the visual data.", "claim": "In WIKI, pairwise cosine similarities between relations vary from approximately –0.2 up to nearly 1.0, whereas in UMLS they remain between about 0.1 and 0.9, showing that UMLS relation embeddings are more tightly clustered than those in WIKI.", "label": true }, { "paperid": "2411.05521v2", "paper_path": "./SciVer/papers/2411.05521v2.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.05521v2_figure_3(a).png", "item2_path": "./SciVer/images/2411.05521v2-Table3-1.png", "section": [ "5.2" ], "request_id": 223, "origin_statement": "Imaging_studies queries increase from 214 in dev to 234 in test, while GPT-3.5’s execution accuracy on SQL with schema 5-shot (56.30%) is more than double its SPARQL 5-shot without schema accuracy (25.32%).", "perturbed_statement": "Imaging_studies queries increase from 220 in dev to 234 in test, while GPT-3.5’s execution accuracy on SQL with schema 5-shot (56.30%) is more than double its SPARQL 5-shot without schema accuracy (25.32%).", "perturbed_explanation": "The perturbed statement is incorrect because the figure shows 214 imaging_studies queries in the dev set, not 220.", "claim": "Imaging_studies queries increase from 214 in dev to 234 in test, while GPT-3.5’s execution accuracy on SQL with schema 5-shot (56.30%) is more than double its SPARQL 5-shot without schema accuracy (25.32%).", "label": true }, { "paperid": "2409.15621v1", "paper_path": "./SciVer/papers/2409.15621v1.json", "claim_type": "parallel", "item1": "5(e)", "item2": "5(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15621v1_figure_5(e).png", "item2_path": "./SciVer/images/2409.15621v1_figure_5(f).png", "section": [ "4.1.1" ], "request_id": 226, "origin_statement": "During dragging at 36 mm (e), the maximum compressive σ33 is about –0.8, whereas at 54 mm (f) it deepens to roughly –1.2, a 50% increase in magnitude and with the blue high‐stress region extending deeper into the slab.", 
"perturbed_statement": "During dragging at 36 mm (e), the maximum compressive σ33 is about –0.8, whereas at 54 mm (f) it only deepens to roughly –0.9, a 12% increase in magnitude with minimal additional stress penetration.", "perturbed_explanation": "The perturbed claim understates the stress at 54 mm: the actual image (f) shows a deep blue core corresponding to approximately –1.2 σ33, not –0.9, and a visibly larger stress penetration than implied.", "claim": "During dragging at 36 mm (e), the maximum compressive σ33 is about –0.8, whereas at 54 mm (f) it deepens to roughly –1.2, a 50% increase in magnitude and with the blue high‐stress region extending deeper into the slab.", "label": true }, { "paperid": "2409.09641v2", "paper_path": "./SciVer/papers/2409.09641v2.json", "claim_type": "parallel", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.09641v2_figure_7.png", "item2_path": "./SciVer/images/2409.09641v2-Table3-1.png", "section": [ "6.1.2" ], "request_id": 247, "origin_statement": "Six participants rated Usefulness as 7 in Figure 7a, which is roughly one-tenth of the 62 Complex feedback alerts recorded in Table 3.", "perturbed_statement": "Six participants rated Usefulness as 7 in Figure 7a, which is roughly one-tenth of the 39 Complex feedback alerts recorded in Table 3.", "perturbed_explanation": "Table 3 reports 62 instances of Complex feedback, not 39, so the perturbed statement contradicts the actual Complex feedback count.", "claim": "Six participants rated Usefulness as 7 in Figure 7a, which is roughly one-tenth of the 62 Complex feedback alerts recorded in Table 3.", "label": true }, { "paperid": "2409.10046v1", "paper_path": "./SciVer/papers/2409.10046v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10046v1_figure_1.png", "item2_path": "./SciVer/images/2409.10046v1_figure_2.png", "section": [ "2.1" ], "request_id": 274, "origin_statement": "Relative Humidity has a strong negative Pearson correlation of –0.44 with ignition in Fig 1, and Fig 2 (a) shows lightning-ignited events peaking at around 80% RH versus about 95% RH for non-ignitions.", "perturbed_statement": "Relative Humidity has a strong positive Pearson correlation of +0.44 with ignition in Fig 1, and Fig 2 (a) shows lightning-ignited events peaking at around 80% RH versus about 95% RH for non-ignitions.", "perturbed_explanation": "The Pearson correlation between RH and ignition probability in Fig 1 is –0.44 (indicating a negative relationship), not +0.44 as stated in the perturbed claim.", "claim": "Relative Humidity has a strong negative Pearson correlation of –0.44 with ignition in Fig 1, and Fig 2 (a) shows lightning-ignited events peaking at around 80% RH versus about 95% RH for non-ignitions.", "label": true }, { "paperid": "2409.03074v1", "paper_path": "./SciVer/papers/2409.03074v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03074v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.03074v1_figure_1(b).png", "section": [ "5.1" ], "request_id": 276, "origin_statement": "At E_lab≈20 MeV, the EFT n–n phase shift for Λ_nn=100 MeV is about 76°, whereas for Λ_nn=400 MeV it is around 79° (a ~3° gap), yet increasing the regulator exponent from n=2 to n=10 alters the phase shift by less than 1° at the same energy.", "perturbed_statement": "At E_lab≈20 MeV, 
the EFT n–n phase shift for Λ_nn=100 MeV is about 76°, whereas for Λ_nn=400 MeV it is around 79° (a ~5° gap), yet increasing the regulator exponent from n=2 to n=10 alters the phase shift by less than 1° at the same energy.", "perturbed_explanation": "The bottom panel shows the phase‐shift difference between Λ_nn=100 MeV (≈76°) and Λ_nn=400 MeV (≈79°) at 20 MeV is approximately 3°, not 5° as stated, so the claimed “~5° gap” contradicts the plotted values.", "claim": "At E_lab≈20 MeV, the EFT n–n phase shift for Λ_nn=100 MeV is about 76°, whereas for Λ_nn=400 MeV it is around 79° (a ~3° gap), yet increasing the regulator exponent from n=2 to n=10 alters the phase shift by less than 1° at the same energy.", "label": true }, { "paperid": "2409.07088v1", "paper_path": "./SciVer/papers/2409.07088v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07088v1_figure_4.png", "item2_path": "./SciVer/images/2409.07088v1_figure_5.png", "section": [ "4.1" ], "request_id": 284, "origin_statement": "WebNLG's distribution in Fig. 4 peaks at the 15–20 word range with normalized count ~0.18 and Fig. 5 shows its slope at 5.76 words/triplet, while GenWiki peaks at 20–25 words with ~0.15 normalized count and slope 0.87 words/triplet.", "perturbed_statement": "WebNLG's distribution in Fig. 4 peaks at the 15–20 word range with normalized count ~0.18 and Fig. 5 shows its slope at 3.22 words/triplet, while GenWiki peaks at 20–25 words with ~0.15 normalized count and slope 0.87 words/triplet.", "perturbed_explanation": "The perturbed statement incorrectly reports WebNLG's slope as 3.22 words/triplet; Fig. 5 actually shows WebNLG with the steepest slope of 5.76 words/triplet.", "claim": "WebNLG's distribution in Fig. 4 peaks at the 15–20 word range with normalized count ~0.18 and Fig. 5 shows its slope at 5.76 words/triplet, while GenWiki peaks at 20–25 words with ~0.15 normalized count and slope 0.87 words/triplet.", "label": true }, { "paperid": "2411.13677v1", "paper_path": "./SciVer/papers/2411.13677v1.json", "claim_type": "parallel", "item1": "7(b)", "item2": "7(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13677v1_figure_7(b).png", "item2_path": "./SciVer/images/2411.13677v1_figure_7(c).png", "section": [ "9" ], "request_id": 287, "origin_statement": "User 2 completed the task 11.3 seconds faster with BiDex (7.6 s vs 18.9 s) and rated its responsiveness 1.2 points higher (4.4 vs 3.2).", "perturbed_statement": "User 2 completed the task 9.5 seconds faster with BiDex (7.6 s vs 18.9 s) and rated its responsiveness 2.0 points higher (4.4 vs 3.2).", "perturbed_explanation": "The perturbed statement incorrectly reports the time savings and rating gap. In the chart, BiDex’s time for User 2 is 7.6 s and Apple Vision Pro is 18.9 s, a difference of 11.3 s (not 9.5 s). 
The responsiveness ratings differ by 1.2 points (4.4−3.2), not 2.0 points.", "claim": "User 2 completed the task 11.3 seconds faster with BiDex (7.6 s vs 18.9 s) and rated its responsiveness 1.2 points higher (4.4 vs 3.2).", "label": true }, { "paperid": "2410.23090v1", "paper_path": "./SciVer/papers/2410.23090v1.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23090v1_figure_5.png", "item2_path": "./SciVer/images/2410.23090v1-Table3-1.png", "section": [ "5.2" ], "request_id": 290, "origin_statement": "Under LLM Summarization, Qwen2.5-7B-SFT’s citation recall jumps from 2.9 to 14.1 while its GPT-4 win rate rises from 47.9% to 62.7%, showing that SFT notably improves both citation labeling and response quality.", "perturbed_statement": "Under LLM Summarization, Qwen2.5-7B-SFT’s citation recall jumps from 2.9 to 20.1 while its GPT-4 win rate rises from 47.9% to 62.7%, showing that SFT notably improves both citation labeling and response quality.", "perturbed_explanation": "The perturbed recall value is incorrect: Table 3 shows Qwen2.5-7B-SFT’s citation recall under LLM Summarization is 14.1, not 20.1.", "claim": "Under LLM Summarization, Qwen2.5-7B-SFT’s citation recall jumps from 2.9 to 14.1 while its GPT-4 win rate rises from 47.9% to 62.7%, showing that SFT notably improves both citation labeling and response quality.", "label": true }, { "paperid": "2409.13441v1", "paper_path": "./SciVer/papers/2409.13441v1.json", "claim_type": "parallel", "item1": "3(c)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13441v1_figure_3(c).png", "item2_path": "./SciVer/images/2409.13441v1-Table3-1.png", "section": [ "5", "4" ], "request_id": 292, "origin_statement": "In the hypothetical reaction, species B rises from 2 M at t=0 to about 8.3 M by 4 h, while in the aldol condensation, species C increases from 2 M at t=0 to roughly 4.8 M at 4 h, showing a >2× greater 4-h accumulation of B.", "perturbed_statement": "In the hypothetical reaction, species B rises from 2 M at t=0 to about 7.3 M by 4 h, while in the aldol condensation, species C increases from 2 M at t=0 to roughly 4.8 M at 4 h, showing a >2× greater 4-h accumulation of B.", "perturbed_explanation": "The perturbed statement errs in reporting species B’s 4 h concentration as 7.3 M; Fig. 3(a) shows B is actually ~8.3 M at 4 h.
This underestimates B’s true concentration and contradicts the plotted data.", "claim": "In the hypothetical reaction, species B rises from 2 M at t=0 to about 8.3 M by 4 h, while in the aldol condensation, species C increases from 2 M at t=0 to roughly 4.8 M at 4 h, showing a >2× greater 4-h accumulation of B.", "label": true }, { "paperid": "2409.06224v1", "paper_path": "./SciVer/papers/2409.06224v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06224v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.06224v1_figure_4(b).png", "section": [ "3" ], "request_id": 433, "origin_statement": "In the MIP-GAF dataset, the test split contains 2,822 images (17% of total), and 54% of its samples were rated as difficult to identify the MIP in the user study.", "perturbed_statement": "In the MIP-GAF dataset, the test split contains 3,000 images (20% of total), and 54% of its samples were rated as difficult to identify the MIP in the user study.", "perturbed_explanation": "The perturbed statement incorrectly claims the test split has 3,000 images (20%), whereas Figure 2 shows the test set actually contains 2,822 images, representing 17% of the dataset.", "claim": "In the MIP-GAF dataset, the test split contains 2,822 images (17% of total), and 54% of its samples were rated as difficult to identify the MIP in the user study.", "label": true }, { "paperid": "2411.00566v1", "paper_path": "./SciVer/papers/2411.00566v1.json", "claim_type": "parallel", "item1": "13", "item2": "14(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00566v1_figure_13.png", "item2_path": "./SciVer/images/2411.00566v1_figure_14(a).png", "section": [ "4.1" ], "request_id": 439, "origin_statement": "At n=32, the computed maximum points without isosceles triangles is 56, while for the 64×64 grid, standard search finds a 108-point construction and adding one transformer loop via PatternBoost yields a 110-point construction.", "perturbed_statement": "At n=32, the computed maximum points without isosceles triangles is 58, while for the 64×64 grid, standard search finds a 108-point construction and adding one transformer loop via PatternBoost yields a 112-point construction.", "perturbed_explanation": "The plot in Figure 13 shows the point at (32, 56), not (32, 58), and Figure 14 reports the transformer-enhanced construction at 110 points, not 112.", "claim": "At n=32, the computed maximum points without isosceles triangles is 56, while for the 64×64 grid, standard search finds a 108-point construction and adding one transformer loop via PatternBoost yields a 110-point construction.", "label": true }, { "paperid": "2411.13151v1", "paper_path": "./SciVer/papers/2411.13151v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.13151v1_figure_2(b).png", "item2_path": "./SciVer/images/2411.13151v1-Table2-1.png", "section": [ "4" ], "request_id": 444, "origin_statement": "Variant D, which uses only DDD (see Table 2), reduces the median number of timed elements from about 5×10^4 in B to roughly 2×10^4, a 60% drop.", "perturbed_statement": "Variant D, which uses only DDD, reduces the median number of timed elements from about 3×10^4 in B to roughly 1×10^4, a 67% drop.", "perturbed_explanation": "The figure shows the median |Ω| for B is around 5×10^4 (not 3×10^4) and for D is around 2×10^4 (not 1×10^4), so the 
stated numbers and percentage are incorrect.", "claim": "Variant D, which uses only DDD (see Table 2), reduces the median number of timed elements from about 5×10^4 in B to roughly 2×10^4, a 60% drop.", "label": true }, { "paperid": "2410.21647v3", "paper_path": "./SciVer/papers/2410.21647v3.json", "claim_type": "parallel", "item1": "6(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21647v3_figure_6(a).png", "item2_path": "./SciVer/images/2410.21647v3_figure_6(b).png", "section": [ "5.2" ], "request_id": 457, "origin_statement": "GPT-4o’s pass@1 decreases by 16 percentage points from 39% on self-contained contexts (Fig.6a) to 23% on file-level contexts, whereas on length complexity it decreases by 30 points from 67% for [0,66) tokens to 37% for [66,128) tokens (Fig.6b).", "perturbed_statement": "GPT-4o’s pass@1 decreases by 20 points from 39% on self-contained contexts (Fig.6a) to 23% on file-level contexts, whereas on length complexity it decreases by 20 points from 67% for [0,66) tokens to 47% for [66,128) tokens (Fig.6b).", "perturbed_explanation": "The perturbation is wrong because in Fig.6a the drop from self-contained (39%) to file-level (23%) is actually 16 points, not 20. In Fig.6b GPT-4o’s pass@1 for [66,128) tokens is about 37%, not 47%, so the stated 20-point drop to 47% is inaccurate.", "claim": "GPT-4o’s pass@1 decreases by 16 percentage points from 39% on self-contained contexts (Fig.6a) to 23% on file-level contexts, whereas on length complexity it decreases by 30 points from 67% for [0,66) tokens to 37% for [66,128) tokens (Fig.6b).", "label": true }, { "paperid": "2409.17476v1", "paper_path": "./SciVer/papers/2409.17476v1.json", "claim_type": "parallel", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17476v1_figure_5.png", "item2_path": "./SciVer/images/2409.17476v1-Table4-1.png", "section": [ "5.2.1" ], "request_id": 467, "origin_statement": "Under DP PopAtk in Figure 5, VAT reduces T-NDCG@50 to approximately 0.21%, a 40% drop from the best baseline’s ~0.35%; Table 4 shows VAT cuts T-NDCG@20 to 0.008 ± 0.001 from 0.012 ± 0.005, a roughly 33% decrease.", "perturbed_statement": "Under DP PopAtk in Figure 5, VAT reduces T-NDCG@50 to approximately 0.21%, a 40% drop from the best baseline’s ~0.35%; Table 4 shows VAT cuts T-NDCG@20 to 0.02 ± 0.001 from 0.012 ± 0.005, a roughly 33% decrease.", "perturbed_explanation": "The perturbed statement claims VAT’s T-NDCG@20 is 0.02 ± 0.001, but Table 4 reports VAT’s T-NDCG@20 as 0.008 ± 0.001. 
This contradicts the actual value in the table.", "claim": "Under DP PopAtk in Figure 5, VAT reduces T-NDCG@50 to approximately 0.21%, a 40% drop from the best baseline’s ~0.35%; Table 4 shows VAT cuts T-NDCG@20 to 0.008 ± 0.001 from 0.012 ± 0.005, a roughly 33% decrease.", "label": true }, { "paperid": "2410.18462v1", "paper_path": "./SciVer/papers/2410.18462v1.json", "claim_type": "parallel", "item1": "7(b)", "item2": "7(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.18462v1_figure_7(b).png", "item2_path": "./SciVer/images/2410.18462v1_figure_7(c).png", "section": [ "2.2.4" ], "request_id": 496, "origin_statement": "At 150k steps, the sobel+flow variant travels approximately 40 units of distance versus the baseline’s 12, yet its average speed (~5) is about 15 units lower than the baseline’s ~20.", "perturbed_statement": "At 150k steps, the sobel+flow variant travels approximately 40 units of distance versus the baseline’s 12, yet its average speed (~25) is about 5 units higher than the baseline’s ~20.", "perturbed_explanation": "This is incorrect because the val/ep_avg_speed chart shows sobel+flow’s average speed at 150k steps is around 5, not 25, so it cannot exceed the baseline’s ~20 as claimed.", "claim": "At 150k steps, the sobel+flow variant travels approximately 40 units of distance versus the baseline’s 12, yet its average speed (~5) is about 15 units lower than the baseline’s ~20.", "label": true }, { "paperid": "2410.06971v2", "paper_path": "./SciVer/papers/2410.06971v2.json", "claim_type": "parallel", "item1": "3(b)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.06971v2_figure_3(b).png", "item2_path": "./SciVer/images/2410.06971v2-Table4-1.png", "section": [ "4.1" ], "request_id": 501, "origin_statement": "Figure 3's estimate of 0.0045 shows each additional log point of working age population raises industry complexity by 0.0045; Table 4 (col.4) shows a 1.002 increase in firms' logged real wages per unit complexity—over 200 times the complexity gain from population growth.", "perturbed_statement": "Figure 3's estimate of 0.005 shows each additional log point of working age population raises industry complexity by 0.005; Table 4 (col.4) shows a 1.002 increase in firms' logged real wages per unit complexity—over 200 times the complexity gain from population growth.", "perturbed_explanation": "The slope in Figure 3 is actually 0.0045, not 0.005, so the stated effect of population size on average industry complexity is incorrect.", "claim": "Figure 3's estimate of 0.0045 shows each additional log point of working age population raises industry complexity by 0.0045; Table 4 (col.4) shows a 1.002 increase in firms' logged real wages per unit complexity—over 200 times the complexity gain from population growth.", "label": true }, { "paperid": "2410.23494v1", "paper_path": "./SciVer/papers/2410.23494v1.json", "claim_type": "parallel", "item1": "11(a)", "item2": "11(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23494v1_figure_11(a).png", "item2_path": "./SciVer/images/2410.23494v1_figure_11(f).png", "section": [ "4" ], "request_id": 502, "origin_statement": "Figure 11 renders the pedestrian signal under seven lighting factor G settings across [-0.5, 0.5], yielding seven distinct noisy/brightness variations, compared to the single real-world pulley image by selennunes, for a 7:1 synthetic-to-real sample ratio.", "perturbed_statement": "Figure 11 renders the 
pedestrian signal under five lighting factor G settings across [-0.5, 0.5], yielding five distinct noisy/brightness variations, compared to two real-world pulley images by selennunes, for a 5:2 synthetic-to-real sample ratio.", "perturbed_explanation": "The perturbed statement misstates the number of simulated lighting variations and real images: Figure 11 actually shows seven lighting factor settings (not five), and the pulley figure depicts only one real-world image (not two).", "claim": "Figure 11 renders the pedestrian signal under seven lighting factor G settings across [-0.5, 0.5], yielding seven distinct noisy/brightness variations, compared to the single real-world pulley image by selennunes, for a 7:1 synthetic-to-real sample ratio.", "label": true }, { "paperid": "2410.21100v1", "paper_path": "./SciVer/papers/2410.21100v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "2(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21100v1_figure_2(b).png", "item2_path": "./SciVer/images/2410.21100v1_figure_2(f).png", "section": [ "4.3" ], "request_id": 505, "origin_statement": "At ν=0.5%, mSSRM-PGA(m=20) yields ≈104 cumulative wealth on FF25EU versus ≈245 on FF100MEINV, over twice as high on FF100MEINV under the same transaction cost.", "perturbed_statement": "At ν=0.5%, mSSRM-PGA(m=20) yields ≈114 cumulative wealth on FF25EU versus ≈315 on FF100MEINV, over twice as high on FF100MEINV under the same transaction cost.", "perturbed_explanation": "The perturbed statement is wrong because the plots show that at ν=0.5%, mSSRM-PGA(m=20) achieves about 104 on FF25EU (not 114) and about 245 on FF100MEINV (not 315).", "claim": "At ν=0.5%, mSSRM-PGA(m=20) yields ≈104 cumulative wealth on FF25EU versus ≈245 on FF100MEINV, over twice as high on FF100MEINV under the same transaction cost.", "label": true }, { "paperid": "2409.08732v1", "paper_path": "./SciVer/papers/2409.08732v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08732v1_figure_1.png", "item2_path": "./SciVer/images/2409.08732v1_figure_2.png", "section": [ "1" ], "request_id": 513, "origin_statement": "In 2020Q2, ground-truth GDP plunged to -3.2% versus NCDENow’s -2.1% forecast, and at that quarter in Figure2, indicator A was 1.2 while the realized quarterly GDP stood at just 0.4.", "perturbed_statement": "In 2020Q2, ground-truth GDP plunged to -3.2% versus NCDENow’s -3.0% forecast, and at that quarter in Figure2, indicator A was 1.2 while the realized quarterly GDP stood at just 0.4.", "perturbed_explanation": "Figure 1 shows NCDENow’s nowcast for 2020Q2 is –2.1%, not –3.0%, so the perturbed forecast contradicts the reported model output.", "claim": "In 2020Q2, ground-truth GDP plunged to -3.2% versus NCDENow’s -2.1% forecast, and at that quarter in Figure2, indicator A was 1.2 while the realized quarterly GDP stood at just 0.4.", "label": true }, { "paperid": "2411.16659v1", "paper_path": "./SciVer/papers/2411.16659v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16659v1_figure_1(a).png", "item2_path": "./SciVer/images/2411.16659v1_figure_1(b).png", "section": [ "2.1" ], "request_id": 537, "origin_statement": "The longest arrow in Fig 1a is twice as long as the shortest arrow, whereas in Fig 1b all arrows are uniform, each with identical maximum length.", "perturbed_statement": "The longest arrow 
in Fig 1a is 1.5 times as long as the shortest arrow, whereas in Fig 1b all arrows are uniform, each with identical maximum length.", "perturbed_explanation": "In Fig 1a the ratio between the longest and shortest arrows is actually 2:1, not 1.5:1, so stating a 1.5× difference contradicts the visual data.", "claim": "The longest arrow in Fig 1a is twice as long as the shortest arrow, whereas in Fig 1b all arrows are uniform, each with identical maximum length.", "label": true }, { "paperid": "2410.12851v3", "paper_path": "./SciVer/papers/2410.12851v3.json", "claim_type": "parallel", "item1": "3", "item2": "16", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.12851v3_figure_3.png", "item2_path": "./SciVer/images/2410.12851v3-Table16-1.png", "section": [ "6.1" ], "request_id": 551, "origin_statement": "“Conclusion Strength” has a separation score of about 0.35 in Figure 3, while “Response Length” yields a separation score of –0.463 in Table 16, representing an absolute gap of roughly 0.813, indicating strong conclusions in model summaries versus human brevity preferences.", "perturbed_statement": "“Conclusion Strength” has a separation score of about 0.35 in Figure 3, while “Response Length” yields a separation score of 0.463 in Table 16, representing an absolute gap of roughly 0.813, indicating strong conclusions in model summaries versus human brevity preferences.", "perturbed_explanation": "The perturbed statement incorrectly reports the separation score for “Response Length” as positive 0.463, but Table 16 shows its actual separation score is –0.463, contradicting the context.", "claim": "“Conclusion Strength” has a separation score of about 0.35 in Figure 3, while “Response Length” yields a separation score of –0.463 in Table 16, representing an absolute gap of roughly 0.813, indicating strong conclusions in model summaries versus human brevity preferences.", "label": true }, { "paperid": "2409.04941v1", "paper_path": "./SciVer/papers/2409.04941v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04941v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.04941v1_figure_3(b).png", "section": [ "4" ], "request_id": 554, "origin_statement": "At 60% GPU usage, cublas draws about 125 W compared to cuda-tensor’s roughly 90 W (a 35 W gap). The GPU model’s RMSE of 12.3 W is about one-third of that gap, indicating moderate prediction accuracy across different kernels.", "perturbed_statement": "At 60% GPU usage, cublas draws about 125 W compared to cuda-tensor’s roughly 90 W (a 35 W gap). The GPU model’s RMSE of 9.69 W is about one-third of that gap, indicating moderate prediction accuracy across different kernels.", "perturbed_explanation": "The perturbation incorrectly states the GPU model’s RMSE as 9.69 W, but the GPU regression model actually has an RMSE of 12.3 W, not 9.69 W.", "claim": "At 60% GPU usage, cublas draws about 125 W compared to cuda-tensor’s roughly 90 W (a 35 W gap). 
The GPU model’s RMSE of 12.3 W is about one-third of that gap, indicating moderate prediction accuracy across different kernels.", "label": true }, { "paperid": "2410.07571v2", "paper_path": "./SciVer/papers/2410.07571v2.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07571v2_figure_3.png", "item2_path": "./SciVer/images/2410.07571v2-Table2-1.png", "section": [ "4.2" ], "request_id": 562, "origin_statement": "At 3000 training steps, LLaMA-2-Chat-VL-MTL’s multimodal ASR drops to approximately 6%, roughly one-eighth of its text-only ASR of 48%, yet Table 2 reports its average multimodal helpfulness accuracy at 64.1%.", "perturbed_statement": "At 3000 training steps, LLaMA-2-Chat-VL-MTL’s multimodal ASR drops to approximately 6%, roughly one-eighth of its text-only ASR of 48%, yet Table 2 reports its average multimodal helpfulness accuracy at 66.1%.", "perturbed_explanation": "The perturbed statement incorrectly cites the average multimodal helpfulness accuracy as 66.1%, whereas Table 2 lists the actual average for LLaMA-2-Chat-VL-MTL as 64.1%.", "claim": "At 3000 training steps, LLaMA-2-Chat-VL-MTL’s multimodal ASR drops to approximately 6%, roughly one-eighth of its text-only ASR of 48%, yet Table 2 reports its average multimodal helpfulness accuracy at 64.1%.", "label": true }, { "paperid": "2410.20335v1", "paper_path": "./SciVer/papers/2410.20335v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20335v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.20335v1-Table2-1.png", "section": [ "5.2" ], "request_id": 578, "origin_statement": "At ε=0.5 and C₁=1e-3, the IFUTSVM-ID model achieves approximately 93% ACC (Figure 1), and in pairwise comparison with IFW-LSTSVM on KEEL datasets it records 43 wins, 2 ties, and 1 loss (Table 2).", "perturbed_statement": "At ε=0.5 and C₁=1e-3, the IFUTSVM-ID model achieves approximately 96% ACC (Figure 1), and in pairwise comparison with IFW-LSTSVM on KEEL datasets it records 41 wins, 2 ties, and 1 loss (Table 2).", "perturbed_explanation": "The figure shows ACC around 93% at ε=0.5 and C₁=1e-3, not 96%, and Table 2 reports 43 wins (not 41) against IFW-LSTSVM, so both details are contradicted.", "claim": "At ε=0.5 and C₁=1e-3, the IFUTSVM-ID model achieves approximately 93% ACC (Figure 1), and in pairwise comparison with IFW-LSTSVM on KEEL datasets it records 43 wins, 2 ties, and 1 loss (Table 2).", "label": true }, { "paperid": "2411.00848v1", "paper_path": "./SciVer/papers/2411.00848v1.json", "claim_type": "parallel", "item1": "4", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00848v1_figure_4.png", "item2_path": "./SciVer/images/2411.00848v1-Table4-1.png", "section": [ "4.3.3" ], "request_id": 582, "origin_statement": "In Figure 4, FRI’s median accuracy (~0.89) exceeds PCA’s (~0.86), and in Table 4, the fused BPA m* from FRI assigns 83.5% mass to the correct label {Ve}.", "perturbed_statement": "In Figure 4, FRI’s median accuracy (~0.89) exceeds PCA’s (~0.86), and in Table 4, the fused BPA m* from FRI assigns 78.2% mass to the incorrect label {Vi}.", "perturbed_explanation": "The perturbed claim is incorrect because Table 4 shows that m* for FRI actually allocates 0.835 mass to {Ve} and only 0.030 to {Vi}, not 0.782 to {Vi}.", "claim": "In Figure 4, FRI’s median accuracy (~0.89) exceeds PCA’s (~0.86), and in 
Table 4, the fused BPA m* from FRI assigns 83.5% mass to the correct label {Ve}.", "label": true }, { "paperid": "2410.16833v2", "paper_path": "./SciVer/papers/2410.16833v2.json", "claim_type": "parallel", "item1": "12", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.16833v2_figure_12.png", "item2_path": "./SciVer/images/2410.16833v2-Table1-1.png", "section": [ "5.1" ], "request_id": 595, "origin_statement": "The amplitude of density variation (2.0) in Fig. 12’s torus with R=10, r=1 exceeds about four times the initial density standard deviation (√0.2427≈0.49) of Fig. 1(a), per Table 1.", "perturbed_statement": "The amplitude of density variation (2.0) in Fig. 12’s torus with R=10, r=1 exceeds about five times the initial density standard deviation (√0.2427≈0.49) of Fig. 1(a), per Table 1.", "perturbed_explanation": "The ratio of the amplitude (2.0) to the standard deviation (≈0.49) is approximately 4.08, not five, so stating it exceeds five times contradicts the numeric relationship.", "claim": "The amplitude of density variation (2.0) in Fig. 12’s torus with R=10, r=1 exceeds about four times the initial density standard deviation (√0.2427≈0.49) of Fig. 1(a), per Table 1.", "label": true }, { "paperid": "2410.19346v2", "paper_path": "./SciVer/papers/2410.19346v2.json", "claim_type": "parallel", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19346v2_figure_5.png", "item2_path": "./SciVer/images/2410.19346v2-Table1-1.png", "section": [ "4.2" ], "request_id": 603, "origin_statement": "GPT-4o’s Info accuracy of 76.86% in Table1 closely matches its attacker accuracy of ~77% in Fig5(b), which is about 47% higher than its defender accuracy of ~30%.", "perturbed_statement": "GPT-4o’s Info accuracy of 80.86% in Table1 closely matches its attacker accuracy of ~80% in Fig5(b), which is about 47% higher than its defender accuracy of ~30%.", "perturbed_explanation": "The Table1 Info accuracy for GPT-4o is 76.86%, not 80.86%, and Fig5(b) shows its attacker accuracy is approximately 77%, not ~80%.", "claim": "GPT-4o’s Info accuracy of 76.86% in Table1 closely matches its attacker accuracy of ~77% in Fig5(b), which is about 47% higher than its defender accuracy of ~30%.", "label": true }, { "paperid": "2410.06992v2", "paper_path": "./SciVer/papers/2410.06992v2.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.06992v2_figure_3.png", "item2_path": "./SciVer/images/2410.06992v2_figure_4.png", "section": [ "2.1.1" ], "request_id": 605, "origin_statement": "The model-generated sympy fix injects three lines for the _print_Indexed method, exactly matching the gold patch (illustrating solution leakage), while the django fix only adds two lines for __reversed__, incorrectly iterating over dict.keys() instead of the full dict.", "perturbed_statement": "The model-generated sympy fix injects four lines for the _print_Indexed method, exactly matching the gold patch (illustrating solution leakage), while the django fix only adds two lines for __reversed__, incorrectly iterating over dict.keys() instead of the full dict.", "perturbed_explanation": "Figure 3’s generated patch shows only three added lines for the _print_Indexed method (+ def _print_Indexed, + base,*index = expr.args, + return …), not four. 
Claiming four lines contradicts the actual image.", "claim": "The model-generated sympy fix injects three lines for the _print_Indexed method, exactly matching the gold patch (illustrating solution leakage), while the django fix only adds two lines for __reversed__, incorrectly iterating over dict.keys() instead of the full dict.", "label": true }, { "paperid": "2409.14838v1", "paper_path": "./SciVer/papers/2409.14838v1.json", "claim_type": "parallel", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14838v1_figure_6.png", "item2_path": "./SciVer/images/2409.14838v1_figure_7.png", "section": [ "3.2.2" ], "request_id": 614, "origin_statement": "For ResNet18 under 90% accuracy tolerance, Design2 hits the target with a 4-bit ADC versus Design3’s 6-bit requirement, and at 4-bit cell precision Design2 occupies roughly 75 mm² and delivers about 140 TOPS/W energy efficiency.", "perturbed_statement": "For ResNet18 under 90% accuracy tolerance, Design2 hits the target with a 5-bit ADC versus Design3’s 6-bit requirement, and at 4-bit cell precision Design2 occupies roughly 75 mm² and delivers about 140 TOPS/W energy efficiency.", "perturbed_explanation": "The perturbed statement wrongly claims Design2 needs a 5-bit ADC to reach 90% accuracy, but Figure 6 shows it achieves that threshold with only a 4-bit ADC.", "claim": "For ResNet18 under 90% accuracy tolerance, Design2 hits the target with a 4-bit ADC versus Design3’s 6-bit requirement, and at 4-bit cell precision Design2 occupies roughly 75 mm² and delivers about 140 TOPS/W energy efficiency.", "label": true }, { "paperid": "2409.11839v1", "paper_path": "./SciVer/papers/2409.11839v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11839v1_figure_2.png", "item2_path": "./SciVer/images/2409.11839v1_figure_3.png", "section": [ "5.1" ], "request_id": 628, "origin_statement": "Despite treated stations starting at approximately 200 mcg/m3 of black smoke compared to 180 mcg/m3 for controls (Fig2), dynamic estimates reveal treated sites experienced an immediate 10 mcg/m3 drop post-submission, deepening to around 25 mcg/m3 reduction at 24 months relative to controls (Fig3).", "perturbed_statement": "Despite treated stations starting at approximately 200 mcg/m3 of black smoke compared to 180 mcg/m3 for controls (Fig2), dynamic estimates reveal treated sites experienced an immediate 15 mcg/m3 drop post-submission, deepening to around 40 mcg/m3 reduction at 24 months relative to controls (Fig3).", "perturbed_explanation": "The perturbation is incorrect because Figure 3 shows an immediate drop of about 10 mcg/m3 (not 15 mcg/m3) and a reduction settling around 25–30 mcg/m3 (not 40 mcg/m3) by 24 months after submission.", "claim": "Despite treated stations starting at approximately 200 mcg/m3 of black smoke compared to 180 mcg/m3 for controls (Fig2), dynamic estimates reveal treated sites experienced an immediate 10 mcg/m3 drop post-submission, deepening to around 25 mcg/m3 reduction at 24 months relative to controls (Fig3).", "label": true }, { "paperid": "2411.07795v2", "paper_path": "./SciVer/papers/2411.07795v2.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.07795v2-Table5-1.png", "item2_path": "./SciVer/images/2411.07795v2-Table6-1.png", "section": [ "3.4" ], "request_id": 636, "origin_statement": "Although 
DIV2K produces a slightly higher average PSNR (47.9 vs 47.8), its worst-case watermark decoding success under RandomResizedCrop is 95.2%, surpassing DALL·E 3’s worst-case 92.7% under JPEG compression.", "perturbed_statement": "Although DIV2K produces a slightly higher average PSNR (47.9 vs 47.8), its worst-case watermark decoding success under RandomResizedCrop is 95.2%, surpassing DALL·E 3’s worst-case 98.7% under JPEG compression.", "perturbed_explanation": "The perturbed statement is incorrect because Table 6 shows DALL·E 3’s success rate under JPEG compression is 92.7%, not 98.7%, contradicting the claimed 98.7% value.", "claim": "Although DIV2K produces a slightly higher average PSNR (47.9 vs 47.8), its worst-case watermark decoding success under RandomResizedCrop is 95.2%, surpassing DALL·E 3’s worst-case 92.7% under JPEG compression.", "label": true }, { "paperid": "2411.03940v1", "paper_path": "./SciVer/papers/2411.03940v1.json", "claim_type": "parallel", "item1": "2", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03940v1_figure_2.png", "item2_path": "./SciVer/images/2411.03940v1_figure_8.png", "section": [ "4.2" ], "request_id": 651, "origin_statement": "Figure 2 shows that the s3-mode shear eigenfunction has three zeroes in the crust in both full GR and Cowling, while Figure 8 reveals that the i1-mode radial eigenfunction presents a node inside the solid crust only in full GR and not under the Cowling approximation.", "perturbed_statement": "Figure 2 shows that the s3-mode shear eigenfunction has two zeroes in the crust in both full GR and Cowling, while Figure 8 reveals that the i1-mode radial eigenfunction presents a node inside the solid crust only under the Cowling approximation and not in full GR.", "perturbed_explanation": "In Fig. 2 the s3-mode eigenfunction clearly exhibits three zero crossings within the crust, not two. Moreover, Fig. 8 demonstrates that the radial node for the i1-mode appears in the full GR solution, not in the Cowling approximation.", "claim": "Figure 2 shows that the s3-mode shear eigenfunction has three zeroes in the crust in both full GR and Cowling, while Figure 8 reveals that the i1-mode radial eigenfunction presents a node inside the solid crust only in full GR and not under the Cowling approximation.", "label": true }, { "paperid": "2409.01274v1", "paper_path": "./SciVer/papers/2409.01274v1.json", "claim_type": "parallel", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01274v1_figure_7(a).png", "item2_path": "./SciVer/images/2409.01274v1_figure_7(b).png", "section": [ "5.1" ], "request_id": 667, "origin_statement": "For single-frame input, the PSNR gain in indoor scenes (~0.75 dB) is more than double that in far scenes (~0.30 dB), consistent with depth confidence being highest indoors (77.31 %) and lowest for far (31.21 %).", "perturbed_statement": "For single-frame input, the PSNR gain in indoor scenes (~0.75 dB) is more than double that in far scenes (~0.60 dB), consistent with depth confidence being highest indoors (77.31 %) and lowest for far (31.21 %).", "perturbed_explanation": "The perturbation claims a ~0.60 dB PSNR gain for far scenes, but Fig. 
7(a) actually shows the single-frame PSNR gain for the Far attribute is only about ~0.30 dB, contradicting the stated value.", "claim": "For single-frame input, the PSNR gain in indoor scenes (~0.75 dB) is more than double that in far scenes (~0.30 dB), consistent with depth confidence being highest indoors (77.31 %) and lowest for far (31.21 %).", "label": true }, { "paperid": "2409.11874v1", "paper_path": "./SciVer/papers/2409.11874v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.11874v1-Table1-1.png", "item2_path": "./SciVer/images/2409.11874v1-Table2-1.png", "section": [ "3" ], "request_id": 668, "origin_statement": "By replacing the fourth reference character “e” with “a,” the generated text loses one point out of seven, reducing simple precision to 6/7 (85.71%), as shown by the per‐character scoring in Table 1 and the 6/7=85.71% calculation in Table 2.", "perturbed_statement": "By replacing the fourth reference character “e” with “a,” the generated text loses one point out of seven, reducing simple precision to 5/7 (71.43%), as shown by the per‐character scoring in Table 1 and the 5/7=71.43% calculation in Table 2.", "perturbed_explanation": "The precision is incorrectly stated: Table 2 shows 6 correct matches out of 7 for a precision of 6/7 (85.71%), not 5/7 (71.43%). The generated text actually scores six points, not five.", "claim": "By replacing the fourth reference character “e” with “a,” the generated text loses one point out of seven, reducing simple precision to 6/7 (85.71%), as shown by the per‐character scoring in Table 1 and the 6/7=85.71% calculation in Table 2.", "label": true }, { "paperid": "2411.15091v1", "paper_path": "./SciVer/papers/2411.15091v1.json", "claim_type": "parallel", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15091v1_figure_5.png", "item2_path": "./SciVer/images/2411.15091v1-Table2-1.png", "section": [ "4.4" ], "request_id": 669, "origin_statement": "Squarespace’s AI Crawlers toggle (Figure 5) promises that “your site won’t be scanned to train AI models” when enabled, yet only 17% of the 20.7% of artist sites hosted on Squarespace actually disallow AI crawlers according to Table 2.", "perturbed_statement": "Squarespace’s AI Crawlers toggle (Figure 5) promises that “your site won’t be scanned to train AI models” when enabled, yet 50% of the 20.7% of artist sites hosted on Squarespace actually disallow AI crawlers according to Table 2.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 shows that only 17% (not 50%) of Squarespace sites disallow AI crawlers, contradicting the claim that 50% do so.", "claim": "Squarespace’s AI Crawlers toggle (Figure 5) promises that “your site won’t be scanned to train AI models” when enabled, yet only 17% of the 20.7% of artist sites hosted on Squarespace actually disallow AI crawlers according to Table 2.", "label": true }, { "paperid": "2409.03247v1", "paper_path": "./SciVer/papers/2409.03247v1.json", "claim_type": "parallel", "item1": "6(a)", "item2": "6(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03247v1_figure_6(a).png", "item2_path": "./SciVer/images/2409.03247v1_figure_6(d).png", "section": [ "4.1.1" ], "request_id": 674, "origin_statement": "At minute 15, the Prompt System and the Rule System both reached accuracies of about 0.705, yet the Prompt System’s F1 score (~0.56) exceeded 
the Rule System’s (~0.45) by approximately 0.11.", "perturbed_statement": "At minute 15, the Prompt System and the Rule System both reached accuracies of about 0.74, yet the Prompt System’s F1 score (~0.56) exceeded the Rule System’s (~0.45) by approximately 0.11.", "perturbed_explanation": "The accuracy subfigure ((a)) shows both the Prompt and Rule systems ending near 0.705 at minute 15, not 0.74 as stated, so the perturbed accuracy value contradicts the plotted data.", "claim": "At minute 15, the Prompt System and the Rule System both reached accuracies of about 0.705, yet the Prompt System’s F1 score (~0.56) exceeded the Rule System’s (~0.45) by approximately 0.11.", "label": true }, { "paperid": "2411.06096v1", "paper_path": "./SciVer/papers/2411.06096v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06096v1_figure_5.png", "item2_path": "./SciVer/images/2411.06096v1_figure_6.png", "section": [ "5.4" ], "request_id": 683, "origin_statement": "The Blue Diff_NP in Figure 6 plunges to around -1.8 at 1M tokens, coinciding with the BA curve hitting roughly 0.3 at 1M tokens in Figure 5; both metrics then rise above 0.5 by 100M tokens.", "perturbed_statement": "The Blue Diff_NP in Figure 6 plunges to around -2.5 at 1M tokens, coinciding with the BA curve hitting roughly 0.15 at 1M tokens in Figure 5; both metrics then rise above 0.5 by 100M tokens.", "perturbed_explanation": "In Figure 6, Diff_NP actually bottoms at about -1.8 at 1M tokens, not -2.5. In Figure 5, the BA performance dips to approximately 0.3 at 1M tokens, not 0.15.", "claim": "The Blue Diff_NP in Figure 6 plunges to around -1.8 at 1M tokens, coinciding with the BA curve hitting roughly 0.3 at 1M tokens in Figure 5; both metrics then rise above 0.5 by 100M tokens.", "label": true }, { "paperid": "2410.15005v1", "paper_path": "./SciVer/papers/2410.15005v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.15005v1_figure_4(b).png", "item2_path": "./SciVer/images/2410.15005v1-Table2-1.png", "section": [ "5", "5.2" ], "request_id": 704, "origin_statement": "FinMA-Full-7B exhibits a PCR_train-val of 0.1261 in Table 2 and an approximate 0.13 drop in Figure 4(b), almost double the PCR drop of FinMA-NLP-7B (0.0706), indicating stronger fine-tuning effects on the AlphaFin benchmark.", "perturbed_statement": "FinMA-Full-7B exhibits a PCR_train-val of 0.1361 in Table 2 and an approximate 0.14 drop in Figure 4(b), more than twice the PCR drop of FinMA-NLP-7B (0.054), indicating strong fine-tuning overlap on the AlphaFin benchmark.", "perturbed_explanation": "The perturbed statement misreports the PCR_train-val values: Table 2 lists FinMA-Full-7B as 0.1261 (not 0.1361) and FinMA-NLP-7B as 0.0706 (not 0.054), so the altered numbers contradict the actual benchmark results.", "claim": "FinMA-Full-7B exhibits a PCR_train-val of 0.1261 in Table 2 and an approximate 0.13 drop in Figure 4(b), almost double the PCR drop of FinMA-NLP-7B (0.0706), indicating stronger fine-tuning effects on the AlphaFin benchmark.", "label": true }, { "paperid": "2410.19279v1", "paper_path": "./SciVer/papers/2410.19279v1.json", "claim_type": "parallel", "item1": "8", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19279v1_figure_8.png", "item2_path": "./SciVer/images/2410.19279v1-Table1-1.png", "section": [ "3.4.1" ], "request_id": 
721, "origin_statement": "While Figure 8 shows that abrupt peak transitions remain consistent across varying heart rate waveforms, Table 1 reveals PhysNet's MAE increases from 1.86 in slight motion to 8.20 in speaking — a 341% degradation — highlighting short-range models’ noise sensitivity despite stable peak patterns.", "perturbed_statement": "While Figure 8 shows that abrupt trough transitions remain consistent across varying heart rate waveforms, Table 1 reveals PhysNet's MAE increases from 1.86 in slight motion to 12.82 in speaking — a 588% degradation — highlighting short-range models’ noise sensitivity despite stable trough patterns.", "perturbed_explanation": "This statement is incorrect because Figure 8 illustrates abrupt peak (not trough) transitions in rPPG signals, and Table 1 reports PhysNet's MAE under speaking as 8.20 (not 12.82), making both the signal feature and error value contradicted.", "claim": "While Figure 8 shows that abrupt peak transitions remain consistent across varying heart rate waveforms, Table 1 reveals PhysNet's MAE increases from 1.86 in slight motion to 8.20 in speaking — a 341% degradation — highlighting short-range models’ noise sensitivity despite stable peak patterns.", "label": true }, { "paperid": "2411.01023v1", "paper_path": "./SciVer/papers/2411.01023v1.json", "claim_type": "parallel", "item1": "11", "item2": "13", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01023v1_figure_11.png", "item2_path": "./SciVer/images/2411.01023v1_figure_13.png", "section": [ "6.2" ], "request_id": 742, "origin_statement": "Increasing NPP from 1 to 50 raises the average Hits@3 by about 0.09 (from 0.33 to 0.42, Figure 11), exceeding the overall mean performance of ComplEx (0.402, Figure 13).", "perturbed_statement": "Increasing NPP from 1 to 20 raises the average Hits@3 by about 0.09 (from 0.33 to 0.42, Figure 11), exceeding the overall mean performance of ComplEx (0.402, Figure 13).", "perturbed_explanation": "The perturbed claim mislabels the NPP value: at NPP = 20 the mean Hits@3 is about 0.40, not 0.42 (which occurs at NPP = 50). 
Therefore stating a 0.42 average for NPP = 20 contradicts Figure 11.", "claim": "Increasing NPP from 1 to 50 raises the average Hits@3 by about 0.09 (from 0.33 to 0.42, Figure 11), exceeding the overall mean performance of ComplEx (0.402, Figure 13).", "label": true }, { "paperid": "2410.21129v1", "paper_path": "./SciVer/papers/2410.21129v1.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21129v1_figure_2.png", "item2_path": "./SciVer/images/2410.21129v1_figure_4.png", "section": [ "5.2" ], "request_id": 767, "origin_statement": "The wine instance has 12.2% alcohol contributing a ~0.055 increase in positive class probability, whereas the glass instance’s magnesium level of 3.36 lowers the build wind non-float probability by about 0.18.", "perturbed_statement": "The wine instance has 13.5% alcohol contributing a ~0.055 increase in positive class probability, whereas the glass instance’s magnesium level of 3.36 lowers the build wind non-float probability by about 0.18.", "perturbed_explanation": "This statement is wrong because the wine figure shows an alcohol value of 12.2, not 13.5, so the perturbed instance value contradicts the image.", "claim": "The wine instance has 12.2% alcohol contributing a ~0.055 increase in positive class probability, whereas the glass instance’s magnesium level of 3.36 lowers the build wind non-float probability by about 0.18.", "label": true }, { "paperid": "2410.08289v1", "paper_path": "./SciVer/papers/2410.08289v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.08289v1_figure_5.png", "item2_path": "./SciVer/images/2410.08289v1_figure_6.png", "section": [ "5.1" ], "request_id": 769, "origin_statement": "In the pie charts, the ZeroShot model produced 23.4% incorrectly formatted samples compared to SFT’s 0.7%, and the density plot shows ZeroShot’s answer span distribution peaking around 0.25 through the passage, later than SFT’s peak at around 0.15.", "perturbed_statement": "In the pie charts, the ZeroShot model produced 23.4% incorrectly formatted samples compared to SFT’s 5.7%, and the density plot shows ZeroShot’s answer span distribution peaking around 0.15 through the passage, earlier than SFT’s peak at around 0.25.", "perturbed_explanation": "The perturbed statement incorrectly raises SFT’s incorrect format rate to 5.7% when the chart shows it is actually 0.7%, and it swaps the empirical peak positions: ZeroShot peaks at about 0.25 of passage length and SFT at about 0.15, not the reverse.", "claim": "In the pie charts, the ZeroShot model produced 23.4% incorrectly formatted samples compared to SFT’s 0.7%, and the density plot shows ZeroShot’s answer span distribution peaking around 0.25 through the passage, later than SFT’s peak at around 0.15.", "label": true }, { "paperid": "2409.19014v4", "paper_path": "./SciVer/papers/2409.19014v4.json", "claim_type": "parallel", "item1": "8(a)", "item2": "8(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19014v4_figure_8(a).png", "item2_path": "./SciVer/images/2409.19014v4_figure_8(b).png", "section": [ "7.2" ], "request_id": 773, "origin_statement": "On Spider, DINSOL’s EX score (~89%) increases to ~93% in FLEX (+4%), whereas on BIRD SuperSQL rises from ~56% EX to ~63% FLEX (+7%), showing BIRD models gain more under FLEX.", "perturbed_statement": "On Spider, DINSOL’s EX score (~85%) increases 
to ~95% in FLEX (+10%), whereas on BIRD SuperSQL rises from ~56% EX to ~63% FLEX (+7%), showing BIRD models gain more under FLEX.", "perturbed_explanation": "The perturbation misreports DINSOL’s Spider scores: the chart shows DINSOL at about 89% EX and 93% FLEX (a 4-point gain), not 85% EX and 95% FLEX (a 10-point gain).", "claim": "On Spider, DINSOL’s EX score (~89%) increases to ~93% in FLEX (+4%), whereas on BIRD SuperSQL rises from ~56% EX to ~63% FLEX (+7%), showing BIRD models gain more under FLEX.", "label": true }, { "paperid": "2410.20297v1", "paper_path": "./SciVer/papers/2410.20297v1.json", "claim_type": "parallel", "item1": "10", "item2": "11", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20297v1_figure_10.png", "item2_path": "./SciVer/images/2410.20297v1_figure_11.png", "section": [ "4.3" ], "request_id": 778, "origin_statement": "The evaluation audit interface logs 55,278 question-level items, and the leaderboard ranks Mistral-7B-Instruct-v0.1 with an average score of 62.69, which is 6.51 points below the top model traclm-v3-7b-instruct (69.20).", "perturbed_statement": "The evaluation audit interface logs 55,278 question-level items, and the leaderboard ranks Mistral-7B-Instruct-v0.1 with an average score of 68.42, which is 0.78 points below the top model traclm-v3-7b-instruct (69.20).", "perturbed_explanation": "This is incorrect because the leaderboard shows Mistral-7B-Instruct-v0.1’s actual average score is 62.69, not 68.42. The value 68.42 corresponds to Mistral-7B-Instruct-v0.3, so the stated average and the 0.78-point gap contradict the table.", "claim": "The evaluation audit interface logs 55,278 question-level items, and the leaderboard ranks Mistral-7B-Instruct-v0.1 with an average score of 62.69, which is 6.51 points below the top model traclm-v3-7b-instruct (69.20).", "label": true }, { "paperid": "2410.19796v1", "paper_path": "./SciVer/papers/2410.19796v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.19796v1-Table3-1.png", "item2_path": "./SciVer/images/2410.19796v1-Table4-1.png", "section": [ "5.2.2" ], "request_id": 788, "origin_statement": "On ViT-L-16, feature clipping reduces vanilla ECE from 5.24 to 5.04, yet the transformer yields zero samples with confidence >0.99 among 50,000 images, while ResNet-50 still makes 3,173 overconfident correct predictions at this threshold.", "perturbed_statement": "On ViT-L-16, feature clipping reduces vanilla ECE from 5.24 to 4.94, yet the transformer yields zero samples with confidence >0.99 among 50,000 images, while ResNet-50 still makes 3,173 overconfident correct predictions at this threshold.", "perturbed_explanation": "The perturbed statement incorrectly claims the post-clipping ECE is 4.94, but Table 3 reports the w/FC ECE for vanilla ViT-L-16 as 5.04, not 4.94.", "claim": "On ViT-L-16, feature clipping reduces vanilla ECE from 5.24 to 5.04, yet the transformer yields zero samples with confidence >0.99 among 50,000 images, while ResNet-50 still makes 3,173 overconfident correct predictions at this threshold.", "label": true }, { "paperid": "2410.17226v2", "paper_path": "./SciVer/papers/2410.17226v2.json", "claim_type": "parallel", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.17226v2_figure_5.png", "item2_path": "./SciVer/images/2410.17226v2-Table2-1.png", "section": [ "5.1" ], "request_id": 790, "origin_statement": "For 
the SD graph, increasing cluster diameter from 2 to 6 raises C-BFS relative time by about 2.6×, while parallel C-BFS still achieves a 945× speedup over sequential BFS (Par-Time=2.008s) as reported in Table 2.", "perturbed_statement": "For the SD graph, increasing cluster diameter from 2 to 6 raises C-BFS relative time by about 2.6×, while parallel C-BFS still achieves an 850× speedup over sequential BFS (Par-Time=2.008s) as reported in Table 2.", "perturbed_explanation": "The perturbed statement incorrectly reports the parallel C-BFS speedup for the SD graph as 850×. Table 2 actually shows a 945× speedup (Final column) for SD, so the 850× figure contradicts the provided data.", "claim": "For the SD graph, increasing cluster diameter from 2 to 6 raises C-BFS relative time by about 2.6×, while parallel C-BFS still achieves a 945× speedup over sequential BFS (Par-Time=2.008s) as reported in Table 2.", "label": true }, { "paperid": "2410.22086v2", "paper_path": "./SciVer/papers/2410.22086v2.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.22086v2_figure_1.png", "item2_path": "./SciVer/images/2410.22086v2_figure_2.png", "section": [ "3.1" ], "request_id": 801, "origin_statement": "NGDiff, whose gradient updates always remain in the green region of Fig. 2, achieves a forgetting loss of about 3.5 with a retaining loss below 0.2 on the TOFU dataset, whereas random guessing yields a forgetting loss over 10.", "perturbed_statement": "NGDiff, whose gradient updates always remain in the green region of Fig. 2, achieves a forgetting loss of about 7 with a retaining loss around 0.5 on the TOFU dataset, whereas random guessing yields a forgetting loss under 5.", "perturbed_explanation": "This statement is incorrect because in Figure 1 the NGDiff marker (blue square) shows a forgetting loss near 3.5 and a retaining loss close to 0.1, not 7 and 0.5, and the random‐guess baseline (grey dashed line) indicates a forgetting loss above 10, not under 5.", "claim": "NGDiff, whose gradient updates always remain in the green region of Fig. 2, achieves a forgetting loss of about 3.5 with a retaining loss below 0.2 on the TOFU dataset, whereas random guessing yields a forgetting loss over 10.", "label": true }, { "paperid": "2410.02719v1", "paper_path": "./SciVer/papers/2410.02719v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02719v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.02719v1_figure_4(b).png", "section": [ "4.2" ], "request_id": 826, "origin_statement": "At similar uniformity (~−1.1) and alignment (~0.8), Uncertainty-RAG achieves an averaged STS of ~0.72, roughly 0.1 higher than Precise Chunking’s maximum STS of ~0.62 observed at uniformity ~−1.3 and alignment ~0.9.", "perturbed_statement": "At similar uniformity (~−1.1) and alignment (~0.8), Uncertainty-RAG achieves an averaged STS of ~0.72, roughly 0.2 higher than Precise Chunking’s maximum STS of ~0.62 observed at uniformity ~−1.3 and alignment ~0.9.", "perturbed_explanation": "The perturbation incorrectly states a 0.2 STS gap. 
According to the plots, Uncertainty-RAG’s STS (~0.72) exceeds Precise Chunking’s STS (~0.62) by only ~0.1, not 0.2.", "claim": "At similar uniformity (~−1.1) and alignment (~0.8), Uncertainty-RAG achieves an averaged STS of ~0.72, roughly 0.1 higher than Precise Chunking’s maximum STS of ~0.62 observed at uniformity ~−1.3 and alignment ~0.9.", "label": true }, { "paperid": "2409.13221v2", "paper_path": "./SciVer/papers/2409.13221v2.json", "claim_type": "parallel", "item1": "9", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13221v2_figure_9.png", "item2_path": "./SciVer/images/2409.13221v2-Table3-1.png", "section": [ "7.3" ], "request_id": 838, "origin_statement": "For the 65B model, increasing the migration ratio to 50% reduces fused stage latency from ≈380 ms to ≈240 ms (a ~37% drop), while RLHFuse’s simulated annealing pipeline schedule achieves a 1.48× overall latency speedup for the 65B/33B model with 16 micro-batches.", "perturbed_statement": "For the 65B model, increasing the migration ratio to 50% reduces fused stage latency from ≈380 ms to ≈200 ms (a ~47% drop), while RLHFuse’s simulated annealing pipeline schedule achieves a 1.6× overall latency speedup for the 65B/33B model with 16 micro-batches.", "perturbed_explanation": "The perturbed statement understates the fused stage latency at 50% migration: Figure 9 shows it remains around 240 ms, not 200 ms. It also overstates the speedup: Table 3 reports 1.48× for the 65B/33B setting with 16 micro-batches, not 1.6×.", "claim": "For the 65B model, increasing the migration ratio to 50% reduces fused stage latency from ≈380 ms to ≈240 ms (a ~37% drop), while RLHFuse’s simulated annealing pipeline schedule achieves a 1.48× overall latency speedup for the 65B/33B model with 16 micro-batches.", "label": true }, { "paperid": "2411.11786v1", "paper_path": "./SciVer/papers/2411.11786v1.json", "claim_type": "parallel", "item1": "2", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.11786v1-Table2-1.png", "item2_path": "./SciVer/images/2411.11786v1-Table6-1.png", "section": [ "5.1" ], "request_id": 843, "origin_statement": "Lowering r from 1.0 to 0.9 on CIFAR10 reduces FID by 3.464 points (from 28.551 to 25.087), a ~12.1% drop, while PT+CP halves the RF error on Adult tabular data compared to MP, reducing ΔAUC from 0.047 (0.029) to 0.022 (0.004).", "perturbed_statement": "Lowering r from 1.0 to 0.9 on CIFAR10 increases FID by 3.464 points (from 28.551 to 25.087), a ~12.1% rise, while PT+CP halves the RF error on Adult tabular data compared to MP, reducing ΔAUC from 0.047 (0.029) to 0.022 (0.004).", "perturbed_explanation": "The perturbation wrongly states that FID ‘increases by 3.464 points from 28.551 to 25.087’, whereas Table (b) shows FID actually decreases from 28.551 to 25.087 when r is lowered from 1.0 to 0.9.", "claim": "Lowering r from 1.0 to 0.9 on CIFAR10 reduces FID by 3.464 points (from 28.551 to 25.087), a ~12.1% drop, while PT+CP halves the RF error on Adult tabular data compared to MP, reducing ΔAUC from 0.047 (0.029) to 0.022 (0.004).", "label": true }, { "paperid": "2409.13199v1", "paper_path": "./SciVer/papers/2409.13199v1.json", "claim_type": "parallel", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.13199v1_figure_5.png", "item2_path": "./SciVer/images/2409.13199v1_figure_7.png", "section": [ "3.5" ], "request_id": 857, "origin_statement": "At 50% sparsity, Wanda-SP’s 
MMLU drops to around 27.1 on LLaMA3-8B whereas CFSP maintains about 32.3. Meanwhile, LLaMA3-8B’s first and last blocks have normalized importance scores of roughly 0.87 and 0.84, nearly three times higher than the lowest mid-block score (~0.28).", "perturbed_statement": "At 50% sparsity, Wanda-SP’s MMLU drops to around 27.1 on LLaMA3-8B whereas CFSP maintains about 35.3. Meanwhile, LLaMA3-8B’s first and last blocks have normalized importance scores of roughly 0.87 and 0.84, nearly three times higher than the lowest mid-block score (~0.28).", "perturbed_explanation": "The perturbed statement overestimates CFSP’s performance at 50% sparsity. Figure 5 shows CFSP achieves about 32.3 MMLU at 50% sparsity, not 35.3.", "claim": "At 50% sparsity, Wanda-SP’s MMLU drops to around 27.1 on LLaMA3-8B whereas CFSP maintains about 32.3. Meanwhile, LLaMA3-8B’s first and last blocks have normalized importance scores of roughly 0.87 and 0.84, nearly three times higher than the lowest mid-block score (~0.28).", "label": true }, { "paperid": "2409.03843v1", "paper_path": "./SciVer/papers/2409.03843v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03843v1_figure_3.png", "item2_path": "./SciVer/images/2409.03843v1_figure_4.png", "section": [ "5.2.4" ], "request_id": 876, "origin_statement": "Assigning a man identity decreases the mean sexism-against-men score by 0.74 points, while assigning a Republican identity increases mean agreement with Republican statements by 2.43 points, making the political identity’s effect over three times larger than the gender identity shift.", "perturbed_statement": "Assigning a man identity decreases the mean sexism-against-men score by 1.20 points, while assigning a Republican identity increases mean agreement with Republican statements by 2.43 points, making the political identity’s effect over three times larger than the gender identity shift.", "perturbed_explanation": "The perturbation is incorrect because Figure 3B shows the decrease in sexism-against-men under the man identity is –0.74 points, not –1.20 points.", "claim": "Assigning a man identity decreases the mean sexism-against-men score by 0.74 points, while assigning a Republican identity increases mean agreement with Republican statements by 2.43 points, making the political identity’s effect over three times larger than the gender identity shift.", "label": true }, { "paperid": "2410.04070v5", "paper_path": "./SciVer/papers/2410.04070v5.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.04070v5_figure_2(a).png", "item2_path": "./SciVer/images/2410.04070v5-Table2-1.png", "section": [ "4.2" ], "request_id": 881, "origin_statement": "On the P-soups dataset, PAD attains a humor reward of 0.86 in Figure 2, elevating it 0.76 above the Base’s 0.10, and Table 2 shows PAD’s reward-model humor score is 0.82 versus Base’s −0.93, a 1.75-point gain.", "perturbed_statement": "On the P-soups dataset, PAD attains a humor reward of 1.86 in Figure 2, elevating it 1.76 above the Base’s 0.10, and Table 2 shows PAD’s reward-model humor score is 0.82 versus Base’s −0.93, a 1.75-point gain.", "perturbed_explanation": "The perturbed statement incorrectly claims PAD’s humor reward in Figure 2 is 1.86, but the actual value shown is 0.86.", "claim": "On the P-soups dataset, PAD attains a humor reward of 0.86 in Figure 2, elevating it 0.76 above the Base’s 
0.10, and Table 2 shows PAD’s reward-model humor score is 0.82 versus Base’s −0.93, a 1.75-point gain.", "label": true }, { "paperid": "2410.18321v1", "paper_path": "./SciVer/papers/2410.18321v1.json", "claim_type": "parallel", "item1": "3", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.18321v1-Table3-1.png", "item2_path": "./SciVer/images/2410.18321v1-Table6-1.png", "section": [ "5.5.3" ], "request_id": 897, "origin_statement": "On CIFAR-10 with ResNet-50, Focal Calibration yields an smCE of 0.99% and an AdaECE of 0.75% post temperature scaling, making smCE about 32% higher than AdaECE.", "perturbed_statement": "On CIFAR-10 with ResNet-50, Focal Calibration yields an smCE of 0.95% and an AdaECE of 0.85% post temperature scaling, making smCE about 12% higher than AdaECE.", "perturbed_explanation": "The perturbed statement misreports both metrics: Table 3 shows the post-scaled smCE for ResNet-50 on CIFAR-10 is 0.99%, not 0.95%, and Table 6 shows the post-scaled AdaECE is 0.75%, not 0.85%.", "claim": "On CIFAR-10 with ResNet-50, Focal Calibration yields an smCE of 0.99% and an AdaECE of 0.75% post temperature scaling, making smCE about 32% higher than AdaECE.", "label": true }, { "paperid": "2411.13892v1", "paper_path": "./SciVer/papers/2411.13892v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13892v1_figure_1(a).png", "item2_path": "./SciVer/images/2411.13892v1_figure_1(b).png", "section": [ "1" ], "request_id": 913, "origin_statement": "In the LightGCN embedding, tail items cluster around (1.5, −1.5) while in the SimGCL plot they shift to approximately (−1.0, −1.0), representing a ∼2.5-unit movement toward the user centroid at (0,0) along the x-axis.", "perturbed_statement": "In the LightGCN embedding, tail items cluster around (−1.5, −1.5) while in the SimGCL plot they shift to approximately (−1.0, −1.0), representing a ∼2.5-unit movement toward the user centroid at (0,0) along the x-axis.", "perturbed_explanation": "The LightGCN scatterplot shows tail items (orange) centered on positive x-values near +1.5, not −1.5, so stating they cluster around (−1.5, −1.5) contradicts the actual image.", "claim": "In the LightGCN embedding, tail items cluster around (1.5, −1.5) while in the SimGCL plot they shift to approximately (−1.0, −1.0), representing a ∼2.5-unit movement toward the user centroid at (0,0) along the x-axis.", "label": true }, { "paperid": "2409.07732v1", "paper_path": "./SciVer/papers/2409.07732v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07732v1_figure_4.png", "item2_path": "./SciVer/images/2409.07732v1_figure_5.png", "section": [ "4.2" ], "request_id": 920, "origin_statement": "Table 4 lists 11 topics spanning 21 distinct course modules, while Table 5 reformats all 21 module names in italics, preserving commas exactly as separators.", "perturbed_statement": "Table 4 lists 10 topics spanning 20 distinct course modules, while Table 5 reformats all 20 module names in italics, preserving commas exactly as separators.", "perturbed_explanation": "The perturbation is incorrect because Table 4 actually contains 11 topic rows, not 10, and the combined courses column across those rows lists 21 individual modules, not 20. 
Table 5 italicizes all 21 module names as shown.", "claim": "Table 4 lists 11 topics spanning 21 distinct course modules, while Table 5 reformats all 21 module names in italics, preserving commas exactly as separators.", "label": true }, { "paperid": "2411.00551v1", "paper_path": "./SciVer/papers/2411.00551v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.00551v1-Table4-1.png", "item2_path": "./SciVer/images/2411.00551v1-Table5-1.png", "section": [ "5.5" ], "request_id": 927, "origin_statement": "Swapping argmax for expectation raises Cv MAE from 0.659 to 0.703 (+0.044) while boosting MS from 83.3% to 84.6%. In comparison, decreasing OG strength from z=1.5 to z=1.0 lowers MAE by 75 (311 to 236) but reduces MS by 5.4% (80.3% to 74.9%).", "perturbed_statement": "Swapping argmax for expectation lowers Cv MAE from 0.659 to 0.603 while boosting MS from 83.3% to 84.6%. In comparison, decreasing OG strength from z=1.5 to z=1.0 lowers MAE by 75 (311 to 236) but reduces MS by 5.4% (80.3% to 74.9%).", "perturbed_explanation": "The perturbed statement incorrectly claims that expectation lowers the Cv MAE to 0.603. Table 4 shows the expectation‐based MAE for Cv is actually 0.703 (not 0.603) and is higher than the argmax MAE of 0.659, not lower.", "claim": "Swapping argmax for expectation raises Cv MAE from 0.659 to 0.703 (+0.044) while boosting MS from 83.3% to 84.6%. In comparison, decreasing OG strength from z=1.5 to z=1.0 lowers MAE by 75 (311 to 236) but reduces MS by 5.4% (80.3% to 74.9%).", "label": true }, { "paperid": "2409.01710v1", "paper_path": "./SciVer/papers/2409.01710v1.json", "claim_type": "parallel", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01710v1_figure_2.png", "item2_path": "./SciVer/images/2409.01710v1-Table2-1.png", "section": [ "4.1.1" ], "request_id": 939, "origin_statement": "Figure 2 shows that after compressing JPEG images on the mobile client, the secure edge server’s TEE perturbs and encodes them before forwarding; Table 2 confirms that VGG16_bn achieves the highest recognition accuracy of 91.94%, while GoogLeNet has the lowest at 91.22%, a 0.72% gap.", "perturbed_statement": "Figure 2 shows that after compressing JPEG images on the mobile client, the secure edge server’s TEE perturbs and encodes them before forwarding; Table 2 confirms that VGG16_bn achieves the highest recognition accuracy of 91.94%, while GoogLeNet has the lowest at 91.22%, a 0.82% gap.", "perturbed_explanation": "The perturbed statement erroneously reports the accuracy gap as 0.82%; according to Table 2, the true difference between 91.94% and 91.22% is 0.72%, not 0.82%.", "claim": "Figure 2 shows that after compressing JPEG images on the mobile client, the secure edge server’s TEE perturbs and encodes them before forwarding; Table 2 confirms that VGG16_bn achieves the highest recognition accuracy of 91.94%, while GoogLeNet has the lowest at 91.22%, a 0.72% gap.", "label": true }, { "paperid": "2409.11373v1", "paper_path": "./SciVer/papers/2409.11373v1.json", "claim_type": "parallel", "item1": "3(b)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11373v1_figure_3(b).png", "item2_path": "./SciVer/images/2409.11373v1_figure_3(c).png", "section": [ "4" ], "request_id": 942, "origin_statement": "65 of the top 80 architectures include one or two GNN-based layers, and 58 of them (73%) employ a 
learning rate at or below 10⁻².", "perturbed_statement": "75 of the top 80 architectures include one or two GNN-based layers, and 58 of them (73%) employ a learning rate at or below 10⁻².", "perturbed_explanation": "The perturbation is incorrect because the histogram shows only 26 + 39 = 65 architectures with exactly one or two GNN-based layers, not 75, as indicated by the blue bars at one and two GNN-layer counts.", "claim": "65 of the top 80 architectures include one or two GNN-based layers, and 58 of them (73%) employ a learning rate at or below 10⁻².", "label": true }, { "paperid": "2410.03030v1", "paper_path": "./SciVer/papers/2410.03030v1.json", "claim_type": "parallel", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.03030v1_figure_6.png", "item2_path": "./SciVer/images/2410.03030v1_figure_7.png", "section": [ "5.2" ], "request_id": 946, "origin_statement": "In the Dense ResNet50 on ImageNet, at attenuation radius 40, high-frequency attenuation (removing outer band as in Fig.6 left) yields ~68% test accuracy (blurred images), while low-frequency attenuation (removing central band as in Fig.6 right) plummets accuracy to ~15%, highlighting the reliance on low frequencies.", "perturbed_statement": "In the Dense ResNet50 on ImageNet, at attenuation radius 40, high-frequency attenuation (removing outer band as in Fig.6 left) yields ~15% test accuracy, while low-frequency attenuation (removing central band as in Fig.6 right) yields ~68%, highlighting the reliance on high frequencies.", "perturbed_explanation": "Figure 7 (left) shows that at radius 40, high-frequency attenuation actually preserves about 68% accuracy and low-frequency attenuation drops accuracy to about 15%. The perturbed statement swaps these values, contradicting the plotted RA curves.", "claim": "In the Dense ResNet50 on ImageNet, at attenuation radius 40, high-frequency attenuation (removing outer band as in Fig.6 left) yields ~68% test accuracy (blurred images), while low-frequency attenuation (removing central band as in Fig.6 right) plummets accuracy to ~15%, highlighting the reliance on low frequencies.", "label": true }, { "paperid": "2410.06386v1", "paper_path": "./SciVer/papers/2410.06386v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.06386v1-Table4-1.png", "item2_path": "./SciVer/images/2410.06386v1-Table5-1.png", "section": [ "3.1.1", "3.1", "3.1.2" ], "request_id": 960, "origin_statement": "For the 15-measurement group, increasing the time step ratio from 1 (Case 1) to 20 (Case 5) raises the average relative error by 0.12% while TSR increases 20-fold. Meanwhile, the overall maximum absolute error drops from 2.56°C (Case 1) to 1.88°C (Case 5).", "perturbed_statement": "For the 15-measurement group, increasing the time step ratio from 1 (Case 1) to 20 (Case 5) raises the average relative error by 0.12% while TSR increases 20-fold. Meanwhile, the overall maximum absolute error drops from 2.56°C (Case 1) to 2.58°C (Case 5).", "perturbed_explanation": "The perturbed statement incorrectly states Case 5’s maximum absolute error as 2.58°C, whereas Table 5 reports a value of 1.88°C for Case 5, not 2.58°C.", "claim": "For the 15-measurement group, increasing the time step ratio from 1 (Case 1) to 20 (Case 5) raises the average relative error by 0.12% while TSR increases 20-fold. 
Meanwhile, the overall maximum absolute error drops from 2.56°C (Case 1) to 1.88°C (Case 5).", "label": true }, { "paperid": "2409.09205v1", "paper_path": "./SciVer/papers/2409.09205v1.json", "claim_type": "parallel", "item1": "10", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.09205v1_figure_10.png", "item2_path": "./SciVer/images/2409.09205v1-Table2-1.png", "section": [ "4.7" ], "request_id": 964, "origin_statement": "The audio modality imposes about 46 on the Temporal NASA-TLX subscale versus about 41 for the mixed modality, yet audio also yields the highest total SSQ score (626.14 compared to mixed’s 547.76), indicating that its elevated temporal demand aligns with greater VR sickness.", "perturbed_statement": "The audio modality imposes about 46 on the Temporal NASA-TLX subscale versus about 41 for the mixed modality, yet mixed also yields the highest total SSQ score (626.14 compared to audio’s 547.76), indicating that its elevated temporal demand aligns with greater VR sickness.", "perturbed_explanation": "This statement is wrong because Table 2 reports that the audio modality—not the mixed modality—has the highest total SSQ score of 626.14, while the mixed modality’s total SSQ score is 547.76.", "claim": "The audio modality imposes about 46 on the Temporal NASA-TLX subscale versus about 41 for the mixed modality, yet audio also yields the highest total SSQ score (626.14 compared to mixed’s 547.76), indicating that its elevated temporal demand aligns with greater VR sickness.", "label": true }, { "paperid": "2410.23703v1", "paper_path": "./SciVer/papers/2410.23703v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23703v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.23703v1_figure_1(b).png", "section": [ "4.2" ], "request_id": 970, "origin_statement": "Nearly half (~47%) of sampled trajectories yield QA accuracy below 0.2, while the most frequent relation accounts for roughly 15% of all transitions—over fifteen times the frequency (<1%) of many of the least common relations.", "perturbed_statement": "Over 60% of sampled trajectories yield QA accuracy below 0.2, while the most frequent relation accounts for roughly 15% of all transitions—which is only twice the frequency (<1%) of many least common relations.", "perturbed_explanation": "The perturbed statement incorrectly claims that over 60% of trajectories have QA accuracy below 0.2, whereas Figure 1(a) shows this proportion is closer to 47%. 
It also wrongly asserts that the most frequent relation is only twice as common as rare ones, when Figure 1(b) indicates it is about fifteen times more frequent.", "claim": "Nearly half (~47%) of sampled trajectories yield QA accuracy below 0.2, while the most frequent relation accounts for roughly 15% of all transitions—over fifteen times the frequency (<1%) of many of the least common relations.", "label": true }, { "paperid": "2410.03053v1", "paper_path": "./SciVer/papers/2410.03053v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.03053v1-Table1-1.png", "item2_path": "./SciVer/images/2410.03053v1-Table2-1.png", "section": [ "6.1" ], "request_id": 987, "origin_statement": "As p increases from 500 to 128000, the realized minimum‐variance volatility under the partially corrected estimator V_p(H♯) declines from 10.75% to 0.84%, while its discrepancy E D̂_p(H♯) remains virtually unchanged, falling only from 1.22 to 0.87.", "perturbed_statement": "As p increases from 500 to 128000, the realized minimum‐variance volatility under the partially corrected estimator V_p(H♯) declines from 10.75% to 2.50%, while its discrepancy E D̂_p(H♯) remains virtually unchanged, falling only from 1.22 to 1.80.", "perturbed_explanation": "The perturbed volatility 2.50% contradicts Table 1 which reports V_p(H♯)=0.84% at p=128000, and the perturbed discrepancy 1.80 contradicts Table 2 which shows E D̂_p(H♯)=0.87 at p=128000.", "claim": "As p increases from 500 to 128000, the realized minimum‐variance volatility under the partially corrected estimator V_p(H♯) declines from 10.75% to 0.84%, while its discrepancy E D̂_p(H♯) remains virtually unchanged, falling only from 1.22 to 0.87.", "label": true }, { "paperid": "2410.01805v1", "paper_path": "./SciVer/papers/2410.01805v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.01805v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.01805v1-Table2-1.png", "section": [ "4.2" ], "request_id": 992, "origin_statement": "On ∞Bench with Phi-3-mini-128K, Locret achieves an average accuracy of 34.73% while using only 18 GB total memory—15 GB less than MInference which uses 55 GB for a 32.25% average.", "perturbed_statement": "On ∞Bench with Phi-3-mini-128K, Locret achieves an average accuracy of 34.73% while using 55 GB total memory—15 GB less than MInference which uses 18 GB for a 32.25% average.", "perturbed_explanation": "The perturbed statement reverses the memory footprints: Locret actually uses about 18 GB total memory (not 55 GB) and MInference uses about 55 GB (not 18 GB), contradicting the memory values shown in Figure 4.", "claim": "On ∞Bench with Phi-3-mini-128K, Locret achieves an average accuracy of 34.73% while using only 18 GB total memory—15 GB less than MInference which uses 55 GB for a 32.25% average.", "label": true }, { "paperid": "2409.16829v1", "paper_path": "./SciVer/papers/2409.16829v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.16829v1-Table4-1.png", "item2_path": "./SciVer/images/2409.16829v1-Table5-1.png", "section": [ "5.3" ], "request_id": 1006, "origin_statement": "In case (i), the CT test using a neural network shows a high rejection rate of 0.441 with an average classification error of 0.608, while in case (iii), the same test produces a median p-value of 
0.022 and a reduced error of 0.429.", "perturbed_statement": "In case (i), the CT test using a neural network shows a low rejection rate of 0.041 with an average classification error of 0.608, while in case (iii), the same test produces a median p-value of 0.022 and a reduced error of 0.429.", "perturbed_explanation": "The perturbed statement incorrectly reports the CT NN rejection rate for case (i) as 0.041. Table 4 shows the actual PR_NN for CT in case (i) is 0.441, not 0.041.", "claim": "In case (i), the CT test using a neural network shows a high rejection rate of 0.441 with an average classification error of 0.608, while in case (iii), the same test produces a median p-value of 0.022 and a reduced error of 0.429.", "label": true }, { "paperid": "2409.04319v1", "paper_path": "./SciVer/papers/2409.04319v1.json", "claim_type": "parallel", "item1": "4", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04319v1_figure_4.png", "item2_path": "./SciVer/images/2409.04319v1_figure_7.png", "section": [ "2.4" ], "request_id": 1018, "origin_statement": "In scheme-2a activation No. 2 (Fig. 4), Ip climbs to about 370 nA at ~85 min under p≈0.3 mbar; Fig. 7 shows that activation type v at rτ≈19.5 yields ℱ≈16, linking the high photocurrent to a figure-of-merit ratio of ~16 at rτ≈20.", "perturbed_statement": "In scheme-2a activation No. 2 (Fig. 4), Ip climbs to about 370 nA at ~85 min under p≈1.0 mbar; Fig. 7 shows that activation type v at rτ≈19.5 yields ℱ≈16, linking the high photocurrent to a figure-of-merit ratio of ~16 at rτ≈20.", "perturbed_explanation": "The perturbed statement incorrectly cites a pressure of ~1.0 mbar at 85 min, whereas Fig. 4 actually shows the pressure was ~0.3 mbar at that time. This contradicts the concrete pressure data in the activation curve.", "claim": "In scheme-2a activation No. 2 (Fig. 4), Ip climbs to about 370 nA at ~85 min under p≈0.3 mbar; Fig. 
7 shows that activation type v at rτ≈19.5 yields ℱ≈16, linking the high photocurrent to a figure-of-merit ratio of ~16 at rτ≈20.", "label": true }, { "paperid": "2411.11225v2", "paper_path": "./SciVer/papers/2411.11225v2.json", "claim_type": "parallel", "item1": "4(b)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11225v2_figure_4(b).png", "item2_path": "./SciVer/images/2411.11225v2_figure_4(c).png", "section": [ "5.5", "5.5.2", "5.5.1" ], "request_id": 1025, "origin_statement": "When the augmentation loss weight increases from 1 to 2, Recall@10 rises from 0.50 to 0.51, whereas at weight 2 for the self-supervised loss, Recall@10 falls to about 0.49.", "perturbed_statement": "When the augmentation loss weight increases from 1 to 2, Recall@10 rises from 0.50 to 0.52, whereas at weight 2 for the self-supervised loss, Recall@10 falls to about 0.49.", "perturbed_explanation": "The perturbed Recall@10 of 0.52 at augmentation weight 2 contradicts the first chart, which shows Recall@10 peaking at approximately 0.51, not 0.52.", "claim": "When the augmentation loss weight increases from 1 to 2, Recall@10 rises from 0.50 to 0.51, whereas at weight 2 for the self-supervised loss, Recall@10 falls to about 0.49.", "label": true }, { "paperid": "2409.10069v1", "paper_path": "./SciVer/papers/2409.10069v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10069v1_figure_2(b).png", "item2_path": "./SciVer/images/2409.10069v1_figure_3(b).png", "section": [ "5.3" ], "request_id": 1027, "origin_statement": "With K=50 and L=10, the t-SNE visualization for FMNIST ‘T-shirt’ class shows the ℓ=10 anomaly-perturbed samples forming an outlying cluster around +18 on the first axis, while in CIFAR-10 the average AUC declines from about 0.87 at L=4 to approximately 0.82 at L=10.", "perturbed_statement": "With K=50 and L=10, the t-SNE visualization for FMNIST ‘T-shirt’ class shows the ℓ=10 anomaly-perturbed samples forming an outlying cluster around +5 on the first axis, while in CIFAR-10 the average AUC increases from about 0.87 at L=4 to approximately 0.90 at L=10.", "perturbed_explanation": "The perturbation is incorrect because in the t-SNE plot the ℓ=10 anomaly-perturbed points lie around +18 on the first axis, not +5. 
Moreover, the CIFAR-10 average AUC actually decreases from ≈0.87 at L=4 to ≈0.82 at L=10, rather than increasing to 0.90.", "claim": "With K=50 and L=10, the t-SNE visualization for FMNIST ‘T-shirt’ class shows the ℓ=10 anomaly-perturbed samples forming an outlying cluster around +18 on the first axis, while in CIFAR-10 the average AUC declines from about 0.87 at L=4 to approximately 0.82 at L=10.", "label": true }, { "paperid": "2411.10906v1", "paper_path": "./SciVer/papers/2411.10906v1.json", "claim_type": "parallel", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.10906v1-Table1-1.png", "item2_path": "./SciVer/images/2411.10906v1-Table3-1.png", "section": [ "5.4" ], "request_id": 1084, "origin_statement": "At 500 episodes on Alien, both LSVI-UCB-Fixed and -Adaptive achieve 49635.66 total reward, but the Adaptive variant requires 0.72 hrs versus 3.19 hrs—an almost 78% reduction in process time.", "perturbed_statement": "At 500 episodes on Alien, both LSVI-UCB-Fixed and -Adaptive achieve 49635.66 total reward, but the Adaptive variant requires 0.27 hrs versus 3.19 hrs—an almost 78% reduction in process time.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 reports that LSVI-UCB-Adaptive on Alien at 500 episodes takes 0.72 hrs, not 0.27 hrs, to process.", "claim": "At 500 episodes on Alien, both LSVI-UCB-Fixed and -Adaptive achieve 49635.66 total reward, but the Adaptive variant requires 0.72 hrs versus 3.19 hrs—an almost 78% reduction in process time.", "label": true }, { "paperid": "2410.11668v1", "paper_path": "./SciVer/papers/2410.11668v1.json", "claim_type": "parallel", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.11668v1_figure_6.png", "item2_path": "./SciVer/images/2410.11668v1-Table2-1.png", "section": [ "5" ], "request_id": 1111, "origin_statement": "The PS masked HSE mass (SZ+X-ray) is 4.25^{+0.50}_{-0.45}×10^{14}M_{☉} (Table 2), about 0.45×10^{14}M_{☉} greater than the X-ray–only HSE mass (~3.80×10^{14}M_{☉}) shown in Fig. 6, a ~12% increase.", "perturbed_statement": "The PS masked HSE mass (SZ+X-ray) is 4.25^{+0.50}_{-0.45}×10^{14}M_{☉} (Table 2), about 0.75×10^{14}M_{☉} greater than the X-ray–only HSE mass (~3.50×10^{14}M_{☉}) shown in Fig. 6, a ~18% increase.", "perturbed_explanation": "Fig. 6 lists the X-ray–only HSE mass as ~3.80×10^{14}M_{☉}, not ~3.50×10^{14}M_{☉}. Therefore the true difference is ~0.45×10^{14}M_{☉}, not 0.75×10^{14}M_{☉}, and the percentage increase is closer to 12%, not 18%.", "claim": "The PS masked HSE mass (SZ+X-ray) is 4.25^{+0.50}_{-0.45}×10^{14}M_{☉} (Table 2), about 0.45×10^{14}M_{☉} greater than the X-ray–only HSE mass (~3.80×10^{14}M_{☉}) shown in Fig. 
6, a ~12% increase.", "label": true }, { "paperid": "2409.12958v1", "paper_path": "./SciVer/papers/2409.12958v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.12958v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.12958v1_figure_2(d).png", "section": [ "4.1" ], "request_id": 1117, "origin_statement": "MURI includes 107 level-1 languages—about 6.7× more than xP3’s 16—and features 27 languages without case marking, nearly double Aya Collection’s 14.", "perturbed_statement": "MURI includes 107 level-1 languages—about 6.7× more than xP3’s 20—and features 27 languages without case marking, nearly double Aya Collection’s 14.", "perturbed_explanation": "The perturbed statement claims xP3 has 20 level-1 languages, but the resource-level chart shows xP3 actually has only 16 languages at level 1, making the comparison inaccurate.", "claim": "MURI includes 107 level-1 languages—about 6.7× more than xP3’s 16—and features 27 languages without case marking, nearly double Aya Collection’s 14.", "label": true }, { "paperid": "2411.18215v1", "paper_path": "./SciVer/papers/2411.18215v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.18215v1_figure_1.png", "item2_path": "./SciVer/images/2411.18215v1-Table2-1.png", "section": [ "4.2.1" ], "request_id": 1123, "origin_statement": "At about 90 days after detection, the PHOENIX model temperature dips to ~10,000 K in Figure 1, matching Table 2's peak-phase range of 9.5–12.0×10^3 K on 2023-10-18.", "perturbed_statement": "At about 90 days after detection, the PHOENIX model temperature dips to ~10,000 K in Figure 1, consistent with Table 2's peak-phase temperature range of 12.5–14.0×10^3 K on 2023-10-18.", "perturbed_explanation": "The perturbed claim misstates the peak-phase temperature in Table 2: the table actually lists a range of 9.5–12.0×10^3 K for 2023-10-18, not 12.5–14.0×10^3 K.", "claim": "At about 90 days after detection, the PHOENIX model temperature dips to ~10,000 K in Figure 1, matching Table 2's peak-phase range of 9.5–12.0×10^3 K on 2023-10-18.", "label": true }, { "paperid": "2409.07233v1", "paper_path": "./SciVer/papers/2409.07233v1.json", "claim_type": "parallel", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.07233v1_figure_4.png", "item2_path": "./SciVer/images/2409.07233v1-Table1-1.png", "section": [ "4.5" ], "request_id": 1, "origin_statement": "The CN rootogram has almost all bars within ±1 Tukey limits around zero indicating a good tail fit, unlike N which shows bars exceeding these limits at extremes; Table 1 gives CN’s Grade coefficient of -0.982, compared to N’s -0.844.", "perturbed_statement": "The CN rootogram has almost all bars within ±1 Tukey limits around zero indicating a good tail fit, unlike N which shows bars exceeding these limits at extremes; Table 1 gives CN’s Grade coefficient of -0.844, compared to N’s -0.844.", "perturbed_explanation": "The perturbed statement incorrectly reports both coefficients: Table 1 actually lists CN’s Grade coefficient as –0.982 (not –0.844), while N’s Grade coefficient is –0.844. 
Thus saying CN’s is –0.844 and matching N’s contradicts the table.", "claim": "The CN rootogram has almost all bars within ±1 Tukey limits around zero indicating a good tail fit, unlike N which shows bars exceeding these limits at extremes; Table 1 gives CN’s Grade coefficient of -0.982, compared to N’s -0.844.", "label": true }, { "paperid": "2409.06994v2", "paper_path": "./SciVer/papers/2409.06994v2.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06994v2_figure_3.png", "item2_path": "./SciVer/images/2409.06994v2-Table5-1.png", "section": [ "5.2" ], "request_id": 2, "origin_statement": "DN and RE both find cores of 31 nodes in the Airport network (Table 5), and their core sets share a Jaccard coefficient close to 1 (bright yellow in Figure 3), indicating almost identical core membership.", "perturbed_statement": "DN and RE both find cores of 31 nodes in the Airport network (Table 5), and their core sets share a Jaccard coefficient close to 0.2 (dark purple in Figure 3), indicating minimal overlap.", "perturbed_explanation": "Figure 3’s heatmap shows the intersection of DN and RE cores for Airport as bright yellow, corresponding to a Jaccard coefficient near 1, not a low value like 0.2. Thus the claimed dark purple (0.2) similarity contradicts the actual high overlap.", "claim": "DN and RE both find cores of 31 nodes in the Airport network (Table 5), and their core sets share a Jaccard coefficient close to 1 (bright yellow in Figure 3), indicating almost identical core membership.", "label": true }, { "paperid": "2409.06906v1", "paper_path": "./SciVer/papers/2409.06906v1.json", "claim_type": "parallel", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06906v1_figure_7(a).png", "item2_path": "./SciVer/images/2409.06906v1_figure_7(b).png", "section": [ "3.4" ], "request_id": 3, "origin_statement": "In both the standard and high-contrast WFC3 cutouts, the F218W point source core remains ≃2 pixels wide while the F606W core stays at ≃5 pixels, demonstrating a ≃2.5× increase in PSF width from UV to optical wavelengths independent of contrast settings.", "perturbed_statement": "In both the standard and high-contrast WFC3 cutouts, the F218W point source core remains ≃2 pixels wide while the F606W core stays at ≃6 pixels, demonstrating a 3× increase in PSF width from UV to optical wavelengths independent of contrast settings.", "perturbed_explanation": "The perturbed statement incorrectly asserts a 6-pixel width for the F606W core; the images clearly show it spans about 5 pixels, not 6, so the claimed 3× increase is unsupported.", "claim": "In both the standard and high-contrast WFC3 cutouts, the F218W point source core remains ≃2 pixels wide while the F606W core stays at ≃5 pixels, demonstrating a ≃2.5× increase in PSF width from UV to optical wavelengths independent of contrast settings.", "label": true }, { "paperid": "2410.09504v1", "paper_path": "./SciVer/papers/2410.09504v1.json", "claim_type": "parallel", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.09504v1_figure_5.png", "item2_path": "./SciVer/images/2410.09504v1-Table2-1.png", "section": [ "4.2" ], "request_id": 6, "origin_statement": "In Figure 5, the third response posterior mean from bps achieved an RMSPE of 0.817, about 9.1% lower than the average RMSPE of 0.900 reported for the DOUBLE BPS model in Table 2.", 
"perturbed_statement": "In Figure 5, the third response posterior mean from bps achieved an RMSPE of 0.707, about 21.3% lower than the average RMSPE of 0.900 reported for the DOUBLE BPS model in Table 2.", "perturbed_explanation": "The perturbation wrongly states the third response RMSPE as 0.707, whereas Figure 5 clearly shows an RMSPE of 0.817 for the third response. This incorrect value contradicts the visual data in Figure 5.", "claim": "In Figure 5, the third response posterior mean from bps achieved an RMSPE of 0.817, about 9.1% lower than the average RMSPE of 0.900 reported for the DOUBLE BPS model in Table 2.", "label": true }, { "paperid": "2411.15223v1", "paper_path": "./SciVer/papers/2411.15223v1.json", "claim_type": "parallel", "item1": "6", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15223v1_figure_6.png", "item2_path": "./SciVer/images/2411.15223v1-Table1-1.png", "section": [ "3.3.2" ], "request_id": 11, "origin_statement": "Figure 6 shows AUC rising from 0.7819 to 0.7849 and Logloss falling from 0.4658 to 0.4629 as embedding size increases from 2 to 8, a trend confirmed by Table 1 and reflecting improved model performance.", "perturbed_statement": "Figure 6 shows AUC rising from 0.7819 to 0.7855 and Logloss falling from 0.4658 to 0.4629 as embedding size increases from 2 to 8, a trend confirmed by Table 1 and reflecting improved model performance.", "perturbed_explanation": "The perturbed statement misreports the final AUC value. Table 1 and Figure 6 both show the AUC at embedding size 8 is 0.7849, not 0.7855, so the claimed increase to 0.7855 is incorrect.", "claim": "Figure 6 shows AUC rising from 0.7819 to 0.7849 and Logloss falling from 0.4658 to 0.4629 as embedding size increases from 2 to 8, a trend confirmed by Table 1 and reflecting improved model performance.", "label": true }, { "paperid": "2409.16902v1", "paper_path": "./SciVer/papers/2409.16902v1.json", "claim_type": "parallel", "item1": "2(c)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.16902v1_figure_2(c).png", "item2_path": "./SciVer/images/2409.16902v1-Table3-1.png", "section": [ "3" ], "request_id": 17, "origin_statement": "On the UW-COT dataset, SAM 2-large achieves 58.0% AUC, outperforming ARTrack’s 57.0%, but Table 3 shows its 224.4 M-parameter model runs at only 24.2 FPS—23.0 FPS slower than the 38.9 M-parameter SAM 2-tiny’s 47.2 FPS.", "perturbed_statement": "On the UW-COT dataset, SAM 2-large achieves 58.0% AUC, outperforming ARTrack’s 57.0%, but Table 3 shows its 224.4 M-parameter model runs at only 34.8 FPS—12.4 FPS slower than the 38.9 M-parameter SAM 2-tiny’s 47.2 FPS.", "perturbed_explanation": "The perturbed statement incorrectly cites SAM 2-large’s speed as 34.8 FPS. 
According to Table 3, SAM 2-large actually runs at 24.2 FPS, not 34.8 FPS.", "claim": "On the UW-COT dataset, SAM 2-large achieves 58.0% AUC, outperforming ARTrack’s 57.0%, but Table 3 shows its 224.4 M-parameter model runs at only 24.2 FPS—23.0 FPS slower than the 38.9 M-parameter SAM 2-tiny’s 47.2 FPS.", "label": true }, { "paperid": "2411.15098v2", "paper_path": "./SciVer/papers/2411.15098v2.json", "claim_type": "parallel", "item1": "2", "item2": "4(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15098v2_figure_2.png", "item2_path": "./SciVer/images/2411.15098v2_figure_4(a).png", "section": [ "3.2" ], "request_id": 18, "origin_statement": "The multi-modal attention integration in Figure 2 concatenates condition image tokens with noisy image and text tokens, lowering the plateau training loss from about 0.36 (direct addition) to about 0.33 at 10 k steps, a roughly 0.03 (8%) improvement over the direct-addition method (Figure 4a).", "perturbed_statement": "The multi-modal attention integration in Figure 2 concatenates condition image tokens with noisy image and text tokens, lowering the plateau training loss from about 0.36 (direct addition) to about 0.30 at 10 k steps, a roughly 0.06 (17%) improvement over the direct-addition method (Figure 4a).", "perturbed_explanation": "The perturbed statement claims the multi-modal attention method reaches a plateau loss of about 0.30 at 10 k steps and a 0.06 reduction, but Figure 4a shows the plateau loss is actually around 0.33, a 0.03 reduction (≈8%), not 0.06 (≈17%).", "claim": "The multi-modal attention integration in Figure 2 concatenates condition image tokens with noisy image and text tokens, lowering the plateau training loss from about 0.36 (direct addition) to about 0.33 at 10 k steps, a roughly 0.03 (8%) improvement over the direct-addition method (Figure 4a).", "label": true }, { "paperid": "2409.14672v1", "paper_path": "./SciVer/papers/2409.14672v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.14672v1-Table2-1.png", "item2_path": "./SciVer/images/2409.14672v1-Table3-1.png", "section": [ "2.2" ], "request_id": 19, "origin_statement": "Audio responses scored a perfect 2.00 for relevance but only a 1.44 length score, which per Table 2 falls into the \"Short/Long\" category, underscoring that listeners found spoken replies too brief or verbose compared to text (which scored 1.70 length, closer to \"Adequate\").", "perturbed_statement": "Audio responses scored a perfect 1.98 for relevance but only a 1.54 length score, which per Table 2 falls into the \"Short/Long\" category, underscoring that listeners found spoken replies too brief or verbose compared to text (which scored 1.70 length, closer to \"Adequate\").", "perturbed_explanation": "The perturbed statement misreports the audio relevance score as 1.98 (it is actually 2.00 in Table 3) and the audio length score as 1.54 (it is actually 1.44 in Table 3). 
These values contradict the reported data.", "claim": "Audio responses scored a perfect 2.00 for relevance but only a 1.44 length score, which per Table 2 falls into the \"Short/Long\" category, underscoring that listeners found spoken replies too brief or verbose compared to text (which scored 1.70 length, closer to \"Adequate\").", "label": true }, { "paperid": "2409.06367v1", "paper_path": "./SciVer/papers/2409.06367v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06367v1_figure_5.png", "item2_path": "./SciVer/images/2409.06367v1_figure_6.png", "section": [ "5.3" ], "request_id": 22, "origin_statement": "SimpleNet’s I-AUROC on Texture-AD is 59.12% versus 99.58% on MVTec, yet its segmentation heatmap for the cloth category shows a persistent high-intensity border artifact along the top edge.", "perturbed_statement": "SimpleNet’s I-AUROC on Texture-AD is 75.00% versus 99.58% on MVTec, yet its segmentation heatmap for the cloth category shows a persistent high-intensity border artifact along the top edge.", "perturbed_explanation": "The perturbation incorrectly states that SimpleNet’s I-AUROC on Texture-AD is 75.00%, whereas Figure 5 reports the actual Texture-AD I-AUROC as 59.12%.", "claim": "SimpleNet’s I-AUROC on Texture-AD is 59.12% versus 99.58% on MVTec, yet its segmentation heatmap for the cloth category shows a persistent high-intensity border artifact along the top edge.", "label": true }, { "paperid": "2409.18042v2", "paper_path": "./SciVer/papers/2409.18042v2.json", "claim_type": "parallel", "item1": "8", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.18042v2_figure_8.png", "item2_path": "./SciVer/images/2409.18042v2-Table3-1.png", "section": [ "5.2" ], "request_id": 24, "origin_statement": "In Figure 8, EMOVA responds to a marriage congratulation with a high-pitched happy tone, and in Table 3, EMOVA achieves a 4.0% WER on LibriSpeech, 0.5 points lower than Mini-Omni’s 4.5%.", "perturbed_statement": "In Figure 8, EMOVA responds to a marriage congratulation with a high-pitched happy tone, and in Table 3, EMOVA achieves a 5.0% WER on LibriSpeech, 0.5 points higher than Mini-Omni’s 4.5%.", "perturbed_explanation": "Table 3 shows EMOVA’s LibriSpeech WER is actually 4.0%, not 5.0%. 
The perturbed 5.0% value contradicts the benchmark data in the table.", "claim": "In Figure 8, EMOVA responds to a marriage congratulation with a high-pitched happy tone, and in Table 3, EMOVA achieves a 4.0% WER on LibriSpeech, 0.5 points lower than Mini-Omni’s 4.5%.", "label": true }, { "paperid": "2411.10948v1", "paper_path": "./SciVer/papers/2411.10948v1.json", "claim_type": "parallel", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.10948v1_figure_6.png", "item2_path": "./SciVer/images/2411.10948v1-Table3-1.png", "section": [ "5.3" ], "request_id": 28, "origin_statement": "On ARMv8, our 4-bit ShiftMM achieves about 25 Gflops on a (1,256,512) linear layer, outperforming PyTorch.fp16’s ~11 Gflops by ~14 Gflops; on FPGA under LUT-priority, our INT6 implementation saves 43.5 LUTs and 60.7 FFs compared to FP16.", "perturbed_statement": "On ARMv8, our 4-bit ShiftMM achieves about 25 Gflops on a (1,256,512) linear layer, outperforming PyTorch.fp16’s ~11 Gflops by ~14 Gflops; on FPGA under LUT-priority, our INT6 implementation saves 53.5 LUTs and 60.7 FFs compared to FP16.", "perturbed_explanation": "The perturbed claim states a saving of 53.5 LUTs under LUT-priority, but Table 3 reports a saving of only 43.5 LUTs for the INT6 implementation compared to FP16, so the 53.5 figure contradicts the table.", "claim": "On ARMv8, our 4-bit ShiftMM achieves about 25 Gflops on a (1,256,512) linear layer, outperforming PyTorch.fp16’s ~11 Gflops by ~14 Gflops; on FPGA under LUT-priority, our INT6 implementation saves 43.5 LUTs and 60.7 FFs compared to FP16.", "label": true }, { "paperid": "2410.06795v1", "paper_path": "./SciVer/papers/2410.06795v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.06795v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.06795v1-Table3-1.png", "section": [ "4.3" ], "request_id": 33, "origin_statement": "In PhD examples, the model answered attribute_recognition and object_recognition correctly but failed counting and positional_reasoning. On POPE, applying PATCH to MiniGPT-4 raised accuracy from 57.67% to 88.13%, a 30.46-point gain, more than double Hard Prompt’s 13.06-point improvement.", "perturbed_statement": "In PhD examples, the model answered attribute_recognition and object_recognition correctly but failed counting and positional_reasoning. On POPE, applying PATCH to MiniGPT-4 raised accuracy from 57.67% to 80.00%, a 22.33-point gain, outperforming Hard Prompt’s 13.06-point improvement.", "perturbed_explanation": "This is incorrect because Table 4 shows that applying PATCH to MiniGPT-4 achieves 88.13% accuracy with a +30.46-point gain, not 80.00% or a 22.33-point increase.", "claim": "In PhD examples, the model answered attribute_recognition and object_recognition correctly but failed counting and positional_reasoning. 
On POPE, applying PATCH to MiniGPT-4 raised accuracy from 57.67% to 88.13%, a 30.46-point gain, more than double Hard Prompt’s 13.06-point improvement.", "label": true }, { "paperid": "2410.09356v1", "paper_path": "./SciVer/papers/2410.09356v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09356v1_figure_3.png", "item2_path": "./SciVer/images/2410.09356v1_figure_4.png", "section": [ "4.3" ], "request_id": 34, "origin_statement": "Figure 4 shows the original traffic series spans ~800 time points and is split into two 400-point subsequences; Figure 3 illustrates that each subsequence is fed into an ST-Comp branch with two parallel Att-Conv layers and a Fusion-Graph module for interactive learning.", "perturbed_statement": "Figure 4 shows the original traffic series spans ~800 time points and is split into three 266-point subsequences; Figure 3 illustrates that each subsequence is fed into an ST-Comp branch with two parallel Att-Conv layers and a Fusion-Graph module for interactive learning.", "perturbed_explanation": "The perturbed statement incorrectly claims three 266-point subsequences; Figure 4 actually splits the ~800-point series into two subsequences of around 400 points each, not three.", "claim": "Figure 4 shows the original traffic series spans ~800 time points and is split into two 400-point subsequences; Figure 3 illustrates that each subsequence is fed into an ST-Comp branch with two parallel Att-Conv layers and a Fusion-Graph module for interactive learning.", "label": true }, { "paperid": "2411.11340v1", "paper_path": "./SciVer/papers/2411.11340v1.json", "claim_type": "parallel", "item1": "1(c)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.11340v1_figure_1(c).png", "item2_path": "./SciVer/images/2411.11340v1-Table1-1.png", "section": [ "2" ], "request_id": 36, "origin_statement": "For DLinear on ETTm1, Table 1 reports a Trend MSE of 0.3192 which is over three times its Seasonal MSE of 0.0969; in Figure 1 the Trend true values increase from around –1.1 to –0.9 while the Seasonal component varies within ±0.1.", "perturbed_statement": "For DLinear on ETTm1, Table 1 reports a Trend MSE of 0.0969 which is over three times its Seasonal MSE of 0.3192; in Figure 1 the Trend true values increase from around –1.1 to –0.9 while the Seasonal component varies within ±0.1.", "perturbed_explanation": "The perturbed statement swaps the MSE values: Table 1 actually shows Trend MSE = 0.3192 and Seasonal MSE = 0.0969, not Trend = 0.0969 and Seasonal = 0.3192.", "claim": "For DLinear on ETTm1, Table 1 reports a Trend MSE of 0.3192 which is over three times its Seasonal MSE of 0.0969; in Figure 1 the Trend true values increase from around –1.1 to –0.9 while the Seasonal component varies within ±0.1.", "label": true }, { "paperid": "2411.00157v1", "paper_path": "./SciVer/papers/2411.00157v1.json", "claim_type": "parallel", "item1": "7", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00157v1_figure_7.png", "item2_path": "./SciVer/images/2411.00157v1_figure_9.png", "section": [ "3.3" ], "request_id": 37, "origin_statement": "Figure 7 shows that at separation Sx=6c, trailing foil heave amplitude htr=1.8c yields a system power coefficient Csys_P ≈1.3 in the LEV regime at αle_T/4≈0.33, while figure 9(a) shows that for the same htr and Sx, the trailing foil power Ctr_P peaks at ~0.52 when wake phase Φ≈90°.", 
"perturbed_statement": "Figure 7 shows that at separation Sx=6c, trailing foil heave amplitude htr=1.8c yields a system power coefficient Csys_P ≈1.3 in the LEV regime at αle_T/4≈0.33, while figure 9(a) shows that for htr=1.6c and Sx=6c, the trailing foil power Ctr_P peaks at ~0.52 when wake phase Φ≈90°.", "perturbed_explanation": "Figure 9(a) legend only includes data for htr values of 0.6c, 0.8c, 1.0c, 1.2c, 1.4c, and 1.8c; there is no line or marker for htr=1.6c, so claiming a peak at that amplitude contradicts the plotted data.", "claim": "Figure 7 shows that at separation Sx=6c, trailing foil heave amplitude htr=1.8c yields a system power coefficient Csys_P ≈1.3 in the LEV regime at αle_T/4≈0.33, while figure 9(a) shows that for the same htr and Sx, the trailing foil power Ctr_P peaks at ~0.52 when wake phase Φ≈90°.", "label": true }, { "paperid": "2411.09289v1", "paper_path": "./SciVer/papers/2411.09289v1.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09289v1_figure_4.png", "item2_path": "./SciVer/images/2411.09289v1-Table2-1.png", "section": [ "4.2", "4.2.1" ], "request_id": 38, "origin_statement": "On TinyLlama-1.1B with 256K maximum context, StreamAdapter achieves a perplexity of 9.97, reducing by about 1.31 compared to the sliding window’s 11.28, matching the ~1.3 gap in Figure 4.", "perturbed_statement": "On TinyLlama-1.1B with 256K maximum context, StreamAdapter achieves a perplexity of 9.97, reducing by about 1.50 compared to the sliding window’s 11.28, matching the ~1.3 gap in Figure 4.", "perturbed_explanation": "The stated reduction of 1.50 is incorrect: the table reports sliding window perplexity of 11.28 and StreamAdapter perplexity of 9.97, giving a difference of 1.31, not 1.50.", "claim": "On TinyLlama-1.1B with 256K maximum context, StreamAdapter achieves a perplexity of 9.97, reducing by about 1.31 compared to the sliding window’s 11.28, matching the ~1.3 gap in Figure 4.", "label": true }, { "paperid": "2411.04554v1", "paper_path": "./SciVer/papers/2411.04554v1.json", "claim_type": "parallel", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04554v1_figure_7(a).png", "item2_path": "./SciVer/images/2411.04554v1_figure_7(b).png", "section": [ "6" ], "request_id": 39, "origin_statement": "Peri-midFormer achieves about 95% classification accuracy on the UEA Heartbeat dataset with only ~0.2 M parameters—roughly 17 points higher than TimesNet’s ~78% at ~4 M—and on ETTh2 long-term forecasting it attains the lowest MSE (~0.345) without Time-LLM’s 12750 G FLOPs.", "perturbed_statement": "Peri-midFormer achieves about 98% classification accuracy on the UEA Heartbeat dataset with only ~1 M parameters—roughly 20 points higher than TimesNet’s ~78% at ~4 M—and on ETTh2 long-term forecasting it attains the lowest MSE (~0.345) without Time-LLM’s 12750 G FLOPs.", "perturbed_explanation": "The perturbed statement misreports two key details from the classification graph: Peri-midFormer’s accuracy is ~95%, not 98%, and it uses ~0.2 M parameters, not ~1 M.", "claim": "Peri-midFormer achieves about 95% classification accuracy on the UEA Heartbeat dataset with only ~0.2 M parameters—roughly 17 points higher than TimesNet’s ~78% at ~4 M—and on ETTh2 long-term forecasting it attains the lowest MSE (~0.345) without Time-LLM’s 12750 G FLOPs.", "label": true }, { "paperid": "2409.14067v1", "paper_path": 
"./SciVer/papers/2409.14067v1.json", "claim_type": "parallel", "item1": "4(f)", "item2": "4(k)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14067v1_figure_4(f).png", "item2_path": "./SciVer/images/2409.14067v1_figure_4(k).png", "section": [ "3.4" ], "request_id": 40, "origin_statement": "On the Replica dataset (Table 3), our method achieves a PSNR of 23.6 dB and an LPIPS of 0.17 versus PNeRFLoc’s 18.2 dB PSNR and 0.31 LPIPS; Fig. 4 confirms our rendering has notably clearer textures and markedly less noise.", "perturbed_statement": "On the Replica dataset (Table 3), our method achieves a PSNR of 23.6 dB and an LPIPS of 0.17 versus PNeRFLoc’s 18.2 dB PSNR and 0.05 LPIPS; Fig. 4 confirms our rendering has notably clearer textures and markedly less noise.", "perturbed_explanation": "The perturbed statement claims PNeRFLoc’s LPIPS is 0.05, which contradicts Table 3 where PNeRFLoc’s LPIPS is reported as 0.31.", "claim": "On the Replica dataset (Table 3), our method achieves a PSNR of 23.6 dB and an LPIPS of 0.17 versus PNeRFLoc’s 18.2 dB PSNR and 0.31 LPIPS; Fig. 4 confirms our rendering has notably clearer textures and markedly less noise.", "label": true }, { "paperid": "2411.03966v1", "paper_path": "./SciVer/papers/2411.03966v1.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03966v1_figure_1.png", "item2_path": "./SciVer/images/2411.03966v1-Table1-1.png", "section": [ "3" ], "request_id": 43, "origin_statement": "The unigrams dataset includes 44,450 words annotated by 1,020 annotators for a total of 375,796 annotations, and just 2.2% (994) terms are labeled as high anxiety.", "perturbed_statement": "The unigrams dataset includes 44,450 words annotated by 1,020 annotators for a total of 375,796 annotations, and just 4.4% (994) terms are labeled as high anxiety.", "perturbed_explanation": "The perturbed statement incorrectly reports that 4.4% of terms are high anxiety, whereas Figure 1 shows the actual percentage is 2.2% (994 terms).", "claim": "The unigrams dataset includes 44,450 words annotated by 1,020 annotators for a total of 375,796 annotations, and just 2.2% (994) terms are labeled as high anxiety.", "label": true }, { "paperid": "2409.05755v1", "paper_path": "./SciVer/papers/2409.05755v1.json", "claim_type": "parallel", "item1": "1(c)", "item2": "1(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05755v1_figure_1(c).png", "item2_path": "./SciVer/images/2409.05755v1_figure_1(f).png", "section": [ "4.2", "4.1" ], "request_id": 44, "origin_statement": "In GenCat curves, GCN accuracy falls from roughly 0.92 at β = −1 to about 0.5 at β = 7, mirroring H_edge(𝒢), which declines from ~0.8 to near zero over the same β range.", "perturbed_statement": "In GenCat curves, GCN accuracy falls from roughly 0.85 at β = −1 to about 0.25 at β = 7, mirroring H_edge(𝒢), which declines from ~0.7 to around 0.2 over the same β range.", "perturbed_explanation": "The perturbation is incorrect because the baseline figure shows GCN starting at about 0.92 accuracy at β=−1, not 0.85, and dropping to around 0.5 by β=7, not 0.25. 
Likewise, H_edge(𝒢) drops from approximately 0.8 to nearly zero, not from 0.7 to 0.2.", "claim": "In GenCat curves, GCN accuracy falls from roughly 0.92 at β = −1 to about 0.5 at β = 7, mirroring H_edge(𝒢), which declines from ~0.8 to near zero over the same β range.", "label": true }, { "paperid": "2410.15355v1", "paper_path": "./SciVer/papers/2410.15355v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.15355v1-Table3-1.png", "item2_path": "./SciVer/images/2410.15355v1-Table4-1.png", "section": [ "6.3" ], "request_id": 45, "origin_statement": "LAC achieves a 16.47% higher classification accuracy on Cornell than BGRL (72.63% vs 56.16%) but only a 0.01 NMI gain over GCL-SPAN on Cora clustering (0.53 vs 0.52).", "perturbed_statement": "LAC achieves a 12.3% higher classification accuracy on Cornell than BGRL (72.63% vs 56.16%) but only a 0.10 NMI gain over GCL-SPAN on Cora clustering (0.53 vs 0.52).", "perturbed_explanation": "The actual classification accuracy gap on Cornell is 72.63%−56.16%=16.47%, not 12.3%. Also, the true NMI gain on Cora clustering is 0.53−0.52=0.01, not 0.10, contradicting the table values.", "claim": "LAC achieves a 16.47% higher classification accuracy on Cornell than BGRL (72.63% vs 56.16%) but only a 0.01 NMI gain over GCL-SPAN on Cora clustering (0.53 vs 0.52).", "label": true }, { "paperid": "2411.08909v1", "paper_path": "./SciVer/papers/2411.08909v1.json", "claim_type": "parallel", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.08909v1_figure_7(a).png", "item2_path": "./SciVer/images/2411.08909v1_figure_7(b).png", "section": [ "5.5" ], "request_id": 48, "origin_statement": "On t-SNE, the LC-PLM-G embeddings form eight distinct clusters with community 0 spanning roughly 50 units on the x-axis (from –20 to 30), whereas the corresponding cluster in the baseline model (Φ) spans about 150 units (from –80 to 70), demonstrating tighter grouping in LC-PLM-G.", "perturbed_statement": "In the LC-PLM-G embedding, community 0 spans roughly 200 units on the x-axis (from –90 to 110), while in the baseline model (Φ) it spans about 60 units (from –20 to 40), suggesting LC-PLM-G compresses clusters more tightly.", "perturbed_explanation": "The perturbed statement misreports the cluster spans: in the LC-PLM-G plot, community 0 actually ranges only from about –20 to 30 (≈50 units), not –90 to 110, and in the baseline plot it spans roughly –80 to 70 (≈150 units), not –20 to 40.", "claim": "On t-SNE, the LC-PLM-G embeddings form eight distinct clusters with community 0 spanning roughly 50 units on the x-axis (from –20 to 30), whereas the corresponding cluster in the baseline model (Φ) spans about 150 units (from –80 to 70), demonstrating tighter grouping in LC-PLM-G.", "label": true }, { "paperid": "2409.07355v1", "paper_path": "./SciVer/papers/2409.07355v1.json", "claim_type": "parallel", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07355v1_figure_5.png", "item2_path": "./SciVer/images/2409.07355v1_figure_7.png", "section": [ "5.3.2", "5.3.3" ], "request_id": 51, "origin_statement": "Comb-TA yields the highest average Spearman correlation (0.67) while also having the lowest average cosine similarity (0.45).", "perturbed_statement": "Comb-TA achieves the highest average Spearman correlation (0.65) while also showing the highest average cosine similarity 
(0.55).", "perturbed_explanation": "The Spearman average for Comb-TA in Figure 5 is 0.67, not 0.65, and Figure 7 shows its average cosine similarity as roughly 0.45 (the lowest), not 0.55 or the highest.", "claim": "Comb-TA yields the highest average Spearman correlation (0.67) while also having the lowest average cosine similarity (0.45).", "label": true }, { "paperid": "2411.11912v1", "paper_path": "./SciVer/papers/2411.11912v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.11912v1_figure_3.png", "item2_path": "./SciVer/images/2411.11912v1-Table4-1.png", "section": [ "6.3" ], "request_id": 53, "origin_statement": "The principal eigenvalue at layer 1 in VQARad (~1.6×10^10) is about 1000 times that in SLAKE (~1.4×10^7); concurrently, ABC’s CT accuracy (91.33%) surpasses ACO’s (86.67%) by 4.66 points.", "perturbed_statement": "The principal eigenvalue at layer 1 in VQARad (~1.6×10^10) is about 100 times that in SLAKE (~1.4×10^8); concurrently, ABC’s CT accuracy (91.33%) surpasses ACO’s (86.67%) by 4.66 points.", "perturbed_explanation": "The perturbed statement misstates the SLAKE layer 1 eigenvalue as ~1.4×10^8 (it is ~1.4×10^7 in Fig. 3a) and thus underestimates the ratio to VQARad (~1.6×10^10) as 100× instead of the correct ~1000×.", "claim": "The principal eigenvalue at layer 1 in VQARad (~1.6×10^10) is about 1000 times that in SLAKE (~1.4×10^7); concurrently, ABC’s CT accuracy (91.33%) surpasses ACO’s (86.67%) by 4.66 points.", "label": true }, { "paperid": "2409.13587v1", "paper_path": "./SciVer/papers/2409.13587v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.13587v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.13587v1_figure_2(c).png", "section": [ "4.2" ], "request_id": 60, "origin_statement": "On 28 qubits, a one-frequency fit yields amplitude r=0.820 and phase θ=−27.47°, while the three-frequency fit reduces the primary amplitude to r=0.553 (a 32.6% decrease) and adds a secondary component with r2≈0.347 and θ2≈−21.98°.", "perturbed_statement": "On 28 qubits, a one-frequency fit yields amplitude r=0.780 and phase θ=−25.00°, while the three-frequency fit reduces the primary amplitude to r=0.553 (a 32.6% decrease) and adds a secondary component with r2≈0.347 and θ2≈−21.98°.", "perturbed_explanation": "The perturbed amplitude r=0.780 contradicts the first plot’s legend, which shows r=0.820 for the one-frequency fit. 
The perturbed phase θ=−25.00° also conflicts with the legend’s actual value of θ=−27.471°.", "claim": "On 28 qubits, a one-frequency fit yields amplitude r=0.820 and phase θ=−27.47°, while the three-frequency fit reduces the primary amplitude to r=0.553 (a 32.6% decrease) and adds a secondary component with r2≈0.347 and θ2≈−21.98°.", "label": true }, { "paperid": "2409.01239v1", "paper_path": "./SciVer/papers/2409.01239v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01239v1_figure_5.png", "item2_path": "./SciVer/images/2409.01239v1_figure_6.png", "section": [ "2.2" ], "request_id": 65, "origin_statement": "Between September 2019 and January 2020, TOI-2379’s five ground-based transits observed exclusively in i′, R, and z′ bands each show ~0.3% depth, while TOI-2384’s six transits from December 2019 to February 2020 span g′ through z′ (including a MuSCAT2 g′ observation) with ~0.4% average depth.", "perturbed_statement": "Between September 2019 and January 2020, TOI-2379’s five ground-based transits observed exclusively in i′, R, and z′ bands each show ~0.3% depth, while TOI-2384’s six transits from December 2019 to February 2020 span g′ through z′ (including a MuSCAT2 g′ observation) with ~0.2% average depth.", "perturbed_explanation": "The perturbed statement incorrectly lowers TOI-2384’s average transit depth to ~0.2%, but Figure 6 clearly shows dips of about 0.4% (flux dropping from 1.00 to roughly 0.996), directly contradicting the claimed ~0.2% depth.", "claim": "Between September 2019 and January 2020, TOI-2379’s five ground-based transits observed exclusively in i′, R, and z′ bands each show ~0.3% depth, while TOI-2384’s six transits from December 2019 to February 2020 span g′ through z′ (including a MuSCAT2 g′ observation) with ~0.4% average depth.", "label": true }, { "paperid": "2410.17406v1", "paper_path": "./SciVer/papers/2410.17406v1.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.17406v1_figure_5.png", "item2_path": "./SciVer/images/2410.17406v1-Table3-1.png", "section": [ "5.5" ], "request_id": 66, "origin_statement": "With Aqua integrated, ProveRAG-Aqua retrieval yields 480/482 exploitation references from Aqua—matching NVD’s count—yet Aqua’s median attribution score (~0.7) in exploitation is more than double NVD’s (~0.3).", "perturbed_statement": "With Aqua integrated, ProveRAG-Aqua retrieval yields 480/482 exploitation references from Aqua—matching NVD’s 450/482 count—yet Aqua’s median attribution score (~0.7) in exploitation is more than double NVD’s (~0.3).", "perturbed_explanation": "The perturbed statement incorrectly claims NVD has only 450/482 relevant exploitation references, whereas Table 3 shows NVD actually contributed 480/482 exploitation references in the ProveRAG-Aqua setting.", "claim": "With Aqua integrated, ProveRAG-Aqua retrieval yields 480/482 exploitation references from Aqua—matching NVD’s count—yet Aqua’s median attribution score (~0.7) in exploitation is more than double NVD’s (~0.3).", "label": true }, { "paperid": "2411.01021v1", "paper_path": "./SciVer/papers/2411.01021v1.json", "claim_type": "parallel", "item1": "7", "item2": "13", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01021v1_figure_7.png", "item2_path": "./SciVer/images/2411.01021v1_figure_13.png", "section": [ "3.3.3" ], "request_id": 67, "origin_statement": 
"The RL chaser reaches the nominal trajectory in approximately 2 h (Fig 7) while consuming around 35 m/s Δv, roughly one-quarter of the ~130 m/s median Δv of the LD strategy (Fig 13).", "perturbed_statement": "The RL chaser reaches the nominal trajectory in approximately 3 h while consuming around 20 m/s Δv, roughly one-quarter of the ~130 m/s median Δv of the LD strategy.", "perturbed_explanation": "Figure 7 shows the RL-trained chaser converges to the nominal trajectory at about 2 h, not 3 h. Figure 13 indicates the RL strategy’s median Δv is approximately 35 m/s, not 20 m/s, so both the time and Δv values in the perturbed statement are incorrect.", "claim": "The RL chaser reaches the nominal trajectory in approximately 2 h (Fig 7) while consuming around 35 m/s Δv, roughly one-quarter of the ~130 m/s median Δv of the LD strategy (Fig 13).", "label": true }, { "paperid": "2411.02486v1", "paper_path": "./SciVer/papers/2411.02486v1.json", "claim_type": "parallel", "item1": "3", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02486v1_figure_3.png", "item2_path": "./SciVer/images/2411.02486v1_figure_8.png", "section": [ "4.2" ], "request_id": 68, "origin_statement": "Combining Fig.3 and Fig.8, the SVC scattering circuit uses vacuum preparation of depth 25 with 3 variational parameters, a single brickwall wavepacket layer of depth 2 with 20 parameters, and two translational-evolution layers totalling depth 4 with 24 parameters, giving depth 31 and 47 parameters.", "perturbed_statement": "Combining Fig.3 and Fig.8, the SVC scattering circuit uses vacuum preparation of depth 25 with 3 variational parameters, a single brickwall wavepacket layer of depth 2 with 16 parameters, and two translational-evolution layers totalling depth 4 with 24 parameters, giving depth 31 and 43 parameters.", "perturbed_explanation": "The perturbed statement wrongly claims the brickwall wavepacket layer has 16 parameters and totals 43 parameters. In Fig.3c the wavepacket layer clearly has 20 variational parameters, and the total parameter count from 3+20+24 is 47, not 43.", "claim": "Combining Fig.3 and Fig.8, the SVC scattering circuit uses vacuum preparation of depth 25 with 3 variational parameters, a single brickwall wavepacket layer of depth 2 with 20 parameters, and two translational-evolution layers totalling depth 4 with 24 parameters, giving depth 31 and 47 parameters.", "label": true }, { "paperid": "2411.18383v1", "paper_path": "./SciVer/papers/2411.18383v1.json", "claim_type": "parallel", "item1": "6", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.18383v1_figure_6.png", "item2_path": "./SciVer/images/2411.18383v1-Table4-1.png", "section": [ "4.2.1" ], "request_id": 70, "origin_statement": "GPT-4o fewshot correctly classified 46 of 55 positive comments (83.6%) per Figure 6 and achieved the highest weighted F1 score of 0.64 in Table 4.", "perturbed_statement": "GPT-4o fewshot correctly classified 44 of 55 positive comments (80.0%) per Figure 6 and achieved the highest weighted F1 score of 0.62 in Table 4.", "perturbed_explanation": "The perturbation is incorrect because Figure 6 shows GPT-4o fewshot correctly classifying 46 positive comments (83.6%), not 44 (80.0%). 
Additionally, Table 4 reports an F1 score of 0.64 for GPT-4o fewshot, not 0.62.", "claim": "GPT-4o fewshot correctly classified 46 of 55 positive comments (83.6%) per Figure 6 and achieved the highest weighted F1 score of 0.64 in Table 4.", "label": true }, { "paperid": "2410.10442v1", "paper_path": "./SciVer/papers/2410.10442v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.10442v1-Table5-1.png", "item2_path": "./SciVer/images/2410.10442v1-Table6-1.png", "section": [ "4.3" ], "request_id": 71, "origin_statement": "Under batch size=1, our method achieves 65.0% on ImageNet-R versus 62.7% on VisDA-2021, a 2.3 percentage point higher accuracy, while maintaining a consistent 0.5% boost over its normal setting on both datasets.", "perturbed_statement": "Under batch size=1, our method achieves 64.5% on ImageNet-R versus 62.7% on VisDA-2021, a 1.8 percentage point higher accuracy, while maintaining a consistent 0.5% boost over its normal setting on both datasets.", "perturbed_explanation": "The perturbed statement underreports the ImageNet-R batch size=1 accuracy as 64.5%, but Table 5 shows it is actually 65.0%. Consequently, the claimed 1.8% gap versus VisDA-2021 is incorrect—the true difference is 65.0%–62.7% = 2.3%.", "claim": "Under batch size=1, our method achieves 65.0% on ImageNet-R versus 62.7% on VisDA-2021, a 2.3 percentage point higher accuracy, while maintaining a consistent 0.5% boost over its normal setting on both datasets.", "label": true }, { "paperid": "2409.14762v1", "paper_path": "./SciVer/papers/2409.14762v1.json", "claim_type": "parallel", "item1": "9", "item2": "12", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14762v1_figure_9.png", "item2_path": "./SciVer/images/2409.14762v1_figure_12.png", "section": [ "3.4" ], "request_id": 72, "origin_statement": "In the Hard mode, GPT4 required 10 yes/no questions to identify the guitar, whereas BLOOM-7B reached the 20-question limit in its Hard mode and still failed to guess correctly.", "perturbed_statement": "In the Hard mode, GPT4 required 8 yes/no questions to identify the guitar, whereas BLOOM-7B reached the 20-question limit in its Hard mode and still failed to guess correctly.", "perturbed_explanation": "The perturbed statement incorrectly states that GPT4 took 8 questions in Hard mode. 
Figure 9 shows GPT4 actually asked 10 questions (Q1 through Q10) before correctly guessing 'Guitar'.", "claim": "In the Hard mode, GPT4 required 10 yes/no questions to identify the guitar, whereas BLOOM-7B reached the 20-question limit in its Hard mode and still failed to guess correctly.", "label": true }, { "paperid": "2410.03437v2", "paper_path": "./SciVer/papers/2410.03437v2.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.03437v2_figure_1.png", "item2_path": "./SciVer/images/2410.03437v2-Table1-1.png", "section": [ "3" ], "request_id": 73, "origin_statement": "Zebra tokenizes spatial fields into K discrete VQ-VAE codes and uniquely among four compared methods (CAPE, CODA, MPP, Zebra) supports all three: adaptive conditioning, temporal conditioning, and in-context inference.", "perturbed_statement": "Zebra tokenizes spatial fields into K discrete VQ-VAE codes and both MPP and Zebra among four compared methods (CAPE, CODA, MPP, Zebra) support all three: adaptive conditioning, temporal conditioning, and in-context inference.", "perturbed_explanation": "The perturbed statement incorrectly claims that MPP supports adaptive conditioning and in-context inference. However, Table 1 shows MPP has ✗ for adaptive conditioning and ✗ for in-context, so it does not support those features.", "claim": "Zebra tokenizes spatial fields into K discrete VQ-VAE codes and uniquely among four compared methods (CAPE, CODA, MPP, Zebra) supports all three: adaptive conditioning, temporal conditioning, and in-context inference.", "label": true }, { "paperid": "2410.17694v1", "paper_path": "./SciVer/papers/2410.17694v1.json", "claim_type": "parallel", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.17694v1_figure_6.png", "item2_path": "./SciVer/images/2410.17694v1-Table2-1.png", "section": [ "6" ], "request_id": 77, "origin_statement": "By leveraging both positive and negative user feedback (Fig. 6), SynthRAG-refinement raised its RM win rate by 16.8 percentage points (from 41.60% to 58.40%) and improved its LLM score by 0.08, from 4.52 to 4.60.", "perturbed_statement": "By leveraging both positive and negative user feedback (Fig. 6), SynthRAG-refinement raised its RM win rate by 20.4 percentage points (from 41.60% to 62.00%) and improved its LLM score by 0.08, from 4.52 to 4.60.", "perturbed_explanation": "The perturbed statement incorrectly claims an RM win rate of 62.00% and an increase of 20.4 percentage points. Table 2 shows the actual refined RM win rate is 58.40%, a 16.8-point increase from 41.60%.", "claim": "By leveraging both positive and negative user feedback (Fig. 
6), SynthRAG-refinement raised its RM win rate by 16.8 percentage points (from 41.60% to 58.40%) and improved its LLM score by 0.08, from 4.52 to 4.60.", "label": true }, { "paperid": "2410.02401v5", "paper_path": "./SciVer/papers/2410.02401v5.json", "claim_type": "parallel", "item1": "3", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02401v5_figure_3.png", "item2_path": "./SciVer/images/2410.02401v5_figure_5(c).png", "section": [ "4.3" ], "request_id": 80, "origin_statement": "Although SynCo stalls at around 43% proxy accuracy by epoch 100 compared to MoCo-(τ=0.07)'s ~67% (Figure 3), it outperforms MoCo-v2 by 0.68% in Top-1 downstream accuracy at queue size 65 536 (48.42% vs 47.74%) (Figure 5c).", "perturbed_statement": "Although SynCo stalls at around 43% proxy accuracy by epoch 100 compared to MoCo-(τ=0.07)'s ~67% (Figure 3), it outperforms MoCo-v2 by 0.68% in Top-1 downstream accuracy at queue size 4 096 (48.42% vs 47.74%) (Figure 5c).", "perturbed_explanation": "The perturbed statement is wrong because at queue size 4 096 (Figure 5c), SynCo achieves 48.30% Top-1 while MoCo-v2 achieves 50.10%, so SynCo neither reaches 48.42% nor outperforms MoCo-v2 at that queue size.", "claim": "Although SynCo stalls at around 43% proxy accuracy by epoch 100 compared to MoCo-(τ=0.07)'s ~67% (Figure 3), it outperforms MoCo-v2 by 0.68% in Top-1 downstream accuracy at queue size 65 536 (48.42% vs 47.74%) (Figure 5c).", "label": true }, { "paperid": "2410.09343v1", "paper_path": "./SciVer/papers/2410.09343v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09343v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.09343v1_figure_4(b).png", "section": [ "5.1" ], "request_id": 83, "origin_statement": "With a high PR AUC of 0.96, the retriever maintains precision above 0.9 up to 0.9 recall, and applying a recall threshold of 0.8 yields a 49.2% validation accuracy—about 0.75% higher than the 48.45% zero-shot baseline.", "perturbed_statement": "With a high PR AUC of 0.86, the retriever maintains precision above 0.9 up to 0.9 recall, and applying a recall threshold of 0.8 yields a 49.2% validation accuracy—about 0.75% higher than the 48.45% zero-shot baseline.", "perturbed_explanation": "Figure 4(a) clearly shows the PR AUC is 0.96, not 0.86, so the perturbed statement incorrectly lowers the AUC value.", "claim": "With a high PR AUC of 0.96, the retriever maintains precision above 0.9 up to 0.9 recall, and applying a recall threshold of 0.8 yields a 49.2% validation accuracy—about 0.75% higher than the 48.45% zero-shot baseline.", "label": true }, { "paperid": "2409.03735v1", "paper_path": "./SciVer/papers/2409.03735v1.json", "claim_type": "parallel", "item1": "7(a)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.03735v1_figure_7(a).png", "item2_path": "./SciVer/images/2409.03735v1-Table1-1.png", "section": [ "6.2" ], "request_id": 84, "origin_statement": "In IoT, both the tulu-2-7B base and its AWQ-quantized 7B counterpart rate a fitness tracker sharing the owner’s time-at-home in an emergency as strongly unacceptable, whereas the tulu-2-13B base and its AWQ-quantized 13B counterpart rate the same flow as somewhat acceptable.", "perturbed_statement": "In IoT, both the tulu-2-7B base and its AWQ-quantized 7B counterpart rate a fitness tracker sharing the owner’s time-at-home in an emergency as strongly 
unacceptable, whereas the tulu-2-13B base and its AWQ-quantized 13B counterpart rate the same flow as strongly acceptable.", "perturbed_explanation": "The perturbation falsely states that the 13B models rate the flow as strongly acceptable, but Figure 7 shows both tulu-2-13B and tulu-2-13B-AWQ actually rate it as somewhat acceptable, not strongly acceptable.", "claim": "In IoT, both the tulu-2-7B base and its AWQ-quantized 7B counterpart rate a fitness tracker sharing the owner’s time-at-home in an emergency as strongly unacceptable, whereas the tulu-2-13B base and its AWQ-quantized 13B counterpart rate the same flow as somewhat acceptable.", "label": true }, { "paperid": "2410.19886v1", "paper_path": "./SciVer/papers/2410.19886v1.json", "claim_type": "parallel", "item1": "9", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19886v1_figure_9.png", "item2_path": "./SciVer/images/2410.19886v1-Table3-1.png", "section": [ "4.3" ], "request_id": 86, "origin_statement": "By combining the C and T kernels, the model reduces RMSE to 20.42 cycles (Fig. 9d), a 68% decrease from the RBF kernel’s 64.24 cycles (Fig. 9a), while the optimal σT parameter drops from 19.82 in the conventional RBF to 0.255 in the modified kernel (Table 3).", "perturbed_statement": "By combining the C and T kernels, the model reduces RMSE to 20.42 cycles (Fig. 9d), a 68% decrease from the RBF kernel’s 64.24 cycles (Fig. 9a), while the optimal σT parameter drops from 18.40 in the conventional RBF to 0.255 in the modified kernel (Table 3).", "perturbed_explanation": "The perturbed statement wrongly cites σT for the conventional RBF kernel as 18.40. Table 3 shows σT=19.82 for the conventional RBF and σDOD=18.40, so the value 18.40 does not correspond to σT.", "claim": "By combining the C and T kernels, the model reduces RMSE to 20.42 cycles (Fig. 9d), a 68% decrease from the RBF kernel’s 64.24 cycles (Fig. 9a), while the optimal σT parameter drops from 19.82 in the conventional RBF to 0.255 in the modified kernel (Table 3).", "label": true }, { "paperid": "2411.01703v1", "paper_path": "./SciVer/papers/2411.01703v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01703v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.01703v1_figure_5(b).png", "section": [ "3.2" ], "request_id": 93, "origin_statement": "At ε=32/255, the optimized visual guardrail reduces attack success to 32%, which is only marginally lower than the 34% success at text guardrail length L=16, illustrating similar robustness from mid-range hyperparameters in both modalities.", "perturbed_statement": "At ε=32/255, the optimized visual guardrail reduces attack success to 32%, which is only marginally lower than the 34% success at text guardrail length L=8, illustrating similar robustness from mid-range hyperparameters in both modalities.", "perturbed_explanation": "This is incorrect because at a text guardrail length of L=8 the observed attack success ratio is about 41%, not 34% as stated. 
The 34% figure corresponds to L=16, not L=8, according to the right-hand plot.", "claim": "At ε=32/255, the optimized visual guardrail reduces attack success to 32%, which is only marginally lower than the 34% success at text guardrail length L=16, illustrating similar robustness from mid-range hyperparameters in both modalities.", "label": true }, { "paperid": "2409.01672v2", "paper_path": "./SciVer/papers/2409.01672v2.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01672v2_figure_4.png", "item2_path": "./SciVer/images/2409.01672v2_figure_5.png", "section": [ "4.4.2" ], "request_id": 94, "origin_statement": "In Fig. 5 at k=20, FMR recovers about 10.5% of the top-k features from training in the test set versus nearly 0% for the fine-tuning baseline; Fig. 4’s heatmaps (column c) focus on main object regions, while baseline maps (column b) often highlight background.", "perturbed_statement": "In Fig. 5 at k=20, FMR recovers about 6% of the top-k features from training in the test set versus 5% for the fine-tuning baseline; Fig. 4’s heatmaps (column c) focus on main object regions, while baseline maps (column b) often highlight background.", "perturbed_explanation": "The perturbed statement misreports the quantitative overlap at k=20. In the original Fig. 5, FMR actually achieves around 10.5% overlap, and the fine-tuning baseline remains near 0% (not 5%). Thus both percentages in the perturbation contradict the plotted values.", "claim": "In Fig. 5 at k=20, FMR recovers about 10.5% of the top-k features from training in the test set versus nearly 0% for the fine-tuning baseline; Fig. 4’s heatmaps (column c) focus on main object regions, while baseline maps (column b) often highlight background.", "label": true }, { "paperid": "2410.04803v2", "paper_path": "./SciVer/papers/2410.04803v2.json", "claim_type": "parallel", "item1": "3", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04803v2-Table3-1.png", "item2_path": "./SciVer/images/2410.04803v2-Table6-1.png", "section": [ "4.5" ], "request_id": 96, "origin_statement": "Timer-XL’s Solar-Energy MSE drops from 0.198 (Table 3) to 0.162 when using RoPE plus variable embedding (Table 6), an 18.2% reduction.", "perturbed_statement": "Timer-XL’s Solar-Energy MSE drops from 0.198 (Table 3) to 0.172 when using RoPE plus variable embedding (Table 6), a 13.1% reduction.", "perturbed_explanation": "Table 6 reports a Solar-Energy MSE of 0.162 for RoPE with variable embedding, not 0.172. 
Consequently, both the stated target MSE and the 13.1% reduction are incorrect compared to the actual 0.036 (18.2%) decrease.", "claim": "Timer-XL’s Solar-Energy MSE drops from 0.198 (Table 3) to 0.162 when using RoPE plus variable embedding (Table 6), an 18.2% reduction.", "label": true }, { "paperid": "2411.12812v1", "paper_path": "./SciVer/papers/2411.12812v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.12812v1_figure_1.png", "item2_path": "./SciVer/images/2411.12812v1_figure_2.png", "section": [ "1" ], "request_id": 99, "origin_statement": "Traditional insulin delivery (Figure 1) requires two phases—basal and bolus—whereas DIETS (Figure 2) employs three modules (dietary analysis, insulin delivery determination, glycemic prediction and retitration), adding an automated feedback loop absent in standard therapy.", "perturbed_statement": "Traditional insulin delivery (Figure 1) requires three phases—basal, bolus, and postprandial microdosing—whereas DIETS (Figure 2) employs four modules (dietary analysis, insulin delivery determination, risk assessment, glycemic prediction and retitration), adding an automated feedback loop absent in standard therapy.", "perturbed_explanation": "The perturbed statement incorrectly claims that Figure 1 shows three insulin phases (it only depicts two: basal and bolus) and that Figure 2 has four modules (it actually has three: dietary analysis, insulin delivery determination, and glycemic prediction and retitration).", "claim": "Traditional insulin delivery (Figure 1) requires two phases—basal and bolus—whereas DIETS (Figure 2) employs three modules (dietary analysis, insulin delivery determination, glycemic prediction and retitration), adding an automated feedback loop absent in standard therapy.", "label": true }, { "paperid": "2411.14516v1", "paper_path": "./SciVer/papers/2411.14516v1.json", "claim_type": "parallel", "item1": "4(c)", "item2": "4(g)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14516v1_figure_4(c).png", "item2_path": "./SciVer/images/2411.14516v1_figure_4(g).png", "section": [ "6.2" ], "request_id": 100, "origin_statement": "At the highest memorization counts shown, CIFAR SSIM is 0.8035 at 20000 samples while MRI SSIM drops to 0.7530 at 3005 samples, indicating faster quality degradation for MRI backdoors per image.", "perturbed_statement": "At the highest memorization counts shown, CIFAR SSIM is 0.9035 at 20000 samples while MRI SSIM drops to 0.7530 at 3005 samples, indicating faster quality degradation for MRI backdoors per image.", "perturbed_explanation": "The perturbed statement reports a CIFAR SSIM of 0.9035 at 20000 samples, but Figure 4’s top row shows the SSIM is actually 0.8035 at 20000 memorized samples. 
This discrepancy makes the perturbed claim incorrect.", "claim": "At the highest memorization counts shown, CIFAR SSIM is 0.8035 at 20000 samples while MRI SSIM drops to 0.7530 at 3005 samples, indicating faster quality degradation for MRI backdoors per image.", "label": true }, { "paperid": "2411.05608v1", "paper_path": "./SciVer/papers/2411.05608v1.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05608v1_figure_3.png", "item2_path": "./SciVer/images/2411.05608v1_figure_5.png", "section": [ "4.1" ], "request_id": 102, "origin_statement": "The observed median absolute Galactic latitude for the combined Baikal‐GVD and IceCube events, marked by the red line at ≃47°, is about 13° lower than the simulated peak near 60°, reflecting the Galactic excess seen in the sky map, where roughly 35% of high-energy events cluster within |b|<20°.", "perturbed_statement": "The observed median absolute Galactic latitude for the combined events (red line) is ≃57°, only about 3° below the simulated peak near 60°, reflecting the Galactic excess in Fig. 5, where roughly 50% of high-energy events are clustered within |b|<20°.", "perturbed_explanation": "This statement is incorrect because in Fig. 3 the red line shows an observed median |b| of about 47°, not 57°, and Fig. 5 visually indicates that around one-third (~35%) of events lie within |b|<20°, not half (50%).", "claim": "The observed median absolute Galactic latitude for the combined Baikal‐GVD and IceCube events, marked by the red line at ≃47°, is about 13° lower than the simulated peak near 60°, reflecting the Galactic excess seen in the sky map, where roughly 35% of high-energy events cluster within |b|<20°.", "label": true }, { "paperid": "2410.04088v1", "paper_path": "./SciVer/papers/2410.04088v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.04088v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.04088v1-Table2-1.png", "section": [ "4.1" ], "request_id": 107, "origin_statement": "After 50 epochs, the DN-DETR-DC5-R50 + CRED variant converges to 45.8 AP, just 0.4 points below the 46.2 AP of DN-DETR-R50 + CRED, while using 94G FLOPs (vs 103G) and achieving 24 FPS (vs 23).", "perturbed_statement": "After 50 epochs, the DN-DETR-DC5-R50 + CRED variant converges to 45.8 AP, just 0.4 points below the 46.2 AP of DN-DETR-R50 + CRED, while using 90G FLOPs (vs 103G) and achieving 25 FPS (vs 23).", "perturbed_explanation": "The perturbed statement incorrectly states 90G FLOPs and 25 FPS. 
According to Table 2, DN-DETR-DC5-R50 + CRED actually uses 94G FLOPs and achieves 24 FPS, not 90G FLOPs and 25 FPS.", "claim": "After 50 epochs, the DN-DETR-DC5-R50 + CRED variant converges to 45.8 AP, just 0.4 points below the 46.2 AP of DN-DETR-R50 + CRED, while using 94G FLOPs (vs 103G) and achieving 24 FPS (vs 23).", "label": true }, { "paperid": "2411.01299v1", "paper_path": "./SciVer/papers/2411.01299v1.json", "claim_type": "parallel", "item1": "16", "item2": "17", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01299v1_figure_16.png", "item2_path": "./SciVer/images/2411.01299v1_figure_17.png", "section": [ "4.2" ], "request_id": 108, "origin_statement": "In the Random Forest model, Max Position and Max Load together account for about 56% of feature importance (31% + 25%), but in the Decision Tree, Max Position alone captures nearly 88%, with the next feature at under 12%, indicating a far more concentrated reliance.", "perturbed_statement": "In the Random Forest model, Max Position and Max Load together account for about 65% of feature importance (35% + 30%), but in the Decision Tree, Max Position alone captures nearly 88%, with the next feature at under 12%, indicating a far more concentrated reliance.", "perturbed_explanation": "The perturbed statement misreports the Random Forest importances: Figure 16 shows Max Position at about 31% and Max Load at about 25% (total ~56%), not 35% and 30% (total ~65%). This contradicts the actual values in the Random Forest feature importance plot.", "claim": "In the Random Forest model, Max Position and Max Load together account for about 56% of feature importance (31% + 25%), but in the Decision Tree, Max Position alone captures nearly 88%, with the next feature at under 12%, indicating a far more concentrated reliance.", "label": true }, { "paperid": "2410.23992v1", "paper_path": "./SciVer/papers/2410.23992v1.json", "claim_type": "parallel", "item1": "3(b)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23992v1_figure_3(b).png", "item2_path": "./SciVer/images/2410.23992v1_figure_3(c).png", "section": [ "5.3" ], "request_id": 109, "origin_statement": "In Figure 3(b) (-w/o NHC), the black square markers are confined to a single cluster around node indices 15–30 with values ~350–450, while in Figure 3(c) (-w/o HC), black square markers form two clusters spanning indices roughly 10–25 and 70–90, demonstrating broader grouping by the node constraint.", "perturbed_statement": "In Figure 3(b) (-w/o NHC), the black square markers are confined to a single cluster around node indices 20–40 with values ~350–450, while in Figure 3(c) (-w/o HC), black square markers form two clusters spanning indices roughly 10–25 and 70–90, demonstrating broader grouping by the node constraint.", "perturbed_explanation": "The perturbed statement incorrectly shifts the cluster in Figure 3(b) to node indices 20–40. 
In the actual plot, the black squares appear between indices 15 and 30, so the claimed 20–40 range contradicts the image.", "claim": "In Figure 3(b) (-w/o NHC), the black square markers are confined to a single cluster around node indices 15–30 with values ~350–450, while in Figure 3(c) (-w/o HC), black square markers form two clusters spanning indices roughly 10–25 and 70–90, demonstrating broader grouping by the node constraint.", "label": true }, { "paperid": "2411.00915v1", "paper_path": "./SciVer/papers/2411.00915v1.json", "claim_type": "parallel", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00915v1_figure_5.png", "item2_path": "./SciVer/images/2411.00915v1_figure_7.png", "section": [ "3.2" ], "request_id": 114, "origin_statement": "Fusing six video classification models into one LoRA adapter reduces accuracy to about 91%, compared to ~96% for six image classification models, while the mode switch alone introduces a 53 ms delay, representing 64% of the 83 ms merged inference time for three 256-token requests.", "perturbed_statement": "Fusing six video classification models reduces accuracy to about 88%, compared to about 98% for six image classification models, while the mode switch alone introduces a 30 ms delay, representing 64% of the 83 ms merged inference time for three 256-token requests.", "perturbed_explanation": "This statement is incorrect because Figure 5 shows that fusing six video classification models yields around 91% accuracy (not 88%) and six image classification models retain about 96% accuracy (not 98%). Figure 7 shows the mode switch delay is 53 ms, not 30 ms.", "claim": "Fusing six video classification models into one LoRA adapter reduces accuracy to about 91%, compared to ~96% for six image classification models, while the mode switch alone introduces a 53 ms delay, representing 64% of the 83 ms merged inference time for three 256-token requests.", "label": true }, { "paperid": "2411.12078v1", "paper_path": "./SciVer/papers/2411.12078v1.json", "claim_type": "parallel", "item1": "5", "item2": "9", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.12078v1_figure_5.png", "item2_path": "./SciVer/images/2411.12078v1-Table9-1.png", "section": [ "4.3" ], "request_id": 120, "origin_statement": "In the deco_hop task, f-RAG (soft+GA) only reaches about 0.57 average Top-10 score by 10,000 oracle calls, yet it achieves the highest average novelty of 0.924 among all ablated variants in Table 9.", "perturbed_statement": "In the deco_hop task, f-RAG (soft+GA) only reaches about 0.57 average Top-10 score by 10,000 oracle calls, yet it achieves the highest average novelty of 0.860 among all ablated variants in Table 9.", "perturbed_explanation": "The perturbed statement incorrectly claims an average novelty of 0.860 for f-RAG (soft+GA), but Table 9 shows its novelty is actually 0.924, not 0.860.", "claim": "In the deco_hop task, f-RAG (soft+GA) only reaches about 0.57 average Top-10 score by 10,000 oracle calls, yet it achieves the highest average novelty of 0.924 among all ablated variants in Table 9.", "label": true }, { "paperid": "2411.16459v1", "paper_path": "./SciVer/papers/2411.16459v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16459v1_figure_3.png", "item2_path": "./SciVer/images/2411.16459v1_figure_4.png", "section": [ "4" ], "request_id": 122, "origin_statement": "Although 
the integrated N2H+ intensity map shows peak values of about 2.0 K km s−1 localized in the inner cloud at velocities of 43–45 km s−1, this high-velocity component comprises ∼3% of all fitted spectra, whereas the 41–42 km s−1 component reaches ∼15%.", "perturbed_statement": "Although the integrated N2H+ intensity map shows peak values of about 2.0 K km s−1 localized in the inner cloud at velocities of 43–45 km s−1, this high-velocity component comprises ∼15% of all fitted spectra, whereas the 41–42 km s−1 component reaches ∼3%.", "perturbed_explanation": "The perturbation swaps the actual percentages: the velocity histogram (Figure 4, top panel) shows the 43–45 km s−1 component comprises only ∼3% of fitted spectra (yellow bins), while the 41–42 km s−1 component comprises ∼15% (pink bins). This contradicts the stated percentages.", "claim": "Although the integrated N2H+ intensity map shows peak values of about 2.0 K km s−1 localized in the inner cloud at velocities of 43–45 km s−1, this high-velocity component comprises ∼3% of all fitted spectra, whereas the 41–42 km s−1 component reaches ∼15%.", "label": true }, { "paperid": "2409.04384v1", "paper_path": "./SciVer/papers/2409.04384v1.json", "claim_type": "parallel", "item1": "6(f)", "item2": "7(e)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04384v1_figure_6(f).png", "item2_path": "./SciVer/images/2409.04384v1_figure_7(e).png", "section": [ "4.3" ], "request_id": 123, "origin_statement": "SGS achieves 28.4 dB PSNR in the 4×4×4 super-resolution on FFHQ but only 14.82 dB in the random-mask inpainting on FFHQ, a drop of about 13.6 dB in quality.", "perturbed_statement": "SGS achieves 14.82 dB PSNR in the 4×4×4 super-resolution on FFHQ but 28.4 dB in the random-mask inpainting on FFHQ, a gain of 13.6 dB in quality.", "perturbed_explanation": "This is incorrect because the reported PSNR for SGS in super-resolution is actually 28.4 dB (not 14.82 dB) and for inpainting is 14.82 dB (not 28.4 dB), so the values have been swapped.", "claim": "SGS achieves 28.4 dB PSNR in the 4×4×4 super-resolution on FFHQ but only 14.82 dB in the random-mask inpainting on FFHQ, a drop of about 13.6 dB in quality.", "label": true }, { "paperid": "2410.04422v5", "paper_path": "./SciVer/papers/2410.04422v5.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.04422v5_figure_1(a).png", "item2_path": "./SciVer/images/2410.04422v5_figure_1(b).png", "section": [ "1" ], "request_id": 125, "origin_statement": "GPT-4o achieves nearly 100% accuracy on direct retrieval tasks such as 'find the Key with Value 5', but for logic-based tasks like 'find the Key with the Value bigger than 7', accuracy drops from about 88% at 0.1k tokens to nearly 0% at 30k context length.", "perturbed_statement": "GPT-4o achieves nearly 100% accuracy on direct retrieval tasks such as 'find the Key with Value 5', but for logic-based tasks like 'find the Key with the Value bigger than 7', accuracy drops from about 95% at 0.1k tokens to nearly 0% at 30k context length.", "perturbed_explanation": "The perturbed statement incorrectly claims logic-based accuracy is 95% at 0.1k tokens. 
Figure 1b shows the logic-based retrieval accuracy is approximately 88% at 0.1k tokens, not 95%.", "claim": "GPT-4o achieves nearly 100% accuracy on direct retrieval tasks such as 'find the Key with Value 5', but for logic-based tasks like 'find the Key with the Value bigger than 7', accuracy drops from about 88% at 0.1k tokens to nearly 0% at 30k context length.", "label": true }, { "paperid": "2409.12479v1", "paper_path": "./SciVer/papers/2409.12479v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.12479v1_figure_2(b).png", "item2_path": "./SciVer/images/2409.12479v1-Table4-1.png", "section": [ "4.5.1" ], "request_id": 127, "origin_statement": "Increasing enrollment from 1 to 10 OOD samples boosts average AUC from roughly 88.8% to 90.8% (Figure 2), enabling MMEL to reach 90.70% AUC in Table 4, nearly matching ICE’s 90.90% despite using only ten samples versus 80 million.", "perturbed_statement": "Increasing enrollment from 1 to 10 OOD samples boosts average AUC from roughly 88.8% to 90.8% (Figure 2), enabling MMEL to reach 91.70% AUC in Table 4, surpassing ICE’s 90.90% despite using only ten samples versus 80 million.", "perturbed_explanation": "The perturbed statement incorrectly claims MMEL achieves 91.70% AUC. Table 4 reports MMEL’s AUC as 90.70%, not 91.70%, so the altered AUC figure contradicts the table data.", "claim": "Increasing enrollment from 1 to 10 OOD samples boosts average AUC from roughly 88.8% to 90.8% (Figure 2), enabling MMEL to reach 90.70% AUC in Table 4, nearly matching ICE’s 90.90% despite using only ten samples versus 80 million.", "label": true }, { "paperid": "2409.05061v2", "paper_path": "./SciVer/papers/2409.05061v2.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05061v2_figure_3.png", "item2_path": "./SciVer/images/2409.05061v2-Table2-1.png", "section": [ "4.1" ], "request_id": 129, "origin_statement": "The hierarchical solution framework in Figure 3 comprises five components, while Table 2 lists three tentative plan contexts; notably, only the allocation decision's tentative plan discards partly, whereas the other two are discarded entirely.", "perturbed_statement": "The hierarchical solution framework in Figure 3 comprises five components, while Table 2 lists three tentative plan contexts; notably, only the allocation decision's tentative plan discards entirely, whereas the other two are discarded partly.", "perturbed_explanation": "Table 2 specifies that the allocation decision tentative plan is \"Discarded: Partly,\" while the feasibility check and VFA feature plans are discarded entirely. 
The perturbed statement wrongly claims the opposite discard patterns.", "claim": "The hierarchical solution framework in Figure 3 comprises five components, while Table 2 lists three tentative plan contexts; notably, only the allocation decision's tentative plan discards partly, whereas the other two are discarded entirely.", "label": true }, { "paperid": "2409.16016v2", "paper_path": "./SciVer/papers/2409.16016v2.json", "claim_type": "parallel", "item1": "6(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16016v2_figure_6(a).png", "item2_path": "./SciVer/images/2409.16016v2_figure_6(b).png", "section": [ "2.3.2" ], "request_id": 132, "origin_statement": "In the vessel segmentation overlays (Fig.6a) all vessels appear uniformly in blue, while in the artery–vein segmentation overlays (Fig.6b) veins (blue) occupy about 60% of segmented vessel pixels, arteries (brown) about 30%, and crossings (white) about 10%.", "perturbed_statement": "In the vessel segmentation overlays (Fig.6a) all vessels appear uniformly in blue, while in the artery–vein segmentation overlays (Fig.6b) arteries (brown) occupy about 60% of segmented vessel pixels, veins (blue) about 30%, and crossings (white) about 10%.", "perturbed_explanation": "The perturbed statement misattributes the dominant share of segmented vessel pixels to arteries. In Fig.6b, the blue overlay (veins) clearly covers roughly 60% of the vessel area, not the brown overlay (arteries), which is closer to 30%.", "claim": "In the vessel segmentation overlays (Fig.6a) all vessels appear uniformly in blue, while in the artery–vein segmentation overlays (Fig.6b) veins (blue) occupy about 60% of segmented vessel pixels, arteries (brown) about 30%, and crossings (white) about 10%.", "label": true }, { "paperid": "2410.04203v1", "paper_path": "./SciVer/papers/2410.04203v1.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04203v1-Table2-1.png", "item2_path": "./SciVer/images/2410.04203v1-Table4-1.png", "section": [ "4.1" ], "request_id": 134, "origin_statement": "LN+Mix yields 47.45% LC WR (+3.18) on AlpacaEval (GPT4), whereas full RainbowPO achieves 51.66% LC WR, a further 4.21-point increase, demonstrating that integrating all three components surpasses the two-component variant.", "perturbed_statement": "LN+Mix yields 47.45% LC WR (+3.18) on AlpacaEval (GPT4), whereas full RainbowPO achieves 52.66% LC WR, a further 5.21-point increase, demonstrating that integrating all three components surpasses the two-component variant.", "perturbed_explanation": "Table 4 reports RainbowPO’s LC WR as 51.66%, not 52.66%, so the perturbed statement’s RainbowPO percentage and calculated gain (5.21) contradict the actual 51.66% and 4.21-point increase.", "claim": "LN+Mix yields 47.45% LC WR (+3.18) on AlpacaEval (GPT4), whereas full RainbowPO achieves 51.66% LC WR, a further 4.21-point increase, demonstrating that integrating all three components surpasses the two-component variant.", "label": true }, { "paperid": "2411.04709v1", "paper_path": "./SciVer/papers/2411.04709v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.04709v1-Table3-1.png", "item2_path": "./SciVer/images/2411.04709v1-Table4-1.png", "section": [ "5.3" ], "request_id": 136, "origin_statement": "Existing fake-image detectors like UnivFD achieve only 53.1% average 
accuracy on identifying image-generated videos (Table 3), whereas our VideoMAE-based detector attains 96.3% average same-domain accuracy in classifying videos (Table 4), demonstrating a 43.2-point performance boost.", "perturbed_statement": "Existing fake-image detectors like UnivFD achieve only 53.1% average accuracy on identifying image-generated videos (Table 3), whereas our VideoMAE-based detector attains 87.2% average same-domain accuracy in classifying videos (Table 4), demonstrating a 34.1-point performance boost.", "perturbed_explanation": "The perturbed statement wrongly cites 87.2% as the average same-domain accuracy. According to Table 4, 87.2% is actually the cross-domain average accuracy; the correct same-domain average accuracy is 96.3%.", "claim": "Existing fake-image detectors like UnivFD achieve only 53.1% average accuracy on identifying image-generated videos (Table 3), whereas our VideoMAE-based detector attains 96.3% average same-domain accuracy in classifying videos (Table 4), demonstrating a 43.2-point performance boost.", "label": true }, { "paperid": "2411.02640v1", "paper_path": "./SciVer/papers/2411.02640v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.02640v1-Table1-1.png", "item2_path": "./SciVer/images/2411.02640v1-Table2-1.png", "section": [ "3" ], "request_id": 139, "origin_statement": "The C2H5OH mechanism has 129 species, roughly 2.4× the 53 species in GRI-Mech 3.0, and the AMD MI250X’s 128 GB HBM2e memory provides nearly 1 GB per species in the ethanol mechanism.", "perturbed_statement": "The C2H5OH mechanism has 100 species, roughly 2.4× the 53 species in GRI-Mech 3.0, and the AMD MI250X’s 128 GB HBM2e memory provides nearly 1 GB per species in the ethanol mechanism.", "perturbed_explanation": "The perturbed statement incorrectly states that the C2H5OH mechanism has 100 species. 
Table 1 shows it actually has 129 species, so the species count is wrong.", "claim": "The C2H5OH mechanism has 129 species, roughly 2.4× the 53 species in GRI-Mech 3.0, and the AMD MI250X’s 128 GB HBM2e memory provides nearly 1 GB per species in the ethanol mechanism.", "label": true }, { "paperid": "2410.09133v1", "paper_path": "./SciVer/papers/2410.09133v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.09133v1-Table2-1.png", "item2_path": "./SciVer/images/2410.09133v1-Table3-1.png", "section": [ "4.2" ], "request_id": 142, "origin_statement": "On the electricity dataset, MVG-CRPS trains per epoch in 0.38 min—0.02 min faster than the log-score's 0.40 min—and achieves a CRPS of 0.0372 in the univariate N-HiTS model, beating the log-score's 0.0409 by about 9%.", "perturbed_statement": "On the electricity dataset, MVG-CRPS trains per epoch in 0.38 min—0.02 min faster than the log-score's 0.40 min—and achieves a CRPS of 0.0472 in the univariate N-HiTS model, beating the log-score's 0.0409 by about 9%.", "perturbed_explanation": "The perturbed statement claims MVG-CRPS achieves a CRPS of 0.0472, but Table 3 reports the actual CRPS for the electricity dataset under MVG-CRPS as 0.0372, making the 0.0472 figure incorrect.", "claim": "On the electricity dataset, MVG-CRPS trains per epoch in 0.38 min—0.02 min faster than the log-score's 0.40 min—and achieves a CRPS of 0.0372 in the univariate N-HiTS model, beating the log-score's 0.0409 by about 9%.", "label": true }, { "paperid": "2410.07196v1", "paper_path": "./SciVer/papers/2410.07196v1.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07196v1_figure_1.png", "item2_path": "./SciVer/images/2410.07196v1-Table1-1.png", "section": [ "1" ], "request_id": 148, "origin_statement": "EEGUnity aggregates EEG datasets from 11 global locations, while among the seven common EEG tools listed, only MNE-Python is open source on Python—the other four open-source tools (SPM, EEGLAB, FieldTrip, Brainstorm) run exclusively on MATLAB.", "perturbed_statement": "EEGUnity aggregates EEG datasets from 13 global locations, while among the seven common EEG tools listed, only MNE-Python is open source on Python—the other four open-source tools (SPM, EEGLAB, FieldTrip, Brainstorm) run exclusively on MATLAB.", "perturbed_explanation": "The perturbation claims EEGUnity aggregates from 13 locations, but Figure 1 shows only 11 dataset icons connecting to EEGUnity, so the number 13 contradicts the image.", "claim": "EEGUnity aggregates EEG datasets from 11 global locations, while among the seven common EEG tools listed, only MNE-Python is open source on Python—the other four open-source tools (SPM, EEGLAB, FieldTrip, Brainstorm) run exclusively on MATLAB.", "label": true }, { "paperid": "2411.07239v1", "paper_path": "./SciVer/papers/2411.07239v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07239v1_figure_2(b).png", "item2_path": "./SciVer/images/2411.07239v1-Table1-1.png", "section": [ "4.1", "4.1.1" ], "request_id": 151, "origin_statement": "The LoRA-D2NO fine-tuned model (green) nearly matches the exact u(x) peak of roughly 0.075 at x≈3.5, corresponding to the lowest average relative error of 3.11% in Table 1, compared to 3.49% for the single-operator LoRA (blue).", "perturbed_statement": 
"The LoRA-D2NO fine-tuned model (green) nearly matches the exact u(x) peak of roughly 0.075 at x≈3.5, corresponding to the lowest average relative error of 2.11% in Table 1, compared to 3.49% for the single-operator LoRA (blue).", "perturbed_explanation": "Table 1 reports an average relative error of 3.11% for the PI-LoRA model pretrained with D2NO, not 2.11%, so the stated 2.11% contradicts the actual tabulated value.", "claim": "The LoRA-D2NO fine-tuned model (green) nearly matches the exact u(x) peak of roughly 0.075 at x≈3.5, corresponding to the lowest average relative error of 3.11% in Table 1, compared to 3.49% for the single-operator LoRA (blue).", "label": true }, { "paperid": "2410.19523v1", "paper_path": "./SciVer/papers/2410.19523v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19523v1_figure_3.png", "item2_path": "./SciVer/images/2410.19523v1_figure_4.png", "section": [ "6" ], "request_id": 153, "origin_statement": "In BRCA samples (Figure 3c), GE TDP peaks at about 0.11 while CN TDP reaches 1.0; this is mirrored in Figure 4 heatmaps, where the 5q chromosome arm exhibits a CN TDP near 1.0 and a GE TDP around 0.10 across multiple Hallmark pathways.", "perturbed_statement": "In BRCA samples (Figure 3c), GE TDP peaks at about 0.20 while CN TDP only reaches 0.8; this is mirrored in Figure 4 heatmaps, where the 5q chromosome arm exhibits a CN TDP near 0.8 and a GE TDP around 0.05 across multiple Hallmark pathways.", "perturbed_explanation": "The perturbation is incorrect because in Figure 3c the maximum GE TDP is about 0.11 (not 0.20) and the CN TDP reaches up to 1.0 (not only 0.8). In Figure 4 the 5q arm shows a CN TDP close to 1.0 and a GE TDP near 0.10, not 0.8 and 0.05.", "claim": "In BRCA samples (Figure 3c), GE TDP peaks at about 0.11 while CN TDP reaches 1.0; this is mirrored in Figure 4 heatmaps, where the 5q chromosome arm exhibits a CN TDP near 1.0 and a GE TDP around 0.10 across multiple Hallmark pathways.", "label": true }, { "paperid": "2410.18529v2", "paper_path": "./SciVer/papers/2410.18529v2.json", "claim_type": "parallel", "item1": "6(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.18529v2_figure_6(a).png", "item2_path": "./SciVer/images/2410.18529v2-Table3-1.png", "section": [ "4.3" ], "request_id": 155, "origin_statement": "The beaker example displays 5 entities, matching SCONE’s support for fewer than 6 entities per scenario, and uses 3 actions, well within SCONE’s limit of fewer than 6 actions.", "perturbed_statement": "The beaker example displays 6 entities, matching SCONE’s support for fewer than 6 entities per scenario, and uses 3 actions, well within SCONE’s limit of fewer than 6 actions.", "perturbed_explanation": "The perturbed statement incorrectly claims there are 6 beakers, but the image clearly shows only 5 beakers. 
Furthermore, SCONE supports fewer than 6 entities, so stating 6 entities contradicts both the visual and the table data.", "claim": "The beaker example displays 5 entities, matching SCONE’s support for fewer than 6 entities per scenario, and uses 3 actions, well within SCONE’s limit of fewer than 6 actions.", "label": true }, { "paperid": "2411.14736v1", "paper_path": "./SciVer/papers/2411.14736v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.14736v1-Table4-1.png", "item2_path": "./SciVer/images/2411.14736v1-Table5-1.png", "section": [ "4.2" ], "request_id": 157, "origin_statement": "In DR2new, PSRN+GWB yields log marginal likelihood 535438.09 (std 1.14) versus 535429.85 (std 1.02) for PSRN+CURN, a difference of 8.24 matching the log-BF of 8.24 (std 1.47) which strongly favors GWB over CURN.", "perturbed_statement": "In DR2new, PSRN+GWB yields log marginal likelihood 535438.09 (std 1.14) versus 535429.85 (std 1.02) for PSRN+CURN, a difference of 7.24 matching the log-BF of 7.24 (std 1.47) which strongly favors GWB over CURN.", "perturbed_explanation": "The table values show the difference between 535438.09 and 535429.85 is 8.24, and Table 5 reports a log-BF of 8.24 (not 7.24). Thus the claimed difference of 7.24 contradicts both the log-marginal likelihoods and the log-BF values.", "claim": "In DR2new, PSRN+GWB yields log marginal likelihood 535438.09 (std 1.14) versus 535429.85 (std 1.02) for PSRN+CURN, a difference of 8.24 matching the log-BF of 8.24 (std 1.47) which strongly favors GWB over CURN.", "label": true }, { "paperid": "2411.01289v1", "paper_path": "./SciVer/papers/2411.01289v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.01289v1-Table1-1.png", "item2_path": "./SciVer/images/2411.01289v1-Table2-1.png", "section": [ "4.1" ], "request_id": 159, "origin_statement": "Comparing scenarios, ML_CP’s specificity drops from 0.98 in binary classification to 0.97 in multi-level classification (a 1% decrease), whereas NPM’s specificity plummets by 24% from 0.99 to 0.75.", "perturbed_statement": "Comparing scenarios, ML_CP’s specificity drops from 0.98 in binary classification to 0.95 in multi-level classification (a 3% decrease), whereas NPM’s specificity plummets by 24% from 0.99 to 0.75.", "perturbed_explanation": "The perturbed statement incorrectly claims ML_CP’s multi-level specificity is 0.95 and a 3% drop; Table 2 actually shows ML_CP’s specificity as 0.97, representing only a 1% decrease from 0.98.", "claim": "Comparing scenarios, ML_CP’s specificity drops from 0.98 in binary classification to 0.97 in multi-level classification (a 1% decrease), whereas NPM’s specificity plummets by 24% from 0.99 to 0.75.", "label": true }, { "paperid": "2411.07393v1", "paper_path": "./SciVer/papers/2411.07393v1.json", "claim_type": "parallel", "item1": "6(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07393v1_figure_6(a).png", "item2_path": "./SciVer/images/2411.07393v1_figure_6(b).png", "section": [ "5.2", "5.1" ], "request_id": 165, "origin_statement": "The 2011 eruption peaked at a ΔV≈0.85 mag, nearly three times the maximum post-2015 variation of ≈0.31 mag observed around 2016.", "perturbed_statement": "The 2011 eruption peaked at a ΔV≈0.95 mag, nearly three times the maximum post-2015 variation of ≈0.31 mag observed around 2016.", 
"perturbed_explanation": "Figure 6 (1995–2023) shows the 2011 peak at about ΔV≈0.85 mag, not 0.95 mag, so the stated peak magnitude contradicts the plotted data.", "claim": "The 2011 eruption peaked at a ΔV≈0.85 mag, nearly three times the maximum post-2015 variation of ≈0.31 mag observed around 2016.", "label": true }, { "paperid": "2410.21119v1", "paper_path": "./SciVer/papers/2410.21119v1.json", "claim_type": "parallel", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21119v1_figure_6.png", "item2_path": "./SciVer/images/2410.21119v1_figure_7.png", "section": [ "4.2.2" ], "request_id": 166, "origin_statement": "Figure 6a shows client 3 holds only labels 4 and 5, and in Figure 6b the stratified weights for client 3 are 0.944 on label 4 and 0.9734 on label 5, while all other label weights are below 0.005.", "perturbed_statement": "Figure 6a shows client 3 holds only labels 4 and 5, and in Figure 6b the stratified weights for client 3 are 0.944 on label 4 and 0.8434 on label 5, while all other label weights are below 0.005.", "perturbed_explanation": "The statement is incorrect because in Figure 6b the weight for client 3 on label 5 is actually 0.9734, not 0.8434, according to the heatmap data.", "claim": "Figure 6a shows client 3 holds only labels 4 and 5, and in Figure 6b the stratified weights for client 3 are 0.944 on label 4 and 0.9734 on label 5, while all other label weights are below 0.005.", "label": true }, { "paperid": "2410.23844v1", "paper_path": "./SciVer/papers/2410.23844v1.json", "claim_type": "parallel", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23844v1_figure_2.png", "item2_path": "./SciVer/images/2410.23844v1-Table1-1.png", "section": [ "3.2.1" ], "request_id": 168, "origin_statement": "In the factual prompt “Beats Music is owned by”, the highest p(Apple) in the attention layers peaks at about 0.022 on the ‘by’ token, whereas for the Sample 3 commonsense prompt (“PersonX can tell PersonY...others want to to stop what they’re doing”), attention layers peak near 0.15.", "perturbed_statement": "In the factual prompt “Beats Music is owned by”, the highest p(Apple) in the attention layers peaks at about 0.022 on the ‘by’ token, whereas for the Sample 3 commonsense prompt (“PersonX can tell PersonY...others want to stop what they’re doing”), attention layers peak near 0.05.", "perturbed_explanation": "The perturbation is incorrect because the heatmap for the Sample 3 commonsense prompt actually shows attention-layer probabilities up to around 0.15 (deep red regions), not 0.05. 
This contradicts the color intensity scale in Figure 2’s commonsense attention panel.", "claim": "In the factual prompt “Beats Music is owned by”, the highest p(Apple) in the attention layers peaks at about 0.022 on the ‘by’ token, whereas for the Sample 3 commonsense prompt (“PersonX can tell PersonY...others want to to stop what they’re doing”), attention layers peak near 0.15.", "label": true }, { "paperid": "2410.08821v1", "paper_path": "./SciVer/papers/2410.08821v1.json", "claim_type": "parallel", "item1": "2(e)", "item2": "2(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.08821v1_figure_2(e).png", "item2_path": "./SciVer/images/2410.08821v1_figure_2(f).png", "section": [ "5.1.4" ], "request_id": 175, "origin_statement": "When increasing fair top-k from 2 to 3, Vanilla RAG’s question recall jumps by ~2.5 points (79.8%→82.3%), whereas its token F1 stays flat at 35.2%. Our method achieves ~82.6% recall and ~36.7% F1, outperforming vanilla by 0.3 and 1.5 points respectively.", "perturbed_statement": "When increasing fair top-k from 2 to 3, Vanilla RAG’s question recall jumps by ~2.5 points (79.8%→82.3%), and its token F1 rises by ~0.8 points (35.2%→36.0%). Our method achieves ~82.6% recall and ~36.7% F1, outperforming vanilla by 0.3 and 1.5 points respectively.", "perturbed_explanation": "The perturbation incorrectly states that Vanilla RAG’s token F1 increases from 35.2% to 36.0% with higher top-k. In the second subfigure (T-F1), the vanilla line is flat at 35.2% for both top-2 and top-3, so there is no rise in token F1.", "claim": "When increasing fair top-k from 2 to 3, Vanilla RAG’s question recall jumps by ~2.5 points (79.8%→82.3%), whereas its token F1 stays flat at 35.2%. Our method achieves ~82.6% recall and ~36.7% F1, outperforming vanilla by 0.3 and 1.5 points respectively.", "label": true }, { "paperid": "2409.15552v1", "paper_path": "./SciVer/papers/2409.15552v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15552v1_figure_1(b).png", "item2_path": "./SciVer/images/2409.15552v1_figure_2(b).png", "section": [ "4.2.2" ], "request_id": 177, "origin_statement": "In the Chebyshev recurrence implementation (Eq. 13) with s=10, all internal stability polynomials remain bounded by amplitude 1, while ROCK2’s stability domain (s=13) extends along the real axis to approximately Re(z)=−6.", "perturbed_statement": "In the Chebyshev recurrence implementation (Eq. 13) with s=10, all internal stability polynomials remain bounded by amplitude 1, while ROCK2’s stability domain (s=13) extends along the real axis to approximately Re(z)=−4.", "perturbed_explanation": "Figure 2 shows that the ROCK2 stability domain for s=13 reaches approximately Re(z)=−6 on the horizontal axis, not −4 as stated in the perturbed claim.", "claim": "In the Chebyshev recurrence implementation (Eq. 
13) with s=10, all internal stability polynomials remain bounded by amplitude 1, while ROCK2’s stability domain (s=13) extends along the real axis to approximately Re(z)=−6.", "label": true }, { "paperid": "2411.02725v1", "paper_path": "./SciVer/papers/2411.02725v1.json", "claim_type": "parallel", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02725v1_figure_3.png", "item2_path": "./SciVer/images/2411.02725v1-Table1-1.png", "section": [ "4.1.3" ], "request_id": 183, "origin_statement": "NNES students average 7.3 queries per student (269 total queries from 37 enrolled), yet the heatmap peaks with five unique NNES users at 3 PM on Wednesday, underscoring concentrated midweek afternoon engagement.", "perturbed_statement": "NNES students average 8.5 queries per student (269 total queries from 37 enrolled), yet the heatmap peaks with seven unique NNES users at 3 PM on Wednesday, underscoring concentrated midweek afternoon engagement.", "perturbed_explanation": "The perturbed statement is incorrect because the table reports an average of 7.3 queries per NNES student (not 8.5), and the heatmap shows a maximum of five unique NNES users at 3 PM on Wednesday (not seven).", "claim": "NNES students average 7.3 queries per student (269 total queries from 37 enrolled), yet the heatmap peaks with five unique NNES users at 3 PM on Wednesday, underscoring concentrated midweek afternoon engagement.", "label": true }, { "paperid": "2411.07050v1", "paper_path": "./SciVer/papers/2411.07050v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07050v1_figure_4.png", "item2_path": "./SciVer/images/2411.07050v1-Table3-1.png", "section": [ "4.2" ], "request_id": 184, "origin_statement": "FedALA achieves a macro F1 of 43.32% with an inter-class STD of 29.19% (Figure 4), yet records the smallest Top-1 relative drop of 46.2% among FL algorithms (Table 3), showing its superior tail-class robustness despite high class variance.", "perturbed_statement": "FedALA achieves a macro F1 of 45.32% with an inter-class STD of 29.19% (Figure 4), yet records the smallest Top-1 relative drop of 46.2% among FL algorithms (Table 3), showing its superior tail-class robustness despite high class variance.", "perturbed_explanation": "The macro F1 score for FedALA is misreported as 45.32%, but Figure 4 clearly shows a macro F1 of 43.32% for FedALA, contradicting the visualized data.", "claim": "FedALA achieves a macro F1 of 43.32% with an inter-class STD of 29.19% (Figure 4), yet records the smallest Top-1 relative drop of 46.2% among FL algorithms (Table 3), showing its superior tail-class robustness despite high class variance.", "label": true }, { "paperid": "2410.19218v1", "paper_path": "./SciVer/papers/2410.19218v1.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19218v1_figure_1.png", "item2_path": "./SciVer/images/2410.19218v1-Table1-1.png", "section": [ "5.2" ], "request_id": 185, "origin_statement": "For the query in Figure 1, TaxoIndex moves Paper B from rank 89 to rank 2 while improving CSFCube N@5 from 0.372 under FFT to 0.458, an 8.6% gain.", "perturbed_statement": "For the query in Figure 1, TaxoIndex moves Paper B from rank 89 to rank 2 while boosting CSFCube N@5 from 0.372 under FFT to 0.578, a 20.6% gain.", "perturbed_explanation": "The perturbed statement incorrectly reports 
TaxoIndex’s N@5 on CSFCube as 0.578; the table shows the actual N@5 value for TaxoIndex is 0.458, not 0.578.", "claim": "For the query in Figure 1, TaxoIndex moves Paper B from rank 89 to rank 2 while improving CSFCube N@5 from 0.372 under FFT to 0.458, an 8.6% gain.", "label": true }, { "paperid": "2411.14711v1", "paper_path": "./SciVer/papers/2411.14711v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14711v1_figure_5.png", "item2_path": "./SciVer/images/2411.14711v1_figure_6.png", "section": [ "8.3" ], "request_id": 187, "origin_statement": "On ogbl-citation2, SIEG achieves an MRR of 89.87, outperforming the best heuristic encoding combo (SPD+AA+JA+RA) which attains 81.91 MRR by 7.96 points.", "perturbed_statement": "On ogbl-citation2, SIEG achieves an MRR of 89.87, which is 2.94 points lower than the top heuristic encoding combo (SPD+AA+JA+RA) at 92.81 MRR.", "perturbed_explanation": "The perturbed statement incorrectly elevates the SPD+AA+JA+RA heuristic encoding’s MRR to 92.81. In the actual results (Figure 6), SPD+AA+JA+RA attains only 81.91 MRR, so it does not outperform SIEG’s 89.87 MRR.", "claim": "On ogbl-citation2, SIEG achieves an MRR of 89.87, outperforming the best heuristic encoding combo (SPD+AA+JA+RA) which attains 81.91 MRR by 7.96 points.", "label": true }, { "paperid": "2409.11887v1", "paper_path": "./SciVer/papers/2409.11887v1.json", "claim_type": "parallel", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11887v1_figure_4.png", "item2_path": "./SciVer/images/2409.11887v1-Table1-1.png", "section": [ "5.3" ], "request_id": 191, "origin_statement": "At 2560 tokens, DocMamba’s GPU usage is 5.0 GB—88.3% less than LayoutLMv3’s 42.8 GB—while DocMamba attains a FUNSD F1 of 91.7% versus LayoutLMv3’s 90.3%.", "perturbed_statement": "At 2560 tokens, DocMamba’s GPU usage is 6.0 GB—88.3% less than LayoutLMv3’s 42.8 GB—while DocMamba attains a FUNSD F1 of 91.7% versus LayoutLMv3’s 90.3%.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 4 shows DocMamba’s GPU memory at input length 2560 is 5.0 GB, not 6.0 GB.", "claim": "At 2560 tokens, DocMamba’s GPU usage is 5.0 GB—88.3% less than LayoutLMv3’s 42.8 GB—while DocMamba attains a FUNSD F1 of 91.7% versus LayoutLMv3’s 90.3%.", "label": true }, { "paperid": "2410.22543v1", "paper_path": "./SciVer/papers/2410.22543v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22543v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.22543v1-Table2-1.png", "section": [ "2.4" ], "request_id": 193, "origin_statement": "In Fig.2’s argument-of-perihelion plot for asteroid 2001 YB5, the ascending node distance (Ra) crosses 1 AU at ω≈297°, and the descending node distance (Rd) crosses at ω≈114°, matching Table 2’s PHA values of 297.2° and 114.2°.", "perturbed_statement": "In Fig.2’s argument-of-perihelion plot for asteroid 2001 YB5, the ascending node distance (Ra) crosses 1 AU at ω≈290°, and the descending node distance (Rd) crosses at ω≈114°, matching Table 2’s PHA values of 290° and 114.2°.", "perturbed_explanation": "The perturbation incorrectly states the ascending node crossing at ω≈290° and cites a table value of 290°. 
Table 2 and the intersection arrow in Fig.2 both show the actual ascending node crossing at ω≈297.2°, not 290°.", "claim": "In Fig.2’s argument-of-perihelion plot for asteroid 2001 YB5, the ascending node distance (Ra) crosses 1 AU at ω≈297°, and the descending node distance (Rd) crosses at ω≈114°, matching Table 2’s PHA values of 297.2° and 114.2°.", "label": true }, { "paperid": "2409.16057v2", "paper_path": "./SciVer/papers/2409.16057v2.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16057v2_figure_3(a).png", "item2_path": "./SciVer/images/2409.16057v2_figure_3(b).png", "section": [ "4.2" ], "request_id": 194, "origin_statement": "The backdoored model's detection confidence for the larger giraffe drops from 1.00 to 0.45, while its RPN-RCNN inconsistency points around the giraffe’s torso rise from about 15 in the clean case to roughly 45 in the backdoored case.", "perturbed_statement": "The backdoored model's detection confidence for the larger giraffe drops from 1.00 to 0.65, while its RPN-RCNN inconsistency points around the giraffe’s torso rise from about 15 in the clean case to roughly 45 in the backdoored case.", "perturbed_explanation": "The perturbed claim incorrectly states the backdoored model’s detection confidence as 0.65, whereas the image clearly shows a confidence of 0.45 for the larger giraffe in the backdoored scenario.", "claim": "The backdoored model's detection confidence for the larger giraffe drops from 1.00 to 0.45, while its RPN-RCNN inconsistency points around the giraffe’s torso rise from about 15 in the clean case to roughly 45 in the backdoored case.", "label": true }, { "paperid": "2410.02810v1", "paper_path": "./SciVer/papers/2410.02810v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02810v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.02810v1_figure_1(b).png", "section": [ "3" ], "request_id": 195, "origin_statement": "StateAct completes each environment interaction with a single LLM call and no additional tools (Figure 1a), and its prompt skeleton in Figure 1b includes two few‐shot examples—Obs 1 and Obs 2—before the current interaction trace.", "perturbed_statement": "StateAct completes each environment interaction with two LLM calls and no additional tools (Figure 1a), and its prompt skeleton in Figure 1b includes three few‐shot examples—Obs 1, Obs 2, and Obs 3—before the current interaction trace.", "perturbed_explanation": "The perturbation is wrong because Figure 1a explicitly shows only one LLM call per interaction (not two), and Figure 1b displays exactly two few‐shot examples (Obs 1 and Obs 2), not three.", "claim": "StateAct completes each environment interaction with a single LLM call and no additional tools (Figure 1a), and its prompt skeleton in Figure 1b includes two few‐shot examples—Obs 1 and Obs 2—before the current interaction trace.", "label": true }, { "paperid": "2410.01677v3", "paper_path": "./SciVer/papers/2410.01677v3.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01677v3_figure_3(a).png", "item2_path": "./SciVer/images/2410.01677v3_figure_3(b).png", "section": [ "4.2" ], "request_id": 197, "origin_statement": "On GSM8k, Llama-3.1-70B’s reordering accuracy falls from 93.0% at one shuffle to 87.0% at three shuffles (6-point 
drop), yet Table 1 shows it retains 94.2% of its original BASE accuracy on average across all operations.", "perturbed_statement": "On GSM8k, Llama-3.1-70B’s reordering accuracy falls from 93.0% at one shuffle to 89.0% at three shuffles (4-point drop), yet Table 1 shows it retains 97.1% of its original BASE accuracy on average across all operations.", "perturbed_explanation": "This is incorrect because the chart shows Llama-3.1-70B’s accuracy at three reorders is 87.0%, not 89.0%, making the drop 6 points, not 4.0. Moreover, Table 1 reports a 94.2% average retention for Llama-3.1-70B, not 97.1%.", "claim": "On GSM8k, Llama-3.1-70B’s reordering accuracy falls from 93.0% at one shuffle to 89.0% at three shuffles (4-point drop), yet Table 1 shows it retains 97.1% of its original BASE accuracy on average across all operations.", "label": false }, { "paperid": "2409.06280v1", "paper_path": "./SciVer/papers/2409.06280v1.json", "claim_type": "parallel", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06280v1_figure_8.png", "item2_path": "./SciVer/images/2409.06280v1_figure_9.png", "section": [ "5.2.6" ], "request_id": 199, "origin_statement": "Lowering marking intensity from m=0.7, δ=8/255 to m=0.8, δ=4/255 cuts the unsupervised OOD detection AUC by 0.1752 (from 0.8085 to 0.6333), yet with perfect knowledge, a supervised detector still reaches 0.9906 AUC.", "perturbed_statement": "Lowering marking intensity from m=0.7, δ=8/255 to m=0.8, δ=4/255 cuts the unsupervised OOD detection AUC by 0.2052 (from 0.8085 to 0.6333), yet with perfect knowledge, a supervised detector still reaches 0.9506 AUC.", "perturbed_explanation": "The perturbed statement incorrectly reports the AUC drop as 0.2052 when the actual difference is 0.8085 – 0.6333 = 0.1752, and it misstates the perfect-knowledge supervised detector’s AUC as 0.9506 instead of the correct 0.9906.", "claim": "Lowering marking intensity from m=0.7, δ=8/255 to m=0.8, δ=4/255 cuts the unsupervised OOD detection AUC by 0.2052 (from 0.8085 to 0.6333), yet with perfect knowledge, a supervised detector still reaches 0.9506 AUC.", "label": false }, { "paperid": "2410.13500v1", "paper_path": "./SciVer/papers/2410.13500v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13500v1_figure_2(b).png", "item2_path": "./SciVer/images/2410.13500v1_figure_6(b).png", "section": [ "1" ], "request_id": 202, "origin_statement": "In the DFC2019 scene in Fig.2, the large building missing in the ground-truth disparity map corresponds to numerous red inconsistent pixels in the epoch 0 disparity map (Fig.6), which are mostly eliminated by epoch 300, indicating a substantial reduction in inconsistent points.", "perturbed_statement": "In the DFC2019 scene in Fig.2, the large building missing in the ground-truth disparity map corresponds to numerous red inconsistent pixels in the epoch 0 disparity map (Fig.6), which are mostly eliminated by epoch 60, indicating a substantial reduction in inconsistent points.", "perturbed_explanation": "The perturbation incorrectly claims that most red inconsistent pixels are eliminated by epoch 60, but the Fig.6 maps still show a significant number of red inconsistent points at epoch 60. 
Only by epoch 300 are the red inconsistencies largely removed, so the statement contradicts the disparity evolution.", "claim": "In the DFC2019 scene in Fig.2, the large building missing in the ground-truth disparity map corresponds to numerous red inconsistent pixels in the epoch 0 disparity map (Fig.6), which are mostly eliminated by epoch 60, indicating a substantial reduction in inconsistent points.", "label": false }, { "paperid": "2410.14202v1", "paper_path": "./SciVer/papers/2410.14202v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.14202v1-Table2-1.png", "item2_path": "./SciVer/images/2410.14202v1-Table3-1.png", "section": [ "5.2.1" ], "request_id": 205, "origin_statement": "T5 with GPT rationales achieves an average prompt QWK of 0.726 in Table 3, surpassing LED’s 0.705 by 0.021, and likewise attains a trait-level average QWK of 0.708 in Table 2 versus LED’s 0.687, mirroring the same 0.021 advantage.", "perturbed_statement": "T5 with GPT rationales achieves an average prompt QWK of 0.746 in Table 3, surpassing LED’s 0.705 by 0.041, and likewise attains a trait-level average QWK of 0.728 in Table 2 versus LED’s 0.687, mirroring the same 0.041 advantage.", "perturbed_explanation": "The perturbed statement misreports both values: Table 3 shows T5+RMTS(G) has a prompt average QWK of 0.726 (not 0.746), and Table 2 shows its trait-level average QWK is 0.708 (not 0.728), so the claimed numbers and gap are incorrect.", "claim": "T5 with GPT rationales achieves an average prompt QWK of 0.746 in Table 3, surpassing LED’s 0.705 by 0.041, and likewise attains a trait-level average QWK of 0.728 in Table 2 versus LED’s 0.687, mirroring the same 0.041 advantage.", "label": false }, { "paperid": "2411.09556v1", "paper_path": "./SciVer/papers/2411.09556v1.json", "claim_type": "parallel", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09556v1_figure_4.png", "item2_path": "./SciVer/images/2411.09556v1-Table1-1.png", "section": [ "3.3" ], "request_id": 207, "origin_statement": "At 27860 r_g/c, the snapshot δ_e ranges from about 0.20 to 0.30 across the jet–disk interface, where the 2TC model yields a time-averaged 228 GHz flux of 2.7 Jy, roughly 15.6% lower than the 3.2 Jy from the 2T simulation.", "perturbed_statement": "At 27860 r_g/c, the snapshot δ_e ranges from about 0.20 to 0.30 across the jet–disk interface, where the 2TC model yields a time-averaged 228 GHz flux of 3.7 Jy, roughly 15.6% lower than the 3.2 Jy from the 2T simulation.", "perturbed_explanation": "The time-averaged 228 GHz flux for the 2TC model is listed in the table as 2.7 Jy, not 3.7 Jy. 
This misstates the tabulated value and therefore contradicts the provided data.", "claim": "At 27860 r_g/c, the snapshot δ_e ranges from about 0.20 to 0.30 across the jet–disk interface, where the 2TC model yields a time-averaged 228 GHz flux of 3.7 Jy, roughly 15.6% lower than the 3.2 Jy from the 2T simulation.", "label": false }, { "paperid": "2411.15583v1", "paper_path": "./SciVer/papers/2411.15583v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15583v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.15583v1-Table4-1.png", "section": [ "4.2" ], "request_id": 209, "origin_statement": "In the 13 studies listed in Table 4, 5 (≈38%) exhibited terminology confusion, whereas Figure 5 shows sample sizes ranged from 12 to 44 participants, meaning the largest sample was over three times larger than the smallest.", "perturbed_statement": "In the 13 studies listed in Table 4, 6 (≈46%) exhibited terminology confusion, whereas Figure 5 shows sample sizes ranged from 12 to 44 participants, meaning the largest sample was over three times larger than the smallest.", "perturbed_explanation": "The perturbation is incorrect because Table 4 indicates that only 5 studies (ES IDs 1, 5, 9, 10, 12) are marked with \"Terminology Confusion = YES,\" not 6.", "claim": "In the 13 studies listed in Table 4, 6 (≈46%) exhibited terminology confusion, whereas Figure 5 shows sample sizes ranged from 12 to 44 participants, meaning the largest sample was over three times larger than the smallest.", "label": false }, { "paperid": "2410.16843v1", "paper_path": "./SciVer/papers/2410.16843v1.json", "claim_type": "parallel", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.16843v1_figure_2.png", "item2_path": "./SciVer/images/2410.16843v1-Table2-1.png", "section": [ "4.2" ], "request_id": 210, "origin_statement": "According to Figure 2, Llama-2-7b-chat rapidly climbs to ~2.5 training episodic reward within the first 500 episodes—more than double Llama-2-7b’s ~1.2 at the same point—while Table 2 shows Trustworthy-Alignment increases Llama-2-7b-chat’s ARC score by +3.50, slightly surpassing SFT’s +3.33 gain.", "perturbed_statement": "According to Figure 2, Llama-2-7b-chat rapidly climbs to ~3.0 training episodic reward within the first 500 episodes—more than double Llama-2-7b’s ~1.2 at the same point—while Table 2 shows Trustworthy-Alignment increases Llama-2-7b-chat’s ARC score by +3.50, slightly surpassing SFT’s +3.33 gain.", "perturbed_explanation": "The perturbed statement incorrectly claims that Llama-2-7b-chat reaches ~3.0 training episodic reward by 500 episodes. 
Figure 2 shows its episodic reward at 500 episodes is approximately 2.5, not 3.0.", "claim": "According to Figure 2, Llama-2-7b-chat rapidly climbs to ~3.0 training episodic reward within the first 500 episodes—more than double Llama-2-7b’s ~1.2 at the same point—while Table 2 shows Trustworthy-Alignment increases Llama-2-7b-chat’s ARC score by +3.50, slightly surpassing SFT’s +3.33 gain.", "label": false }, { "paperid": "2410.02409v1", "paper_path": "./SciVer/papers/2410.02409v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.02409v1-Table2-1.png", "item2_path": "./SciVer/images/2410.02409v1-Table3-1.png", "section": [ "3.1.2" ], "request_id": 213, "origin_statement": "In Table3 the row p=0, s=0 has |y|₀ = n+2 compared to Table2’s row p=0, s=ε with |y|₀ = n+1, yet both yield weight ℓn + mn, reflecting that adding one extra 0-component doesn’t affect the weight calculation 0·|y|₀ + ℓ·|y|_ℓ + m·|y|_m.", "perturbed_statement": "In Table3 the row p=0, s=0 has |y|₀ = n+3 compared to Table2’s row p=0, s=ε with |y|₀ = n+1, yet both yield weight ℓn + mn, reflecting that adding one extra 0-component doesn’t affect the weight calculation 0·|y|₀ + ℓ·|y|_ℓ + m·|y|_m.", "perturbed_explanation": "The perturbation is incorrect because Table3 actually lists |y|₀ = n+2 (not n+3) for the row p=0, s=0, so the stated value n+3 contradicts the data in the image.", "claim": "In Table3 the row p=0, s=0 has |y|₀ = n+3 compared to Table2’s row p=0, s=ε with |y|₀ = n+1, yet both yield weight ℓn + mn, reflecting that adding one extra 0-component doesn’t affect the weight calculation 0·|y|₀ + ℓ·|y|_ℓ + m·|y|_m.", "label": false }, { "paperid": "2409.19351v1", "paper_path": "./SciVer/papers/2409.19351v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "10", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19351v1_figure_2(b).png", "item2_path": "./SciVer/images/2409.19351v1_figure_10.png", "section": [ "3.3" ], "request_id": 217, "origin_statement": "Figure 2(b) shows a dense cluster of roughly 230 vehicles on the top-left highway segment at 14:50, matching the reported median traffic volume; Figure 10 indicates that at 14:50 with 40% penetration rate the estimated velocities exhibit no significant outliers, demonstrating the closest match to simulated speeds.", "perturbed_statement": "Figure 2(b) shows a dense cluster of roughly 250 vehicles on the top-left highway segment at 14:50, matching the reported median traffic volume; Figure 10 indicates that at 14:50 with 40% penetration rate the estimated velocities exhibit no significant outliers, demonstrating the closest match to simulated speeds.", "perturbed_explanation": "The perturbed statement incorrectly cites 250 vehicles at 14:50, whereas the contextual information specifies a median of 230 active vehicles for that time of day. 
This contradiction makes the perturbed vehicle count invalid.", "claim": "Figure 2(b) shows a dense cluster of roughly 250 vehicles on the top-left highway segment at 14:50, matching the reported median traffic volume; Figure 10 indicates that at 14:50 with 40% penetration rate the estimated velocities exhibit no significant outliers, demonstrating the closest match to simulated speeds.", "label": false }, { "paperid": "2409.08598v1", "paper_path": "./SciVer/papers/2409.08598v1.json", "claim_type": "parallel", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.08598v1_figure_4.png", "item2_path": "./SciVer/images/2409.08598v1-Table1-1.png", "section": [ "4.3" ], "request_id": 219, "origin_statement": "The self-contrast objective on RAF-DB with a pre-trained Swin-T yields about 92.6% accuracy, 0.9% higher than contrastive learning, and over 2.0% above the CLIP-text-supervised baseline of 90.55% in Table 1.", "perturbed_statement": "The self-contrast objective on RAF-DB with a pre-trained Swin-T yields about 93.5% accuracy, 1.8% higher than contrastive learning, and over 3.0% above the CLIP-text-supervised baseline of 90.55%.", "perturbed_explanation": "The chart in Figure 4 shows the self-contrast accuracy for Swin-T at about 92.6%, not 93.5%, and the margin over contrastive learning is approximately 0.97%, not 1.8%. These contradict the stated values.", "claim": "The self-contrast objective on RAF-DB with a pre-trained Swin-T yields about 93.5% accuracy, 1.8% higher than contrastive learning, and over 3.0% above the CLIP-text-supervised baseline of 90.55%.", "label": false }, { "paperid": "2410.04199v3", "paper_path": "./SciVer/papers/2410.04199v3.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.04199v3_figure_3.png", "item2_path": "./SciVer/images/2410.04199v3-Table3-1.png", "section": [ "5.1" ], "request_id": 220, "origin_statement": "Gemini-1.5-Flash experiences only a 1.2% drop to 85.0% accuracy on GSM8K under LongGenBench (Table 3), and as seen in Figure 3 it sustains over 90% accuracy for the first five questions, whereas GPT-3.5-Turbo drops below 60% by question 12.", "perturbed_statement": "Gemini-1.5-Flash experiences only a 1.2% drop to 85.0% accuracy on GSM8K under LongGenBench (Table 3), and as seen in Figure 3 it sustains over 98% accuracy for the first ten questions, whereas GPT-3.5-Turbo drops below 50% by question 12.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 3 shows Gemini-1.5-Flash never sustains above 98% accuracy beyond the first two questions and falls below 90% by question ten. 
Additionally, GPT-3.5-Turbo does not drop below 50% until around question 18, not question 12.", "claim": "Gemini-1.5-Flash experiences only a 1.2% drop to 85.0% accuracy on GSM8K under LongGenBench (Table 3), and as seen in Figure 3 it sustains over 98% accuracy for the first ten questions, whereas GPT-3.5-Turbo drops below 50% by question 12.", "label": false }, { "paperid": "2411.03697v1", "paper_path": "./SciVer/papers/2411.03697v1.json", "claim_type": "parallel", "item1": "13", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03697v1_figure_13.png", "item2_path": "./SciVer/images/2411.03697v1-Table5-1.png", "section": [ "6.3" ], "request_id": 221, "origin_statement": "Figure 13 shows TATAA’s LUT-Non-L overhead is about 10–15%, and Table 5 reveals the quantization layout conversion uses 6558 LUTs (9.5% of 69 282), highlighting low non-linear LUT costs.", "perturbed_statement": "Figure 13 shows TATAA’s LUT-Non-L overhead is about 10–15%, and Table 5 reveals the quantization layout conversion uses 13 112 LUTs (18.9% of 69 282), highlighting moderate non-linear LUT costs.", "perturbed_explanation": "This perturbed statement is incorrect because Table 5 actually reports quantization layout conversion as using 6558 LUTs (9.5%), not 13 112 LUTs (18.9%).", "claim": "Figure 13 shows TATAA’s LUT-Non-L overhead is about 10–15%, and Table 5 reveals the quantization layout conversion uses 13 112 LUTs (18.9% of 69 282), highlighting moderate non-linear LUT costs.", "label": false }, { "paperid": "2411.07976v5", "paper_path": "./SciVer/papers/2411.07976v5.json", "claim_type": "parallel", "item1": "2(d)", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07976v5_figure_2(d).png", "item2_path": "./SciVer/images/2411.07976v5_figure_3(a).png", "section": [ "3.2" ], "request_id": 222, "origin_statement": "Figure 2 displays six guided local augmentations (c–h), which is 50% more than the four feature visualizations (a–d) shown in Figure 3.", "perturbed_statement": "Figure 2 displays seven guided local augmentations (c–h), which is 75% more than the five feature visualizations (a–d) shown in Figure 3.", "perturbed_explanation": "This is incorrect because Figure 2 only includes six guided augmentations labeled c through h, and Figure 3 presents exactly four feature visualizations labeled a through d. 
The numbers seven and five contradict the actual counts of subfigures.", "claim": "Figure 2 displays seven guided local augmentations (c–h), which is 75% more than the five feature visualizations (a–d) shown in Figure 3.", "label": false }, { "paperid": "2410.07484v2", "paper_path": "./SciVer/papers/2410.07484v2.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07484v2_figure_4.png", "item2_path": "./SciVer/images/2410.07484v2-Table2-1.png", "section": [ "4.2" ], "request_id": 227, "origin_statement": "By iteration 6, WALL-E achieves a ~95% success rate on ALFWorld tasks, and it incurs an average API cost of just $0.41 per Minecraft mission, 0.10 USD lower than GITM’s $0.51.", "perturbed_statement": "By iteration 6, WALL-E achieves only a ~90% success rate on ALFWorld tasks, and it incurs an average API cost of just $0.31 per Minecraft mission, 0.20 USD lower than GITM’s $0.51.", "perturbed_explanation": "The perturbed statement understates WALL-E’s success rate at iteration 6 (actual ≈95%, not 90%) and misreports its average API cost (actual $0.41, not $0.31), contradicting both the figure and the table.", "claim": "By iteration 6, WALL-E achieves only a ~90% success rate on ALFWorld tasks, and it incurs an average API cost of just $0.31 per Minecraft mission, 0.20 USD lower than GITM’s $0.51.", "label": false }, { "paperid": "2411.01370v1", "paper_path": "./SciVer/papers/2411.01370v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01370v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.01370v1_figure_5(d).png", "section": [ "5.1.3" ], "request_id": 228, "origin_statement": "The mean RGAP_LB decreases from 1.06% under stagewise-dependent scenario trees to 0.62% under stagewise-independent trees, a reduction of 0.44 percentage points.", "perturbed_statement": "The mean RGAP_LB decreases from 1.06% under stagewise-dependent scenario trees to 1.62% under stagewise-independent trees, a reduction of 0.44 percentage points.", "perturbed_explanation": "The second histogram shows the SI mean RGAP_LB is 0.62%, not 1.62%, so the perturbed statement’s SI mean contradicts the visual data.", "claim": "The mean RGAP_LB decreases from 1.06% under stagewise-dependent scenario trees to 1.62% under stagewise-independent trees, a reduction of 0.44 percentage points.", "label": false }, { "paperid": "2411.04093v1", "paper_path": "./SciVer/papers/2411.04093v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.04093v1-Table1-1.png", "item2_path": "./SciVer/images/2411.04093v1-Table2-1.png", "section": [ "2" ], "request_id": 229, "origin_statement": "In Table 2’s “Hollywood Strike” example, the left summary includes the novel unigram “workers,” illustrating PoliSum’s 40.4% unigram novelty—4.6 percentage points higher than XSum’s 35.8%.", "perturbed_statement": "In Table 2’s “Hollywood Strike” example, the left summary includes the novel unigram “workers,” illustrating PoliSum’s 22.8% unigram novelty—4.6 percentage points higher than XSum’s 35.8%.", "perturbed_explanation": "Table 1 reports PoliSum’s unigram novelty as 40.4%, not 22.8%. 
Therefore stating PoliSum’s novelty is 22.8% (and 4.6 points above XSum’s 35.8%) contradicts the actual statistic.", "claim": "In Table 2’s “Hollywood Strike” example, the left summary includes the novel unigram “workers,” illustrating PoliSum’s 22.8% unigram novelty—4.6 percentage points higher than XSum’s 35.8%.", "label": false }, { "paperid": "2410.05053v1", "paper_path": "./SciVer/papers/2410.05053v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.05053v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.05053v1-Table2-1.png", "section": [ "3.5" ], "request_id": 231, "origin_statement": "From Figure 3 panels (a) and (d), the monolayer uptake plateau goes from about 14.8 mmol/g at 250 K to roughly 13.0 mmol/g at 400 K, while Table 2 shows its nematic order parameter declines from 0.7248 to 0.4804 over the same temperature range.", "perturbed_statement": "From Figure 3 panels (a) and (d), the monolayer uptake plateau goes from about 14.8 mmol/g at 250 K to roughly 12.0 mmol/g at 400 K, while Table 2 shows its nematic order parameter declines from 0.7248 to 0.4804 over the same temperature range.", "perturbed_explanation": "The perturbed statement understates the 400 K uptake plateau. In panel (d) of Figure 3, the monolayer plateau is about 13.0 mmol/g, not 12.0 mmol/g as claimed.", "claim": "From Figure 3 panels (a) and (d), the monolayer uptake plateau goes from about 14.8 mmol/g at 250 K to roughly 12.0 mmol/g at 400 K, while Table 2 shows its nematic order parameter declines from 0.7248 to 0.4804 over the same temperature range.", "label": false }, { "paperid": "2410.18514v1", "paper_path": "./SciVer/papers/2410.18514v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.18514v1-Table2-1.png", "item2_path": "./SciVer/images/2410.18514v1-Table3-1.png", "section": [ "5" ], "request_id": 234, "origin_statement": "The 220M-parameter MDM outperforms an equivalently sized ARM on BoolQ by 2.11% (62.17% vs 60.06%) per Table 2, and also surpasses the larger 1.5B GPT-2 on OpenBookQA by 2.40% (34.40% vs 32.00%) according to Table 3.", "perturbed_statement": "The 220M-parameter MDM outperforms an equivalently sized ARM on BoolQ by 3.11% (62.17% vs 60.06%) per Table 2, and also surpasses the larger 1.5B GPT-2 on OpenBookQA by 2.40% (34.40% vs 32.00%) according to Table 3.", "perturbed_explanation": "The perturbed statement incorrectly claims a 3.11% margin on BoolQ. 
Table 2 shows the actual difference between MDM (62.17%) and ARM (60.06%) is 2.11%, not 3.11%, making the stated margin erroneous.", "claim": "The 220M-parameter MDM outperforms an equivalently sized ARM on BoolQ by 3.11% (62.17% vs 60.06%) per Table 2, and also surpasses the larger 1.5B GPT-2 on OpenBookQA by 2.40% (34.40% vs 32.00%) according to Table 3.", "label": false }, { "paperid": "2411.09850v1", "paper_path": "./SciVer/papers/2411.09850v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09850v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.09850v1-Table1-1.png", "section": [ "4.2" ], "request_id": 235, "origin_statement": "On ImageNet Gaussian deblurring with noise variance σy=0.05, DPS-CM improves SSIM to 0.6147 and FID to 128.92, versus DPS’s 0.3449 SSIM and 208.49 FID, matching the cleaner strawberry and dog-fur detail seen in Figure 3(a).", "perturbed_statement": "On ImageNet Gaussian deblurring with noise variance σy=0.5, DPS-CM improves SSIM to 0.7541 and FID to 128.92, versus DPS’s 0.3449 SSIM and 208.49 FID, matching the cleaner strawberry and dog-fur detail seen in Figure 3(a).", "perturbed_explanation": "The perturbed statement misstates the noise variance and SSIM value. Table 1 and the caption specify σy=0.05 (not 0.5), and DPS-CM’s SSIM on ImageNet Gaussian deblurring is 0.6147 (not 0.7541).", "claim": "On ImageNet Gaussian deblurring with noise variance σy=0.5, DPS-CM improves SSIM to 0.7541 and FID to 128.92, versus DPS’s 0.3449 SSIM and 208.49 FID, matching the cleaner strawberry and dog-fur detail seen in Figure 3(a).", "label": false }, { "paperid": "2409.05305v1", "paper_path": "./SciVer/papers/2409.05305v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05305v1_figure_2.png", "item2_path": "./SciVer/images/2409.05305v1_figure_3.png", "section": [ "4.2" ], "request_id": 236, "origin_statement": "In the 1D anharmonic exponential potential experiment, the latent encoding increases from roughly 300 at energy ≈3 to about 800 at energy ≈8, while the symbolic interpretation framework identifies a candidate expression with an MSE of approximately 4×10⁻⁴, over two orders of magnitude lower than the simplest polynomial baseline.", "perturbed_statement": "In the 1D anharmonic exponential potential experiment, the latent encoding increases from roughly 300 at energy ≈3 to about 800 at energy ≈8, while the symbolic interpretation framework identifies a candidate expression with an MSE of approximately 4×10⁻², over two orders of magnitude lower than the simplest polynomial baseline.", "perturbed_explanation": "The perturbed MSE value (4×10⁻²) conflicts with Figure 3’s bar chart for the 1D anharmonic exponential potential, where the red \"Closest Match\" bar sits below 10⁻³—approximately 4×10⁻⁴—not at 4×10⁻².", "claim": "In the 1D anharmonic exponential potential experiment, the latent encoding increases from roughly 300 at energy ≈3 to about 800 at energy ≈8, while the symbolic interpretation framework identifies a candidate expression with an MSE of approximately 4×10⁻², over two orders of magnitude lower than the simplest polynomial baseline.", "label": false }, { "paperid": "2410.20063v2", "paper_path": "./SciVer/papers/2410.20063v2.json", "claim_type": "parallel", "item1": "3(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": 
"./SciVer/images/2410.20063v2_figure_3(a).png", "item2_path": "./SciVer/images/2410.20063v2-Table2-1.png", "section": [ "4" ], "request_id": 237, "origin_statement": "For D−→K_S^0 π− tags, π-like efficiency is 40.48±0.24% vs μ-like 25.60±0.19% (Table 2), a 37% drop. In Fig. 3(b), the μ-like Mmiss² spectrum shows a D+→K_L^0 π+ peak of ~100 events at ~0.25 (GeV/c²)², over twice the ~40-event peak in π-like (Fig. 3(a)).", "perturbed_statement": "For D−→K_S^0 π− tags, π-like efficiency is 42.00±0.24% vs μ-like 25.60±0.19% (Table 2), a 39% drop. In Fig. 3(b), the μ-like Mmiss² spectrum shows a D+→K_L^0 π+ peak of ~85 events at ~0.25 (GeV/c²)², over twice the ~40-event peak in π-like (Fig. 3(a)).", "perturbed_explanation": "The perturbed π-like efficiency of 42.00±0.24% contradicts Table 2, which lists it as 40.48±0.24%. The stated ~85-event K_L^0 π+ peak contradicts Fig. 3(b), where that background actually reaches about 100 events.", "claim": "For D−→K_S^0 π− tags, π-like efficiency is 42.00±0.24% vs μ-like 25.60±0.19% (Table 2), a 39% drop. In Fig. 3(b), the μ-like Mmiss² spectrum shows a D+→K_L^0 π+ peak of ~85 events at ~0.25 (GeV/c²)², over twice the ~40-event peak in π-like (Fig. 3(a)).", "label": false }, { "paperid": "2411.11954v1", "paper_path": "./SciVer/papers/2411.11954v1.json", "claim_type": "parallel", "item1": "2", "item2": "6(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11954v1_figure_2.png", "item2_path": "./SciVer/images/2411.11954v1_figure_6(a).png", "section": [ "2.2.4" ], "request_id": 238, "origin_statement": "The maximum ferromagnetic prediction probability under the self-paced Hardest strategy (~0.35) is roughly half that under the Standard approach (~0.65), even though both occur throughout the ferromagnetic region between j2 = −2 and 0 in Fig. 2(a).", "perturbed_statement": "The maximum ferromagnetic prediction probability under the self-paced Hardest strategy (~0.55) is roughly half that under the Standard approach (~0.65), even though both occur throughout the ferromagnetic region between j2 = −2 and 0 in Fig. 2(a).", "perturbed_explanation": "In Fig. 6(a) (bottom panel), the Hardest strategy’s ferromagnetic (orange) curve actually peaks at about 0.35, not 0.55, so the stated ~0.55 maximum contradicts the plotted data.", "claim": "The maximum ferromagnetic prediction probability under the self-paced Hardest strategy (~0.55) is roughly half that under the Standard approach (~0.65), even though both occur throughout the ferromagnetic region between j2 = −2 and 0 in Fig. 
2(a).", "label": false }, { "paperid": "2409.04043v1", "paper_path": "./SciVer/papers/2409.04043v1.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.04043v1_figure_1.png", "item2_path": "./SciVer/images/2409.04043v1-Table1-1.png", "section": [ "3.1" ], "request_id": 241, "origin_statement": "Modeling civility yields the largest median sentiment boost for LLaMA3 (+0.271), more than 1.5× the boost observed for GPT3.5-turbo (+0.137) (Table 1), and converts LLaMA3’s average sentiment from about −0.40 (boxplot) to approximately −0.13.", "perturbed_statement": "Modeling civility yields the largest median sentiment boost for LLaMA3 (+0.371), more than 1.5× the boost observed for GPT3.5-turbo (+0.137) (Table 1), and converts LLaMA3’s average sentiment from about −0.40 (boxplot) to approximately −0.13.", "perturbed_explanation": "The perturbed statement wrongly reports LLaMA3’s median sentiment boost for modeling civility as +0.371. According to Table 1, the actual boost is +0.271, not +0.371.", "claim": "Modeling civility yields the largest median sentiment boost for LLaMA3 (+0.371), more than 1.5× the boost observed for GPT3.5-turbo (+0.137) (Table 1), and converts LLaMA3’s average sentiment from about −0.40 (boxplot) to approximately −0.13.", "label": false }, { "paperid": "2409.15155v1", "paper_path": "./SciVer/papers/2409.15155v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "1(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15155v1_figure_1(b).png", "item2_path": "./SciVer/images/2409.15155v1_figure_1(c).png", "section": [ "2.1", "2.2", "3.1", "1" ], "request_id": 242, "origin_statement": "In the head region delineated in (b), the kVCT artifact slice in (c) shows normalized intensity extremes at -1.0 and 1.0, whereas the MVCT slice peaks at around -0.75 without reaching -1.0, highlighting its reduced severe negative artifacts.", "perturbed_statement": "In the head region delineated in (b), the kVCT artifact slice in (c) shows normalized intensity extremes at -1.0 and 1.0, whereas the MVCT slice peaks at around -0.25 without reaching -1.0, highlighting its reduced severe negative artifacts.", "perturbed_explanation": "The perturbation is incorrect because the MVCT slice in (c) clearly contains pixel values down to about -0.75 (darker streaks), not just -0.25, so it does reach values below -0.25, contradicting the claim.", "claim": "In the head region delineated in (b), the kVCT artifact slice in (c) shows normalized intensity extremes at -1.0 and 1.0, whereas the MVCT slice peaks at around -0.25 without reaching -1.0, highlighting its reduced severe negative artifacts.", "label": false }, { "paperid": "2409.15440v1", "paper_path": "./SciVer/papers/2409.15440v1.json", "claim_type": "parallel", "item1": "2(c)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15440v1_figure_2(c).png", "item2_path": "./SciVer/images/2409.15440v1-Table5-1.png", "section": [ "5.3", "5.2" ], "request_id": 243, "origin_statement": "At 1.4 GHz, the central compact component has a flux density of 69.4 mJy (Table 5), which corresponds to a signal-to-noise ratio of about 630 relative to the 0.11 mJy/beam noise level in the 5″-beam VLA image (Figure 2c).", "perturbed_statement": "At 1.4 GHz, the central compact component has a flux density of 69.4 mJy (Table 5), which corresponds to a signal-to-noise ratio of about 630 relative to 
the 0.21 mJy/beam noise level in the 5″-beam VLA image (Figure 2c).", "perturbed_explanation": "The image caption for Figure 2c specifies an rms noise of 0.11 mJy/beam; the perturbed statement’s use of 0.21 mJy/beam directly contradicts this detail.", "claim": "At 1.4 GHz, the central compact component has a flux density of 69.4 mJy (Table 5), which corresponds to a signal-to-noise ratio of about 630 relative to the 0.21 mJy/beam noise level in the 5″-beam VLA image (Figure 2c).", "label": false }, { "paperid": "2411.16393v1", "paper_path": "./SciVer/papers/2411.16393v1.json", "claim_type": "parallel", "item1": "19", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16393v1_figure_19.png", "item2_path": "./SciVer/images/2411.16393v1-Table6-1.png", "section": [ "5.2" ], "request_id": 244, "origin_statement": "At t=100 μs, the soda-lime glass phase-field simulation without tangential load exhibits a crack branching angle of 55°, whereas with tangential load it is 44°, reflecting an 11° decrease. The material properties include E = 72 GPa and G_c = 9 N/mm.", "perturbed_statement": "At t=100 μs, the soda-lime glass phase-field simulation without tangential load exhibits a crack branching angle of 55°, whereas with tangential load it is 44°, reflecting an 11° decrease. The material properties include E = 72 GPa and G_c = 12 N/mm.", "perturbed_explanation": "The perturbed statement incorrectly lists the critical energy release rate G_c as 12 N/mm. Table 6 specifies G_c = 9 N/mm for soda-lime glass, so the value 12 N/mm contradicts the provided material properties.", "claim": "At t=100 μs, the soda-lime glass phase-field simulation without tangential load exhibits a crack branching angle of 55°, whereas with tangential load it is 44°, reflecting an 11° decrease. The material properties include E = 72 GPa and G_c = 12 N/mm.", "label": false }, { "paperid": "2411.15871v1", "paper_path": "./SciVer/papers/2411.15871v1.json", "claim_type": "parallel", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15871v1_figure_3.png", "item2_path": "./SciVer/images/2411.15871v1-Table1-1.png", "section": [ "3.1" ], "request_id": 246, "origin_statement": "Llama 39B dedicates roughly 30% of training time to CP communication (Fig. 3), whereas enabling CP for Llama3.1-405B on 16 384 GPUs pushes cross-node transfers to 49.9% of total communication time (Table 1).", "perturbed_statement": "Llama 39B dedicates roughly 30% of training time to CP communication (Fig. 3), whereas enabling CP for Llama3.1-405B on 16 384 GPUs pushes cross-node transfers to only 25% of total communication time (Table 1).", "perturbed_explanation": "Table 1 reports that with CP enabled at 16 384 GPUs, cross-node communication is 7.52 s out of a total 15.36 s (7.84 s local + 7.52 s cross), which is 49.9%, not 25%.", "claim": "Llama 39B dedicates roughly 30% of training time to CP communication (Fig. 
3), whereas enabling CP for Llama3.1-405B on 16 384 GPUs pushes cross-node transfers to only 25% of total communication time (Table 1).", "label": false }, { "paperid": "2411.07672v1", "paper_path": "./SciVer/papers/2411.07672v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "4(h)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07672v1_figure_4(b).png", "item2_path": "./SciVer/images/2411.07672v1_figure_4(h).png", "section": [ "5.2" ], "request_id": 250, "origin_statement": "The MLP-based reconstruction (Ŷ = MLP(X, A)) achieves perfect edge homophily of 1.00, compared to B = X’s 0.81, indicating a roughly 23% increase in intra-class connectivity.", "perturbed_statement": "The MLP-based reconstruction (Ŷ = MLP(X, A)) achieves edge homophily of 0.95, compared to B = X’s 0.81, indicating only a 17% increase in intra-class connectivity.", "perturbed_explanation": "The perturbed statement is wrong because the figure explicitly shows Ŷ = MLP(X, A) has h_edge = 1.00, not 0.95, so the homophily increase cannot be 17%.", "claim": "The MLP-based reconstruction (Ŷ = MLP(X, A)) achieves edge homophily of 0.95, compared to B = X’s 0.81, indicating only a 17% increase in intra-class connectivity.", "label": false }, { "paperid": "2410.00903v2", "paper_path": "./SciVer/papers/2410.00903v2.json", "claim_type": "parallel", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.00903v2_figure_5.png", "item2_path": "./SciVer/images/2410.00903v2_figure_7.png", "section": [ "5.3" ], "request_id": 251, "origin_statement": "Under separability and strong confounding, the proposed (reuse) estimator’s RMSE is about 45 in Fig 5, roughly four times its RMSE at n = 1,000 (≈11) and declines to about 1.8 at n = 4,000 in Fig 7.", "perturbed_statement": "Under separability and strong confounding, the proposed (reuse) estimator’s RMSE is about 45 in Fig 5, roughly nine times its RMSE at n = 1,000 (≈5) and declines to about 20 at n = 4,000 in Fig 7.", "perturbed_explanation": "The perturbed statement misreports the RMSE values from Fig 7: the actual RMSE for the proposed (reuse) estimator is about 11 at n = 1,000 (not 5) and about 1.8 at n = 4,000 (not 20). 
These discrepancies contradict the plotted results.", "claim": "Under separability and strong confounding, the proposed (reuse) estimator’s RMSE is about 45 in Fig 5, roughly nine times its RMSE at n = 1,000 (≈5) and declines to about 20 at n = 4,000 in Fig 7.", "label": false }, { "paperid": "2410.02936v1", "paper_path": "./SciVer/papers/2410.02936v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02936v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.02936v1_figure_1(d).png", "section": [ "2.2.1", "2.3" ], "request_id": 255, "origin_statement": "While the KiDS mock image (4×64×64 pixels) employs four-channel foreground amplitudes α=[0.6, 0.7, 0.3, 0.5] and noise ε=[0.1, 0.4, 0.5], the HST mock (1×101×101 pixels) uses a single-channel α=0.4, β=0.7, and ε=0.0.", "perturbed_statement": "While the KiDS mock image (4×64×64 pixels) employs four-channel foreground amplitudes α=[0.6, 0.7, 0.3, 0.5] and noise ε=[0.1, 0.4, 0.5], the HST mock (1×101×101 pixels) uses a single-channel α=0.4, β=0.5, and ε=0.0.", "perturbed_explanation": "The perturbed statement incorrectly lists the HST mock background amplitude β as 0.5, whereas the figure shows β=[0.7] for the HST mock image.", "claim": "While the KiDS mock image (4×64×64 pixels) employs four-channel foreground amplitudes α=[0.6, 0.7, 0.3, 0.5] and noise ε=[0.1, 0.4, 0.5], the HST mock (1×101×101 pixels) uses a single-channel α=0.4, β=0.5, and ε=0.0.", "label": false }, { "paperid": "2411.07504v1", "paper_path": "./SciVer/papers/2411.07504v1.json", "claim_type": "parallel", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07504v1_figure_2.png", "item2_path": "./SciVer/images/2411.07504v1-Table1-1.png", "section": [ "3" ], "request_id": 256, "origin_statement": "Figure 2 describes AdaS&S’s two-stage process—training a supernet then RL-searching subnets across M feature fields—while Table 1 reveals AdaS&S is the only NAS one-shot method among eight compared that combines stability (√ in Stable) and resource adaptability (√ in Res).", "perturbed_statement": "Figure 2 outlines AdaS&S’s two-stage workflow—training a supernet then RL-searching subnets over M feature fields—while Table 1 shows AdaS&S is the only NAS one-shot method among eight compared marked unstable (× in Stable) yet resource-adaptive (√ in Res).", "perturbed_explanation": "The perturbation incorrectly states AdaS&S is marked unstable (× in Stable), but Table 1 lists AdaS&S with a check (√) in the Stable column, so it is actually marked stable rather than unstable.", "claim": "Figure 2 outlines AdaS&S’s two-stage workflow—training a supernet then RL-searching subnets over M feature fields—while Table 1 shows AdaS&S is the only NAS one-shot method among eight compared marked unstable (× in Stable) yet resource-adaptive (√ in Res).", "label": false }, { "paperid": "2411.12845v1", "paper_path": "./SciVer/papers/2411.12845v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.12845v1_figure_3.png", "item2_path": "./SciVer/images/2411.12845v1_figure_4.png", "section": [ "4.1" ], "request_id": 258, "origin_statement": "Following the April 2022 structural break marked in Figure 3, the CPI-Common-SC shows a 0.61% downward revision by December 2022 for April 2022 estimates—just 25% of the 2.47% upward revision in the time-invariant 
CPI-Common benchmark.", "perturbed_statement": "Following the April 2022 structural break marked in Figure 3, the CPI-Common-SC shows a 1.61% downward revision by December 2022 for April 2022 estimates—just 65% of the 2.47% upward revision in the time-invariant CPI-Common benchmark.", "perturbed_explanation": "The perturbation is wrong because Figure 4 reports a 0.61% downward revision for CPI-Common-SC in April 2022, not 1.61%, and thus the claimed 65% ratio to the 2.47% benchmark revision is incorrect.", "claim": "Following the April 2022 structural break marked in Figure 3, the CPI-Common-SC shows a 1.61% downward revision by December 2022 for April 2022 estimates—just 65% of the 2.47% upward revision in the time-invariant CPI-Common benchmark.", "label": false }, { "paperid": "2410.04784v1", "paper_path": "./SciVer/papers/2410.04784v1.json", "claim_type": "parallel", "item1": "2", "item2": "7", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04784v1-Table2-1.png", "item2_path": "./SciVer/images/2410.04784v1-Table7-1.png", "section": [ "3.2" ], "request_id": 259, "origin_statement": "Table 2 shows LLaMA-2-7B prefers Newspapers over Novels by 80.1% for birth date but only 58.2% for birth place, a 21.9-point difference. Table 7’s novel-style birth date template is “{}’s birthday is on the unforgettable day of {},” versus birth place’s “{} was born under the bright sky of {}.”", "perturbed_statement": "Table 2 shows LLaMA-2-7B prefers Newspapers over Novels by 70.1% for birth date but only 58.2% for birth place, a 21.9-point difference. Table 7’s novel-style birth date template is “{}’s birthday is on the unforgettable day of {},” versus birth place’s “{} was born under the bright sky of {}.”", "perturbed_explanation": "The perturbed statement misreports the birth date preference score. Table 2 actually shows an 80.1% preference for Newspapers over Novels on birth date, not 70.1%.", "claim": "Table 2 shows LLaMA-2-7B prefers Newspapers over Novels by 70.1% for birth date but only 58.2% for birth place, a 21.9-point difference. 
Table 7’s novel-style birth date template is “{}’s birthday is on the unforgettable day of {},” versus birth place’s “{} was born under the bright sky of {}.”", "label": false }, { "paperid": "2409.07135v1", "paper_path": "./SciVer/papers/2409.07135v1.json", "claim_type": "parallel", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.07135v1-Table1-1.png", "item2_path": "./SciVer/images/2409.07135v1-Table3-1.png", "section": [ "4.4" ], "request_id": 260, "origin_statement": "For Set 1’s v₁(t) at 0.25 V, the DBSCAN model’s AER encoder first layer has 56 neurons, 5 fewer than the KMeans AER first layer (61 neurons).", "perturbed_statement": "For Set 1’s v₁(t) at 0.25 V, the DBSCAN model’s AER encoder first layer has 60 neurons, 1 fewer than the KMeans AER first layer (61 neurons).", "perturbed_explanation": "Table 3 shows DBSCAN’s AER first-layer encoder actually has 56 neurons, not 60, and thus it is 5 fewer than KMeans’s 61 neurons, not 1 fewer.", "claim": "For Set 1’s v₁(t) at 0.25 V, the DBSCAN model’s AER encoder first layer has 60 neurons, 1 fewer than the KMeans AER first layer (61 neurons).", "label": false }, { "paperid": "2410.20399v1", "paper_path": "./SciVer/papers/2410.20399v1.json", "claim_type": "parallel", "item1": "9(b)", "item2": "9(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20399v1_figure_9(b).png", "item2_path": "./SciVer/images/2410.20399v1_figure_9(f).png", "section": [ "4.1" ], "request_id": 262, "origin_statement": "At sequence length 4096, ThunderKittens long convolution achieves 61 TFLOPs (4.7× faster than FlashFFTConv’s 13 TFLOPs) and TK rotary encoding peaks at 0.9 TFLOPs (80% higher than FlashRotary’s 0.5 TFLOPs).", "perturbed_statement": "At sequence length 4096, ThunderKittens long convolution achieves 56 TFLOPs (4.7× faster than FlashFFTConv’s 13 TFLOPs) and TK rotary encoding peaks at 1.1 TFLOPs (80% higher than FlashRotary’s 0.5 TFLOPs).", "perturbed_explanation": "This is incorrect because the long convolution performance at sequence length 4096 for ThunderKittens is actually 61 TFLOPs (not 56), and the TK rotary encoding throughput at 4096 is 0.9 TFLOPs (not 1.1).", "claim": "At sequence length 4096, ThunderKittens long convolution achieves 56 TFLOPs (4.7× faster than FlashFFTConv’s 13 TFLOPs) and TK rotary encoding peaks at 1.1 TFLOPs (80% higher than FlashRotary’s 0.5 TFLOPs).", "label": false }, { "paperid": "2410.01485v1", "paper_path": "./SciVer/papers/2410.01485v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.01485v1-Table5-1.png", "item2_path": "./SciVer/images/2410.01485v1-Table6-1.png", "section": [ "4.4" ], "request_id": 264, "origin_statement": "Positioning 12 full attention layers in the middle achieves an average BABILong score of 0.27, whereas equipping all 32 layers as full attention yields a higher average of 0.29.", "perturbed_statement": "Positioning 12 full attention layers in the middle achieves an average BABILong score of 0.30, whereas equipping all 32 layers as full attention yields a higher average of 0.29.", "perturbed_explanation": "The perturbed statement incorrectly lists the average score for the middle-placed 12 full layers as 0.30. 
Table 5 shows this average is actually 0.27.", "claim": "Positioning 12 full attention layers in the middle achieves an average BABILong score of 0.30, whereas equipping all 32 layers as full attention yields a higher average of 0.29.", "label": false }, { "paperid": "2410.01964v1", "paper_path": "./SciVer/papers/2410.01964v1.json", "claim_type": "parallel", "item1": "12", "item2": "15", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01964v1_figure_12.png", "item2_path": "./SciVer/images/2410.01964v1_figure_15.png", "section": [ "4.2.2" ], "request_id": 265, "origin_statement": "The EDS line scan shows a plateau of ~260 a.u. Ta intensity matching the 100 nm beam simulation—implying the actual beam was ~100 nm (10× the intended 10 nm)—whereas dye penetrant testing on sample 14M1 detected cracks 1 and 3 but did not reveal crack 2.", "perturbed_statement": "The EDS line scan shows a plateau of ~260 a.u. Ta intensity matching the 50 nm beam simulation—implying the actual beam was ~50 nm (5× the intended 10 nm)—whereas dye penetrant testing on sample 14M1 detected cracks 2 and 3 but did not reveal crack 1.", "perturbed_explanation": "The perturbed beam diameter is incorrect: the measured EDS profile aligns with the 100 nm simulation, not 50 nm. Also, dye penetrant testing of 14M1 detected cracks 1 and 3, not cracks 2 and 3, so stating crack 1 was missed is false.", "claim": "The EDS line scan shows a plateau of ~260 a.u. Ta intensity matching the 50 nm beam simulation—implying the actual beam was ~50 nm (5× the intended 10 nm)—whereas dye penetrant testing on sample 14M1 detected cracks 2 and 3 but did not reveal crack 1.", "label": false }, { "paperid": "2410.06541v2", "paper_path": "./SciVer/papers/2410.06541v2.json", "claim_type": "parallel", "item1": "2(b)", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.06541v2_figure_2(b).png", "item2_path": "./SciVer/images/2410.06541v2_figure_3.png", "section": [ "5.1" ], "request_id": 266, "origin_statement": "On Llama-2-13B, linear and 2×MLP probes jump from ~27% at layer 18 to ~53% at layer 20 and then plateau, whereas on LLaVA1.5-7B for Flowers102, probes already exceed 90% by layer 2 and linear probes rebound from ~0.87 at layer 5 to ~94% at layer 29.", "perturbed_statement": "On Llama-2-13B, linear and 2×MLP probes jump from ~30% at layer 15 to ~50% at layer 22 and then plateau, whereas on LLaVA1.5-7B for Flowers102, probes already exceed 90% by layer 2 and linear probes rebound from ~0.87 at layer 5 to ~94% at layer 29.", "perturbed_explanation": "The perturbed statement incorrectly shifts the MMLU jump to layer 15 and layer 22 with ~30%→~50% accuracy. 
In the figure, the probes actually rise from about 27% at layer 18 to about 53% at layer 20, not at the stated layers or values.", "claim": "On Llama-2-13B, linear and 2×MLP probes jump from ~30% at layer 15 to ~50% at layer 22 and then plateau, whereas on LLaVA1.5-7B for Flowers102, probes already exceed 90% by layer 2 and linear probes rebound from ~0.87 at layer 5 to ~94% at layer 29.", "label": false }, { "paperid": "2411.07517v1", "paper_path": "./SciVer/papers/2411.07517v1.json", "claim_type": "parallel", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07517v1_figure_3.png", "item2_path": "./SciVer/images/2411.07517v1-Table1-1.png", "section": [ "3.2" ], "request_id": 268, "origin_statement": "The 1.28 m observation area discretized at 0.01 m yields a 128×128 grid, and with a 1.21×10⁻⁵ s timestep at 340 m/s, sound moves ≈0.0041 m per step—about 0.41 of one grid cell.", "perturbed_statement": "The 1.28 m observation area discretized at 0.01 m yields a 128×128 grid, and with a 1.21×10⁻⁵ s timestep at 300 m/s, sound moves ≈0.0036 m per step—about 0.36 of one grid cell.", "perturbed_explanation": "Table 1 specifies the speed of sound in air as 340 m/s, not 300 m/s. Using 300 m/s contradicts the documented simulation condition, making the stated per-step travel distance incorrect.", "claim": "The 1.28 m observation area discretized at 0.01 m yields a 128×128 grid, and with a 1.21×10⁻⁵ s timestep at 300 m/s, sound moves ≈0.0036 m per step—about 0.36 of one grid cell.", "label": false }, { "paperid": "2410.17276v2", "paper_path": "./SciVer/papers/2410.17276v2.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17276v2_figure_3.png", "item2_path": "./SciVer/images/2410.17276v2_figure_4.png", "section": [ "5.2" ], "request_id": 269, "origin_statement": "At item index 0.5, ML-10M’s normalized item frequency is about 10^-5 versus ~5×10^-5 for Beauty (Fig.3), and in Fig.4 ML-10M’s adaptive-mixed sampling (AMNS) attains the highest NDCG@10 (~0.00495) at ~0.9 balance, surpassing RNS (~0.00475 at ~0.6).", "perturbed_statement": "At item index 0.5, ML-10M’s normalized item frequency is about 5×10^-5 versus ~10^-5 for Beauty (Fig.3), and in Fig.4 ML-10M’s adaptive-mixed sampling (AMNS) attains the highest NDCG@10 (~0.00495) at ~0.9 balance, surpassing RNS (~0.00475 at ~0.6).", "perturbed_explanation": "The perturbed statement incorrectly swaps the normalized frequencies from Fig.3. 
In the original histogram, ML-10M’s frequency at index 0.5 is approximately 10^-5 and Beauty’s is around 5×10^-5, not the other way around.", "claim": "At item index 0.5, ML-10M’s normalized item frequency is about 5×10^-5 versus ~10^-5 for Beauty (Fig.3), and in Fig.4 ML-10M’s adaptive-mixed sampling (AMNS) attains the highest NDCG@10 (~0.00495) at ~0.9 balance, surpassing RNS (~0.00475 at ~0.6).", "label": false }, { "paperid": "2410.08469v2", "paper_path": "./SciVer/papers/2410.08469v2.json", "claim_type": "parallel", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.08469v2_figure_2.png", "item2_path": "./SciVer/images/2410.08469v2-Table1-1.png", "section": [ "4.1", "4.1.2" ], "request_id": 270, "origin_statement": "Cobwebbed pattern’s 'spider web' token receives one of the highest weights in Figure 2, and Table 1 shows that adding nonsensical tokens reduces Caltech101 accuracy by 0.12 percentage points (from 97.42% to 97.30%).", "perturbed_statement": "Cobwebbed pattern’s 'spider web' token receives one of the lightest weights in Figure 2, and Table 1 shows that adding nonsensical tokens increases Caltech101 accuracy by 0.12 percentage points (from 97.42% to 97.54%).", "perturbed_explanation": "The statement is wrong because in Figure 2 the 'spider web' token for the cobwebbed class is shaded darkest (highest weight), not lightest. Also, Table 1 reports a drop from 97.42% to 97.30%, not an increase to 97.54%.", "claim": "Cobwebbed pattern’s 'spider web' token receives one of the lightest weights in Figure 2, and Table 1 shows that adding nonsensical tokens increases Caltech101 accuracy by 0.12 percentage points (from 97.42% to 97.54%).", "label": false }, { "paperid": "2411.16342v1", "paper_path": "./SciVer/papers/2411.16342v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16342v1_figure_3.png", "item2_path": "./SciVer/images/2411.16342v1_figure_4.png", "section": [ "2.2" ], "request_id": 271, "origin_statement": "In the sequential inter-phase dataflow (Fig3b), the entire V×F intermediate matrix is stored to DRAM, generating O(V·F) off-chip accesses. In Fig4, a multi-accelerator system comprises four SAs, each featuring an 8-PE array and its own global buffer, thus dividing DRAM load by four.", "perturbed_statement": "In the sequential inter-phase dataflow (Fig3b), the entire V×F intermediate matrix is stored to DRAM, generating O(V·F) off-chip accesses. In Fig4, a multi-accelerator system comprises two SAs, each featuring an 8-PE array and its own global buffer, thus dividing DRAM load by two.", "perturbed_explanation": "The perturbed statement misreports the number of spatial accelerators: Figure 4 shows four SAs, not two. Consequently, DRAM load is quartered per accelerator, not halved.", "claim": "In the sequential inter-phase dataflow (Fig3b), the entire V×F intermediate matrix is stored to DRAM, generating O(V·F) off-chip accesses. 
In Fig4, a multi-accelerator system comprises two SAs, each featuring an 8-PE array and its own global buffer, thus dividing DRAM load by two.", "label": false }, { "paperid": "2409.05048v1", "paper_path": "./SciVer/papers/2409.05048v1.json", "claim_type": "parallel", "item1": "1", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05048v1_figure_1.png", "item2_path": "./SciVer/images/2409.05048v1-Table5-1.png", "section": [ "5" ], "request_id": 272, "origin_statement": "Figure 1(b) shows the average CPU time per iteration climbs from 0.5×10⁻³ s at s=1 to 3.8×10⁻³ s at s=5 (7.6× increase); concurrently, Table 5 reports that for the e40r0100 matrix, IT grows from 171 to 5610 (32.8×) and TIME from 0.2386 s to 10.3717 s (43.5×).", "perturbed_statement": "Figure 1(b) shows the average CPU time per iteration climbs from 0.5×10⁻³ s at s=1 to 3.8×10⁻³ s at s=5 (7.6× increase); concurrently, Table 5 reports that for the e40r0100 matrix, IT grows from 171 to 561 (3.3×) and TIME from 0.2386 s to 10.3717 s (43.5×).", "perturbed_explanation": "The perturbed statement incorrectly lists the IT for e40r0100 at s=5 as 561, but Table 5 actually shows IT=5610, so the claimed 3.3× growth contradicts the table’s 32.8× increase.", "claim": "Figure 1(b) shows the average CPU time per iteration climbs from 0.5×10⁻³ s at s=1 to 3.8×10⁻³ s at s=5 (7.6× increase); concurrently, Table 5 reports that for the e40r0100 matrix, IT grows from 171 to 561 (3.3×) and TIME from 0.2386 s to 10.3717 s (43.5×).", "label": false }, { "paperid": "2409.19611v1", "paper_path": "./SciVer/papers/2409.19611v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.19611v1-Table3-1.png", "item2_path": "./SciVer/images/2409.19611v1-Table4-1.png", "section": [ "7.1" ], "request_id": 273, "origin_statement": "Order 1 in Table 4 sequences four of the five CL Benchmark tasks (dbpedia, amazon, yahoo, ag), covering 80% of the CL Benchmark datasets listed in Table 3.", "perturbed_statement": "Order 1 in Table 4 sequences all five CL Benchmark tasks (dbpedia, amazon, yahoo, ag, yelp), covering 100% of the CL Benchmark datasets listed in Table 3.", "perturbed_explanation": "The perturbation incorrectly adds yelp to Order 1’s sequence. 
According to Table 4, Order 1 only includes dbpedia, amazon, yahoo, and ag, so it sequences four, not five, CL Benchmark tasks.", "claim": "Order 1 in Table 4 sequences all five CL Benchmark tasks (dbpedia, amazon, yahoo, ag, yelp), covering 100% of the CL Benchmark datasets listed in Table 3.", "label": false }, { "paperid": "2411.14497v1", "paper_path": "./SciVer/papers/2411.14497v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14497v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.14497v1_figure_4(b).png", "section": [ "4.2" ], "request_id": 278, "origin_statement": "On Vicuna-Bench, Llama-2-7B-star_instruct scores about 4.7 in math versus 2.1 for Llama-2-7B-evol_instruct (a ~2.6-point gain), while on MT-Bench it achieves roughly 7.8 in STEM compared to around 6.5 for evol_instruct.", "perturbed_statement": "On Vicuna-Bench, Llama-2-7B-star_instruct scores about 4.7 in math versus 2.1 for Llama-2-7B-evol_instruct (a ~2.6-point gain), while on MT-Bench it achieves roughly 6.5 in STEM compared to around 7.8 for evol_instruct.", "perturbed_explanation": "The perturbed statement flips the MT-Bench STEM scores: the star_instruct model actually scores about 7.8 (not 6.5) and evol_instruct about 6.5 (not 7.8), contradicting the radar chart data.", "claim": "On Vicuna-Bench, Llama-2-7B-star_instruct scores about 4.7 in math versus 2.1 for Llama-2-7B-evol_instruct (a ~2.6-point gain), while on MT-Bench it achieves roughly 6.5 in STEM compared to around 7.8 for evol_instruct.", "label": false }, { "paperid": "2411.05266v1", "paper_path": "./SciVer/papers/2411.05266v1.json", "claim_type": "parallel", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.05266v1_figure_2.png", "item2_path": "./SciVer/images/2411.05266v1-Table1-1.png", "section": [ "2.1" ], "request_id": 279, "origin_statement": "Figure 2 displays a Sierpinski tetrahedron of 3D CGR points colored from purple to yellow, while Table 1 shows that the distance between the CGR coordinates of ATCAGGCAG and TGTAGGCAG declines from 0.667 at position 1 to 0.00007 by position 9.", "perturbed_statement": "Figure 2 displays a Sierpinski tetrahedron of 3D CGR points colored from purple to yellow, while Table 1 shows that the distance between the CGR coordinates of ATCAGGCAG and TGTAGGCAG declines from 0.5 at position 1 to 0.00007 by position 9.", "perturbed_explanation": "This statement is incorrect because Table 1 reports a distance of 0.66667 (approximately 0.667) at the first nucleotide (position 1), not 0.5, so the perturbed value contradicts the actual data in the table.", "claim": "Figure 2 displays a Sierpinski tetrahedron of 3D CGR points colored from purple to yellow, while Table 1 shows that the distance between the CGR coordinates of ATCAGGCAG and TGTAGGCAG declines from 0.5 at position 1 to 0.00007 by position 9.", "label": false }, { "paperid": "2411.13870v1", "paper_path": "./SciVer/papers/2411.13870v1.json", "claim_type": "parallel", "item1": "7(b)", "item2": "7(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13870v1_figure_7(b).png", "item2_path": "./SciVer/images/2411.13870v1_figure_7(c).png", "section": [ "3.4.2" ], "request_id": 280, "origin_statement": "Within the 6000 AU zoomed ALMA Band-7 field (panel b), 19 astrodendro cores are detected; the dendrogram (panel c) shows that leaf 16 peaks at ∼0.05 Jy beam⁻¹, ∼4× the ∼0.012 
Jy beam⁻¹ peak of leaf 2.", "perturbed_statement": "Within the 6000 AU zoomed ALMA Band-7 field (panel b), 20 astrodendro cores are detected; the dendrogram (panel c) shows that leaf 16 peaks at ∼0.05 Jy beam⁻¹, ∼3× the ∼0.015 Jy beam⁻¹ peak of leaf 2.", "perturbed_explanation": "The perturbed claim is wrong because panel b actually labels only 19 structures (1–19), not 20, and panel c indicates leaf 2 has a peak flux of about 0.012 Jy beam⁻¹, not 0.015 Jy beam⁻¹, so the stated ratio is incorrect.", "claim": "Within the 6000 AU zoomed ALMA Band-7 field (panel b), 20 astrodendro cores are detected; the dendrogram (panel c) shows that leaf 16 peaks at ∼0.05 Jy beam⁻¹, ∼3× the ∼0.015 Jy beam⁻¹ peak of leaf 2.", "label": false }, { "paperid": "2409.11176v1", "paper_path": "./SciVer/papers/2409.11176v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11176v1_figure_4(b).png", "item2_path": "./SciVer/images/2409.11176v1-Table2-1.png", "section": [ "3.1" ], "request_id": 283, "origin_statement": "At Eu=4586 K (S(5) transition), the inner 0.5″ aperture flux (60 ± 3 × 10⁻⁴ erg s⁻¹ cm⁻² str⁻¹) is ~15× higher than the 0.5–2.0″ annulus (3.9 ± 0.5 × 10⁻⁴), corresponding to a ln(Nu/gu) difference of ≈4.2 between grey and black points.", "perturbed_statement": "At Eu=4586 K (S(5) transition), the inner 0.5″ aperture flux (60 ± 3 × 10⁻⁴ erg s⁻¹ cm⁻² str⁻¹) is ~8× higher than the 0.5–2.0″ annulus (3.9 ± 0.5 × 10⁻⁴), corresponding to a ln(Nu/gu) difference of ≈2.5 between grey and black points.", "perturbed_explanation": "The table lists the inner-to-outer flux ratio as 60/3.9≈15.4×, not ~8×, and in Figure 4 the vertical separation at Eu≈4586 K is about 4.2 in ln(Nu/gu), not 2.5, so both numerical values are inconsistent with the data.", "claim": "At Eu=4586 K (S(5) transition), the inner 0.5″ aperture flux (60 ± 3 × 10⁻⁴ erg s⁻¹ cm⁻² str⁻¹) is ~8× higher than the 0.5–2.0″ annulus (3.9 ± 0.5 × 10⁻⁴), corresponding to a ln(Nu/gu) difference of ≈2.5 between grey and black points.", "label": false }, { "paperid": "2410.14875v1", "paper_path": "./SciVer/papers/2410.14875v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.14875v1-Table1-1.png", "item2_path": "./SciVer/images/2410.14875v1-Table2-1.png", "section": [ "2.1" ], "request_id": 285, "origin_statement": "The dataset’s AI-text generation includes outputs from the Llama 65B model, and in the WP domain the training set contains 24,803 AI-generated texts, nearly four times the 6,536 human-written ones.", "perturbed_statement": "The dataset’s AI-text generation includes outputs from the Llama 65B model, and in the WP domain the training set contains 26,803 AI-generated texts, nearly four times the 6,536 human-written ones.", "perturbed_explanation": "The perturbed statement misstates the number of AI-generated training texts in the WP domain: Table 2 shows there are 24,803 AI-generated WP training examples, not 26,803.", "claim": "The dataset’s AI-text generation includes outputs from the Llama 65B model, and in the WP domain the training set contains 26,803 AI-generated texts, nearly four times the 6,536 human-written ones.", "label": false }, { "paperid": "2409.10951v1", "paper_path": "./SciVer/papers/2409.10951v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": 
"./SciVer/images/2409.10951v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.10951v1_figure_2(c).png", "section": [ "3.1" ], "request_id": 286, "origin_statement": "In traditional contrastive (Figure 2b) the green protected-group normal examples are confined to less than 20% of the hemisphere’s circumference on the unit hypersphere, while FairAD (Figure 2c) disperses them across nearly 100% of the hemisphere, matching the distribution of blue unprotected normals.", "perturbed_statement": "In traditional contrastive (Figure 2b) the green protected-group normal examples are confined to less than 20% of the hemisphere’s circumference on the unit hypersphere, while FairAD (Figure 2c) disperses them across only about 50% of the hemisphere, matching the distribution of blue unprotected normals.", "perturbed_explanation": "The perturbed claim misstates the spread of protected-group normals under FairAD: Figure 2c actually shows green dots uniformly scattered over nearly the entire hemisphere, not limited to about half its circumference.", "claim": "In traditional contrastive (Figure 2b) the green protected-group normal examples are confined to less than 20% of the hemisphere’s circumference on the unit hypersphere, while FairAD (Figure 2c) disperses them across only about 50% of the hemisphere, matching the distribution of blue unprotected normals.", "label": false }, { "paperid": "2411.00254v1", "paper_path": "./SciVer/papers/2411.00254v1.json", "claim_type": "parallel", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00254v1_figure_5.png", "item2_path": "./SciVer/images/2411.00254v1-Table4-1.png", "section": [ "4.3" ], "request_id": 288, "origin_statement": "By epoch 19 in Fig. 5 before augmentation, validation accuracy drops to about 39.58%, whereas Table 4 reports the proposed NST method with XAI (LRP) achieves 92.47% accuracy, representing a 52.89 percentage-point improvement.", "perturbed_statement": "By epoch 19 in Fig. 5 before augmentation, validation accuracy drops to about 39.58%, whereas Table 4 reports the proposed NST method with XAI (LRP) achieves 99.88% accuracy, representing a 60.30 percentage-point improvement.", "perturbed_explanation": "The perturbed statement misquotes the accuracy of the proposed NST method with XAI. Table 4 shows its accuracy is 92.47%, not 99.88% (99.88% is the specificity value).", "claim": "By epoch 19 in Fig. 
5 before augmentation, validation accuracy drops to about 39.58%, whereas Table 4 reports the proposed NST method with XAI (LRP) achieves 99.88% accuracy, representing a 60.30 percentage-point improvement.", "label": false }, { "paperid": "2411.01711v1", "paper_path": "./SciVer/papers/2411.01711v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.01711v1-Table5-1.png", "item2_path": "./SciVer/images/2411.01711v1-Table6-1.png", "section": [ "4.3" ], "request_id": 293, "origin_statement": "When 01 at t=1/2, profiles (3,3) and (4,4) are NE (Table 5) but yield only (2.25,2.25) in PD, below the (2.5,2.5) payoff at NE (2,3) (Table 6).", "perturbed_statement": "When 01 at t=1/2, profiles (3,3) and (4,4) are NE (Table 5) and yield (2.5,2.5) in PD, matching the maximum at NE (2,3) (Table 6).", "perturbed_explanation": "The perturbed statement incorrectly claims that profiles (3,3) and (4,4) yield (2.5,2.5) in Table 6, whereas the table actually shows their payoffs as (2.25,2.25).", "claim": "When 01 at t=1/2, profiles (3,3) and (4,4) are NE (Table 5) and yield (2.5,2.5) in PD, matching the maximum at NE (2,3) (Table 6).", "label": false }, { "paperid": "2411.15835v1", "paper_path": "./SciVer/papers/2411.15835v1.json", "claim_type": "parallel", "item1": "8(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15835v1_figure_8(b).png", "item2_path": "./SciVer/images/2411.15835v1-Table3-1.png", "section": [ "4.3" ], "request_id": 295, "origin_statement": "Figure 8b shows Q3's converted plan contains a single MultiJoin node with three input streams from TableSourceScan operators, and Table 3 reports all 11 benchmark queries (100%) were successfully converted by the TSC method.", "perturbed_statement": "Figure 8b shows Q3's converted plan contains a single MultiJoin node with four input streams from TableSourceScan operators, and Table 3 reports 10 out of 11 benchmark queries were successfully converted by the TSC method.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 8b illustrates exactly three input streams feeding the MultiJoin node, not four, and Table 3 indicates that all 11 queries (100%) were converted, not 10 out of 11.", "claim": "Figure 8b shows Q3's converted plan contains a single MultiJoin node with four input streams from TableSourceScan operators, and Table 3 reports 10 out of 11 benchmark queries were successfully converted by the TSC method.", "label": false }, { "paperid": "2411.01494v1", "paper_path": "./SciVer/papers/2411.01494v1.json", "claim_type": "parallel", "item1": "6", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01494v1_figure_6.png", "item2_path": "./SciVer/images/2411.01494v1_figure_8.png", "section": [ "4.4" ], "request_id": 297, "origin_statement": "NeMo’s mIoU improvement grows from +0.30 at zero negative objects to +3.21 at three negatives, and for positional queries the performance gain peaks at 2.94 mIoU for 8–10 word sentences, more than doubling the 1.40 gain seen on non-positional queries of the same length.", "perturbed_statement": "NeMo’s mIoU improvement grows from +0.30 at zero negative objects to +3.21 at three negatives, and for positional queries the performance gain peaks at 3.50 mIoU for 8–10 word sentences, more than doubling the 1.40 gain seen on non-positional queries of the same length.", "perturbed_explanation": "The perturbed 
statement incorrectly claims a 3.50 mIoU gain for positional queries of length 8–10, whereas Figure 8 actually shows the peak improvement is 2.94 mIoU at that sentence length.", "claim": "NeMo’s mIoU improvement grows from +0.30 at zero negative objects to +3.21 at three negatives, and for positional queries the performance gain peaks at 3.50 mIoU for 8–10 word sentences, more than doubling the 1.40 gain seen on non-positional queries of the same length.", "label": false }, { "paperid": "2410.13000v1", "paper_path": "./SciVer/papers/2410.13000v1.json", "claim_type": "parallel", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.13000v1-Table1-1.png", "item2_path": "./SciVer/images/2410.13000v1-Table3-1.png", "section": [ "4.2" ], "request_id": 300, "origin_statement": "For ν<0.5, Table 3 shows the FEM mesh node count plateaus at 20,493 for m≥4, so by Table 1’s O(N m²⌈α⌉²+n) prediction cost, raising m from 4 to 6 increases cost by (6²/4²)=2.25×.", "perturbed_statement": "For ν<0.5, Table 3 shows the FEM mesh node count rises from 20,493 at m=4 to 25,493 at m=6, so by Table 1’s O(N m²⌈α⌉²+n) prediction cost, raising m from 4 to 6 increases cost by about (6²·25,493)/(4²·20,493)≈3.75×.", "perturbed_explanation": "This is wrong because Table 3 actually lists 20,493 mesh nodes at m=6 for ν<0.5, not 25,493; thus the cost scaling remains (36/16)=2.25×, not ≈3.75×.", "claim": "For ν<0.5, Table 3 shows the FEM mesh node count rises from 20,493 at m=4 to 25,493 at m=6, so by Table 1’s O(N m²⌈α⌉²+n) prediction cost, raising m from 4 to 6 increases cost by about (6²·25,493)/(4²·20,493)≈3.75×.", "label": false }, { "paperid": "2410.01839v1", "paper_path": "./SciVer/papers/2410.01839v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01839v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.01839v1_figure_2(d).png", "section": [ "3.3", "3.1" ], "request_id": 304, "origin_statement": "The small loop near (−2,0) with radius ≈2 has birth≈1.5 and death≈1.7 in both complete and true persistence diagrams; the boundary-split large loop merges to yield birth≈2.8 and death≈6.8, matching the true H₁ feature.", "perturbed_statement": "The small loop near (−2,0) with radius ≈2 has birth≈1.5 and death≈2.5 in both complete and true persistence diagrams; the boundary-split large loop merges to yield birth≈2.8 and death≈6.8, matching the true H₁ feature.", "perturbed_explanation": "The perturbed statement incorrectly reports the small loop’s death at 2.5. 
In the persistence diagrams (red circles and blue squares), the small loop’s death time is approximately 1.7, not 2.5.", "claim": "The small loop near (−2,0) with radius ≈2 has birth≈1.5 and death≈2.5 in both complete and true persistence diagrams; the boundary-split large loop merges to yield birth≈2.8 and death≈6.8, matching the true H₁ feature.", "label": false }, { "paperid": "2411.12509v1", "paper_path": "./SciVer/papers/2411.12509v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.12509v1-Table2-1.png", "item2_path": "./SciVer/images/2411.12509v1-Table3-1.png", "section": [ "4.1" ], "request_id": 307, "origin_statement": "Medium stadium utilisation (25–75%) in 2021/22 increased goal difference by 0.560 (Table 2 col 4) and boosted home win probability by 11.8 percentage points (Table 3 col 3).", "perturbed_statement": "Medium stadium utilisation (25–75%) in 2021/22 increased goal difference by 0.650 (Table 2 col 4) and boosted home win probability by 11.8 percentage points (Table 3 col 3).", "perturbed_explanation": "Table 2 col 4 reports the coefficient for S21/22 × stadium utilisation (0.25, 0.75) as 0.560**, not 0.650, so the stated goal difference increase is incorrect.", "claim": "Medium stadium utilisation (25–75%) in 2021/22 increased goal difference by 0.650 (Table 2 col 4) and boosted home win probability by 11.8 percentage points (Table 3 col 3).", "label": false }, { "paperid": "2411.11129v1", "paper_path": "./SciVer/papers/2411.11129v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11129v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.11129v1_figure_2(b).png", "section": [ "2.5", "2.3", "2.4" ], "request_id": 308, "origin_statement": "In Fig. 2 left panel, B′(s) peaks at roughly 0.005 around s = 0.5, while in the right panel B′ₖₚ(s) attains a slightly higher maximum of about 0.0052 near s = 0.6, illustrating the asymmetric formulation’s sharper profile.", "perturbed_statement": "In Fig. 2 left panel, B′(s) peaks at roughly 0.005 around s = 0.4, while in the right panel B′ₖₚ(s) attains a slightly higher maximum of about 0.0052 near s = 0.6, illustrating the asymmetric formulation’s sharper profile.", "perturbed_explanation": "The perturbed claim incorrectly locates the maximum of B′(s) at s = 0.4. The left panel clearly shows the magenta curve reaching its peak at s ≈ 0.5, not at 0.4.", "claim": "In Fig. 
2 left panel, B′(s) peaks at roughly 0.005 around s = 0.4, while in the right panel B′ₖₚ(s) attains a slightly higher maximum of about 0.0052 near s = 0.6, illustrating the asymmetric formulation’s sharper profile.", "label": false }, { "paperid": "2411.01030v3", "paper_path": "./SciVer/papers/2411.01030v3.json", "claim_type": "parallel", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.01030v3_figure_3.png", "item2_path": "./SciVer/images/2411.01030v3-Table1-1.png", "section": [ "4.1" ], "request_id": 309, "origin_statement": "Since Birdie dynamically mixes training objectives (Table 1), the Transformer trained with Birdie reaches about 25 F1 on SQuAD V2 at 300 tokens, roughly 4 points higher than the 21 F1 achieved by the Transformer trained only with Next Token Prediction at the same length.", "perturbed_statement": "Since Birdie dynamically mixes training objectives (Table 1), the Transformer trained with Birdie reaches about 30 F1 on SQuAD V2 at 300 tokens, roughly 4 points higher than the 21 F1 achieved by the Transformer trained only with Next Token Prediction at the same length.", "perturbed_explanation": "The perturbed statement incorrectly claims the Birdie-trained Transformer scores about 30 F1 at 300 tokens. In Figure 3B, the Transformer (Birdie) actually scores approximately 25 F1 at 300 tokens, not 30.", "claim": "Since Birdie dynamically mixes training objectives (Table 1), the Transformer trained with Birdie reaches about 30 F1 on SQuAD V2 at 300 tokens, roughly 4 points higher than the 21 F1 achieved by the Transformer trained only with Next Token Prediction at the same length.", "label": false }, { "paperid": "2409.14704v2", "paper_path": "./SciVer/papers/2409.14704v2.json", "claim_type": "parallel", "item1": "3(b)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14704v2_figure_3(b).png", "item2_path": "./SciVer/images/2409.14704v2_figure_3(c).png", "section": [ "4.1" ], "request_id": 311, "origin_statement": "By step 1000, FID peaks near 270—over twice its minimum of ~110 at step 300—while average CLIP similarity drops from 0.335 at initialization to about 0.282, indicating reduced diversity and text-image alignment.", "perturbed_statement": "By step 2500, FID recovers to its minimum of ~90 while average CLIP similarity concurrently peaks at 0.34, surpassing the initial 0.335, suggesting restored diversity and alignment.", "perturbed_explanation": "This statement is wrong because in Figure 3(b) the lowest FID value is around 110 (not ~90) at both ~300 and ~2500 steps, and in Figure 3(c) the CLIP score stabilizes near 0.278–0.282 rather than peaking at 0.34.", "claim": "By step 2500, FID recovers to its minimum of ~90 while average CLIP similarity concurrently peaks at 0.34, surpassing the initial 0.335, suggesting restored diversity and alignment.", "label": false }, { "paperid": "2409.11531v1", "paper_path": "./SciVer/papers/2409.11531v1.json", "claim_type": "parallel", "item1": "6", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11531v1_figure_6.png", "item2_path": "./SciVer/images/2409.11531v1-Table1-1.png", "section": [ "5.2.1" ], "request_id": 315, "origin_statement": "In Scenario 1 the ESV group’s valence rose ∼1.8 points (from ~2.7 to ~4.5; Figure 6) and had the highest VADER sentiment shift (Δ=0.567; Table 1), exceeding the Control’s Δ0.516 and Text’s Δ0.363 scores.", "perturbed_statement": "In Scenario 
1 the ESV group’s valence rose ∼2.3 points (from ~2.7 to ~4.5; Figure 6) and had the highest VADER sentiment shift (Δ=0.567; Table 1), exceeding the Control’s Δ0.516 and Text’s Δ0.363 scores.", "perturbed_explanation": "The perturbed statement misstates the valence increase: Figure 6 shows ESV valence rising from about 2.7 to 4.5, a change of roughly 1.8 points, not 2.3. This contradiction with the chart makes the claim incorrect.", "claim": "In Scenario 1 the ESV group’s valence rose ∼2.3 points (from ~2.7 to ~4.5; Figure 6) and had the highest VADER sentiment shift (Δ=0.567; Table 1), exceeding the Control’s Δ0.516 and Text’s Δ0.363 scores.", "label": false }, { "paperid": "2409.19136v1", "paper_path": "./SciVer/papers/2409.19136v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19136v1_figure_1.png", "item2_path": "./SciVer/images/2409.19136v1-Table2-1.png", "section": [ "5.1" ], "request_id": 317, "origin_statement": "On the confusion matrix, the tree correctly identifies 53 of 150 trips for user 068 (35.3%), about ten times the ~5 trips a random guess would label correctly, contributing to its overall .303 accuracy versus .035 for true random guessing.", "perturbed_statement": "On the confusion matrix, the tree correctly identifies 60 of 150 trips for user 068 (40%), about ten times the ~5 trips a random guess would label correctly, contributing to its overall .303 accuracy versus .035 for true random guessing.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 1 shows 53 correct predictions for user 068, not 60, and the recall is 35.3%, not 40%, contradicting the confusion matrix data.", "claim": "On the confusion matrix, the tree correctly identifies 60 of 150 trips for user 068 (40%), about ten times the ~5 trips a random guess would label correctly, contributing to its overall .303 accuracy versus .035 for true random guessing.", "label": false }, { "paperid": "2410.21329v1", "paper_path": "./SciVer/papers/2410.21329v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21329v1_figure_1.png", "item2_path": "./SciVer/images/2410.21329v1_figure_2.png", "section": [ "2.1" ], "request_id": 324, "origin_statement": "Within the Lambert conformal conic domain covering 50°–75°N and 20°W–45°E (including Finland), effective cloudiness values of exactly 0 or 1 account for over 60% of the 166,574 Meteosat-10 training images from November 2018 to October 2023.", "perturbed_statement": "Within the Lambert conformal conic domain covering 55°–80°N and 20°W–45°E (including Finland), effective cloudiness values of exactly 0 or 1 account for over 75% of the 166,574 Meteosat-10 training images from November 2018 to October 2023.", "perturbed_explanation": "Figure 1 defines the domain as 50°–75°N, not 55°–80°N. 
Figure 2’s histogram shows just over 60% of samples at cloud fraction 0 or 1, not 75%.", "claim": "Within the Lambert conformal conic domain covering 55°–80°N and 20°W–45°E (including Finland), effective cloudiness values of exactly 0 or 1 account for over 75% of the 166,574 Meteosat-10 training images from November 2018 to October 2023.", "label": false }, { "paperid": "2411.03401v1", "paper_path": "./SciVer/papers/2411.03401v1.json", "claim_type": "parallel", "item1": "6", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03401v1_figure_6.png", "item2_path": "./SciVer/images/2411.03401v1-Table5-1.png", "section": [ "4.1" ], "request_id": 326, "origin_statement": "At 1300 mm/s, increasing the volume of interest from 25 mm³ to 500 mm³ reduces the Poisson KS statistic from 0.23 to 0.06 while increasing the KS statistic for all uncertainties from 0.41 to 0.61, consistent with the dashed CDFs converging and the solid CDFs spreading in Fig. 6.", "perturbed_statement": "At 1300 mm/s, as the volume grows from 25 mm³ to 500 mm³, the Poisson KS statistic drops from 0.32 to 0.06 while the KS statistic for all uncertainties rises from 0.41 to 0.61.", "perturbed_explanation": "The table reports the Poisson KS statistic at 1300 mm/s for 25 mm³ as 0.23, not 0.32, so the perturbed Poisson value contradicts Table 5.", "claim": "At 1300 mm/s, as the volume grows from 25 mm³ to 500 mm³, the Poisson KS statistic drops from 0.32 to 0.06 while the KS statistic for all uncertainties rises from 0.41 to 0.61.", "label": false }, { "paperid": "2411.16474v1", "paper_path": "./SciVer/papers/2411.16474v1.json", "claim_type": "parallel", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16474v1_figure_8.png", "item2_path": "./SciVer/images/2411.16474v1_figure_9.png", "section": [ "4.4" ], "request_id": 328, "origin_statement": "Under a constant D flux of ≈9×10^19 m⁻² s⁻¹ for 143 h in the 143 h plasma case, the experimental TDS desorption peak is ∼510 K, whereas the DPA→143 h plasma case peaks at ∼530 K, indicating a 20 K shift due to damage.", "perturbed_statement": "Under a constant D flux of ≈5×10^19 m⁻² s⁻¹ for 143 h in the 143 h plasma case, the experimental TDS desorption peak is ∼550 K, whereas the DPA→143 h plasma case peaks at ∼530 K, indicating a 20 K shift due to damage.", "perturbed_explanation": "The perturbed statement misstates the D flux—Fig. 8 shows it is ≈9×10^19 m⁻² s⁻¹, not 5×10^19. It also gives the 143 h plasma TDS peak as ∼550 K, but Fig. 
9’s experimental curve peaks at around 510 K.", "claim": "Under a constant D flux of ≈5×10^19 m⁻² s⁻¹ for 143 h in the 143 h plasma case, the experimental TDS desorption peak is ∼550 K, whereas the DPA→143 h plasma case peaks at ∼530 K, indicating a 20 K shift due to damage.", "label": false }, { "paperid": "2411.10304v1", "paper_path": "./SciVer/papers/2411.10304v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10304v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.10304v1_figure_2(b).png", "section": [ "3.1" ], "request_id": 329, "origin_statement": "In the diphoton-dominant case (red solid) the μ→eγ conversion rate grows from about 3×10^−19 at Z=20 to roughly 1×10^−18 at Z=40, while the dipole-dominant branching ratio (blue solid) falls from ~0.04 at Z=10 to ~0.01 by Z=60.", "perturbed_statement": "In the diphoton-dominant case (red solid) the μ→eγ conversion rate grows from about 3×10^−18 at Z=20 to roughly 1×10^−18 at Z=40, while the dipole-dominant branching ratio (blue solid) falls from ~0.04 at Z=10 to ~0.02 by Z=60.", "perturbed_explanation": "The perturbation claims a rate of 3×10^−18 at Z=20, but the red solid curve in Fig.2(a) shows about 3×10^−19 at that point. It also states the branching ratio drops to ~0.02 at Z=60, whereas the blue solid curve in Fig.2(b) indicates it falls to about 0.01.", "claim": "In the diphoton-dominant case (red solid) the μ→eγ conversion rate grows from about 3×10^−18 at Z=20 to roughly 1×10^−18 at Z=40, while the dipole-dominant branching ratio (blue solid) falls from ~0.04 at Z=10 to ~0.02 by Z=60.", "label": false }, { "paperid": "2411.16506v1", "paper_path": "./SciVer/papers/2411.16506v1.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16506v1_figure_5.png", "item2_path": "./SciVer/images/2411.16506v1-Table3-1.png", "section": [ "4.2" ], "request_id": 331, "origin_statement": "At m=200 on the warehouse-d setting, throughput falls to approximately 4.3 tasks/step with a total 1,000-step runtime of about 4.1 s, marginally lower than off+PIBT’s 4.395 s runtime reported for warehouse-d in Table 3.", "perturbed_statement": "At m=200 on the warehouse-d setting, throughput falls to approximately 4.3 tasks/step with a total 1,000-step runtime of about 5.0 s, exceeding off+PIBT’s 4.395 s runtime reported for warehouse-d in Table 3.", "perturbed_explanation": "The perturbed statement incorrectly states the runtime as 5.0 s at m=200, whereas Figure 9 shows it is about 4.1 s. 
This contradicts the image’s reported runtime for that setting.", "claim": "At m=200 on the warehouse-d setting, throughput falls to approximately 4.3 tasks/step with a total 1,000-step runtime of about 5.0 s, exceeding off+PIBT’s 4.395 s runtime reported for warehouse-d in Table 3.", "label": false }, { "paperid": "2409.20058v1", "paper_path": "./SciVer/papers/2409.20058v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.20058v1-Table2-1.png", "item2_path": "./SciVer/images/2409.20058v1-Table3-1.png", "section": [ "4.3", "5" ], "request_id": 332, "origin_statement": "In the Galactic bulge, the cutoffpl+pow model yields a GRXE 30–80 keV flux of 9.7±1.4 (66±10% of total; Table 2), whereas the IPM+pow model gives an IPM flux of 10.7+1.9/–1.7 (73±13%; Table 3), a ~7 percentage-point increase.", "perturbed_statement": "In the Galactic bulge, the cutoffpl+pow model yields a GRXE 30–80 keV flux of 8.7±1.4 (66±10% of total; Table 2), whereas the IPM+pow model gives an IPM flux of 10.7+1.9/–1.7 (83±13%; Table 3), a ~17 percentage-point increase.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 lists the GRXE cutoffpl 30–80 keV flux in the bulge as 9.7±1.4, not 8.7±1.4, and Table 3 gives the IPM flux fraction as 73±13%, not 83±13%.", "claim": "In the Galactic bulge, the cutoffpl+pow model yields a GRXE 30–80 keV flux of 8.7±1.4 (66±10% of total; Table 2), whereas the IPM+pow model gives an IPM flux of 10.7+1.9/–1.7 (83±13%; Table 3), a ~17 percentage-point increase.", "label": false }, { "paperid": "2410.18069v1", "paper_path": "./SciVer/papers/2410.18069v1.json", "claim_type": "parallel", "item1": "2", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.18069v1_figure_2.png", "item2_path": "./SciVer/images/2410.18069v1_figure_5.png", "section": [ "5.1.1" ], "request_id": 333, "origin_statement": "In the spin-0 sector truncated at N=11, the 6th excited level reaches approximately E₆≈1.4 at r≈1, whereas in the spin-1 sector truncated at N=5 its top plotted level only climbs to about E₅≈1.3 at the same r, indicating slower excitation growth for s=1.", "perturbed_statement": "In the spin-0 sector truncated at N=11, the 6th excited level reaches approximately E₆≈1.4 at r≈1, whereas in the spin-1 sector truncated at N=5 its top plotted level only climbs to about E₅≈1.1 at the same r, indicating slower excitation growth for s=1.", "perturbed_explanation": "The perturbed claim understates the spin-1 top level energy: Figure 5.5 shows the highest plotted level at r≈1 is about E₅≈1.3, not 1.1, so the modified value contradicts the actual data in the spin-1 plot.", "claim": "In the spin-0 sector truncated at N=11, the 6th excited level reaches approximately E₆≈1.4 at r≈1, whereas in the spin-1 sector truncated at N=5 its top plotted level only climbs to about E₅≈1.1 at the same r, indicating slower excitation growth for s=1.", "label": false }, { "paperid": "2411.03896v1", "paper_path": "./SciVer/papers/2411.03896v1.json", "claim_type": "parallel", "item1": "1", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03896v1_figure_1.png", "item2_path": "./SciVer/images/2411.03896v1_figure_4.png", "section": [ "5" ], "request_id": 334, "origin_statement": "Plik data yields the largest positive μ₀ shift (~0.6) and γ₀ peak (~0.9) compared to Camspec (μ₀≈0.4, γ₀≈0.75) and HiLLiPoP (μ₀≈0.2, γ₀≈0.6), all 
shifted right of their GR baselines (μ₀=0, γ₀=0.55).", "perturbed_statement": "HiLLiPoP data yields the largest positive μ₀ shift (~0.6) and γ₀ peak (~0.9) while Plik shows the smallest (μ₀≈0.2, γ₀≈0.6) compared to Camspec (μ₀≈0.4, γ₀≈0.75), all shifted right of their GR baselines (μ₀=0, γ₀=0.55).", "perturbed_explanation": "The perturbed statement wrongly assigns HiLLiPoP the highest μ₀ (~0.6) and γ₀ (~0.9) peaks and Plik the lowest (μ₀≈0.2, γ₀≈0.6). In Figure 1, the red Plik contours peak furthest right (around μ₀≈0.6), and in Figure 4 the red Plik PDF peaks at γ₀≈0.9, not the green HiLLiPoP curves.", "claim": "HiLLiPoP data yields the largest positive μ₀ shift (~0.6) and γ₀ peak (~0.9) while Plik shows the smallest (μ₀≈0.2, γ₀≈0.6) compared to Camspec (μ₀≈0.4, γ₀≈0.75), all shifted right of their GR baselines (μ₀=0, γ₀=0.55).", "label": false }, { "paperid": "2410.24169v1", "paper_path": "./SciVer/papers/2410.24169v1.json", "claim_type": "parallel", "item1": "5(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.24169v1_figure_5(b).png", "item2_path": "./SciVer/images/2410.24169v1-Table1-1.png", "section": [ "5.1" ], "request_id": 338, "origin_statement": "At batch size 4 for 100-atom systems, EScAIP consumes about 4 GB memory—roughly a quarter of EquiformerV2’s ~16 GB—and its EScAIP-Large model achieves a 215 meV OC20 All+MD test energy MAE compared to EquiformerV2’s 219 meV.", "perturbed_statement": "At batch size 4 for 100-atom systems, EScAIP consumes about 6 GB memory—roughly a quarter of EquiformerV2’s ~16 GB—and its EScAIP-Large model achieves a 205 meV OC20 All+MD test energy MAE compared to EquiformerV2’s 219 meV.", "perturbed_explanation": "Figure 5 shows that at batch size 4 for 100 atoms, EScAIP uses roughly 4 GB of memory, not 6 GB. Table 1 reports EScAIP-Large’s OC20 All+MD test energy MAE as 215 meV, not 205 meV.", "claim": "At batch size 4 for 100-atom systems, EScAIP consumes about 6 GB memory—roughly a quarter of EquiformerV2’s ~16 GB—and its EScAIP-Large model achieves a 205 meV OC20 All+MD test energy MAE compared to EquiformerV2’s 219 meV.", "label": false }, { "paperid": "2410.02320v2", "paper_path": "./SciVer/papers/2410.02320v2.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02320v2_figure_3.png", "item2_path": "./SciVer/images/2410.02320v2-Table4-1.png", "section": [ "6.1" ], "request_id": 339, "origin_statement": "For the SFT→IPO condition on the En→Ru test set, post-edits show a median log-prob increase of about +0.15 vs baseline (Fig. 3), and on English-German test data the same model yields a PE–MT gap of 0.157 (Table 4).", "perturbed_statement": "For the SFT→IPO condition on the En→Ru test set, post-edits show a median log-prob increase of about +0.25 vs baseline (Fig. 3), and on English-German test data the same model yields a PE–MT gap of 0.157 (Table 4).", "perturbed_explanation": "The perturbed statement claims a +0.25 median increase for En→Ru post-edits under SFT→IPO, but Figure 3 shows the median log-prob displacement is roughly +0.15, not +0.25, so the +0.25 figure contradicts the plot.", "claim": "For the SFT→IPO condition on the En→Ru test set, post-edits show a median log-prob increase of about +0.25 vs baseline (Fig. 
3), and on English-German test data the same model yields a PE–MT gap of 0.157 (Table 4).", "label": false }, { "paperid": "2409.02554v1", "paper_path": "./SciVer/papers/2409.02554v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.02554v1-Table2-1.png", "item2_path": "./SciVer/images/2409.02554v1-Table3-1.png", "section": [ "2.2" ], "request_id": 340, "origin_statement": "In solar cycle 23, 158 HFG events constituted 47% of 335 bursts (Table 2), whereas only 30 LFG events reached 50 kHz (9% of the 335) (Table 3), indicating HFG events occurred over five times more frequently than those reaching the lowest frequencies.", "perturbed_statement": "In solar cycle 23, 168 HFG events constituted 47% of 335 bursts (Table 2), whereas only 33 LFG events reached 50 kHz (10% of the 335) (Table 3), indicating HFG events occurred over five times more frequently than those reaching the lowest frequencies.", "perturbed_explanation": "Table 2 reports 158 HFG events in cycle 23, not 168, and Table 3 shows 30 LFG events reached 50 kHz, not 33. Thus both figures in the perturbed statement contradict the source tables.", "claim": "In solar cycle 23, 168 HFG events constituted 47% of 335 bursts (Table 2), whereas only 33 LFG events reached 50 kHz (10% of the 335) (Table 3), indicating HFG events occurred over five times more frequently than those reaching the lowest frequencies.", "label": false }, { "paperid": "2409.04846v1", "paper_path": "./SciVer/papers/2409.04846v1.json", "claim_type": "parallel", "item1": "10", "item2": "11(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04846v1_figure_10.png", "item2_path": "./SciVer/images/2409.04846v1_figure_11(b).png", "section": [ "5.2" ], "request_id": 344, "origin_statement": "The Honeycomb SNR shows ten loops each about 12″ across (2.8 pc) while the Yew 6 SNR spans roughly 190″; thus each Honeycomb loop is about 6% of Yew 6’s overall diameter.", "perturbed_statement": "The Honeycomb SNR shows ten loops each about 24″ across (2.8 pc) while the Yew 6 SNR spans roughly 190″; thus each Honeycomb loop is about 12% of Yew 6’s overall diameter.", "perturbed_explanation": "This is incorrect because the Honeycomb loops are approximately 12″ in diameter (2.8 pc), not 24″. 
By doubling the loop size, the perturbed statement conflicts with the measured 12″ loop size shown in Figure 10.", "claim": "The Honeycomb SNR shows ten loops each about 24″ across (2.8 pc) while the Yew 6 SNR spans roughly 190″; thus each Honeycomb loop is about 12% of Yew 6’s overall diameter.", "label": false }, { "paperid": "2411.07070v2", "paper_path": "./SciVer/papers/2411.07070v2.json", "claim_type": "parallel", "item1": "3(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07070v2_figure_3(b).png", "item2_path": "./SciVer/images/2411.07070v2-Table1-1.png", "section": [ "5.2" ], "request_id": 346, "origin_statement": "On GPT-2-XL fine-tuned on PubMed_RCT, PARSING achieves 0.741 balanced accuracy with an AUC of 0.775, which is about 3.4 points above BA; Figure 3’s bottom panel shows loss+grad(task layer) peaks near 0.74 BA around epoch 16.", "perturbed_statement": "On GPT-2-XL fine-tuned on PubMed_RCT, PARSING achieves 0.741 balanced accuracy with an AUC of 0.785, which is about 4.4 points above BA; Figure 3’s bottom panel shows loss+grad(task layer) peaks near 0.74 BA around epoch 16.", "perturbed_explanation": "Table 1 reports PARSING’s AUC on PubMed_RCT as 0.775±0.008, not 0.785, so the perturbed AUC value (0.785) and the resulting 4.4-point gap contradict the actual 0.775 AUC (3.4-point difference).", "claim": "On GPT-2-XL fine-tuned on PubMed_RCT, PARSING achieves 0.741 balanced accuracy with an AUC of 0.785, which is about 4.4 points above BA; Figure 3’s bottom panel shows loss+grad(task layer) peaks near 0.74 BA around epoch 16.", "label": false }, { "paperid": "2411.03445v1", "paper_path": "./SciVer/papers/2411.03445v1.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03445v1_figure_3.png", "item2_path": "./SciVer/images/2411.03445v1-Table5-1.png", "section": [ "5.3" ], "request_id": 347, "origin_statement": "In Figure 3, sorted GTSRB models reach only about 0.82 AUC at 100 training models, while Table 5 reports Config D AUC of 0.90 for TDC22 ViT on GTSRB, indicating Config D outperforms even the highest-sample sorting approach.", "perturbed_statement": "In Figure 3, sorted GTSRB models reach only about 0.82 AUC at 100 training models, while Table 5 reports Config D AUC of 0.75 for TDC22 ViT on GTSRB, indicating Config D underperforms even the highest-sample sorting approach.", "perturbed_explanation": "The perturbed statement wrongly lists Config D AUC for TDC22 ViT on GTSRB as 0.75. 
Table 5 actually shows Config D AUC of 0.90 for that entry, not 0.75.", "claim": "In Figure 3, sorted GTSRB models reach only about 0.82 AUC at 100 training models, while Table 5 reports Config D AUC of 0.75 for TDC22 ViT on GTSRB, indicating Config D underperforms even the highest-sample sorting approach.", "label": false }, { "paperid": "2411.06171v1", "paper_path": "./SciVer/papers/2411.06171v1.json", "claim_type": "parallel", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.06171v1-Table1-1.png", "item2_path": "./SciVer/images/2411.06171v1-Table3-1.png", "section": [ "4.4" ], "request_id": 348, "origin_statement": "On the TRACE benchmark, SEEKR (1%) with LLaMA-2-7B-Chat achieves OP 54.99 (BWT −2.61) in Order1 versus DER++’s 49.22 (BWT −8.32), and 57.04 (BWT −3.15) in Order3 versus DER++’s 55.89 (BWT −4.51).", "perturbed_statement": "On the TRACE benchmark, SEEKR (1%) with LLaMA-2-7B-Chat achieves OP 54.99 (BWT −2.61) in Order1 versus DER++’s 49.22 (BWT −8.32), and 58.26 (BWT −2.52) in Order3 versus DER++’s 55.89 (BWT −4.51).", "perturbed_explanation": "The perturbed statement misreports SEEKR’s Order3 performance: Table 1 shows OP 57.04 with BWT −3.15 for SEEKR (1%) in Order3, not OP 58.26 with BWT −2.52.", "claim": "On the TRACE benchmark, SEEKR (1%) with LLaMA-2-7B-Chat achieves OP 54.99 (BWT −2.61) in Order1 versus DER++’s 49.22 (BWT −8.32), and 58.26 (BWT −2.52) in Order3 versus DER++’s 55.89 (BWT −4.51).", "label": false }, { "paperid": "2411.15553v1", "paper_path": "./SciVer/papers/2411.15553v1.json", "claim_type": "parallel", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15553v1_figure_3.png", "item2_path": "./SciVer/images/2411.15553v1-Table2-1.png", "section": [ "4.2" ], "request_id": 349, "origin_statement": "At 200 iterations in the RN-50→Inc-v4 setting (Figure 3), FTM-E achieves ≈65% targeted attack success—about 21 percentage points higher than SI (~44%). In Table 2, RDI-FTM-E on RN-50 attains a 29.8% average against five transformers, 8.0 points above RDI-CFM’s 21.8%.", "perturbed_statement": "At 200 iterations in the RN-50→Inc-v4 setting (Figure 3), FTM-E achieves ≈65% targeted attack success—about 21 percentage points higher than SI (~44%). In Table 2, RDI-FTM-E on RN-50 attains a 35.0% average against five transformers, 13.2 points above RDI-CFM’s 21.8%.", "perturbed_explanation": "Table 2 reports RDI-FTM-E’s average success rate as 29.8%, not 35.0%, and its advantage over RDI-CFM is 8.0 points rather than 13.2, so the perturbed 35.0% figure contradicts the table.", "claim": "At 200 iterations in the RN-50→Inc-v4 setting (Figure 3), FTM-E achieves ≈65% targeted attack success—about 21 percentage points higher than SI (~44%). In Table 2, RDI-FTM-E on RN-50 attains a 35.0% average against five transformers, 13.2 points above RDI-CFM’s 21.8%.", "label": false }, { "paperid": "2411.16198v1", "paper_path": "./SciVer/papers/2411.16198v1.json", "claim_type": "parallel", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16198v1_figure_6.png", "item2_path": "./SciVer/images/2411.16198v1_figure_7.png", "section": [ "4.4.2" ], "request_id": 350, "origin_statement": "In the MS COCO misclassification, insertion score is 0.2796 at ~20% reveal, while in the undetected case it peaks at 0.6190 at ~40%. 
Conversely, on LVIS, misclassification insertion is 0.6464 at 20%, exceeding the undetected insertion of 0.2816 at 35%.", "perturbed_statement": "In the MS COCO misclassification, insertion score is 0.6190 at ~40% reveal, while in the undetected case it peaks at 0.2796 at ~20%. Conversely, on LVIS, misclassification insertion is 0.6464 at 20%, exceeding the undetected insertion of 0.2816 at 35%.", "perturbed_explanation": "The perturbed statement incorrectly swaps the MS COCO misclassification metrics: the image shows an insertion of 0.2796 at ~20% reveal for the misclassification (not 0.6190 at ~40%), and an insertion of 0.6190 at ~40% applies to the undetected case.", "claim": "In the MS COCO misclassification, insertion score is 0.6190 at ~40% reveal, while in the undetected case it peaks at 0.2796 at ~20%. Conversely, on LVIS, misclassification insertion is 0.6464 at 20%, exceeding the undetected insertion of 0.2816 at 35%.", "label": false }, { "paperid": "2409.06801v1", "paper_path": "./SciVer/papers/2409.06801v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06801v1_figure_1.png", "item2_path": "./SciVer/images/2409.06801v1-Table2-1.png", "section": [ "5.2" ], "request_id": 351, "origin_statement": "With no offset (Δ=0), roughly 60% of ensemble DEMO plans exceed a 1% population tolerance, while disclosure avoidance induces maximum relative errors of only 0.69% in districts with ideal population 32–64k.", "perturbed_statement": "With no offset (Δ=0), roughly 60% of ensemble DEMO plans exceed a 1% population tolerance, while disclosure avoidance induces maximum relative errors of 1.5% in districts with ideal population 32–64k.", "perturbed_explanation": "The perturbation is incorrect because Table 2 reports a maximum DAS-induced error of 0.0069 (0.69%) for districts with ideal population 32–64 k, not 1.5%.", "claim": "With no offset (Δ=0), roughly 60% of ensemble DEMO plans exceed a 1% population tolerance, while disclosure avoidance induces maximum relative errors of 1.5% in districts with ideal population 32–64k.", "label": false }, { "paperid": "2409.00671v1", "paper_path": "./SciVer/papers/2409.00671v1.json", "claim_type": "parallel", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.00671v1_figure_5.png", "item2_path": "./SciVer/images/2409.00671v1-Table2-1.png", "section": [ "5.6" ], "request_id": 353, "origin_statement": "Between early 2021 highs (~5800) and early 2022 lows (~4100), the SSEC fell ~29%, yet InvariantStock posted ARR 0.8315 and SR 3.7198, far exceeding DoubleAdapt’s ARR 0.1701 and SR 0.6037.", "perturbed_statement": "Between early 2021 highs (~5800) and early 2022 lows (~4100), the SSEC fell ~29%, yet InvariantStock posted ARR 0.7315 and SR 3.7198, far exceeding DoubleAdapt’s ARR 0.1701 and SR 0.6037.", "perturbed_explanation": "The perturbed statement incorrectly lists InvariantStock’s ARR as 0.7315, but Table 2 shows its actual ARR is 0.8315.", "claim": "Between early 2021 highs (~5800) and early 2022 lows (~4100), the SSEC fell ~29%, yet InvariantStock posted ARR 0.7315 and SR 3.7198, far exceeding DoubleAdapt’s ARR 0.1701 and SR 0.6037.", "label": false }, { "paperid": "2410.23910v1", "paper_path": "./SciVer/papers/2410.23910v1.json", "claim_type": "parallel", "item1": "4(c)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23910v1_figure_4(c).png", 
"item2_path": "./SciVer/images/2410.23910v1-Table1-1.png", "section": [ "4.2" ], "request_id": 355, "origin_statement": "For the FF(L+C) model, our uncertainty measure achieves a ROC-AUC of 0.7694 compared to PackE’s 0.4214 in Table 1—a 0.3480 gain—and in Figure 4 our ROC curve consistently lies above PackE’s across all false positive rates.", "perturbed_statement": "For the FF(L+C) model, our uncertainty measure achieves a ROC-AUC of 0.8523 compared to PackE’s 0.5314 in Table 1—a 0.3210 gain—and in Figure 4 our ROC curve consistently lies above PackE’s across all false positive rates.", "perturbed_explanation": "The perturbed statement misstates the Table 1 values: the actual ROC-AUCs are 0.7694 for our method and 0.4214 for PackE (not 0.8523 and 0.5314), so both reported numbers and the computed difference contradict the table.", "claim": "For the FF(L+C) model, our uncertainty measure achieves a ROC-AUC of 0.8523 compared to PackE’s 0.5314 in Table 1—a 0.3210 gain—and in Figure 4 our ROC curve consistently lies above PackE’s across all false positive rates.", "label": false }, { "paperid": "2411.09726v2", "paper_path": "./SciVer/papers/2411.09726v2.json", "claim_type": "parallel", "item1": "2", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09726v2_figure_2.png", "item2_path": "./SciVer/images/2411.09726v2-Table5-1.png", "section": [ "4" ], "request_id": 358, "origin_statement": "Across the 14 weather stations spread from S1 in northern Singapore to S14 on the southern coast, temperature shows a raw correlation of 0.64 with UTCI—over three times larger than wind speed’s 0.20 correlation—highlighting temperature as the dominant driver of outdoor thermal comfort.", "perturbed_statement": "Across the 14 weather stations spread from S1 in northern Singapore to S14 on the southern coast, temperature shows a raw correlation of 0.64 with UTCI—over five times larger than wind speed’s 0.12 correlation—highlighting temperature as the dominant driver of outdoor thermal comfort.", "perturbed_explanation": "The perturbed statement misquotes the wind speed–UTCI correlation. Table 5 reports a correlation of 0.20 for WS versus UTCI, not 0.12, so the claimed fivefold difference is incorrect.", "claim": "Across the 14 weather stations spread from S1 in northern Singapore to S14 on the southern coast, temperature shows a raw correlation of 0.64 with UTCI—over five times larger than wind speed’s 0.12 correlation—highlighting temperature as the dominant driver of outdoor thermal comfort.", "label": false }, { "paperid": "2411.04649v1", "paper_path": "./SciVer/papers/2411.04649v1.json", "claim_type": "parallel", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.04649v1_figure_5.png", "item2_path": "./SciVer/images/2411.04649v1-Table1-1.png", "section": [ "5.3.1" ], "request_id": 359, "origin_statement": "SBERT on MultiRC achieves perfect inter-annotator agreement (Fleiss’ κ=1.0 per Figure 5) while Table 1 shows SBERT relies on the global shorthand rule “(? || , but the algarve) → FALSE,” illustrating its dependence on token patterns.", "perturbed_statement": "SBERT on MultiRC achieves moderate inter-annotator agreement (Fleiss’ κ=0.8) while Table 1 shows SBERT relies on the global shorthand rule “(? 
|| , but the algarve) → FALSE,” illustrating its dependence on token patterns.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 shows SBERT’s Fleiss’ κ on MultiRC is 1.0 (perfect agreement), not 0.8 as claimed.", "claim": "SBERT on MultiRC achieves moderate inter-annotator agreement (Fleiss’ κ=0.8) while Table 1 shows SBERT relies on the global shorthand rule “(? || , but the algarve) → FALSE,” illustrating its dependence on token patterns.", "label": false }, { "paperid": "2410.19483v1", "paper_path": "./SciVer/papers/2410.19483v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19483v1_figure_2(b).png", "item2_path": "./SciVer/images/2410.19483v1_figure_3(a).png", "section": [ "3.3", "1" ], "request_id": 360, "origin_statement": "The activation range of the final layer (layer 9) spans over 2.7×10^4, dwarfing the weight range of layer 1 at approximately ±6, and for the “lego” scene, increasing bitwidth from 4 to 8 yields PSNR gains from around 18 dB to over 30 dB.", "perturbed_statement": "The activation range of the final layer (layer 9) spans over 2.7×10^3, dwarfing the weight range of layer 1 at approximately ±6, and for the “lego” scene, increasing bitwidth from 4 to 8 yields PSNR gains from around 18 dB to over 30 dB.", "perturbed_explanation": "The perturbation states the activation range of layer 9 is 2.7×10^3, but Fig. 2(b) shows its activation range actually exceeds 2.7×10^4, so the perturbed value contradicts the plotted data.", "claim": "The activation range of the final layer (layer 9) spans over 2.7×10^3, dwarfing the weight range of layer 1 at approximately ±6, and for the “lego” scene, increasing bitwidth from 4 to 8 yields PSNR gains from around 18 dB to over 30 dB.", "label": false }, { "paperid": "2409.05249v1", "paper_path": "./SciVer/papers/2409.05249v1.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05249v1_figure_4.png", "item2_path": "./SciVer/images/2409.05249v1-Table2-1.png", "section": [ "4.3" ], "request_id": 366, "origin_statement": "On the DC dataset, NetDPSyn achieves a low relative error of about 0.07 in IS mode and a positive Spearman rank correlation of 0.26, contrasting with NetShare’s negative correlation of -0.65.", "perturbed_statement": "On the DC dataset, NetDPSyn achieves a low relative error of about 0.17 in IS mode and a positive Spearman rank correlation of 0.26, contrasting with NetShare’s negative correlation of -0.45.", "perturbed_explanation": "The perturbed statement misreports the NetDPSyn relative error for IS mode as 0.17, while Figure 4 shows it is approximately 0.07. 
It also incorrectly states NetShare’s DC correlation as -0.45, but Table 2 reports -0.65.", "claim": "On the DC dataset, NetDPSyn achieves a low relative error of about 0.17 in IS mode and a positive Spearman rank correlation of 0.26, contrasting with NetShare’s negative correlation of -0.45.", "label": false }, { "paperid": "2411.13584v1", "paper_path": "./SciVer/papers/2411.13584v1.json", "claim_type": "parallel", "item1": "3", "item2": "4(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13584v1_figure_3.png", "item2_path": "./SciVer/images/2411.13584v1_figure_4(a).png", "section": [ "3.4.5" ], "request_id": 368, "origin_statement": "Figure 3 shows our retriever yields 12 distinct clusters spanning roughly –60 to +50 on t-SNE axes with minimal overlap, whereas G2PTL clusters overlap extensively. In Figure 4, the embedding distance vs geographical distance regression yields an MSE of 0.03 over 50,000 pairs.", "perturbed_statement": "Figure 3 shows our retriever yields 12 distinct clusters spanning roughly –60 to +50 on t-SNE axes with minimal overlap, whereas G2PTL clusters overlap extensively. In Figure 4, the embedding distance vs geographical distance regression yields an MSE of 0.3 over 50,000 pairs.", "perturbed_explanation": "The perturbed statement incorrectly reports MSE as 0.3, while Figure 4 clearly shows an MSE of 0.03 for the regression between embedding and geographical distances.", "claim": "Figure 3 shows our retriever yields 12 distinct clusters spanning roughly –60 to +50 on t-SNE axes with minimal overlap, whereas G2PTL clusters overlap extensively. In Figure 4, the embedding distance vs geographical distance regression yields an MSE of 0.3 over 50,000 pairs.", "label": false }, { "paperid": "2410.10289v1", "paper_path": "./SciVer/papers/2410.10289v1.json", "claim_type": "parallel", "item1": "4", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.10289v1_figure_4.png", "item2_path": "./SciVer/images/2410.10289v1-Table4-1.png", "section": [ "4.2" ], "request_id": 369, "origin_statement": "At K=10, averaged medical AUROC reaches 93.0%, about 5.3 points higher than the Base model’s medical image-level AUROC of 87.7%, per Table 4.", "perturbed_statement": "At K=10, averaged medical AUROC reaches 90.0%, roughly 2.3 points above the Base model’s medical image-level AUROC of 87.7%.", "perturbed_explanation": "The perturbation incorrectly lowers the medical AUROC at K=10 to 90.0%, whereas Figure 4 shows it is approximately 93.0%, contradicting the plotted value.", "claim": "At K=10, averaged medical AUROC reaches 90.0%, roughly 2.3 points above the Base model’s medical image-level AUROC of 87.7%.", "label": false }, { "paperid": "2410.10177v1", "paper_path": "./SciVer/papers/2410.10177v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.10177v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.10177v1-Table2-1.png", "section": [ "6.3" ], "request_id": 371, "origin_statement": "LDM’s attack accuracy rises from about 80% with one query to roughly 92% with eight queries (Fig.3a), outperforming its DDIM counterpart by 10 points; Table 2 further confirms LDM achieves 92% accuracy on LFW at 200 sampling steps, 10 points above DDIM’s 82%.", "perturbed_statement": "LDM’s attack accuracy rises from about 80% with one query to roughly 85% with eight queries (Fig.3a), outperforming its DDIM counterpart by 10 
points; Table 2 further confirms LDM achieves 92% accuracy on LFW at 200 sampling steps, 10 points above DDIM’s 82%.", "perturbed_explanation": "The perturbed statement incorrectly states that LDM’s attack accuracy is 85% with eight query images. In Fig. 3a, LDM actually achieves approximately 92% accuracy at eight queries, so the ‘85%’ figure contradicts the chart.", "claim": "LDM’s attack accuracy rises from about 80% with one query to roughly 85% with eight queries (Fig.3a), outperforming its DDIM counterpart by 10 points; Table 2 further confirms LDM achieves 92% accuracy on LFW at 200 sampling steps, 10 points above DDIM’s 82%.", "label": false }, { "paperid": "2409.00700v1", "paper_path": "./SciVer/papers/2409.00700v1.json", "claim_type": "parallel", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.00700v1_figure_7.png", "item2_path": "./SciVer/images/2409.00700v1_figure_8.png", "section": [ "4.5" ], "request_id": 372, "origin_statement": "ID-FaceVC with audio inputs (Ours (A)) achieved an average naturalness MOS of 4.3—0.7 higher than FVMVC’s 3.6—while in face-based preference tests ID-FaceVC was chosen 57.14% over FVMVC’s 42.86%.", "perturbed_statement": "ID-FaceVC with audio inputs (Ours (A)) achieved an average naturalness MOS of 4.8—1.2 higher than FVMVC’s 3.6—while in face-based preference tests ID-FaceVC was chosen 57.14% over FVMVC’s 42.86%.", "perturbed_explanation": "The perturbed naturalness MOS of 4.8 (and difference of 1.2) contradicts Figure 7, where Ours (A) is actually rated 4.3 and FVMVC 3.6, giving a gap of 0.7 rather than 1.2.", "claim": "ID-FaceVC with audio inputs (Ours (A)) achieved an average naturalness MOS of 4.8—1.2 higher than FVMVC’s 3.6—while in face-based preference tests ID-FaceVC was chosen 57.14% over FVMVC’s 42.86%.", "label": false }, { "paperid": "2409.13175v1", "paper_path": "./SciVer/papers/2409.13175v1.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.13175v1_figure_2.png", "item2_path": "./SciVer/images/2409.13175v1_figure_4.png", "section": [ "3.2.1" ], "request_id": 374, "origin_statement": "Given three users and the constraint ∑_u a^u_t ≤ 2 (Fig 4), allocating one real-time recommendation (25 s) and two 2nd-cached recommendations (23 s each) yields an average watch time of ≈23.7 s, outperforming uniformly serving three 1st-cached recommendations (22 s each).", "perturbed_statement": "Given three users and the constraint ∑_u a^u_t ≤ 2 (Fig 4), allocating one real-time recommendation (25 s) and two 2nd-cached recommendations (24 s each) yields an average watch time of ≈24.3 s, outperforming uniformly serving three 1st-cached recommendations (22 s each).", "perturbed_explanation": "The perturbed statement claims that 2nd-cached recommendations yield 24 s watch time, but Figure 2 shows 2nd-cached watch time is actually 23 s, so the 24 s figure is incorrect.", "claim": "Given three users and the constraint ∑_u a^u_t ≤ 2 (Fig 4), allocating one real-time recommendation (25 s) and two 2nd-cached recommendations (24 s each) yields an average watch time of ≈24.3 s, outperforming uniformly serving three 1st-cached recommendations (22 s each).", "label": false }, { "paperid": "2411.01472v1", "paper_path": "./SciVer/papers/2411.01472v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "2(h)", "item1_type": "chart", "item2_type": "chart", "item1_path": 
"./SciVer/images/2411.01472v1_figure_2(b).png", "item2_path": "./SciVer/images/2411.01472v1_figure_2(h).png", "section": [ "4.2" ], "request_id": 379, "origin_statement": "Figure 2’s SIDD error maps reveal that our method reduces high-error pixels by over 50% compared to MZSR, and Table 1 reports our fine-tuned approach attains an average PSNR of 52.7 dB across five smartphone sensors—about 1.1 dB above MZSR and 0.9 dB above transfer learning.", "perturbed_statement": "Figure 2’s SIDD error maps reveal that our method reduces high-error pixels by over 50% compared to MZSR, and Table 1 reports our fine-tuned approach attains an average PSNR of 54.0 dB across five smartphone sensors—about 2.4 dB above MZSR and 2.2 dB above transfer learning.", "perturbed_explanation": "The perturbed statement misstates the PSNR and gain values: Table 1 actually shows a PSNR of 52.7 dB for the fine-tuned method (not 54.0 dB), with gains of 1.1 dB over MZSR and 0.9 dB over transfer learning (not 2.4 dB and 2.2 dB).", "claim": "Figure 2’s SIDD error maps reveal that our method reduces high-error pixels by over 50% compared to MZSR, and Table 1 reports our fine-tuned approach attains an average PSNR of 54.0 dB across five smartphone sensors—about 2.4 dB above MZSR and 2.2 dB above transfer learning.", "label": false }, { "paperid": "2410.21769v1", "paper_path": "./SciVer/papers/2410.21769v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.21769v1_figure_1.png", "item2_path": "./SciVer/images/2410.21769v1-Table2-1.png", "section": [ "2.1" ], "request_id": 380, "origin_statement": "For the Janus compound TiSeH, the ABA-stacked 2H phase (Fig 1a,c) has an FM energy of –36.07 meV, which is 12.43 meV lower than the FM energy of the ABC-stacked 1T phase (–23.64 meV).", "perturbed_statement": "For the Janus compound TiSeH, the ABA-stacked 2H phase has an FM energy of –36.07 meV, which is 12.07 meV lower than the FM energy of the ABC-stacked 1T phase (–24.00 meV).", "perturbed_explanation": "The table lists the FM energy of 1T-TiSeH as –23.64 meV (not –24.00 meV) and the actual difference from –36.07 meV is 12.43 meV (not 12.07 meV).", "claim": "For the Janus compound TiSeH, the ABA-stacked 2H phase has an FM energy of –36.07 meV, which is 12.07 meV lower than the FM energy of the ABC-stacked 1T phase (–24.00 meV).", "label": false }, { "paperid": "2410.17020v2", "paper_path": "./SciVer/papers/2410.17020v2.json", "claim_type": "parallel", "item1": "3(d)", "item2": "3(g)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17020v2_figure_3(d).png", "item2_path": "./SciVer/images/2410.17020v2_figure_3(g).png", "section": [ "5.2" ], "request_id": 381, "origin_statement": "In the real street scene with seven visible vehicles, the segmentation output only identifies five distinct car regions, underestimating the number of cars by two.", "perturbed_statement": "In the real street scene with seven visible vehicles, the segmentation output only identifies six distinct car regions, underestimating the number of cars by one.", "perturbed_explanation": "The segmentation visualization clearly shows only five separate car segments; claiming six car regions contradicts the actual count in the segmentation output.", "claim": "In the real street scene with seven visible vehicles, the segmentation output only identifies six distinct car regions, underestimating the number of cars by one.", "label": false }, { 
"paperid": "2409.04050v1", "paper_path": "./SciVer/papers/2409.04050v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "4(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04050v1_figure_4(a).png", "item2_path": "./SciVer/images/2409.04050v1_figure_4(d).png", "section": [ "4.2.2" ], "request_id": 382, "origin_statement": "EigenSR-β outperforms other methods by achieving the highest PSNR of 38.91 dB—1.08 dB above ESSAformer’s—and the lowest NRQMS of 17.85, 0.08 below SFCSR’s 17.93.", "perturbed_statement": "EigenSR-β outperforms other methods by achieving the highest PSNR of 38.91 dB—2.08 dB above ESSAformer’s—and the lowest NRQMS of 17.85, 0.08 below SFCSR’s 17.93.", "perturbed_explanation": "The actual PSNR gap between EigenSR-β (38.91 dB) and ESSAformer (37.83 dB) is 1.08 dB, not 2.08 dB as claimed in the perturbed statement.", "claim": "EigenSR-β outperforms other methods by achieving the highest PSNR of 38.91 dB—2.08 dB above ESSAformer’s—and the lowest NRQMS of 17.85, 0.08 below SFCSR’s 17.93.", "label": false }, { "paperid": "2410.22015v1", "paper_path": "./SciVer/papers/2410.22015v1.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22015v1_figure_5.png", "item2_path": "./SciVer/images/2410.22015v1-Table3-1.png", "section": [ "3.1.3" ], "request_id": 383, "origin_statement": "The hexbin heatmap shows its densest region at about 1100 pedestrians per hour and 50 µg/m3 NO2, closely matching Ciutat Vella’s average flow of 1167 ped/h and mean NO2 of 47.5 µg/m3 from Table 3.", "perturbed_statement": "The hexbin heatmap shows its densest region at about 1100 pedestrians per hour and 50 µg/m3 NO2, closely matching Ciutat Vella’s average flow of 1167 ped/h and mean NO2 of 65 µg/m3 from Table 3.", "perturbed_explanation": "The perturbation is incorrect because Table 3 actually lists Ciutat Vella’s mean NO2 concentration as 47.5 µg/m3, not 65 µg/m3.", "claim": "The hexbin heatmap shows its densest region at about 1100 pedestrians per hour and 50 µg/m3 NO2, closely matching Ciutat Vella’s average flow of 1167 ped/h and mean NO2 of 65 µg/m3 from Table 3.", "label": false }, { "paperid": "2411.02860v1", "paper_path": "./SciVer/papers/2411.02860v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02860v1_figure_4(b).png", "item2_path": "./SciVer/images/2411.02860v1-Table3-1.png", "section": [ "4.3" ], "request_id": 388, "origin_statement": "At 30 samples per class, Table 3 shows SDR increases from 7.33 dB (1 sample) to 10.09 dB, while Figure 4(b) shows SIR rising from 13.55 dB to 16.34 dB over the same memory range, highlighting more pronounced gains in interference suppression than in signal distortion reduction.", "perturbed_statement": "At 30 samples per class, Table 3 shows SDR increases from 7.33 dB (1 sample) to 10.09 dB, while Figure 4(b) shows SIR rising from 13.55 dB to 17.34 dB over the same memory range, highlighting more pronounced gains in interference suppression than in signal distortion reduction.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 4(b) reports an SIR of 16.34 dB at 30 samples per class, not 17.34 dB as claimed.", "claim": "At 30 samples per class, Table 3 shows SDR increases from 7.33 dB (1 sample) to 10.09 dB, while Figure 4(b) shows SIR rising from 13.55 dB to 17.34 dB over the same memory range, 
highlighting more pronounced gains in interference suppression than in signal distortion reduction.", "label": false }, { "paperid": "2411.02091v1", "paper_path": "./SciVer/papers/2411.02091v1.json", "claim_type": "parallel", "item1": "4", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02091v1_figure_4.png", "item2_path": "./SciVer/images/2411.02091v1-Table6-1.png", "section": [ "4.5" ], "request_id": 394, "origin_statement": "The median bug fixing time in the lib subsystem (about 100 days) is over three times longer than in drivers (around 30 days), which aligns with the parsimonious model’s SUBSYSTEM_R: lib coefficient of 1.10 indicating a significant log-scale increase.", "perturbed_statement": "The median bug fixing time in the lib subsystem (about 60 days) is over twice that in drivers (around 30 days), aligning with the parsimonious model’s SUBSYSTEM_R: lib coefficient of 1.10 indicating a significant log-scale increase.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 4 shows the median fixing time for the lib subsystem is approximately 100 days, not 60 days, contradicting the reported value.", "claim": "The median bug fixing time in the lib subsystem (about 60 days) is over twice that in drivers (around 30 days), aligning with the parsimonious model’s SUBSYSTEM_R: lib coefficient of 1.10 indicating a significant log-scale increase.", "label": false }, { "paperid": "2411.10703v1", "paper_path": "./SciVer/papers/2411.10703v1.json", "claim_type": "parallel", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.10703v1_figure_6.png", "item2_path": "./SciVer/images/2411.10703v1-Table3-1.png", "section": [ "4.2" ], "request_id": 395, "origin_statement": "At the 30-min horizon, GlucoNet+KD student-only LSTM with (128,64) cells has an average RMSE of about 5.3 in Figure 6, roughly 2.6 lower than the 7.88 RMSE reported for the same configuration on the 2020 dataset in Table 3.", "perturbed_statement": "At the 30-min horizon, GlucoNet+KD student-only LSTM with (128,64) cells has an average RMSE of about 5.3 in Figure 6, roughly 1.6 lower than the 6.88 RMSE reported for the same configuration on the 2020 dataset in Table 3.", "perturbed_explanation": "This is incorrect because Table 3 actually reports an RMSE of 7.88 (not 6.88) for GlucoNet+KD with student LSTM (128,64) at the 30-min horizon on the 2020 dataset.", "claim": "At the 30-min horizon, GlucoNet+KD student-only LSTM with (128,64) cells has an average RMSE of about 5.3 in Figure 6, roughly 1.6 lower than the 6.88 RMSE reported for the same configuration on the 2020 dataset in Table 3.", "label": false }, { "paperid": "2411.12785v1", "paper_path": "./SciVer/papers/2411.12785v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.12785v1-Table4-1.png", "item2_path": "./SciVer/images/2411.12785v1-Table5-1.png", "section": [ "6.3" ], "request_id": 396, "origin_statement": "Ours (complete) cuts FairFace MS by 0.175 in age debiasing (from 0.528 to 0.353, Table 5) but only 0.049 in race debiasing (from 0.657 to 0.608, Table 4), indicating a ~3.6× larger MS improvement on age.", "perturbed_statement": "Ours (complete) cuts FairFace MS by 0.175 in age debiasing (from 0.528 to 0.353, Table 5) but only 0.130 in race debiasing (from 0.657 to 0.608, Table 4), indicating a ~1.3× larger MS improvement on age.", 
"perturbed_explanation": "The perturbed statement claims a 0.130 reduction in FairFace MS for race debiasing, but Table 4 shows the MS drops from 0.657 to 0.608, which is a 0.049 reduction—not 0.130.", "claim": "Ours (complete) cuts FairFace MS by 0.175 in age debiasing (from 0.528 to 0.353, Table 5) but only 0.130 in race debiasing (from 0.657 to 0.608, Table 4), indicating a ~1.3× larger MS improvement on age.", "label": false }, { "paperid": "2409.14940v1", "paper_path": "./SciVer/papers/2409.14940v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.14940v1-Table2-1.png", "item2_path": "./SciVer/images/2409.14940v1-Table3-1.png", "section": [ "4.2" ], "request_id": 398, "origin_statement": "Under the mixed attack ℓ₂ KNN setting, IF-Def+PST achieves 81.3% mixed accuracy on ModelNet40’s PCT backbone, compared to PointDP+PST’s 54.9% mixed accuracy on ScanObjectNN’s PCT, indicating a 26.4-point higher robustness on ModelNet40.", "perturbed_statement": "Under the mixed attack ℓ₂ KNN setting, IF-Def+PST achieves 78.6% mixed accuracy on ModelNet40’s PCT backbone, compared to PointDP+PST’s 54.9% mixed accuracy on ScanObjectNN’s PCT, indicating a 23.7-point higher robustness on ModelNet40.", "perturbed_explanation": "Table 2 reports that IF-Def+PST actually achieves 81.3% mixed accuracy under the ℓ₂ KNN mixed attack on the PCT backbone for ModelNet40, not 78.6% as stated.", "claim": "Under the mixed attack ℓ₂ KNN setting, IF-Def+PST achieves 78.6% mixed accuracy on ModelNet40’s PCT backbone, compared to PointDP+PST’s 54.9% mixed accuracy on ScanObjectNN’s PCT, indicating a 23.7-point higher robustness on ModelNet40.", "label": false }, { "paperid": "2409.08361v1", "paper_path": "./SciVer/papers/2409.08361v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08361v1_figure_4.png", "item2_path": "./SciVer/images/2409.08361v1_figure_5.png", "section": [ "5.2" ], "request_id": 399, "origin_statement": "Figure 4’s vertical line at [Fe/H] = –0.1 dex splits metal-poor and metal-rich hosts, yet both groups show planet CMFs spanning ~–0.1 to ~1.0; Figure 5’s ODR fit (slope = 5.6 ± 1.6) demonstrates planet CMF strongly increases with host-equivalent CMF, rising ~0.56 per 0.1 CMF.", "perturbed_statement": "Figure 4’s vertical line at [Fe/H] = –0.1 dex splits metal-poor and metal-rich hosts, yet both groups show planet CMFs spanning –0.1 to 1.0; Figure 5’s ODR fit (slope = 1.6 ± 1.6) demonstrates planet CMF strongly increases with host-equivalent CMF, rising ~0.16 per 0.1 CMF.", "perturbed_explanation": "The perturbed statement misreports the ODR slope from Figure 5 as 1.6 ± 1.6 instead of the actual 5.6 ± 1.6, and thus the implied rise of ~0.16 per 0.1 CMF contradicts the correct ~0.56 per 0.1 CMF derived from the true slope.", "claim": "Figure 4’s vertical line at [Fe/H] = –0.1 dex splits metal-poor and metal-rich hosts, yet both groups show planet CMFs spanning –0.1 to 1.0; Figure 5’s ODR fit (slope = 1.6 ± 1.6) demonstrates planet CMF strongly increases with host-equivalent CMF, rising ~0.16 per 0.1 CMF.", "label": false }, { "paperid": "2411.15060v1", "paper_path": "./SciVer/papers/2411.15060v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15060v1_figure_2.png", "item2_path": "./SciVer/images/2411.15060v1_figure_3.png", 
"section": [ "3.4" ], "request_id": 400, "origin_statement": "Reducing SRS source bands from 4 to 2 lowers the mean MS-SSIM from 0.464 to 0.334, and in Fig. 3a most points fall below the diagonal by about 0.1–0.2. Switching to OOD only drops MS-SSIM to 0.435, with Fig. 3c points tightly clustered around the diagonal.", "perturbed_statement": "Reducing SRS source bands from 4 to 2 lowers the mean MS-SSIM from 0.464 to 0.334, and in Fig. 3a most points fall below the diagonal by about 0.1–0.2. Switching to OOD only drops MS-SSIM to 0.390, with Fig. 3c points tightly clustered around the diagonal.", "perturbed_explanation": "The perturbed statement wrongly reports the OOD average MS-SSIM as 0.390, whereas Figure 2 and the context explicitly show the OOD mean MS-SSIM is 0.435.", "claim": "Reducing SRS source bands from 4 to 2 lowers the mean MS-SSIM from 0.464 to 0.334, and in Fig. 3a most points fall below the diagonal by about 0.1–0.2. Switching to OOD only drops MS-SSIM to 0.390, with Fig. 3c points tightly clustered around the diagonal.", "label": false }, { "paperid": "2410.09865v2", "paper_path": "./SciVer/papers/2410.09865v2.json", "claim_type": "parallel", "item1": "7(c)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.09865v2_figure_7(c).png", "item2_path": "./SciVer/images/2410.09865v2-Table3-1.png", "section": [ "5.3" ], "request_id": 403, "origin_statement": "Expanding synthetic training data from 0.2 M to 1.0 M images boosts linear-probe accuracy on RAF-DB from about 91.2% to 92.5% (Fig. 7), nearly matching FERAnno’s 92.56% RAF-DB few-shot accuracy reported in Table 4.", "perturbed_statement": "Expanding synthetic training data from 0.2 M to 1.0 M images boosts linear-probe accuracy on RAF-DB from about 90.2% to 93.5% (Fig. 7), nearly matching FERAnno’s 93.56% RAF-DB few-shot accuracy reported in Table 4.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 7 actually shows RAF-DB accuracy rising from approximately 91.2% (not 90.2%) to 92.5% (not 93.5%), and Table 4 reports FERAnno’s RAF-DB accuracy as 92.56%, not 93.56%.", "claim": "Expanding synthetic training data from 0.2 M to 1.0 M images boosts linear-probe accuracy on RAF-DB from about 90.2% to 93.5% (Fig. 
7), nearly matching FERAnno’s 93.56% RAF-DB few-shot accuracy reported in Table 4.", "label": false }, { "paperid": "2411.09534v1", "paper_path": "./SciVer/papers/2411.09534v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.09534v1-Table2-1.png", "item2_path": "./SciVer/images/2411.09534v1-Table3-1.png", "section": [ "4.3" ], "request_id": 404, "origin_statement": "Under optimal energy settings, the GAP9EVK’s cluster runs nine CV32E40P cores at 0.65 V/240 MHz, over five times the 48 MHz of the Ambiq Apollo3’s Cortex-M4 at 0.70 V, while offering roughly four times more RAM (1564 KiB vs 384 KiB).", "perturbed_statement": "Under optimal energy settings, the GAP9EVK’s cluster runs nine CV32E40P cores at 0.65 V/240 MHz, over five times the 48 MHz of the Ambiq Apollo3’s Cortex-M4 at 0.70 V, while offering roughly four times more RAM (1564 KiB vs 512 KiB).", "perturbed_explanation": "The perturbed statement incorrectly claims the Ambiq Apollo3 has 512 KiB of RAM, but Table 2 shows it actually has 384 KiB of RAM.", "claim": "Under optimal energy settings, the GAP9EVK’s cluster runs nine CV32E40P cores at 0.65 V/240 MHz, over five times the 48 MHz of the Ambiq Apollo3’s Cortex-M4 at 0.70 V, while offering roughly four times more RAM (1564 KiB vs 512 KiB).", "label": false }, { "paperid": "2411.05010v1", "paper_path": "./SciVer/papers/2411.05010v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05010v1_figure_1.png", "item2_path": "./SciVer/images/2411.05010v1_figure_2.png", "section": [ "1" ], "request_id": 405, "origin_statement": "Figure 1’s global optimum region corresponds to solutions passing 3 tests, whereas Figure 2 shows our method attains only a 32.5% correct-solution rate by iteration 10.", "perturbed_statement": "Figure 1’s global optimum region corresponds to solutions passing 4 tests, whereas Figure 2 shows our method attains only a 32.5% correct-solution rate by iteration 10.", "perturbed_explanation": "Figure 1’s legend and contours only indicate up to 3 tests passed—there is no region for 4 tests passed—so stating a 4-test optimum contradicts the image.", "claim": "Figure 1’s global optimum region corresponds to solutions passing 4 tests, whereas Figure 2 shows our method attains only a 32.5% correct-solution rate by iteration 10.", "label": false }, { "paperid": "2409.19663v2", "paper_path": "./SciVer/papers/2409.19663v2.json", "claim_type": "parallel", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19663v2_figure_3.png", "item2_path": "./SciVer/images/2409.19663v2-Table1-1.png", "section": [ "5.3" ], "request_id": 406, "origin_statement": "Figure 3 reports a Pearson correlation of 0.33 between mean Precision and editing reliability, indicating a weak association, while Table 1 shows the Linear identifier achieves a precision of 0.856 on Llama3.1-8B with UnKE, demonstrating identifier performance outpaces reliability effects.", "perturbed_statement": "Figure 3 reports a Pearson correlation of 0.75 between mean Precision and editing reliability, indicating a weak association, while Table 1 shows the Linear identifier achieves a precision of 0.856 on Llama3.1-8B with UnKE, demonstrating identifier performance outpaces reliability effects.", "perturbed_explanation": "The perturbed claim is wrong because Figure 3 actually shows a Pearson 
correlation of 0.33 between mean Precision and editing reliability, not 0.75, so the reported correlation value contradicts the figure.", "claim": "Figure 3 reports a Pearson correlation of 0.75 between mean Precision and editing reliability, indicating a weak association, while Table 1 shows the Linear identifier achieves a precision of 0.856 on Llama3.1-8B with UnKE, demonstrating identifier performance outpaces reliability effects.", "label": false }, { "paperid": "2411.09458v1", "paper_path": "./SciVer/papers/2411.09458v1.json", "claim_type": "parallel", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09458v1_figure_4.png", "item2_path": "./SciVer/images/2411.09458v1-Table1-1.png", "section": [ "5.1.2" ], "request_id": 407, "origin_statement": "The full-likelihood estimate in Fig. 4 yields f_s/f_d ≈0.25, roughly half the neutral B^0 production fraction f_00 = 0.488 ± 0.010 from PDG Table 1.", "perturbed_statement": "The full-likelihood estimate in Fig. 4 yields f_s/f_d ≈0.38, nearly matching the neutral B^0 production fraction f_00 = 0.488 ± 0.010 from PDG Table 1.", "perturbed_explanation": "Fig. 4’s blue full-likelihood 1D marginal for f_s/f_d peaks around 0.25, not 0.38, and Table 1 lists f_00 as 0.488 ± 0.010, so f_s/f_d neither reaches 0.38 nor nearly equals f_00 as claimed.", "claim": "The full-likelihood estimate in Fig. 4 yields f_s/f_d ≈0.38, nearly matching the neutral B^0 production fraction f_00 = 0.488 ± 0.010 from PDG Table 1.", "label": false }, { "paperid": "2411.15743v1", "paper_path": "./SciVer/papers/2411.15743v1.json", "claim_type": "parallel", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15743v1_figure_8.png", "item2_path": "./SciVer/images/2411.15743v1_figure_9.png", "section": [ "6.1" ], "request_id": 408, "origin_statement": "At 1/25 frequency with three harmonics, TimesFM’s forecasts in Fig8 match the sine peaks within ±0.3 amplitude, whereas TTM outputs stay near zero. Fig9 shows TimesFM’s zero-shot MSE at this point is ≈10^−4, compared to Timer’s ≈10^−1 under the same conditions.", "perturbed_statement": "At 1/25 frequency with three harmonics, TimesFM’s forecasts in Fig8 match the sine peaks within ±0.3 amplitude, whereas TTM outputs stay near zero. Fig9 shows TimesFM’s zero-shot MSE at this point is ≈10^−4, compared to Timer’s ≈10^1 under the same conditions.", "perturbed_explanation": "The perturbation incorrectly claims Timer’s MSE is ≈10^1 at the 1/25, three-harmonic mark. In Fig9, the blue Timer line at this frequency and harmonic level actually sits around 10^−1 (below 1), not at 10^1.", "claim": "At 1/25 frequency with three harmonics, TimesFM’s forecasts in Fig8 match the sine peaks within ±0.3 amplitude, whereas TTM outputs stay near zero. 
Fig9 shows TimesFM’s zero-shot MSE at this point is ≈10^−4, compared to Timer’s ≈10^1 under the same conditions.", "label": false }, { "paperid": "2410.02099v1", "paper_path": "./SciVer/papers/2410.02099v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "1(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02099v1_figure_1(b).png", "item2_path": "./SciVer/images/2410.02099v1_figure_1(d).png", "section": [ "5.5.3" ], "request_id": 409, "origin_statement": "At T=250, our scheme (k=1, m=1024) achieves a pAUC of about 0.98, roughly 4 points higher than KB (2)'s 0.94, and under 50% token corruption, the same configuration drops to an AUC of about 0.57, a decline of approximately 41 points from its uncorrupted level (~0.98).", "perturbed_statement": "At T=250, our scheme (k=1, m=1024) achieves a pAUC of about 0.98, roughly 4 points higher than KB (2)'s 0.94, and under 50% token corruption, the same configuration drops to an AUC of about 0.65, a decline of approximately 33 points from its uncorrupted level (~0.98).", "perturbed_explanation": "The perturbed statement claims an AUC of 0.65 under 50% corruption and a 33-point drop, but the figure shows the AUC is approximately 0.57 at 50% corruption, corresponding to a 41-point decline from 0.98, not 33 points.", "claim": "At T=250, our scheme (k=1, m=1024) achieves a pAUC of about 0.98, roughly 4 points higher than KB (2)'s 0.94, and under 50% token corruption, the same configuration drops to an AUC of about 0.65, a decline of approximately 33 points from its uncorrupted level (~0.98).", "label": false }, { "paperid": "2409.02120v1", "paper_path": "./SciVer/papers/2409.02120v1.json", "claim_type": "parallel", "item1": "11", "item2": "15", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.02120v1_figure_11.png", "item2_path": "./SciVer/images/2409.02120v1_figure_15.png", "section": [ "7.1" ], "request_id": 410, "origin_statement": "On July 19, the downscaled median temperature is about 295.6 K with an interquartile range of ~2.3 K, compared to UrbClim’s 295.3 K median and ~1.8 K IQR. This greater variability coincides with regions where Fig. 11 shows wind speeds up to 6.85 m/s and RH around 81.7%.", "perturbed_statement": "On July 19, the downscaled median temperature is about 294.6 K with an interquartile range of ~1.5 K, compared to UrbClim’s 295.3 K median and ~1.8 K IQR, coinciding with regions where Fig. 11 shows wind speeds up to 6.85 m/s and RH around 81.7%.", "perturbed_explanation": "The perturbed statement misreports the downscaled median and IQR for July 19. Fig. 15 shows the downscaled median at approximately 295.6 K (not 294.6 K) and an IQR of about 2.3 K (not 1.5 K), contradicting the boxplot data.", "claim": "On July 19, the downscaled median temperature is about 294.6 K with an interquartile range of ~1.5 K, compared to UrbClim’s 295.3 K median and ~1.8 K IQR, coinciding with regions where Fig. 
11 shows wind speeds up to 6.85 m/s and RH around 81.7%.", "label": false }, { "paperid": "2409.02246v1", "paper_path": "./SciVer/papers/2409.02246v1.json", "claim_type": "parallel", "item1": "9", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.02246v1_figure_9.png", "item2_path": "./SciVer/images/2409.02246v1-Table3-1.png", "section": [ "5.3" ], "request_id": 411, "origin_statement": "According to Figure 9(b) and Table 3, the jointly-optimized policy cuts average incidents lost per episode to 92 (compared to 110 under the Real policy) while reducing the 75th percentile response time from 16 to 12 iterations.", "perturbed_statement": "According to Figure 9(b) and Table 3, the jointly-optimized policy cuts average incidents lost per episode to 95 (compared to 110 under the Real policy) while reducing the 75th percentile response time from 16 to 12 iterations.", "perturbed_explanation": "The perturbed statement incorrectly reports an average of 95 incidents lost for the jointly-optimized policy, whereas Table 3 actually lists the average overflows (incidents lost) for the joint policy as 92. This contradicts the table data.", "claim": "According to Figure 9(b) and Table 3, the jointly-optimized policy cuts average incidents lost per episode to 95 (compared to 110 under the Real policy) while reducing the 75th percentile response time from 16 to 12 iterations.", "label": false }, { "paperid": "2410.02346v2", "paper_path": "./SciVer/papers/2410.02346v2.json", "claim_type": "parallel", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02346v2_figure_1.png", "item2_path": "./SciVer/images/2410.02346v2-Table3-1.png", "section": [ "2.2" ], "request_id": 414, "origin_statement": "At a base temperature Td,0.1 of 200 K, MgFeSiO4 grains of radius 0.037 µm reach ~480 K, while 0.232 µm grains reach ~620 K (Δ≈140 K); under the same M0COM model for 150–200 K, CH₃CHO shows an abundance ratio of 0.22 relative to CH₃OH.", "perturbed_statement": "At a base temperature Td,0.1 of 200 K, MgFeSiO4 grains of radius 0.037 µm reach ~430 K, while 0.232 µm grains reach ~560 K (Δ≈130 K); under the same M0COM model for 150–200 K, CH₃CHO shows an abundance ratio of 0.022 relative to CH₃OH.", "perturbed_explanation": "The perturbed temperatures (~430 K and ~560 K, Δ≈130 K) contradict Figure 1, which shows ~480 K and ~620 K (Δ≈140 K) for 0.037 µm and 0.232 µm MgFeSiO4 grains at Td,0.1=200 K. 
The stated CH₃CHO abundance ratio of 0.022 is inconsistent with Table 3, which lists 0.22 under M0COM (150–200 K).", "claim": "At a base temperature Td,0.1 of 200 K, MgFeSiO4 grains of radius 0.037 µm reach ~430 K, while 0.232 µm grains reach ~560 K (Δ≈130 K); under the same M0COM model for 150–200 K, CH₃CHO shows an abundance ratio of 0.022 relative to CH₃OH.", "label": false }, { "paperid": "2411.02359v1", "paper_path": "./SciVer/papers/2411.02359v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.02359v1-Table3-1.png", "item2_path": "./SciVer/images/2411.02359v1-Table4-1.png", "section": [ "4.2" ], "request_id": 415, "origin_statement": "At ~10 GFLOPs, auxiliary losses increase the average success length by 1.42 (4.13 vs 2.71) in ABCD→D, while the action consistency exit criterion boosts performance by only 0.18 (4.10 vs 3.92) over feature similarity in ABCD→D.", "perturbed_statement": "At ~10 GFLOPs, auxiliary losses increase the average success length by 1.42 (4.13 vs 2.71) in ABCD→D, while the action consistency exit criterion boosts performance by only 0.25 (4.10 vs 3.92) over feature similarity in ABCD→D.", "perturbed_explanation": "The perturbation incorrectly states the performance boost as 0.25, but from Table 4 the actual difference between action consistency (4.10) and feature similarity (3.92) is 0.18, not 0.25.", "claim": "At ~10 GFLOPs, auxiliary losses increase the average success length by 1.42 (4.13 vs 2.71) in ABCD→D, while the action consistency exit criterion boosts performance by only 0.25 (4.10 vs 3.92) over feature similarity in ABCD→D.", "label": false }, { "paperid": "2409.15329v1", "paper_path": "./SciVer/papers/2409.15329v1.json", "claim_type": "parallel", "item1": "8", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15329v1_figure_8.png", "item2_path": "./SciVer/images/2409.15329v1-Table2-1.png", "section": [ "7.1.2" ], "request_id": 417, "origin_statement": "With a mini-batch size of 1024, TD3-INVASE in scene 1 exceeds 60 dB average beamforming gain by around 500 episodes, whereas TD3 requires over 600 episodes to reach the same gain.", "perturbed_statement": "With a mini-batch size of 512, TD3-INVASE in scene 1 exceeds 60 dB average beamforming gain by around 500 episodes, whereas TD3 requires over 600 episodes to reach the same gain.", "perturbed_explanation": "The perturbation incorrectly states a mini-batch size of 512, but Table 2 specifies the mini-batch size is actually 1024. 
This contradicts the provided hyper-parameter.", "claim": "With a mini-batch size of 512, TD3-INVASE in scene 1 exceeds 60 dB average beamforming gain by around 500 episodes, whereas TD3 requires over 600 episodes to reach the same gain.", "label": false }, { "paperid": "2410.22782v1", "paper_path": "./SciVer/papers/2410.22782v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.22782v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.22782v1_figure_4(b).png", "section": [ "4.2" ], "request_id": 418, "origin_statement": "At β=1.25, the d=48, r̅=10 variant achieves an intra-domain performance of 80.79, which is 24.18 points higher than MALoRA's inter-domain score of 56.61 at rank 16.", "perturbed_statement": "At β=1.25, the d=48, r̅=10 variant achieves an intra-domain performance of 80.79, which is 24.18 points higher than MALoRA's inter-domain score of 56.27 at rank 16.", "perturbed_explanation": "The inter-domain performance of MALoRA at rank 16 is actually 56.61 (not 56.27). The value 56.27 corresponds to MALoRA at rank 8 in Figure 4(a), so stating 56.27 at rank 16 contradicts the image.", "claim": "At β=1.25, the d=48, r̅=10 variant achieves an intra-domain performance of 80.79, which is 24.18 points higher than MALoRA's inter-domain score of 56.27 at rank 16.", "label": false }, { "paperid": "2411.17820v1", "paper_path": "./SciVer/papers/2411.17820v1.json", "claim_type": "parallel", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17820v1_figure_5.png", "item2_path": "./SciVer/images/2411.17820v1-Table1-1.png", "section": [ "4.2" ], "request_id": 419, "origin_statement": "Ours zero-shot model achieves the lowest AOE(5) of 6.19° in the Crossing (12%) scenario (Table 1), and in Figure 5 its predicted actions (red) follow the ground truth trajectory (green) almost exactly into the target star, deviating by less than 0.5 m in the final meter.", "perturbed_statement": "Ours zero-shot model achieves the lowest AOE(5) of 4.19° in the Crossing (12%) scenario (Table 1), and in Figure 5 its predicted actions (red) follow the ground truth trajectory (green) almost exactly into the target star, deviating by less than 0.5 m in the final meter.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 reports an AOE(5) of 6.19° for the Ours zero-shot model in the Crossing scenario, not 4.19°.", "claim": "Ours zero-shot model achieves the lowest AOE(5) of 4.19° in the Crossing (12%) scenario (Table 1), and in Figure 5 its predicted actions (red) follow the ground truth trajectory (green) almost exactly into the target star, deviating by less than 0.5 m in the final meter.", "label": false }, { "paperid": "2411.06529v1", "paper_path": "./SciVer/papers/2411.06529v1.json", "claim_type": "parallel", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06529v1_figure_8.png", "item2_path": "./SciVer/images/2411.06529v1_figure_9.png", "section": [ "3.4" ], "request_id": 420, "origin_statement": "At 14 iterations, TherINO’s L2 strain error drops to about 0.07% (Fig. 8), and on the speed-accuracy curve (Fig. 9), it attains this error at roughly 6 ms, while FNO-DEQ only reaches ≈0.07% after around 7 ms.", "perturbed_statement": "At 14 iterations, TherINO’s L2 strain error drops to about 0.10% (Fig. 8), and on the speed-accuracy curve (Fig. 
9), it attains this error at roughly 5 ms, while FNO-DEQ only reaches ≈0.10% after around 8 ms.", "perturbed_explanation": "The perturbation wrongly states TherINO’s error at iteration 14 is 0.10%, but Fig. 8 shows it is about 0.07%. It also misreports the time to reach that error as 5 ms instead of ≈6 ms, and claims FNO-DEQ reaches 0.10% at 8 ms, whereas Fig. 9 indicates it hits ~0.07% around 7 ms.", "claim": "At 14 iterations, TherINO’s L2 strain error drops to about 0.10% (Fig. 8), and on the speed-accuracy curve (Fig. 9), it attains this error at roughly 5 ms, while FNO-DEQ only reaches ≈0.10% after around 8 ms.", "label": false }, { "paperid": "2410.14731v1", "paper_path": "./SciVer/papers/2410.14731v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.14731v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.14731v1_figure_4(b).png", "section": [ "5.3" ], "request_id": 423, "origin_statement": "At a 62.5% cache budget, evaluation loss drops to approximately 0.38 by 2.6K samples, while MatryoshkaKV with greedy search achieves about 90% relative accuracy at 50% cache utilization, outperforming the 82% achieved without Matryoshka training.", "perturbed_statement": "At a 62.5% cache budget, evaluation loss drops to approximately 0.45 by 2.6K samples, while MatryoshkaKV with greedy search achieves about 85% relative accuracy at 50% cache utilization, outperforming the 82% achieved without Matryoshka training.", "perturbed_explanation": "The figure shows that at 2.6K samples and 62.5% budget, the evaluation loss is about 0.38 (not 0.45). Additionally, MatryoshkaKV’s relative accuracy at 50% cache utilization is roughly 90%, not 85%, per the green curve in Figure 4 (Right).", "claim": "At a 62.5% cache budget, evaluation loss drops to approximately 0.45 by 2.6K samples, while MatryoshkaKV with greedy search achieves about 85% relative accuracy at 50% cache utilization, outperforming the 82% achieved without Matryoshka training.", "label": false }, { "paperid": "2409.02358v1", "paper_path": "./SciVer/papers/2409.02358v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.02358v1_figure_1.png", "item2_path": "./SciVer/images/2409.02358v1_figure_2.png", "section": [ "5.2" ], "request_id": 425, "origin_statement": "Instagram shows the highest standardized residual for body-shaming in negative experiences (6.32*, N=59 in Figure 1) and also a strong positive residual for self-esteem harms in negative effects (3.16*, N=39 in Figure 2), underscoring its pronounced appearance-related risks.", "perturbed_statement": "Instagram shows the highest standardized residual for body-shaming in negative experiences (5.32*, N=59 in Figure 1) and also a strong positive residual for self-esteem harms in negative effects (1.16*, N=39 in Figure 2), underscoring its pronounced appearance-related risks.", "perturbed_explanation": "Both numerical values in the perturbed statement are incorrect. 
Figure 1 reports a residual of 6.32 (not 5.32) for Instagram’s body-shaming discussions, and Figure 2 reports a residual of 3.16 (not 1.16) for Instagram’s self-esteem impacts.", "claim": "Instagram shows the highest standardized residual for body-shaming in negative experiences (5.32*, N=59 in Figure 1) and also a strong positive residual for self-esteem harms in negative effects (1.16*, N=39 in Figure 2), underscoring its pronounced appearance-related risks.", "label": false }, { "paperid": "2410.13371v2", "paper_path": "./SciVer/papers/2410.13371v2.json", "claim_type": "parallel", "item1": "4", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13371v2_figure_4.png", "item2_path": "./SciVer/images/2410.13371v2_figure_6.png", "section": [ "3.2" ], "request_id": 427, "origin_statement": "In the 10-pixel discretization of Fig.4, the 7th pixel shows a gray level due to half-area effect, and Fig.6 reports that the maximum difference falls from about 38 gray levels at σ=0.1 to approximately 2 gray levels at σ=0.8.", "perturbed_statement": "In the 10-pixel discretization of Fig.4, the 5th pixel shows a gray level due to half-area effect, and Fig.6 reports that the maximum difference falls from about 20 gray levels at σ=0.2 to approximately 1 gray level at σ=0.7.", "perturbed_explanation": "This is incorrect because in Fig.4 the gray pixel is the 7th, not the 5th, pixel. Moreover, Fig.6 shows that at σ=0.7 the maximum difference is about 3 gray levels, not 1.", "claim": "In the 10-pixel discretization of Fig.4, the 5th pixel shows a gray level due to half-area effect, and Fig.6 reports that the maximum difference falls from about 20 gray levels at σ=0.2 to approximately 1 gray level at σ=0.7.", "label": false }, { "paperid": "2410.06313v1", "paper_path": "./SciVer/papers/2410.06313v1.json", "claim_type": "parallel", "item1": "6(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.06313v1_figure_6(a).png", "item2_path": "./SciVer/images/2410.06313v1-Table3-1.png", "section": [ "3.2" ], "request_id": 428, "origin_statement": "In 2008, health economics novelty peaked at about 1.4 SD above the mean (Figure 5), implying roughly 5.3% more citations since each SD rise in novelty yields a 3.8% citation increase (Table 3).", "perturbed_statement": "In 2010, health economics novelty peaked at about 1.4 SD above the mean (Figure 5), implying roughly 5.3% more citations since each SD rise in novelty yields a 3.8% citation increase (Table 3).", "perturbed_explanation": "The perturbation misstates the year of the novelty peak. 
Figure 5 shows the 1.4 SD peak occurs in 2008, not 2010; in 2010 the novelty value for health economics is near –0.6 SD, contradicting the claim.", "claim": "In 2010, health economics novelty peaked at about 1.4 SD above the mean (Figure 5), implying roughly 5.3% more citations since each SD rise in novelty yields a 3.8% citation increase (Table 3).", "label": false }, { "paperid": "2409.19974v2", "paper_path": "./SciVer/papers/2409.19974v2.json", "claim_type": "parallel", "item1": "4(c)", "item2": "7(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19974v2_figure_4(c).png", "item2_path": "./SciVer/images/2409.19974v2_figure_7(a).png", "section": [ "4.3.1" ], "request_id": 429, "origin_statement": "When the uniform grid spacing halves from Δx≈0.0625 to ≈0.03125, the L1 norm of p error drops from ~2×10^-2 to ~6×10^-3 (≈3.3× reduction), indicating near-second-order convergence; concurrently, the AMR simulation at t=0 employs a 64×64 coarse grid with two levels of 2× refinement (blue and red outlines).", "perturbed_statement": "When the uniform grid spacing halves from Δx≈0.0625 to ≈0.03125, the L1 norm of p error drops from ~2×10^-2 to ~2×10^-3 (≈10× reduction), indicating near-second-order convergence; concurrently, the AMR simulation at t=0 employs a 64×64 coarse grid with two levels of 2× refinement (blue and red outlines).", "perturbed_explanation": "The perturbed statement incorrectly claims the L1 error at Δx≈0.03125 is ~2×10^-3, whereas the log–log plot for p shows it is actually around ~6×10^-3 at that grid spacing.", "claim": "When the uniform grid spacing halves from Δx≈0.0625 to ≈0.03125, the L1 norm of p error drops from ~2×10^-2 to ~2×10^-3 (≈10× reduction), indicating near-second-order convergence; concurrently, the AMR simulation at t=0 employs a 64×64 coarse grid with two levels of 2× refinement (blue and red outlines).", "label": false }, { "paperid": "2411.01423v1", "paper_path": "./SciVer/papers/2411.01423v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "4(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01423v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.01423v1_figure_4(d).png", "section": [ "5.2" ], "request_id": 441, "origin_statement": "CLaSMO at τ=0 boosts average QED from 0.5876 to 0.6778 with a maximum of 0.9480, whereas at τ=0.6 it achieves an average of 0.6434 and a maximum QED of 0.9402, indicating a 0.0078 drop in peak performance under the stricter constraint.", "perturbed_statement": "CLaSMO at τ=0 boosts average QED from 0.5876 to 0.6778 with a maximum of 0.9580, whereas at τ=0.6 it achieves an average of 0.6500 and a maximum QED of 0.9402, indicating a 0.008 difference in peak performance.", "perturbed_explanation": "The perturbed statement incorrectly reports the maximum QED at τ=0 as 0.9580 (the true value is 0.9480) and the average QED at τ=0.6 as 0.6500 (the true average is 0.6434), contradicting the values given in the table.", "claim": "CLaSMO at τ=0 boosts average QED from 0.5876 to 0.6778 with a maximum of 0.9580, whereas at τ=0.6 it achieves an average of 0.6500 and a maximum QED of 0.9402, indicating a 0.008 difference in peak performance.", "label": false }, { "paperid": "2411.03862v1", "paper_path": "./SciVer/papers/2411.03862v1.json", "claim_type": "parallel", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03862v1_figure_6.png", "item2_path": 
"./SciVer/images/2411.03862v1_figure_7.png", "section": [ "4.2" ], "request_id": 443, "origin_statement": "Figure 6’s rotation-only attack pivots the tiger by 75°, and Figure 7’s six-attack sample similarly exhibits a 75° tilt, demonstrating that the rotational distortion persists even when Gaussian blur (radius 4), JPEG compression (quality 25), color jitter (brightness 6), noise, and 75% cropping are also applied.", "perturbed_statement": "Figure 6’s rotation-only attack pivots the tiger by 90°, and Figure 7’s six-attack sample similarly exhibits a 90° tilt, demonstrating that the rotational distortion persists even when Gaussian blur (radius 4), JPEG compression (quality 25), color jitter (brightness 6), noise, and 75% cropping are also applied.", "perturbed_explanation": "The statement is incorrect because both Figure 6 and Figure 7 apply a 75° rotation, not 90°. The rotation angle in the captions and depicted samples is explicitly 75°, so claiming a 90° tilt contradicts the visual data.", "claim": "Figure 6’s rotation-only attack pivots the tiger by 90°, and Figure 7’s six-attack sample similarly exhibits a 90° tilt, demonstrating that the rotational distortion persists even when Gaussian blur (radius 4), JPEG compression (quality 25), color jitter (brightness 6), noise, and 75% cropping are also applied.", "label": false }, { "paperid": "2409.08388v1", "paper_path": "./SciVer/papers/2409.08388v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08388v1_figure_4.png", "item2_path": "./SciVer/images/2409.08388v1_figure_5.png", "section": [ "5.3" ], "request_id": 448, "origin_statement": "By stage 2, focal loss sustains about 85.6% accuracy versus 75.0% for Cross-Entropy, and the clustering-based exemplar selection yields approximately 90.2% average accuracy compared to 81.0% for herding, indicating both focal loss and clustering substantially improve performance.", "perturbed_statement": "By stage 2, focal loss sustains about 85.6% accuracy versus 75.0% for Cross-Entropy, and the clustering-based exemplar selection yields approximately 92.5% average accuracy compared to 81.0% for herding, indicating both focal loss and clustering substantially improve performance.", "perturbed_explanation": "The perturbed statement wrongly claims clustering-based exemplar selection yields 92.5% average accuracy, but Figure 5 shows it is around 90.2%, so the 92.5% figure contradicts the visual data.", "claim": "By stage 2, focal loss sustains about 85.6% accuracy versus 75.0% for Cross-Entropy, and the clustering-based exemplar selection yields approximately 92.5% average accuracy compared to 81.0% for herding, indicating both focal loss and clustering substantially improve performance.", "label": false }, { "paperid": "2409.06123v1", "paper_path": "./SciVer/papers/2409.06123v1.json", "claim_type": "parallel", "item1": "11", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06123v1_figure_11.png", "item2_path": "./SciVer/images/2409.06123v1-Table5-1.png", "section": [ "6.1.4" ], "request_id": 454, "origin_statement": "In the syn setting, SubTab FL exhibits per-silo delta accuracies around 0.60–0.70 (Figure 11), whereas CFL achieves a mean delta of only 0.20448 (Table 5), reducing the gap by over 0.44.", "perturbed_statement": "In the syn setting, SubTab FL exhibits per-silo delta accuracies around 0.50–0.60 (Figure 11), whereas CFL achieves a mean delta of only 
0.20448 (Table 5), reducing the gap by over 0.32.", "perturbed_explanation": "The perturbation is incorrect because Figure 11 shows SubTab FL deltas on the syn dataset consistently between approximately 0.60 and 0.70, not 0.50–0.60. Consequently, the calculated gap reduction cannot be over 0.32 as stated.", "claim": "In the syn setting, SubTab FL exhibits per-silo delta accuracies around 0.50–0.60 (Figure 11), whereas CFL achieves a mean delta of only 0.20448 (Table 5), reducing the gap by over 0.32.", "label": false }, { "paperid": "2409.15568v1", "paper_path": "./SciVer/papers/2409.15568v1.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15568v1_figure_1.png", "item2_path": "./SciVer/images/2409.15568v1-Table1-1.png", "section": [ "5.1" ], "request_id": 459, "origin_statement": "Figure 1a’s leave-one-out protocol yields exactly one test rating per warm-start Sport test user, as Table 1 reports 8326 test ratings for 8326 users. In contrast, Figure 1b’s cold-start Games test set has 212 users providing 1304 ratings—about 6.15 ratings per user—reflecting full-history sampling.", "perturbed_statement": "Figure 1a’s leave-one-out protocol yields exactly one test rating per warm-start Sport test user, as Table 1 reports 9000 test ratings for 8326 users. In contrast, Figure 1b’s cold-start Games test set has 212 users providing 1000 ratings—about 4.72 ratings per user—reflecting full-history sampling.", "perturbed_explanation": "The perturbed statement misreports the test set sizes. Table 1 actually lists 8326 test ratings (not 9000) for the Sport dataset’s 8326 warm-start users, and 1304 test ratings (not 1000) for the 212 users in the Games cold-start set. These figures contradict the published statistics.", "claim": "Figure 1a’s leave-one-out protocol yields exactly one test rating per warm-start Sport test user, as Table 1 reports 9000 test ratings for 8326 users. 
In contrast, Figure 1b’s cold-start Games test set has 212 users providing 1000 ratings—about 4.72 ratings per user—reflecting full-history sampling.", "label": false }, { "paperid": "2410.12705v2", "paper_path": "./SciVer/papers/2410.12705v2.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.12705v2_figure_5(a).png", "item2_path": "./SciVer/images/2410.12705v2_figure_5(b).png", "section": [ "4.3" ], "request_id": 460, "origin_statement": "Under the contextualized condition, the model scores roughly 75% on Koreanic languages in the multiple-choice task but only about 45% in open-ended questions, revealing a 30-point gap between MCQ and OEQ accuracy on the Koreanic family.", "perturbed_statement": "Under the contextualized condition, the model scores roughly 75% on Koreanic languages in the multiple-choice task but about 65% in open-ended questions, revealing a 10-point gap between MCQ and OEQ accuracy on the Koreanic family.", "perturbed_explanation": "The open-ended (OEQ) contextualized accuracy for Koreanic languages is approximately 45%, not 65%, so stating 65% contradicts the OEQ radar data for the contextualized condition.", "claim": "Under the contextualized condition, the model scores roughly 75% on Koreanic languages in the multiple-choice task but about 65% in open-ended questions, revealing a 10-point gap between MCQ and OEQ accuracy on the Koreanic family.", "label": false }, { "paperid": "2410.06827v1", "paper_path": "./SciVer/papers/2410.06827v1.json", "claim_type": "parallel", "item1": "1", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.06827v1_figure_1.png", "item2_path": "./SciVer/images/2410.06827v1_figure_7(b).png", "section": [ "2" ], "request_id": 468, "origin_statement": "Fig.1’s one‐loop triangle diagrams sum over five charged internal species (W±, χ±, c±, S^Q and fermions), whereas Fig.7’s one‐loop box diagrams involve exclusively the charged scalar S^Q, i.e. five species versus one.", "perturbed_statement": "Fig.1’s one‐loop triangle diagrams sum over five charged internal species (W±, χ±, c±, S^Q and fermions), whereas Fig.7’s one‐loop box diagrams involve both the charged scalar S^Q and W± bosons, i.e. one versus two species.", "perturbed_explanation": "Fig.7’s blob is labeled only S^Q, and the caption and context specify that G7 box diagrams include charged Higgs loops only. There is no W± involvement in Fig.7, so claiming two species contradicts the figure.", "claim": "Fig.1’s one‐loop triangle diagrams sum over five charged internal species (W±, χ±, c±, S^Q and fermions), whereas Fig.7’s one‐loop box diagrams involve both the charged scalar S^Q and W± bosons, i.e. 
one versus two species.", "label": false }, { "paperid": "2409.13828v1", "paper_path": "./SciVer/papers/2409.13828v1.json", "claim_type": "parallel", "item1": "3", "item2": "8(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.13828v1_figure_3.png", "item2_path": "./SciVer/images/2409.13828v1_figure_8(b).png", "section": [ "4.1" ], "request_id": 469, "origin_statement": "With a 50% masking ratio (Fig.3), MAE’s transformer decoder reconstructs PGD adversarial images with about 18 noisy patches, whereas reconstructed normal images contain under 5 visible noise artifacts (Fig.8b), indicating higher reconstruction noise for adversarial inputs.", "perturbed_statement": "With a 75% masking ratio (Fig.3), MAE’s transformer decoder reconstructs PGD adversarial images with about 18 noisy patches, whereas reconstructed normal images contain under 5 visible noise artifacts (Fig.8b), indicating higher reconstruction noise for adversarial inputs.", "perturbed_explanation": "The statement incorrectly claims a 75% masking ratio, but both Figure 3 and the caption for Figure 8(b) specify that the MAE model uses a 50% masking ratio, not 75%, so the masking detail contradicts the provided context.", "claim": "With a 75% masking ratio (Fig.3), MAE’s transformer decoder reconstructs PGD adversarial images with about 18 noisy patches, whereas reconstructed normal images contain under 5 visible noise artifacts (Fig.8b), indicating higher reconstruction noise for adversarial inputs.", "label": false }, { "paperid": "2409.20553v2", "paper_path": "./SciVer/papers/2409.20553v2.json", "claim_type": "parallel", "item1": "2", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.20553v2_figure_2.png", "item2_path": "./SciVer/images/2409.20553v2_figure_6.png", "section": [ "4.1" ], "request_id": 470, "origin_statement": "At the 2000 active/player and 2000 opponent skill pairing, Maia-2 achieves 55.7% move prediction accuracy (Figure 2), outperforming Maia 1500’s 51.8% accuracy (Figure 6) by 3.9 percentage points.", "perturbed_statement": "At the 2000 active/player and 2000 opponent skill pairing, Maia-2 achieves 55.7% move prediction accuracy (Figure 2), outperforming Maia 1500’s 54.8% accuracy (Figure 6) by 1.0 percentage point.", "perturbed_explanation": "Figure 6 reports Maia 1500’s accuracy at the 2000/2000 pairing as 51.8%, not 54.8%, so both the claimed 54.8% value and the 1.0 point advantage contradict the actual data.", "claim": "At the 2000 active/player and 2000 opponent skill pairing, Maia-2 achieves 55.7% move prediction accuracy (Figure 2), outperforming Maia 1500’s 54.8% accuracy (Figure 6) by 1.0 percentage point.", "label": false }, { "paperid": "2411.17674v1", "paper_path": "./SciVer/papers/2411.17674v1.json", "claim_type": "parallel", "item1": "5", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17674v1_figure_5.png", "item2_path": "./SciVer/images/2411.17674v1_figure_6(b).png", "section": [ "4.1" ], "request_id": 473, "origin_statement": "The LDA valence coefficient for Excited (4.41) is within 0.42 of that for Happy (4.83), and Excited achieves a recall around 0.55 compared to Neutral’s approximately 0.47, indicating slightly better sensitivity for high-valence Excited instances.", "perturbed_statement": "The LDA valence coefficient for Excited (4.41) is within 0.42 of that for Happy (4.83), and Excited achieves a recall around 0.55 compared to Neutral’s approximately 0.60, 
indicating slightly better sensitivity for high-valence Excited instances.", "perturbed_explanation": "The perturbed statement misstates Neutral’s recall as about 0.60; however, Figure 6(b) shows the diagonal cell for Neutral is much lighter (around 0.47), not 0.60, so the recall value is incorrect.", "claim": "The LDA valence coefficient for Excited (4.41) is within 0.42 of that for Happy (4.83), and Excited achieves a recall around 0.55 compared to Neutral’s approximately 0.60, indicating slightly better sensitivity for high-valence Excited instances.", "label": false }, { "paperid": "2410.01157v1", "paper_path": "./SciVer/papers/2410.01157v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.01157v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.01157v1-Table2-1.png", "section": [ "4.3" ], "request_id": 474, "origin_statement": "At ratio 5, Company A’s precision drops to about 34.3% (Figure 2), while Company B’s precision remains near 52.2% (Table 2), meaning that Company B’s precision is approximately 18 percentage points higher.", "perturbed_statement": "At ratio 5, Company A’s precision drops to about 34.3% (Figure 2), while Company B’s precision remains near 42.2% (Table 2), meaning that Company B’s precision is approximately 8 percentage points higher.", "perturbed_explanation": "The perturbed statement is incorrect because Table 2 reports Company B’s precision at ratio 5 as 52.22 ± 2.65%, not 42.2%. This contradiction of the actual precision value makes the claim false.", "claim": "At ratio 5, Company A’s precision drops to about 34.3% (Figure 2), while Company B’s precision remains near 42.2% (Table 2), meaning that Company B’s precision is approximately 8 percentage points higher.", "label": false }, { "paperid": "2409.12680v2", "paper_path": "./SciVer/papers/2409.12680v2.json", "claim_type": "parallel", "item1": "4(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.12680v2_figure_4(a).png", "item2_path": "./SciVer/images/2409.12680v2-Table3-1.png", "section": [ "4.2" ], "request_id": 478, "origin_statement": "STPG boosts the mIoU of the Truck tail class by about 25.8% over the supervised baseline (from 37.6% to 63.4%) in Cityscapes with 1/8 labeled data, and on PASCAL VOC 2012 it raises full-set mIoU by 8.33% compared to the supervised baseline (72.12% to 80.45%).", "perturbed_statement": "STPG boosts the mIoU of the Truck tail class by about 30.0% over the supervised baseline (from 37.6% to 63.4%) in Cityscapes with 1/8 labeled data, and on PASCAL VOC 2012 it raises full-set mIoU by 10.5% compared to the supervised baseline (72.12% to 80.45%).", "perturbed_explanation": "The perturbed statement incorrectly reports the Truck mIoU improvement as 30.0% rather than the actual 25.8% (63.4%–37.6%), and it wrongly claims a 10.5% gain on full-set PASCAL VOC, while the true increase is 8.33% (80.45%–72.12%).", "claim": "STPG boosts the mIoU of the Truck tail class by about 30.0% over the supervised baseline (from 37.6% to 63.4%) in Cityscapes with 1/8 labeled data, and on PASCAL VOC 2012 it raises full-set mIoU by 10.5% compared to the supervised baseline (72.12% to 80.45%).", "label": false }, { "paperid": "2411.00653v1", "paper_path": "./SciVer/papers/2411.00653v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": 
"./SciVer/images/2411.00653v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.00653v1_figure_5(b).png", "section": [ "5.5" ], "request_id": 479, "origin_statement": "The model coherence score shows a Pearson linear correlation of 0.826 with node classification performance, compared to 0.725 with link prediction, yielding a 0.101 higher correlation for node classification.", "perturbed_statement": "The model coherence score shows a Pearson linear correlation of 0.826 with node classification performance, compared to 0.825 with link prediction, yielding only a 0.001 higher correlation for node classification.", "perturbed_explanation": "The perturbed statement incorrectly claims the link prediction correlation is 0.825 and the difference is 0.001. However, the actual link prediction correlation shown in Figure 5(b) is 0.725, making the true difference 0.101, not 0.001.", "claim": "The model coherence score shows a Pearson linear correlation of 0.826 with node classification performance, compared to 0.825 with link prediction, yielding only a 0.001 higher correlation for node classification.", "label": false }, { "paperid": "2409.07365v1", "paper_path": "./SciVer/papers/2409.07365v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "6(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07365v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.07365v1_figure_6(f).png", "section": [ "3.2" ], "request_id": 482, "origin_statement": "On the 1024×512 px panoramic image, the mask of valid pixels shows about 38% coverage (∼200 000 valid pixels), yet the EMBA refined map reconstructs a full-intensity image including a smooth dark band at the bottom beyond these valid areas.", "perturbed_statement": "On the 800×600 px panoramic image, the mask of valid pixels shows about 38% coverage (∼200 000 valid pixels), yet the EMBA refined map reconstructs a full-intensity image including a smooth dark band at the bottom beyond these valid areas.", "perturbed_explanation": "The perturbed statement misstates the map resolution as 800×600 px, but both the mask and the EMBA map are described as 1024×512 px in the caption and context.", "claim": "On the 800×600 px panoramic image, the mask of valid pixels shows about 38% coverage (∼200 000 valid pixels), yet the EMBA refined map reconstructs a full-intensity image including a smooth dark band at the bottom beyond these valid areas.", "label": false }, { "paperid": "2410.03415v1", "paper_path": "./SciVer/papers/2410.03415v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.03415v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.03415v1_figure_4(b).png", "section": [ "5.3" ], "request_id": 486, "origin_statement": "In Llama-2-7B-Chat the largest refusal score drop occurs at layer 14 and token position −1 with about −14, whereas in Llama-2-13B-Chat the peak decline is around layer 12 at token position −1 with roughly −10.", "perturbed_statement": "In Llama-2-7B-Chat the largest refusal score drop occurs at layer 16 and token position −2 with about −12, whereas in Llama-2-13B-Chat the peak decline is around layer 14 at token position −3 with roughly −8.", "perturbed_explanation": "The perturbation is incorrect because the actual Llama-2-7B-Chat heatmap shows its strongest refusal change at layer 14, token −1 (≈−14), not layer 16/token −2 (≈−12). 
Similarly, Llama-2-13B-Chat peaks at layer 12, token −1 (≈−10), not layer 14/token −3 (≈−8).", "claim": "In Llama-2-7B-Chat the largest refusal score drop occurs at layer 16 and token position −2 with about −12, whereas in Llama-2-13B-Chat the peak decline is around layer 14 at token position −3 with roughly −8.", "label": false }, { "paperid": "2409.16718v2", "paper_path": "./SciVer/papers/2409.16718v2.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16718v2_figure_3.png", "item2_path": "./SciVer/images/2409.16718v2_figure_4.png", "section": [ "4.3" ], "request_id": 487, "origin_statement": "For the text encoder’s bias at layer 1, Fig. 4 (a) shows a squared gradient sum of about 4.7 with a parameter change of ~0.33, and Fig. 4 (b) shows KD regularization reduces that change to ~0.27, a 0.06 drop.", "perturbed_statement": "For the text encoder’s bias at layer 1, Fig. 4 (a) shows a squared gradient sum of about 3.2 with a parameter change of ~0.33, and Fig. 4 (b) shows KD regularization reduces that change to ~0.19, a 0.14 drop.", "perturbed_explanation": "The perturbed statement is incorrect because Fig. 4 (a) reports a squared gradient sum of about 4.7 at layer 1 (not 3.2), and Fig. 4 (b) shows KD loss reduces the change from ~0.33 to ~0.27 (a 0.06 drop), not to ~0.19 (0.14 drop).", "claim": "For the text encoder’s bias at layer 1, Fig. 4 (a) shows a squared gradient sum of about 3.2 with a parameter change of ~0.33, and Fig. 4 (b) shows KD regularization reduces that change to ~0.19, a 0.14 drop.", "label": false }, { "paperid": "2411.08124v1", "paper_path": "./SciVer/papers/2411.08124v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.08124v1_figure_2.png", "item2_path": "./SciVer/images/2411.08124v1-Table3-1.png", "section": [ "2.3" ], "request_id": 488, "origin_statement": "With ks=5, ALPINE galaxies have average log U≈−2.1 and log n≈1.2 (Table 3), closely matching Figure 2’s right panels; there, the continuous‐SF BPASS model extends the H II region to N_H≈10^20.8 cm^−2 at T_e≈10^4 K, versus only ≈10^19.5 cm^−2 for log U=−4 (left).", "perturbed_statement": "With ks=5, ALPINE galaxies have average log U≈−3.1 and log n≈1.2 (Table 3), closely matching Figure 2’s right panels; there, the continuous‐SF BPASS model extends the H II region to N_H≈10^20.8 cm^−2 at T_e≈10^4 K, versus only ≈10^19.5 cm^−2 for log U=−4 (left).", "perturbed_explanation": "Table 3 reports an average ionisation parameter of log U≈−2.1 for ks=5, not −3.1 as stated, so the perturbed log U contradicts the tabulated value.", "claim": "With ks=5, ALPINE galaxies have average log U≈−3.1 and log n≈1.2 (Table 3), closely matching Figure 2’s right panels; there, the continuous‐SF BPASS model extends the H II region to N_H≈10^20.8 cm^−2 at T_e≈10^4 K, versus only ≈10^19.5 cm^−2 for log U=−4 (left).", "label": false }, { "paperid": "2410.16928v2", "paper_path": "./SciVer/papers/2410.16928v2.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.16928v2_figure_3.png", "item2_path": "./SciVer/images/2410.16928v2_figure_5.png", "section": [ "4.2" ], "request_id": 489, "origin_statement": "At prediction length 96, increasing the lookback from 96 to 256 in Fig. 5 reduces xLSTM-Mixer’s MSE by about 0.08 (from ~0.35 to ~0.27), while in Fig. 
3 the Weather initial embedding token (green) falls from roughly +0.5 at time 0 to about –1.7 by time 96.", "perturbed_statement": "At prediction length 96, increasing the lookback from 96 to 256 reduces xLSTM-Mixer’s MSE by about 0.12 (from ~0.35 to ~0.23), while in Fig. 3 the Weather initial embedding token (green) falls from roughly +0.5 at time 0 to about –2.5 by time 96.", "perturbed_explanation": "The MSE drop from lookback 96 to 256 for xLSTM-Mixer in Fig. 5 is actually about 0.08 (from ~0.35 to ~0.27), not 0.12 to ~0.23. Also, the Weather initial token in Fig. 3 declines only to about –1.7 at time 96, not –2.5.", "claim": "At prediction length 96, increasing the lookback from 96 to 256 reduces xLSTM-Mixer’s MSE by about 0.12 (from ~0.35 to ~0.23), while in Fig. 3 the Weather initial embedding token (green) falls from roughly +0.5 at time 0 to about –2.5 by time 96.", "label": false }, { "paperid": "2409.13844v1", "paper_path": "./SciVer/papers/2409.13844v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "7", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13844v1_figure_5(a).png", "item2_path": "./SciVer/images/2409.13844v1-Table7-1.png", "section": [ "3.5" ], "request_id": 490, "origin_statement": "Under CG optimization, the SW loss L_F falls from ~137 at iteration 0 to ~101 by iteration 3, while Table 7 shows ε decreases from 2.168 eV to 1.809 eV and λ increases from 21.0 to 34.62.", "perturbed_statement": "Under CG optimization, the SW loss L_F falls from ~137 at iteration 0 to ~95 by iteration 3, while Table 7 shows ε decreases from 2.168 eV to 1.809 eV and λ increases from 21.0 to 34.62.", "perturbed_explanation": "The perturbed loss value is incorrect: Figure 5 shows the loss L_F at iteration 3 is approximately 101, not 95, so the statement contradicts the plotted learning curve.", "claim": "Under CG optimization, the SW loss L_F falls from ~137 at iteration 0 to ~95 by iteration 3, while Table 7 shows ε decreases from 2.168 eV to 1.809 eV and λ increases from 21.0 to 34.62.", "label": false }, { "paperid": "2411.01088v1", "paper_path": "./SciVer/papers/2411.01088v1.json", "claim_type": "parallel", "item1": "2(d)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.01088v1_figure_2(d).png", "item2_path": "./SciVer/images/2411.01088v1-Table3-1.png", "section": [ "7.2" ], "request_id": 493, "origin_statement": "On GPT2-FT, CRONOS reaches its 93.91% peak validation accuracy in the first ADMM iteration, whereas AdamW with a learning rate of 1.32 × 10^-4 requires at least five passes to approach its maximum ~93.69% accuracy.", "perturbed_statement": "On GPT2-FT, CRONOS reaches its 93.91% peak validation accuracy in the third ADMM iteration, whereas AdamW with a learning rate of 1.32 × 10^-4 requires only two passes to approach its maximum ~93.69% accuracy.", "perturbed_explanation": "The perturbation is incorrect because Figure 2(d) shows CRONOS (dashed line) already attains its plateaued accuracy by the first outer iteration, not the third. 
Additionally, AdamW only reaches near-93.7% after roughly five or more passes, not after just two.", "claim": "On GPT2-FT, CRONOS reaches its 93.91% peak validation accuracy in the third ADMM iteration, whereas AdamW with a learning rate of 1.32 × 10^-4 requires only two passes to approach its maximum ~93.69% accuracy.", "label": false }, { "paperid": "2410.08695v2", "paper_path": "./SciVer/papers/2410.08695v2.json", "claim_type": "parallel", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.08695v2_figure_6.png", "item2_path": "./SciVer/images/2410.08695v2-Table3-1.png", "section": [ "5.3" ], "request_id": 494, "origin_statement": "On SEEDBench InternVL2’s accuracy drops from about 79.6% at V2+L3 to 41.8% at V1+V3+L4, a 37.8-point fall, whereas Table 3 shows a 31.8-point decline on MMvet from 68.15% easy to 36.33% hard.", "perturbed_statement": "On SEEDBench InternVL2’s accuracy drops from about 79.6% at V2+L3 to 41.8% at V1+V3+L4, a 45.6-point fall, whereas Table 3 shows a 28.5-point decline on MMvet from 68.15% easy to 36.33% hard.", "perturbed_explanation": "The stated 45.6-point fall on SEEDBench is incorrect: the actual drop is 79.6% – 41.8% = 37.8 points. Similarly, the MMvet decline is 68.15% – 36.33% = 31.82 points, not 28.5 points as claimed.", "claim": "On SEEDBench InternVL2’s accuracy drops from about 79.6% at V2+L3 to 41.8% at V1+V3+L4, a 45.6-point fall, whereas Table 3 shows a 28.5-point decline on MMvet from 68.15% easy to 36.33% hard.", "label": false }, { "paperid": "2411.07565v1", "paper_path": "./SciVer/papers/2411.07565v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.07565v1-Table3-1.png", "item2_path": "./SciVer/images/2411.07565v1-Table4-1.png", "section": [ "4.2" ], "request_id": 495, "origin_statement": "At an absolute error of 1×10⁻⁵, mCDFCI on C₂ consumes 48 GB of memory and converges in 603.7 s, whereas on N₂ it uses only 24 GB but requires 1830.3 s, a roughly threefold longer time.", "perturbed_statement": "At an absolute error of 1×10⁻⁵, mCDFCI on C₂ consumes 48 GB of memory and converges in 603.7 s, whereas on N₂ it uses 48 GB but requires 1830.3 s, a roughly threefold longer time.", "perturbed_explanation": "The perturbed statement wrongly claims mCDFCI uses 48 GB for N₂ at 1×10⁻⁵. According to Table 4, it actually uses only 24.0 GB of memory at that threshold.", "claim": "At an absolute error of 1×10⁻⁵, mCDFCI on C₂ consumes 48 GB of memory and converges in 603.7 s, whereas on N₂ it uses 48 GB but requires 1830.3 s, a roughly threefold longer time.", "label": false }, { "paperid": "2410.08320v1", "paper_path": "./SciVer/papers/2410.08320v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.08320v1_figure_2.png", "item2_path": "./SciVer/images/2410.08320v1-Table3-1.png", "section": [ "4.2" ], "request_id": 498, "origin_statement": "On the PubMed corpus, the energy-based GoF test yields a critical value of -108.60 using true IK queries (Fig. 2) and achieves an AUROC of 0.9566 on PubMedQA (Table), vastly outperforming GPT-3.5’s 0.1623 and GPT-4’s 0.2088.", "perturbed_statement": "On the PubMed corpus, the energy-based GoF test yields a critical value of -120.00 using true IK queries (Fig. 
2) and achieves an AUROC of 0.9566 on PubMedQA (Table), vastly outperforming GPT-3.5’s 0.1623 and GPT-4’s 0.2088.", "perturbed_explanation": "Figure 2 shows that the critical value estimated using true IK queries on the PubMed corpus is -108.60, not -120.00, so the altered critical value contradicts the figure.", "claim": "On the PubMed corpus, the energy-based GoF test yields a critical value of -120.00 using true IK queries (Fig. 2) and achieves an AUROC of 0.9566 on PubMedQA (Table), vastly outperforming GPT-3.5’s 0.1623 and GPT-4’s 0.2088.", "label": false }, { "paperid": "2411.04200v1", "paper_path": "./SciVer/papers/2411.04200v1.json", "claim_type": "parallel", "item1": "6(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04200v1_figure_6(a).png", "item2_path": "./SciVer/images/2411.04200v1_figure_6(b).png", "section": [ "7.2" ], "request_id": 499, "origin_statement": "In medium-entropy U-R instances, the median fraction captured by CDPs rises from about 0.11 at |A|=4 to about 0.17 at |A|=16, whereas in low-entropy U-R instances it only increases from roughly 0.035 to 0.045 over the same range.", "perturbed_statement": "In medium-entropy U-R instances, the median fraction captured by CDPs rises from about 0.11 at |A|=4 to about 0.17 at |A|=16, whereas in low-entropy U-R instances it only increases from roughly 0.035 to 0.065 over the same range.", "perturbed_explanation": "The low-entropy U-R boxplot in Figure 6(b) shows the median at |A|=16 is around 0.045, not 0.065, so the claimed increase to 0.065 contradicts the plotted value.", "claim": "In medium-entropy U-R instances, the median fraction captured by CDPs rises from about 0.11 at |A|=4 to about 0.17 at |A|=16, whereas in low-entropy U-R instances it only increases from roughly 0.035 to 0.065 over the same range.", "label": false }, { "paperid": "2411.13694v1", "paper_path": "./SciVer/papers/2411.13694v1.json", "claim_type": "parallel", "item1": "2(c)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13694v1_figure_2(c).png", "item2_path": "./SciVer/images/2411.13694v1_figure_2(d).png", "section": [ "5.1" ], "request_id": 500, "origin_statement": "When three participants all display a green checkmark in step (c), only two contacts—Charlie and Bob—are added in step (d), meaning one participant’s contact is not shown in the final list.", "perturbed_statement": "When four participants all display a green checkmark in step (c), only two contacts—Charlie and Bob—are added in step (d), meaning two participants’ contacts are not shown in the final list.", "perturbed_explanation": "The first screenshot clearly asks “Do all 3 participants have a green checkmark,” showing there are only three participants. 
Claiming four participants contradicts the image’s displayed participant count.", "claim": "When four participants all display a green checkmark in step (c), only two contacts—Charlie and Bob—are added in step (d), meaning two participants’ contacts are not shown in the final list.", "label": false }, { "paperid": "2411.02142v1", "paper_path": "./SciVer/papers/2411.02142v1.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02142v1_figure_5.png", "item2_path": "./SciVer/images/2411.02142v1-Table3-1.png", "section": [ "4.1" ], "request_id": 503, "origin_statement": "At 15% CLM pretraining compute, the 470M model achieves its lowest validation perplexity of about 7.65, and Table 3 shows CLM’s β_D coefficient is 7.904, much smaller than MLM’s β_D of 42.614.", "perturbed_statement": "At 15% CLM pretraining compute, the 470M model achieves its lowest validation perplexity of about 7.65, and Table 3 shows CLM’s β_D coefficient is 42.614, much larger than MLM’s β_D of 7.904.", "perturbed_explanation": "The perturbed statement swaps the β_D values from Table 3. In reality, CLM’s β_D is 7.904 and MLM’s β_D is 42.614, so attributing 42.614 to CLM and 7.904 to MLM directly contradicts the table.", "claim": "At 15% CLM pretraining compute, the 470M model achieves its lowest validation perplexity of about 7.65, and Table 3 shows CLM’s β_D coefficient is 42.614, much larger than MLM’s β_D of 7.904.", "label": false }, { "paperid": "2410.19779v1", "paper_path": "./SciVer/papers/2410.19779v1.json", "claim_type": "parallel", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19779v1_figure_5.png", "item2_path": "./SciVer/images/2410.19779v1_figure_6.png", "section": [ "3.3" ], "request_id": 504, "origin_statement": "Increasing pre-training from 0B to 1B tokens raises MI accuracy from 56.5% to 60.4%, and t-SNE visualization shows that after autoregressive modeling, data points form four distinct clusters corresponding to DREAMER categories, compared to the mixed overlap in raw EEG.", "perturbed_statement": "Increasing pre-training from 0B to 1B tokens raises MI accuracy from 56.5% to 65.0%, and t-SNE visualization shows that after autoregressive modeling, data points form three distinct clusters corresponding to DREAMER categories, compared to the mixed overlap in raw EEG.", "perturbed_explanation": "The perturbed statement misreports two details: Figure 5 indicates MI accuracy reaches 60.4% at 1B tokens, not 65.0%. 
Moreover, Figure 6 after autoregressive modeling clearly shows four well-separated clusters (for four categories), not three clusters.", "claim": "Increasing pre-training from 0B to 1B tokens raises MI accuracy from 56.5% to 65.0%, and t-SNE visualization shows that after autoregressive modeling, data points form three distinct clusters corresponding to DREAMER categories, compared to the mixed overlap in raw EEG.", "label": false }, { "paperid": "2411.06966v1", "paper_path": "./SciVer/papers/2411.06966v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.06966v1-Table4-1.png", "item2_path": "./SciVer/images/2411.06966v1-Table5-1.png", "section": [ "5.3" ], "request_id": 506, "origin_statement": "On ImageNet classification, VRF’s OOD accuracy (58.5%) exceeds WSE/OSE by 0.7 percentage points, and in OOD detection, VRF (61.8%) outperforms the best baseline kNN detector (58.4%) by 3.4 points.", "perturbed_statement": "On ImageNet classification, VRF’s OOD accuracy (60.5%) exceeds WSE/OSE by 2.6 percentage points, and in OOD detection, VRF (65.0%) outperforms the best baseline kNN detector (58.4%) by 6.6 points.", "perturbed_explanation": "The perturbed statement misreports VRF’s actual OOD classification accuracy, which is 58.5% (not 60.5%), yielding only a 0.7-point gain over WSE/OSE’s 57.8%. It also inflates VRF’s OOD detection rate, which is 61.8% (not 65.0%).", "claim": "On ImageNet classification, VRF’s OOD accuracy (60.5%) exceeds WSE/OSE by 2.6 percentage points, and in OOD detection, VRF (65.0%) outperforms the best baseline kNN detector (58.4%) by 6.6 points.", "label": false }, { "paperid": "2411.07954v2", "paper_path": "./SciVer/papers/2411.07954v2.json", "claim_type": "parallel", "item1": "1", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.07954v2-Table1-1.png", "item2_path": "./SciVer/images/2411.07954v2-Table5-1.png", "section": [ "4.2" ], "request_id": 509, "origin_statement": "On Mortar Mayhem, AttentionTuner raises mean success from 20.8%±42.2 to 99.8%±0.4—a 79-point increase—significant by Welch’s t-test (p=0.016).", "perturbed_statement": "On Mortar Mayhem, AttentionTuner raises mean success from 20.8%±42.2 to 99.8%±0.4—a 79-point increase—significant by Welch’s t-test (p=0.16).", "perturbed_explanation": "The perturbed statement misreports the p-value: Table 5 shows p=0.016 for Mortar Mayhem, not p=0.16, contradicting the statistical significance result.", "claim": "On Mortar Mayhem, AttentionTuner raises mean success from 20.8%±42.2 to 99.8%±0.4—a 79-point increase—significant by Welch’s t-test (p=0.16).", "label": false }, { "paperid": "2410.17292v1", "paper_path": "./SciVer/papers/2410.17292v1.json", "claim_type": "parallel", "item1": "4", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17292v1_figure_4.png", "item2_path": "./SciVer/images/2410.17292v1_figure_7.png", "section": [ "5.2.2" ], "request_id": 512, "origin_statement": "At M≈10^9 GeV, Fig. 4 shows the ε_rel=10^−15 curve reaches Λ≈10^−3 eV and the ε_rel=10^−20 curve reaches Λ≈10^−2 eV, while in Fig. 7 the α=2×10^−10 and α=3×10^−4 iso-lines cross these Λ values respectively.", "perturbed_statement": "At M≈10^9 GeV, Fig. 4 shows the ε_rel=10^−15 curve reaches Λ≈10^−4 eV and the ε_rel=10^−20 curve reaches Λ≈10^−1 eV, while in Fig. 
7 the α=2×10^−10 and α=3×10^−4 iso-lines cross these Λ values respectively.", "perturbed_explanation": "The perturbed statement misplaces the Λ crossings: in Fig. 4 the ε_rel=10^−15 and 10^−20 curves actually cross at Λ≈10^−3 eV and 10^−2 eV, not at 10^−4 eV and 10^−1 eV as claimed. Thus the Λ values no longer match the plotted curves.", "claim": "At M≈10^9 GeV, Fig. 4 shows the ε_rel=10^−15 curve reaches Λ≈10^−4 eV and the ε_rel=10^−20 curve reaches Λ≈10^−1 eV, while in Fig. 7 the α=2×10^−10 and α=3×10^−4 iso-lines cross these Λ values respectively.", "label": false }, { "paperid": "2410.22023v3", "paper_path": "./SciVer/papers/2410.22023v3.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.22023v3_figure_5(a).png", "item2_path": "./SciVer/images/2410.22023v3_figure_5(b).png", "section": [ "3.6" ], "request_id": 514, "origin_statement": "In the SAVEE t-SNE plot (a), the happiness (HA, green) cluster comprises approximately 15 plus markers and 15 circles in a compact formation, while in the MELD t-SNE plot (b) the HA cluster spans over double the area with about 35 combined markers, indicating higher sample diversity.", "perturbed_statement": "In the SAVEE t-SNE plot (a), the happiness (HA, green) cluster comprises approximately 15 plus markers and 15 circles in a compact formation, while in the MELD t-SNE plot (b) the HA cluster contains only about 10 combined markers, indicating lower sample diversity.", "perturbed_explanation": "The perturbed statement contradicts the MELD visualization in (b), where the HA cluster actually includes around 35 markers (not only 10) and clearly spans a larger area than in SAVEE, so it does not reflect the true marker count or diversity.", "claim": "In the SAVEE t-SNE plot (a), the happiness (HA, green) cluster comprises approximately 15 plus markers and 15 circles in a compact formation, while in the MELD t-SNE plot (b) the HA cluster contains only about 10 combined markers, indicating lower sample diversity.", "label": false }, { "paperid": "2410.05341v2", "paper_path": "./SciVer/papers/2410.05341v2.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.05341v2-Table1-1.png", "item2_path": "./SciVer/images/2410.05341v2-Table2-1.png", "section": [ "3.4" ], "request_id": 515, "origin_statement": "NeuroBOLT’s inter-subject average correlation of R=0.473 (Table 1) exactly matches the ablation performance of the Temporal+Multi-scale Spectral module with three scales (T+MS w/ ℓ3), which also achieves Avg. R=0.473 (Table 2).", "perturbed_statement": "NeuroBOLT’s inter-subject average correlation of R=0.473 (Table 1) exactly matches the ablation performance of the Temporal+Multi-scale Spectral module with four scales (T+MS w/ ℓ4), which also achieves Avg. R=0.473 (Table 2).", "perturbed_explanation": "The perturbed statement incorrectly attributes the Avg. R=0.473 to the T+MS w/ ℓ4 setting. In Table 2, T+MS w/ ℓ4 actually yields Avg. R=0.460, whereas the ℓ3 configuration is the one with Avg. R=0.473.", "claim": "NeuroBOLT’s inter-subject average correlation of R=0.473 (Table 1) exactly matches the ablation performance of the Temporal+Multi-scale Spectral module with four scales (T+MS w/ ℓ4), which also achieves Avg. 
R=0.473 (Table 2).", "label": false }, { "paperid": "2410.07416v1", "paper_path": "./SciVer/papers/2410.07416v1.json", "claim_type": "parallel", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07416v1_figure_5.png", "item2_path": "./SciVer/images/2410.07416v1-Table1-1.png", "section": [ "5" ], "request_id": 516, "origin_statement": "In the MultiPDB model, Pearson’s r between H0 and the first peak mean μ̂1^{7:11} is −0.65 (purple), while r between H0 and the second peak mean μ̂2^{25:40} is −0.72 (green), indicating the second peak correlates about 0.07 more strongly than the first.", "perturbed_statement": "In the MultiPDB model, Pearson’s r between H0 and the first peak mean μ̂1^{7:11} is −0.65 (purple), while r between H0 and the second peak mean μ̂2^{25:40} is −0.56 (green), indicating the second peak correlates about 0.09 less strongly than the first.", "perturbed_explanation": "The perturbation is wrong because Table 1 and Fig. 5 report the correlation for μ̂2^{25:40} as −0.72, not −0.56, directly contradicting the stated value.", "claim": "In the MultiPDB model, Pearson’s r between H0 and the first peak mean μ̂1^{7:11} is −0.65 (purple), while r between H0 and the second peak mean μ̂2^{25:40} is −0.56 (green), indicating the second peak correlates about 0.09 less strongly than the first.", "label": false }, { "paperid": "2411.02790v1", "paper_path": "./SciVer/papers/2411.02790v1.json", "claim_type": "parallel", "item1": "3(d)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02790v1_figure_3(d).png", "item2_path": "./SciVer/images/2411.02790v1-Table3-1.png", "section": [ "6.2", "6.2.2" ], "request_id": 525, "origin_statement": "Figure 3 shows calibrated gMix scores rising linearly from about 0.24 to 0.42 as NDCG@10 increases from roughly 0.32 to 0.62; Table 3 reports that the Pearson correlation for CtrlCEIt in Comp Sci increases from 0.13 without calibration to 0.81 with calibration.", "perturbed_statement": "Figure 3 shows calibrated gMix scores rising linearly from about 0.24 to 0.42 as NDCG@10 increases from roughly 0.32 to 0.62; Table 3 reports that the Pearson correlation for CtrlCEIt in Comp Sci increases from 0.13 without calibration to 0.92 with calibration.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 shows the calibrated Pearson correlation for CtrlCEIt in Comp Sci is 0.81 (not 0.92).", "claim": "Figure 3 shows calibrated gMix scores rising linearly from about 0.24 to 0.42 as NDCG@10 increases from roughly 0.32 to 0.62; Table 3 reports that the Pearson correlation for CtrlCEIt in Comp Sci increases from 0.13 without calibration to 0.92 with calibration.", "label": false }, { "paperid": "2411.00174v1", "paper_path": "./SciVer/papers/2411.00174v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00174v1_figure_2(b).png", "item2_path": "./SciVer/images/2411.00174v1_figure_3.png", "section": [ "4" ], "request_id": 527, "origin_statement": "Under the easy protocol, the UNIV-M subset has about 480,000 total missing coordinates, and since missing frames are uniformly dropped (12.7% at Position 2), roughly 61,000 missing points occur at the second observed frame.", "perturbed_statement": "Under the easy protocol, the UNIV-M subset has about 480,000 total missing coordinates, and since missing frames are uniformly dropped (10% at Position 2), 
roughly 48,000 missing points occur at the second observed frame.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 3 shows that each position, including Position 2, has 12.7% missing frames, not 10%. Therefore, the calculated 48,000 missing points at Position 2 contradicts the actual 12.7% rate (≈61,000 missing).", "claim": "Under the easy protocol, the UNIV-M subset has about 480,000 total missing coordinates, and since missing frames are uniformly dropped (10% at Position 2), roughly 48,000 missing points occur at the second observed frame.", "label": false }, { "paperid": "2409.17531v2", "paper_path": "./SciVer/papers/2409.17531v2.json", "claim_type": "parallel", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.17531v2-Table1-1.png", "item2_path": "./SciVer/images/2409.17531v2-Table3-1.png", "section": [ "4.3", "4.4.3" ], "request_id": 530, "origin_statement": "SimVG-TB with ViT-B/32 achieves 89.04% RefCOCO testA accuracy at 44 ms inference (Table 1), and after pre-training on 174 K images (Table 3) it reaches 92.80% testA, a 3.76% absolute gain at the same speed.", "perturbed_statement": "SimVG-TB with ViT-B/32 achieves 89.04% RefCOCO testA accuracy at 35 ms inference (Table 1), and after pre-training on 174 K images (Table 3) it reaches 92.80% testA, a 3.76% absolute gain at the same speed.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 shows that SimVG-TB runs at 44 ms, not 35 ms, for RefCOCO testA inference.", "claim": "SimVG-TB with ViT-B/32 achieves 89.04% RefCOCO testA accuracy at 35 ms inference (Table 1), and after pre-training on 174 K images (Table 3) it reaches 92.80% testA, a 3.76% absolute gain at the same speed.", "label": false }, { "paperid": "2409.11143v1", "paper_path": "./SciVer/papers/2409.11143v1.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11143v1_figure_5.png", "item2_path": "./SciVer/images/2409.11143v1-Table3-1.png", "section": [ "4.3", "4.3.2" ], "request_id": 531, "origin_statement": "Table 3 shows the AE model achieves 99.6% accuracy in the 10,20 task, while Figure 5 shows that the k=8 planning-token variant reaches over 95% accuracy by about 90k steps, around 10k steps earlier than the k=16 variant does.", "perturbed_statement": "Table 3 shows the NonAE model achieves 99.6% accuracy in the 10,20 task, while Figure 5 shows that the k=8 planning-token variant reaches over 95% accuracy by about 90k steps, around 10k steps earlier than the k=16 variant does.", "perturbed_explanation": "This is incorrect because Table 3 actually reports only 9.6% accuracy for the NonAE model on the 10,20 task, whereas the 99.6% accuracy is achieved by the AE model.", "claim": "Table 3 shows the NonAE model achieves 99.6% accuracy in the 10,20 task, while Figure 5 shows that the k=8 planning-token variant reaches over 95% accuracy by about 90k steps, around 10k steps earlier than the k=16 variant does.", "label": false }, { "paperid": "2411.15668v1", "paper_path": "./SciVer/papers/2411.15668v1.json", "claim_type": "parallel", "item1": "3", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15668v1_figure_3.png", "item2_path": "./SciVer/images/2411.15668v1_figure_6.png", "section": [ "3.3.3" ], "request_id": 540, "origin_statement": "At 50 kPa, the He II gas front arrives at 3 m after about 1.7 s (Fig. 
3b); by then, the bath heat flux at T3 has declined to roughly 15 kW/m^2 from its peak ~19 kW/m^2, marking the transition toward the film boiling regime (Fig. 6b).", "perturbed_statement": "At 50 kPa, the He II gas front arrives at 3 m after about 0.8 s; by then, the bath heat flux at T3 has declined to roughly 10 kW/m^2 from its peak ~19 kW/m^2.", "perturbed_explanation": "Fig. 3b shows the simulated He II rise time at 50 kPa to x=3 m is about 1.7 s, not 0.8 s. Fig. 6b shows T3’s peak ~19 kW/m^2 occurs at ~0.5 s and only decays to ~15 kW/m^2 by ~1.7 s, so it cannot fall to ~10 kW/m^2 by 0.8 s.", "claim": "At 50 kPa, the He II gas front arrives at 3 m after about 0.8 s; by then, the bath heat flux at T3 has declined to roughly 10 kW/m^2 from its peak ~19 kW/m^2.", "label": false }, { "paperid": "2411.12892v1", "paper_path": "./SciVer/papers/2411.12892v1.json", "claim_type": "parallel", "item1": "3(d)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.12892v1_figure_3(d).png", "item2_path": "./SciVer/images/2411.12892v1-Table1-1.png", "section": [ "4.1" ], "request_id": 544, "origin_statement": "SSA yields an ℓ1 approximation error of 0.358 and assigns temperatures from 0.002 for node 7 (1 neighbor) up to 0.751 for node 0 (4 neighbors), a 375× range that matches sharper attention for more specific tokens.", "perturbed_statement": "SSA yields an ℓ1 approximation error of 0.543 and assigns temperatures from 0.005 for node 7 (1 neighbor) up to 0.751 for node 0 (4 neighbors), a 150× range that matches sharper attention for more specific tokens.", "perturbed_explanation": "The perturbation is wrong because SSA’s ℓ1 error is 0.358 (not 0.543) and node 7’s temperature is 0.002 (not 0.005), which produces a 375× range rather than a 150× range.", "claim": "SSA yields an ℓ1 approximation error of 0.543 and assigns temperatures from 0.005 for node 7 (1 neighbor) up to 0.751 for node 0 (4 neighbors), a 150× range that matches sharper attention for more specific tokens.", "label": false }, { "paperid": "2411.03743v1", "paper_path": "./SciVer/papers/2411.03743v1.json", "claim_type": "parallel", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03743v1_figure_5.png", "item2_path": "./SciVer/images/2411.03743v1-Table1-1.png", "section": [ "2.2.2" ], "request_id": 545, "origin_statement": "The diffcyt-powered stratified differential expression workflow (Table 1) yields a higher median Evaluability (~4.1) than Logical Coherence (~3.8) on SPDB dataset 4.", "perturbed_statement": "The diffcyt-powered stratified differential expression workflow (Table 1) yields a lower median Evaluability (~3.0) than Logical Coherence (~3.8) on SPDB dataset 4.", "perturbed_explanation": "Figure 3 shows that Dataset 4 has a median Evaluability score around 4.0–4.2, not 3.0, and that its Evaluability actually exceeds its Logical Coherence (~3.8), so the stated lower Evaluability of ~3.0 contradicts the figure.", "claim": "The diffcyt-powered stratified differential expression workflow (Table 1) yields a lower median Evaluability (~3.0) than Logical Coherence (~3.8) on SPDB dataset 4.", "label": false }, { "paperid": "2409.14836v2", "paper_path": "./SciVer/papers/2409.14836v2.json", "claim_type": "parallel", "item1": "1", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14836v2_figure_1.png", "item2_path": "./SciVer/images/2409.14836v2_figure_4.png", "section": [ "4" ], 
"request_id": 547, "origin_statement": "RoPO uses a R^{d×n} rotation plus a 1×n magnitude stretching to preserve relative neuron angles (Fig 1), whereas its BIG orthogonal matrix composes forward and reverse integrated Givens rotations in just four sparse multiplications (Fig 4).", "perturbed_statement": "RoPO uses a 2×n rotation plus a 1×n magnitude stretching to preserve relative neuron angles, whereas its BIG orthogonal matrix composes forward and reverse integrated Givens rotations in just two sparse multiplications.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 1 specifies a R^{d×n} rotation (not 2×n), and Figure 4 explains that constructing the BIG orthogonal matrix requires four sparse multiplications (not two).", "claim": "RoPO uses a 2×n rotation plus a 1×n magnitude stretching to preserve relative neuron angles, whereas its BIG orthogonal matrix composes forward and reverse integrated Givens rotations in just two sparse multiplications.", "label": false }, { "paperid": "2410.20597v1", "paper_path": "./SciVer/papers/2410.20597v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20597v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.20597v1_figure_5(b).png", "section": [ "4.4" ], "request_id": 550, "origin_statement": "GAT_analysts experiences a 5.3-point Sharpe ratio drop (4.0 to –1.3) between 0 and 5 bps costs, while having a median turnover near 80%, over three times the analyst_matrix’s median turnover of about 25%.", "perturbed_statement": "GAT_analysts experiences a 5.3-point Sharpe ratio drop (4.0 to –1.3) between 0 and 5 bps costs, while having a median turnover near 30%, over three times the analyst_matrix’s median turnover of about 25%.", "perturbed_explanation": "The turnover box plot (Figure 5b) shows GAT_analysts’s median turnover is around 80%, not 30%, contradicting the claimed 30% median turnover.", "claim": "GAT_analysts experiences a 5.3-point Sharpe ratio drop (4.0 to –1.3) between 0 and 5 bps costs, while having a median turnover near 30%, over three times the analyst_matrix’s median turnover of about 25%.", "label": false }, { "paperid": "2409.11074v2", "paper_path": "./SciVer/papers/2409.11074v2.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11074v2_figure_1.png", "item2_path": "./SciVer/images/2409.11074v2_figure_2.png", "section": [ "3.1" ], "request_id": 552, "origin_statement": "Using MathPix OCR and LLM-based parsing (Figure 1), our pipeline produces JSON-annotated problems. In the RoMath-Synthetic dataset (Figure 2), the ‘Numbers’ domain has about 18,500 problems, nearly nine times the ~2,100 ‘Calculus’ problems, indicating a strong domain imbalance in synthetic data.", "perturbed_statement": "Using MathPix OCR and LLM-based parsing (Figure 1), our pipeline produces JSON-annotated problems. In the RoMath-Synthetic dataset (Figure 2), the ‘Numbers’ domain has about 15,000 problems, nearly nine times the ~2,100 ‘Calculus’ problems, indicating a strong domain imbalance in synthetic data.", "perturbed_explanation": "Figure 2 shows that the ‘Numbers’ domain in RoMath-Synthetic contains approximately 18,000–18,500 problems, not 15,000. 
Therefore, stating about 15,000 problems contradicts the actual distribution displayed in the chart.", "claim": "Using MathPix OCR and LLM-based parsing (Figure 1), our pipeline produces JSON-annotated problems. In the RoMath-Synthetic dataset (Figure 2), the ‘Numbers’ domain has about 15,000 problems, nearly nine times the ~2,100 ‘Calculus’ problems, indicating a strong domain imbalance in synthetic data.", "label": false }, { "paperid": "2409.03466v1", "paper_path": "./SciVer/papers/2409.03466v1.json", "claim_type": "parallel", "item1": "4", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03466v1_figure_4.png", "item2_path": "./SciVer/images/2409.03466v1_figure_5(b).png", "section": [ "4" ], "request_id": 553, "origin_statement": "Model C achieves a 100% recovery rate for transit depths above 300 ppm (Fig 4c), and the ROC heatmap (Fig 5) shows that at a 1.02% FAR cutoff the recovery remains at 100% for depths above 250 ppm.", "perturbed_statement": "Model C achieves only a 90% recovery rate for transit depths above 300 ppm (Fig 4c), and the ROC heatmap (Fig 5) shows that at a 2.2% FAR cutoff the recovery remains at 90% for depths above 250 ppm.", "perturbed_explanation": "This claim is incorrect because Fig 4c labels a 100% recovery rate (not 90%) for depths above 300 ppm, and Fig 5’s heatmap shows near-100% recovery (not 90%) at a 2.2% FAR threshold for depths above 250 ppm.", "claim": "Model C achieves only a 90% recovery rate for transit depths above 300 ppm (Fig 4c), and the ROC heatmap (Fig 5) shows that at a 2.2% FAR cutoff the recovery remains at 90% for depths above 250 ppm.", "label": false }, { "paperid": "2409.17090v1", "paper_path": "./SciVer/papers/2409.17090v1.json", "claim_type": "parallel", "item1": "2(c)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17090v1_figure_2(c).png", "item2_path": "./SciVer/images/2409.17090v1_figure_2(d).png", "section": [ "4.5" ], "request_id": 555, "origin_statement": "On UMIST, SRSG’s accuracy peaks at ~0.68 for K=5, surpassing the magenta baseline’s ~0.45 constant accuracy by 23 points; simultaneously, SRSG’s NMI reaches ~0.81 at K=4, 12 points above the yellow method’s ~0.69.", "perturbed_statement": "On UMIST, SRSG’s accuracy peaks at ~0.68 for K=5, surpassing the magenta baseline’s ~0.45 constant accuracy by 23 points; simultaneously, SRSG’s NMI reaches ~0.81 at K=6, 12 points above the yellow method’s ~0.69.", "perturbed_explanation": "The perturbed statement misplaces SRSG’s NMI peak at K=6. 
In the figure, SRSG’s NMI at K=6 is about 0.79, not 0.81; the actual 0.81 peak occurs at K=4.", "claim": "On UMIST, SRSG’s accuracy peaks at ~0.68 for K=5, surpassing the magenta baseline’s ~0.45 constant accuracy by 23 points; simultaneously, SRSG’s NMI reaches ~0.81 at K=6, 12 points above the yellow method’s ~0.69.", "label": false }, { "paperid": "2409.02076v6", "paper_path": "./SciVer/papers/2409.02076v6.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.02076v6_figure_1.png", "item2_path": "./SciVer/images/2409.02076v6-Table2-1.png", "section": [ "2.2" ], "request_id": 556, "origin_statement": "Daily diary and menu tasks each require 365 entries per year, about seven times the 52 weekly entries; spatially, the 19×19 urban grid encompasses 361 blocks, a 3.61-fold increase over the 10×10 (100 blocks).", "perturbed_statement": "Daily diary and menu tasks each require 360 entries per year, about seven times the 52 weekly entries; spatially, the 18×18 urban grid encompasses 324 blocks, a 3.24-fold increase over the 10×10 (100 blocks).", "perturbed_explanation": "This is incorrect because the table specifies daily tasks cover each of the 365 days of the year (not 360), and the long urban planning grid is 19×19 blocks (361 blocks), not 18×18 (324 blocks).", "claim": "Daily diary and menu tasks each require 360 entries per year, about seven times the 52 weekly entries; spatially, the 18×18 urban grid encompasses 324 blocks, a 3.24-fold increase over the 10×10 (100 blocks).", "label": false }, { "paperid": "2411.02099v2", "paper_path": "./SciVer/papers/2411.02099v2.json", "claim_type": "parallel", "item1": "3(b)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02099v2_figure_3(b).png", "item2_path": "./SciVer/images/2411.02099v2_figure_3(c).png", "section": [ "4.3" ], "request_id": 558, "origin_statement": "At ε=1, the HAR activity recognition accuracy peaks at approximately 95%, while over 90% of pixel attributions fall within ±0.001, with the zero-attribution bin reaching about 60,000 counts.", "perturbed_statement": "At ε=1.5, the HAR activity recognition accuracy peaks at approximately 98%, while over 90% of pixel attributions fall within ±0.001, with the zero-attribution bin reaching about 70,000 counts.", "perturbed_explanation": "This statement is incorrect because Figure 3(b) shows that at ε=1.5 the activity recognition accuracy is around 89%, not 98%. 
Furthermore, Figure 3(c) indicates the zero-attribution bin peaks at about 60,000 counts, not 70,000.", "claim": "At ε=1.5, the HAR activity recognition accuracy peaks at approximately 98%, while over 90% of pixel attributions fall within ±0.001, with the zero-attribution bin reaching about 70,000 counts.", "label": false }, { "paperid": "2409.01295v2", "paper_path": "./SciVer/papers/2409.01295v2.json", "claim_type": "parallel", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01295v2_figure_2.png", "item2_path": "./SciVer/images/2409.01295v2_figure_4.png", "section": [ "2.2" ], "request_id": 559, "origin_statement": "The mtcars mpg distribution peaks near 20 mpg, while in the iris data most observations with petal length over 5 cm correspond to petal widths above 1.5 cm.", "perturbed_statement": "The mtcars mpg distribution peaks near 25 mpg, while in the iris data most observations with petal length over 5 cm correspond to petal widths above 1.5 cm.", "perturbed_explanation": "The perturbation incorrectly states that the mpg distribution peaks near 25 mpg, but Figure 2(a) shows its highest density around 20 mpg, not 25 mpg.", "claim": "The mtcars mpg distribution peaks near 25 mpg, while in the iris data most observations with petal length over 5 cm correspond to petal widths above 1.5 cm.", "label": false }, { "paperid": "2411.02278v1", "paper_path": "./SciVer/papers/2411.02278v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02278v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.02278v1-Table3-1.png", "section": [ "5.2" ], "request_id": 560, "origin_statement": "At optimization level O1, w2c2's total decompiled lines (24,828) exceed RetDec's (7,487) by over three times; Figure 3 reflects this, showing w2c2's median O1 per-benchmark LoC around 200 compared to RetDec's ~60.", "perturbed_statement": "At optimization level O1, w2c2's total decompiled lines (28,332) exceed RetDec's (7,487) by nearly four times; Figure 3 reflects this, showing w2c2's median O1 per-benchmark LoC around 150 compared to RetDec's ~60.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 reports w2c2's O1 total decompiled lines as 24,828, not 28,332, and Figure 3's O1 boxplot for w2c2 shows a median around 200 lines per benchmark, not approximately 150.", "claim": "At optimization level O1, w2c2's total decompiled lines (28,332) exceed RetDec's (7,487) by nearly four times; Figure 3 reflects this, showing w2c2's median O1 per-benchmark LoC around 150 compared to RetDec's ~60.", "label": false }, { "paperid": "2411.14749v2", "paper_path": "./SciVer/papers/2411.14749v2.json", "claim_type": "parallel", "item1": "8", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.14749v2_figure_8.png", "item2_path": "./SciVer/images/2411.14749v2-Table5-1.png", "section": [ "3.4.4" ], "request_id": 565, "origin_statement": "Table 5 shows the internal plateau model breaks at tb = 3.21^{+0.51}_{-0.38} d, ~0.65 d later than the flare model’s 2.56^{+0.90}_{-0.42} d break, and adopts a post-break slope α2 = −9.36^{+3.96}_{-3.76}, matching Figure 8’s right panel where fluxes plummet after ∼3 d across all bands.", "perturbed_statement": "Table 5 shows the internal plateau model breaks at tb = 2.21^{+0.51}_{-0.38} d, ~0.35 d earlier than the flare model’s 2.56^{+0.90}_{-0.42} d break, and adopts a 
post-break slope α2 = −7.36^{+3.96}_{-3.76}, matching Figure 8’s right panel where fluxes plummet after ∼3 d across all bands.", "perturbed_explanation": "The perturbed statement misreports the internal plateau break time and slope. Table 5 actually gives tb = 3.21^{+0.51}_{-0.38} d (not 2.21 d) and α2 = −9.36^{+3.96}_{-3.76} (not −7.36), so the break occurs around 3.2 d with a steeper decline than claimed.", "claim": "Table 5 shows the internal plateau model breaks at tb = 2.21^{+0.51}_{-0.38} d, ~0.35 d earlier than the flare model’s 2.56^{+0.90}_{-0.42} d break, and adopts a post-break slope α2 = −7.36^{+3.96}_{-3.76}, matching Figure 8’s right panel where fluxes plummet after ∼3 d across all bands.", "label": false }, { "paperid": "2409.17455v1", "paper_path": "./SciVer/papers/2409.17455v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17455v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.17455v1-Table2-1.png", "section": [ "3.2.2" ], "request_id": 571, "origin_statement": "At λ=0.8, Llama2-7b shows a macro F1 drop of 0.05 in Go Emotions synonym shortcuts compared to 0.10 for BERT, consistent with its Δ of 0.084 on synonyms in Emotions from Table 2, demonstrating LLMs roughly halve BERT’s vulnerability.", "perturbed_statement": "At λ=0.8, Llama2-7b shows a macro F1 drop of 0.12 in Go Emotions synonym shortcuts compared to 0.10 for BERT, consistent with its Δ of 0.084 on synonyms in Emotions from Table 2, demonstrating LLMs roughly halve BERT’s vulnerability.", "perturbed_explanation": "The perturbed statement misreports the Llama2-7b drop at λ=0.8, which the chart shows is about 0.05, not 0.12. This contradicts the macro F1 drop value depicted in Figure 3 for Llama2-7b under synonym shortcuts.", "claim": "At λ=0.8, Llama2-7b shows a macro F1 drop of 0.12 in Go Emotions synonym shortcuts compared to 0.10 for BERT, consistent with its Δ of 0.084 on synonyms in Emotions from Table 2, demonstrating LLMs roughly halve BERT’s vulnerability.", "label": false }, { "paperid": "2409.06290v1", "paper_path": "./SciVer/papers/2409.06290v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06290v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.06290v1_figure_3(b).png", "section": [ "4.2" ], "request_id": 573, "origin_statement": "EntAugment improves the Dunn index from 5.03×10⁻⁵ at baseline to 7.93×10⁻⁵, representing a 57.7% boost in cluster separability.", "perturbed_statement": "EntAugment increases the Dunn index from 5.03×10⁻⁵ at baseline to 8.93×10⁻⁵, marking a 77.5% gain in cluster separability.", "perturbed_explanation": "The perturbed statement is incorrect because the EntAugment Dunn index is 7.93×10⁻⁵ (not 8.93×10⁻⁵), and the actual improvement over the 5.03×10⁻⁵ baseline is about 57.7%, not 77.5%.", "claim": "EntAugment increases the Dunn index from 5.03×10⁻⁵ at baseline to 8.93×10⁻⁵, marking a 77.5% gain in cluster separability.", "label": false }, { "paperid": "2409.19764v2", "paper_path": "./SciVer/papers/2409.19764v2.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.19764v2-Table4-1.png", "item2_path": "./SciVer/images/2409.19764v2-Table5-1.png", "section": [ "5.2" ], "request_id": 576, "origin_statement": "STAtten boosts Spike-driven Transformer-V2 (8-512) from 79.98% to 80.67% at 
288×288 on ImageNet (+0.69%), while on CIFAR10-DVS it increases the Spike-driven Transformer (2-256) from 80.0% to 81.1% (+1.1%).", "perturbed_statement": "STAtten boosts Spike-driven Transformer-V2 (8-512) from 79.49% to 80.67% at 288×288 on ImageNet (+1.18%), while on CIFAR10-DVS it increases the Spike-driven Transformer (2-256) from 80.0% to 82.2% (+2.2%).", "perturbed_explanation": "The perturbed statement incorrectly cites the ImageNet baseline as 79.49% instead of the actual 79.98% and wrongly claims CIFAR10-DVS accuracy reaches 82.2%, whereas Table 5 shows it improves only to 81.1%.", "claim": "STAtten boosts Spike-driven Transformer-V2 (8-512) from 79.49% to 80.67% at 288×288 on ImageNet (+1.18%), while on CIFAR10-DVS it increases the Spike-driven Transformer (2-256) from 80.0% to 82.2% (+2.2%).", "label": false }, { "paperid": "2411.02614v1", "paper_path": "./SciVer/papers/2411.02614v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02614v1_figure_1.png", "item2_path": "./SciVer/images/2411.02614v1_figure_2.png", "section": [ "1" ], "request_id": 577, "origin_statement": "Figure 1 shows proliferative DR as the only stage with neovascular tufts and cotton-wool spots, and Figure 2’s five proliferative DR augmentations all exhibit new vessel formations and cotton-wool lesions, whereas the five mild NPDR augmentations show only isolated hard exudates without any neovascularization.", "perturbed_statement": "Figure 1 shows proliferative DR as the only stage with neovascular tufts and cotton-wool spots, and Figure 2’s five proliferative DR augmentations all lack any new vessel formations, whereas the five mild NPDR augmentations show multiple neovascular tufts.", "perturbed_explanation": "The perturbed statement is wrong because in Figure 2’s second row (proliferative DR), each augmented image clearly shows new vessel formations and cotton-wool spots, and in the first row (mild NPDR) none of the augmentations display neovascular tufts—they only have isolated hard exudates.", "claim": "Figure 1 shows proliferative DR as the only stage with neovascular tufts and cotton-wool spots, and Figure 2’s five proliferative DR augmentations all lack any new vessel formations, whereas the five mild NPDR augmentations show multiple neovascular tufts.", "label": false }, { "paperid": "2409.05429v2", "paper_path": "./SciVer/papers/2409.05429v2.json", "claim_type": "parallel", "item1": "5", "item2": "6(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05429v2_figure_5.png", "item2_path": "./SciVer/images/2409.05429v2_figure_6(c).png", "section": [ "5" ], "request_id": 579, "origin_statement": "Flight trajectories at 1000–2000 m over central China emit about 10^1.7 kg of carbon per grid, as shown by the green shading on the map (Fig.6) and the colorbar (Fig.5).", "perturbed_statement": "Flight trajectories at 1000–2000 m over central China emit about 10^4.7 kg of carbon per grid, as shown by the green shading on the map (Fig.6) and the colorbar (Fig.5).", "perturbed_explanation": "The green shading in Fig.6 corresponds to roughly 10^1.7 kg according to the Fig.5 colorbar, not the ~10^4.7 kg implied in the perturbed statement, which would appear as orange-red.", "claim": "Flight trajectories at 1000–2000 m over central China emit about 10^4.7 kg of carbon per grid, as shown by the green shading on the map (Fig.6) and the colorbar (Fig.5).", "label": false }, { "paperid": 
"2411.05733v1", "paper_path": "./SciVer/papers/2411.05733v1.json", "claim_type": "parallel", "item1": "2(d)", "item2": "2(g)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05733v1_figure_2(d).png", "item2_path": "./SciVer/images/2411.05733v1_figure_2(g).png", "section": [ "5.2" ], "request_id": 580, "origin_statement": "With AUC∈[0.94,0.97], the non-private Weighted FTT boundary closely follows the 2σ Gaussian ellipse, correctly separating most points, whereas the private Weighted Logistic Regression (ε=1.0, δ=1e-5) uses a near-linear boundary that cuts through the ellipse.", "perturbed_statement": "With AUC∈[0.94,0.97], the non-private Weighted FTT boundary closely follows the 2σ Gaussian ellipse, and the private Weighted Logistic Regression (ε=1.0, δ=1e-5) also curves along the ellipse, aligning with the true distribution.", "perturbed_explanation": "The perturbed statement incorrectly asserts that the private Logistic Regression boundary curves along the 2σ ellipse. In the image, the DP model’s decision boundary is nearly a straight line slicing through the ellipse, not aligned with it.", "claim": "With AUC∈[0.94,0.97], the non-private Weighted FTT boundary closely follows the 2σ Gaussian ellipse, and the private Weighted Logistic Regression (ε=1.0, δ=1e-5) also curves along the ellipse, aligning with the true distribution.", "label": false }, { "paperid": "2409.11008v1", "paper_path": "./SciVer/papers/2409.11008v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11008v1_figure_4.png", "item2_path": "./SciVer/images/2409.11008v1-Table3-1.png", "section": [ "7.2" ], "request_id": 581, "origin_statement": "oLMM-VAE test MSE decreases by approximately 0.013 from 0.040 at 2 basis functions to 0.027 at 4 basis functions, matching its VI MSE of 0.027 (±0.0003) in Table 3. Meanwhile, sLMM-VAE’s test MSE remains around 0.017 across 2–12 basis functions, identical to its VI MSE of 0.017 (±0.0003) in Table 3.", "perturbed_statement": "oLMM-VAE test MSE decreases by approximately 0.013 from 0.040 at 2 basis functions to 0.027 at 4 basis functions, matching its VI MSE of 0.027 (±0.0003) in Table 3. Meanwhile, sLMM-VAE’s test MSE remains around 0.017 across 2–12 basis functions, identical to its VI MSE of 0.016 (±0.0003) in Table 3.", "perturbed_explanation": "The perturbation incorrectly states sLMM-VAE’s VI MSE as 0.016 (±0.0003), but Table 3 shows sLMM-VAE’s VI MSE is actually 0.017 (±0.0003).", "claim": "oLMM-VAE test MSE decreases by approximately 0.013 from 0.040 at 2 basis functions to 0.027 at 4 basis functions, matching its VI MSE of 0.027 (±0.0003) in Table 3. 
Meanwhile, sLMM-VAE’s test MSE remains around 0.017 across 2–12 basis functions, identical to its VI MSE of 0.016 (±0.0003) in Table 3.", "label": false }, { "paperid": "2410.21562v1", "paper_path": "./SciVer/papers/2410.21562v1.json", "claim_type": "parallel", "item1": "2(c)", "item2": "2(l)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21562v1_figure_2(c).png", "item2_path": "./SciVer/images/2410.21562v1_figure_2(l).png", "section": [ "2.3.2" ], "request_id": 584, "origin_statement": "The texture image displays a 3 × 3 grid of star‐shaped motifs, while the angular spectrum plot detects 8 vertical red boundary lines, partitioning the frequency domain into 8 angular sectors—over twice the number of motif columns.", "perturbed_statement": "The texture image displays a 4 × 4 grid of star‐shaped motifs, while the angular spectrum plot detects 6 vertical red boundary lines, partitioning the frequency domain into 6 angular sectors.", "perturbed_explanation": "The perturbed statement is incorrect because the actual texture shows only a 3 × 3 arrangement of star motifs (not 4 × 4), and the angular spectrum plot clearly marks 8 red boundary lines (not 6).", "claim": "The texture image displays a 4 × 4 grid of star‐shaped motifs, while the angular spectrum plot detects 6 vertical red boundary lines, partitioning the frequency domain into 6 angular sectors.", "label": false }, { "paperid": "2410.18766v2", "paper_path": "./SciVer/papers/2410.18766v2.json", "claim_type": "parallel", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.18766v2_figure_6.png", "item2_path": "./SciVer/images/2410.18766v2-Table3-1.png", "section": [ "4.4" ], "request_id": 585, "origin_statement": "Table 3 indicates that removing price increases the 60-min RMSE from 5.96×10⁻² to 6.46×10⁻², a 0.50×10⁻² jump versus 0.45×10⁻² when removing temperature. Fig.6a shows local RMSE spikes of about 3.6 in western Shenzhen without price, compared to around 2.8 in Fig.6b without temperature.", "perturbed_statement": "Table 3 indicates that removing temperature increases the 60-min RMSE from 5.96×10⁻² to 6.56×10⁻², a 0.60×10⁻² jump versus 0.50×10⁻² when removing price. Fig.6b shows local RMSE spikes of about 3.8 in western Shenzhen without temperature, compared to around 2.8 in Fig.6a without price.", "perturbed_explanation": "The perturbation misstates the table and figure values. Table 3 lists the 60-min RMSE without temperature as 6.41×10⁻², not 6.56×10⁻², and Fig.6b’s maximum local RMSE increase is about 3.6, not 3.8.", "claim": "Table 3 indicates that removing temperature increases the 60-min RMSE from 5.96×10⁻² to 6.56×10⁻², a 0.60×10⁻² jump versus 0.50×10⁻² when removing price. 
Fig.6b shows local RMSE spikes of about 3.8 in western Shenzhen without temperature, compared to around 2.8 in Fig.6a without price.", "label": false }, { "paperid": "2409.12587v1", "paper_path": "./SciVer/papers/2409.12587v1.json", "claim_type": "parallel", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.12587v1_figure_5.png", "item2_path": "./SciVer/images/2409.12587v1-Table2-1.png", "section": [ "4.1" ], "request_id": 588, "origin_statement": "For the 3-VB-TTA (Mixup) strategy, mean squared error decreases from 0.147 at step 1 to 0.038 at step 200, while in Figure 5 the weight coefficient for Mixup(α = 0.5) increases from around 0.3 to about 0.7 by step 150.", "perturbed_statement": "For the 3-VB-TTA (Mixup) strategy, mean squared error decreases from 0.147 at step 1 to 0.021 at step 200, while in Figure 5 the weight coefficient for Mixup(α = 0.5) increases from around 0.3 to about 0.9 by step 150.", "perturbed_explanation": "The table shows the MSE at step 200 is 0.038 (±0.014), not 0.021. Additionally, Figure 5 plots the Mixup(α = 0.5) weight rising to roughly 0.7 by step 150, not to 0.9.", "claim": "For the 3-VB-TTA (Mixup) strategy, mean squared error decreases from 0.147 at step 1 to 0.021 at step 200, while in Figure 5 the weight coefficient for Mixup(α = 0.5) increases from around 0.3 to about 0.9 by step 150.", "label": false }, { "paperid": "2409.16239v1", "paper_path": "./SciVer/papers/2409.16239v1.json", "claim_type": "parallel", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.16239v1_figure_2.png", "item2_path": "./SciVer/images/2409.16239v1-Table2-1.png", "section": [ "4.2" ], "request_id": 593, "origin_statement": "In GLaD(MTT) distillation, LADD attains ≈66% accuracy at 20 IPC (≈4.3×10^18 FLOPs; Fig.2b), closely aligning with its 65.5% accuracy for ResNet18 in cross-architecture evaluation (Table 2), surpassing baseline++ by over 10%.", "perturbed_statement": "In GLaD(MTT) distillation, LADD attains ≈76% accuracy at 20 IPC (≈4.3×10^18 FLOPs; Fig.2b), closely aligning with its 75.5% accuracy for ResNet18 in cross-architecture evaluation (Table 2), surpassing baseline++ by over 10%.", "perturbed_explanation": "Fig.2b shows LADD reaches about 66% accuracy at 20 IPC, not 76%. 
Additionally, Table 2 lists ResNet18’s LADD accuracy under GLaD(MTT) as 65.5%, not 75.5%.", "claim": "In GLaD(MTT) distillation, LADD attains ≈76% accuracy at 20 IPC (≈4.3×10^18 FLOPs; Fig.2b), closely aligning with its 75.5% accuracy for ResNet18 in cross-architecture evaluation (Table 2), surpassing baseline++ by over 10%.", "label": false }, { "paperid": "2411.09046v1", "paper_path": "./SciVer/papers/2411.09046v1.json", "claim_type": "parallel", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09046v1_figure_2.png", "item2_path": "./SciVer/images/2411.09046v1-Table1-1.png", "section": [ "3.1.1" ], "request_id": 599, "origin_statement": "Under control, the magenta (τc=10 s) simulated outer diameter trace reproduces experimental peaks near 170 μm and troughs around 100 μm, achieving the lowest RMSRE of 0.1367 at τm=10^3τ̄m among all tested τc–τm combinations.", "perturbed_statement": "Under control, the green (τc=5 s) simulated outer diameter trace reproduces experimental peaks near 170 μm and troughs around 100 μm, achieving the lowest RMSRE of 0.1460 at τm=10^3τ̄m among all tested τc–τm combinations.", "perturbed_explanation": "This is incorrect because Table 1 shows the minimum RMSRE of 0.1367 occurs at τc=10 s (magenta), not at τc=5 s (green), and the reported 0.1460 value at τc=5 s is not the lowest error among the combinations.", "claim": "Under control, the green (τc=5 s) simulated outer diameter trace reproduces experimental peaks near 170 μm and troughs around 100 μm, achieving the lowest RMSRE of 0.1460 at τm=10^3τ̄m among all tested τc–τm combinations.", "label": false }, { "paperid": "2411.16119v1", "paper_path": "./SciVer/papers/2411.16119v1.json", "claim_type": "parallel", "item1": "2(d)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16119v1_figure_2(d).png", "item2_path": "./SciVer/images/2411.16119v1-Table1-1.png", "section": [ "4.2" ], "request_id": 600, "origin_statement": "For the Bmshj2018 model on Kodak images, using OLVQ with the factorized context model reduces bit-rate by 22.6% and raises SSIM from about 0.975 to 0.982 at roughly 0.25 bpp.", "perturbed_statement": "For the Bmshj2018 model on Kodak images, using OLVQ with the factorized context model reduces bit-rate by 15% and raises SSIM from about 0.975 to 0.990 at roughly 0.25 bpp.", "perturbed_explanation": "The table reports a 22.6% bitrate saving (not 15%) for the factorized context model on Bmshj2018, and the SSIM curve at ~0.25 bpp shows an increase from ~0.975 to ~0.982 (not 0.990). These discrepancies make the perturbed statement incorrect.", "claim": "For the Bmshj2018 model on Kodak images, using OLVQ with the factorized context model reduces bit-rate by 15% and raises SSIM from about 0.975 to 0.990 at roughly 0.25 bpp.", "label": false }, { "paperid": "2409.07770v1", "paper_path": "./SciVer/papers/2409.07770v1.json", "claim_type": "parallel", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.07770v1_figure_5.png", "item2_path": "./SciVer/images/2409.07770v1-Table3-1.png", "section": [ "5.1" ], "request_id": 601, "origin_statement": "On VoxCeleb2, Uni. pool reduces EER* by 7.9% compared to cls. token (1.99% vs 9.89%), and on VCTK with WavLM it achieves ~0.03 FRR*, roughly three times lower than cls. token’s ~0.09, per Figure 5 and Table 3.", "perturbed_statement": "On VoxCeleb2, Uni. pool reduces EER* by 5.8% compared to cls. 
token (4.05% vs 9.89%), and on VCTK with WavLM it achieves ~0.03 FRR*, roughly three times lower than cls. token’s ~0.09, per Figure 5 and Table 3.", "perturbed_explanation": "The perturbed statement incorrectly claims Uni. pool’s EER* on VoxCeleb2 is 4.05%, but Table 3 actually reports a 1.99% EER* for Uni. pool with WavLM on VoxCeleb2, not 4.05%.", "claim": "On VoxCeleb2, Uni. pool reduces EER* by 5.8% compared to cls. token (4.05% vs 9.89%), and on VCTK with WavLM it achieves ~0.03 FRR*, roughly three times lower than cls. token’s ~0.09, per Figure 5 and Table 3.", "label": false }, { "paperid": "2409.01722v2", "paper_path": "./SciVer/papers/2409.01722v2.json", "claim_type": "parallel", "item1": "6(b)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01722v2_figure_6(b).png", "item2_path": "./SciVer/images/2409.01722v2_figure_7(b).png", "section": [ "6.1" ], "request_id": 606, "origin_statement": "By the 100th round, each client in SecAgg has sent about 10,000 messages (Fig. 6b) and incurred roughly 10 s of computation (Fig. 7a), whereas in ACCESS-FL each client sends only about 100 messages and takes around 0.4 s.", "perturbed_statement": "By the 100th round, each client in SecAgg has sent about 1,000 messages and incurred roughly 1 s of computation, whereas in ACCESS-FL each client sends only about 100 messages and takes around 0.4 s.", "perturbed_explanation": "The perturbed statement contradicts the visual data: Fig. 6b shows SecAgg clients accumulate about 10,000 messages by round 100, not 1,000. Fig. 7a shows SecAgg client time is around 10 s, not 1 s.", "claim": "By the 100th round, each client in SecAgg has sent about 1,000 messages and incurred roughly 1 s of computation, whereas in ACCESS-FL each client sends only about 100 messages and takes around 0.4 s.", "label": false }, { "paperid": "2409.16708v1", "paper_path": "./SciVer/papers/2409.16708v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.16708v1-Table1-1.png", "item2_path": "./SciVer/images/2409.16708v1-Table2-1.png", "section": [ "4.4" ], "request_id": 607, "origin_statement": "Participants in the top quartile (Q4) overestimated their AI-assisted performance by an average of 1.68 items (17.68 estimated vs. 16.00 actual, Table 1), and the correlation between ΔEP and estimated performance was 0.72 (p<0.001, Table 2).", "perturbed_statement": "Participants in the top quartile (Q4) underestimated their AI-assisted performance by an average of 2.00 items (15.00 estimated vs. 17.00 actual, Table 1), and the correlation between ΔEP and estimated performance was −0.72 (p<0.001, Table 2).", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 shows Q4 mean estimate = 17.68 and actual performance = 16.00 (not 15.00 vs. 17.00), and Table 2 reports a positive correlation of +0.72 (not −0.72) between ΔEP and estimated performance.", "claim": "Participants in the top quartile (Q4) underestimated their AI-assisted performance by an average of 2.00 items (15.00 estimated vs. 
17.00 actual, Table 1), and the correlation between ΔEP and estimated performance was −0.72 (p<0.001, Table 2).", "label": false }, { "paperid": "2410.06169v2", "paper_path": "./SciVer/papers/2410.06169v2.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.06169v2-Table3-1.png", "item2_path": "./SciVer/images/2410.06169v2-Table4-1.png", "section": [ "4.5" ], "request_id": 610, "origin_statement": "A radius of 7 yields the highest average accuracy of 68.6% in neighbor-aware attention, slightly outperforming the peak average of 68.4% achieved when dropping 10 non-active attention heads.", "perturbed_statement": "A radius of 3 yields the highest average accuracy of 68.6% in neighbor-aware attention, slightly outperforming the peak average of 68.4% achieved when dropping 10 non-active attention heads.", "perturbed_explanation": "The perturbation is incorrect because Table 3 shows that a radius of 3 yields an average of 68.0%, not 68.6%, and the highest average (68.6%) actually occurs at a radius of 7, not 3.", "claim": "A radius of 3 yields the highest average accuracy of 68.6% in neighbor-aware attention, slightly outperforming the peak average of 68.4% achieved when dropping 10 non-active attention heads.", "label": false }, { "paperid": "2409.11267v1", "paper_path": "./SciVer/papers/2409.11267v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11267v1_figure_2.png", "item2_path": "./SciVer/images/2409.11267v1_figure_3.png", "section": [ "4.2" ], "request_id": 611, "origin_statement": "The RL–MPC scheme (Fig 2) decouples an N_p–length discrete-action sequence into N_p separate decoupled Q-functions in an unrolled LSTM (Fig 3), each generating one Q_l from preprocessed state g_l(χ(k)), reducing action-output complexity from exponential to linear in horizon length.", "perturbed_statement": "The RL–MPC scheme (Fig 2) decouples an N_p–length discrete-action sequence into N_p+1 separate decoupled Q-functions in an unrolled LSTM (Fig 3), each generating one Q_l from preprocessed state g_l(χ(k)), reducing action-output complexity from exponential to linear in horizon length.", "perturbed_explanation": "The perturbed statement incorrectly claims there are N_p+1 decoupled Q-functions, but Figure 3 unrolls the LSTM exactly N_p times (l=0,…,N_p−1), yielding N_p decoupled Q-functions, not N_p+1.", "claim": "The RL–MPC scheme (Fig 2) decouples an N_p–length discrete-action sequence into N_p+1 separate decoupled Q-functions in an unrolled LSTM (Fig 3), each generating one Q_l from preprocessed state g_l(χ(k)), reducing action-output complexity from exponential to linear in horizon length.", "label": false }, { "paperid": "2410.21259v2", "paper_path": "./SciVer/papers/2410.21259v2.json", "claim_type": "parallel", "item1": "6", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21259v2_figure_6.png", "item2_path": "./SciVer/images/2410.21259v2_figure_8.png", "section": [ "4" ], "request_id": 615, "origin_statement": "GLM-4v scores 0.93 in basic-easy tasks, 0.03 above GPT-4o’s 0.90, reflected by their basic-easy ranks of 1 and 3.", "perturbed_statement": "GLM-4v scores 0.93 in basic-easy tasks, 0.03 above GPT-4o’s 0.90, reflected by their basic-easy ranks of 2 and 1.", "perturbed_explanation": "Figure 8 shows GLM-4v has the top basic-easy rank of 1 and GPT-4o is ranked 3; the 
perturbed ranks of 2 and 1 contradict these actual rankings.", "claim": "GLM-4v scores 0.93 in basic-easy tasks, 0.03 above GPT-4o’s 0.90, reflected by their basic-easy ranks of 2 and 1.", "label": false }, { "paperid": "2409.01696v1", "paper_path": "./SciVer/papers/2409.01696v1.json", "claim_type": "parallel", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01696v1_figure_3.png", "item2_path": "./SciVer/images/2409.01696v1-Table3-1.png", "section": [ "3.3" ], "request_id": 618, "origin_statement": "For ResNet-34 with PPA on Facescrub, removing the last-stage skip connection (RoLSS) drops PPA attack accuracy from about 89% to 77%—a 12 pp reduction at roughly 93.6% natural accuracy.", "perturbed_statement": "For ResNet-34 with PPA on Facescrub, removing the last-stage skip connection (RoLSS) drops PPA attack accuracy from about 89% to 74%—a 15 pp reduction at roughly 93.6% natural accuracy.", "perturbed_explanation": "The perturbation is incorrect because Figure 3 shows RoLSS yields an attack accuracy of about 77% (not 74%) at 93.6% natural accuracy, implying a drop of approximately 12 pp rather than 15 pp.", "claim": "For ResNet-34 with PPA on Facescrub, removing the last-stage skip connection (RoLSS) drops PPA attack accuracy from about 89% to 74%—a 15 pp reduction at roughly 93.6% natural accuracy.", "label": false }, { "paperid": "2411.01759v1", "paper_path": "./SciVer/papers/2411.01759v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01759v1_figure_2.png", "item2_path": "./SciVer/images/2411.01759v1_figure_3.png", "section": [ "3.1" ], "request_id": 620, "origin_statement": "By applying k=1 in the µs±σs threshold (Fig2), ~32% of filters are pruned per prunable conv. Hence, in a residual block (Fig3a) only the first conv loses ~32% of its filters, but in an inception block (Fig3b) all branches lose ~32% each.", "perturbed_statement": "By applying k=1 in the µs±σs threshold (Fig2), approximately 68% of filters are pruned per prunable layer. Consequently, in the residual block (Fig3a) both convolutional layers lose ~68% of filters, whereas in the inception block (Fig3b) all branches lose ~68% each.", "perturbed_explanation": "The perturbed statement is incorrect because: (1) under a µs±σs boundary roughly 32% of samples lie outside one standard deviation, not 68%; (2) Figure 3a marks the second convolutional layer as non-prunable, so only the first layer can have filters removed, not both.", "claim": "By applying k=1 in the µs±σs threshold (Fig2), approximately 68% of filters are pruned per prunable layer. Consequently, in the residual block (Fig3a) both convolutional layers lose ~68% of filters, whereas in the inception block (Fig3b) all branches lose ~68% each.", "label": false }, { "paperid": "2409.17266v1", "paper_path": "./SciVer/papers/2409.17266v1.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17266v1_figure_4.png", "item2_path": "./SciVer/images/2409.17266v1-Table2-1.png", "section": [ "4.3" ], "request_id": 621, "origin_statement": "By the final observation, decile 1’s cumulative excess return is approximately 0.15, about 0.20 higher than decile 10's roughly –0.05. 
Meanwhile, our GPT-4 method achieves the smallest average absolute α of 0.64, outperforming the 0.66 for GPT-3.5.", "perturbed_statement": "By the final observation, decile 1’s cumulative excess return is roughly –0.05, about 0.20 lower than decile 10's near 0.15. Meanwhile, our GPT-4 method achieves the highest average absolute α of 0.70, underperforming the 0.66 for GPT-3.5.", "perturbed_explanation": "The perturbed statement misreports decile 1’s end‐period return as –0.05, whereas the figure shows it around 0.15. It also fabricates an average |α| of 0.70 for GPT-4, but Table 2 lists GPT-4’s avg |α| as 0.64 (the lowest), not 0.70.", "claim": "By the final observation, decile 1’s cumulative excess return is roughly –0.05, about 0.20 lower than decile 10's near 0.15. Meanwhile, our GPT-4 method achieves the highest average absolute α of 0.70, underperforming the 0.66 for GPT-3.5.", "label": false }, { "paperid": "2410.13246v1", "paper_path": "./SciVer/papers/2410.13246v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13246v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.13246v1_figure_2(b).png", "section": [ "6.1" ], "request_id": 623, "origin_statement": "The Spearman correlation between Gen-Binary and Gen-Multi increases from 0.59 at the atomic level to 0.84 at the response level, while Dis-Context and Dis-Rating remain high (0.80 to 0.83).", "perturbed_statement": "The Spearman correlation between Gen-Binary and Gen-Multi increases from 0.62 at the atomic level to 0.84 at the response level, while Dis-Context and Dis-Rating remain high (0.80 to 0.83).", "perturbed_explanation": "In the atomic-level heatmap (Figure 2a), the correlation between Gen-Binary and Gen-Multi is 0.59, not 0.62, so the perturbed value contradicts the actual data.", "claim": "The Spearman correlation between Gen-Binary and Gen-Multi increases from 0.62 at the atomic level to 0.84 at the response level, while Dis-Context and Dis-Rating remain high (0.80 to 0.83).", "label": false }, { "paperid": "2409.05790v1", "paper_path": "./SciVer/papers/2409.05790v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05790v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.05790v1_figure_4.png", "section": [ "5.1" ], "request_id": 626, "origin_statement": "CVAE’s relative error density peaks at about 0.26 around zero—higher than DNN’s ~0.23—while the generated CHF data preserve the strong inverse relation with outlet quality, mirroring the real CHF drop from ~12,000 kW/m² at –0.5 quality to ~500 kW/m² at 1.0.", "perturbed_statement": "CVAE’s relative error density peaks at about 0.30 around zero—higher than DNN’s ~0.20—while the generated CHF data preserve the strong inverse relation with outlet quality, mirroring the real CHF drop from ~12,000 kW/m² at –0.5 quality to ~100 kW/m² at 1.0.", "perturbed_explanation": "The perturbation is incorrect because Figure 3(a) shows the CVAE peak frequency is approximately 0.26 (not 0.30) and DNN’s peak is about 0.23 (not 0.20). 
Additionally, in Figure 4 the CHF at outlet quality 1.0 remains near ~500 kW/m², not as low as ~100 kW/m².", "claim": "CVAE’s relative error density peaks at about 0.30 around zero—higher than DNN’s ~0.20—while the generated CHF data preserve the strong inverse relation with outlet quality, mirroring the real CHF drop from ~12,000 kW/m² at –0.5 quality to ~100 kW/m² at 1.0.", "label": false }, { "paperid": "2410.15698v1", "paper_path": "./SciVer/papers/2410.15698v1.json", "claim_type": "parallel", "item1": "11", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.15698v1_figure_11.png", "item2_path": "./SciVer/images/2410.15698v1-Table1-1.png", "section": [ "5.4" ], "request_id": 632, "origin_statement": "VQ-CD’s Ant-dir mean return of 558.22 (Table 1) outperforms CoD by ~80 points, and in Figure 11a on full-replay D4RL tasks it achieves about 45 mean episode score by 7×10^5 steps, nearly twice PackNet’s ~23.", "perturbed_statement": "VQ-CD’s Ant-dir mean return of 450 in Table 1 underperforms CoD, and in Figure 11a it achieves only about 30 mean episode score by 7×10^5 steps, barely above PackNet’s ~23.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 reports VQ-CD’s mean return as 558.22 (not 450), and Figure 11a shows VQ-CD reaching around 45 mean episode score at 7×10^5 steps (not only 30).", "claim": "VQ-CD’s Ant-dir mean return of 450 in Table 1 underperforms CoD, and in Figure 11a it achieves only about 30 mean episode score by 7×10^5 steps, barely above PackNet’s ~23.", "label": false }, { "paperid": "2410.22364v2", "paper_path": "./SciVer/papers/2410.22364v2.json", "claim_type": "parallel", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.22364v2_figure_1(a).png", "item2_path": "./SciVer/images/2410.22364v2_figure_1(b).png", "section": [ "1" ], "request_id": 634, "origin_statement": "At 150M used budget on ImageNet-100, Accelerated MoCo achieves 74.8% NN accuracy, compared to MoCo's 68.7% at 300M; similarly, at 1500M on ImageNet-1k, Accelerated MoCo reaches about 60.1% NN accuracy, outperforming MoCo's 59.2% at 3000M.", "perturbed_statement": "At 150M used budget on ImageNet-100, Accelerated MoCo achieves 74.8% NN accuracy, compared to MoCo's 68.7% at 300M; similarly, at 1500M on ImageNet-1k, Accelerated MoCo reaches about 62.0% NN accuracy, outperforming MoCo's 59.2% at 3000M.", "perturbed_explanation": "The perturbed statement incorrectly raises Accelerated MoCo’s NN accuracy on ImageNet-1k at 1500M from the true ~60.1% (as shown in the figure) to 62.0%, contradicting the plotted value.", "claim": "At 150M used budget on ImageNet-100, Accelerated MoCo achieves 74.8% NN accuracy, compared to MoCo's 68.7% at 300M; similarly, at 1500M on ImageNet-1k, Accelerated MoCo reaches about 62.0% NN accuracy, outperforming MoCo's 59.2% at 3000M.", "label": false }, { "paperid": "2409.11395v1", "paper_path": "./SciVer/papers/2409.11395v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.11395v1-Table1-1.png", "item2_path": "./SciVer/images/2409.11395v1-Table2-1.png", "section": [ "3.3" ], "request_id": 635, "origin_statement": "The joint white light eclipse depth of 0.003143 (3143 ppm) is about 5% lower than the spectroscopic eclipse depth at 8.125 μm (3321 ± 28 ppm), indicating a modest wavelength-dependent increase in eclipse depth toward the mid-infrared.", 
"perturbed_statement": "The joint white light eclipse depth of 0.003143 (3143 ppm) is about 10% lower than the spectroscopic eclipse depth at 8.125 μm (3521 ± 28 ppm), indicating a significant wavelength-dependent increase.", "perturbed_explanation": "The spectroscopic eclipse depth at 8.125 μm is listed as 3321 ± 28 ppm in Table 2, not 3521 ± 28 ppm; the introduced value contradicts the published data.", "claim": "The joint white light eclipse depth of 0.003143 (3143 ppm) is about 10% lower than the spectroscopic eclipse depth at 8.125 μm (3521 ± 28 ppm), indicating a significant wavelength-dependent increase.", "label": false }, { "paperid": "2411.13919v1", "paper_path": "./SciVer/papers/2411.13919v1.json", "claim_type": "parallel", "item1": "6", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.13919v1_figure_6.png", "item2_path": "./SciVer/images/2411.13919v1-Table1-1.png", "section": [ "3.3" ], "request_id": 637, "origin_statement": "HDBSCAN has the top ARI of 0.55 and displays an almost uniform orange cluster after Q1 2023, whereas OPTICS, scoring –0.02 ARI, presents highly mixed cluster colors across the entire period.", "perturbed_statement": "HDBSCAN has the top ARI of 0.42 and displays an almost uniform orange cluster after Q1 2023, whereas OPTICS, scoring –0.02 ARI, presents highly mixed cluster colors across the entire period.", "perturbed_explanation": "The perturbation incorrectly states HDBSCAN’s ARI as 0.42, whereas Table 1 shows that HDBSCAN actually achieves an ARI of 0.55.", "claim": "HDBSCAN has the top ARI of 0.42 and displays an almost uniform orange cluster after Q1 2023, whereas OPTICS, scoring –0.02 ARI, presents highly mixed cluster colors across the entire period.", "label": false }, { "paperid": "2409.01393v1", "paper_path": "./SciVer/papers/2409.01393v1.json", "claim_type": "parallel", "item1": "3", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01393v1_figure_3.png", "item2_path": "./SciVer/images/2409.01393v1_figure_6.png", "section": [ "3.3" ], "request_id": 639, "origin_statement": "The En=278 keV (Ep=2059 keV) 7Li(p,n)7Be spectrum peaks near 45 keVee, matching the residual excess peak around 50 keVee in the room‐background‐subtracted 22Ne(α,n)25Mg spectrum in Fig.6(b).", "perturbed_statement": "The En=315 keV (Ep=2059 keV) 7Li(p,n)7Be spectrum peaks near 45 keVee, matching the residual excess peak around 50 keVee in the room‐background‐subtracted 22Ne(α,n)25Mg spectrum in Fig.6(b).", "perturbed_explanation": "The perturbed statement incorrectly assigns En=315 keV to the Ep=2059 keV run. 
According to Figure 3, Ep=2059 keV corresponds to En=278 keV, not 315 keV.", "claim": "The En=315 keV (Ep=2059 keV) 7Li(p,n)7Be spectrum peaks near 45 keVee, matching the residual excess peak around 50 keVee in the room‐background‐subtracted 22Ne(α,n)25Mg spectrum in Fig.6(b).", "label": false }, { "paperid": "2410.23317v1", "paper_path": "./SciVer/papers/2410.23317v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23317v1_figure_1(b).png", "item2_path": "./SciVer/images/2410.23317v1_figure_3.png", "section": [ "3.2", "4.2" ], "request_id": 642, "origin_statement": "The post-vision attention region highlighted in Figure 1(b) spans roughly tokens 2800–3000 (≈6.7% of a 3000-token prompt), while Prefill Sparsity drops to about 45% median at layer 14, compared to over 90% sparsity in the first five layers.", "perturbed_statement": "The post-vision attention region highlighted in Figure 1(b) spans roughly tokens 2500–3000 (≈16.7% of a 3000-token prompt), while Prefill Sparsity dips to about 55% median at layer 14, compared to over 90% sparsity in the first five layers.", "perturbed_explanation": "The perturbed token range (2500–3000) and percentage (16.7%) contradict Figure 1(b), which actually highlights tokens 2800–3000 (~6.7%). Similarly, Prefill Sparsity at layer 14 falls to around 45% median, not 55%, as shown in the sparsity plot.", "claim": "The post-vision attention region highlighted in Figure 1(b) spans roughly tokens 2500–3000 (≈16.7% of a 3000-token prompt), while Prefill Sparsity dips to about 55% median at layer 14, compared to over 90% sparsity in the first five layers.", "label": false }, { "paperid": "2409.15053v1", "paper_path": "./SciVer/papers/2409.15053v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "2(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15053v1_figure_1(b).png", "item2_path": "./SciVer/images/2409.15053v1_figure_2(a).png", "section": [ "3.2.3" ], "request_id": 645, "origin_statement": "Figure 1 shows degree-80 Chebyshev approximations for both [0.1,0.3] and [−1,−0.5], while Figure 2 uses degree 48 for [0.1,0.3] (40% fewer terms) and degree 10 for [−1,−0.5] (87.5% fewer terms).", "perturbed_statement": "Figure 1 shows degree-80 Chebyshev approximations for both [0.1,0.3] and [−1,−0.5], while Figure 2 uses degree 68 for [0.1,0.3] (15% fewer terms) and degree 20 for [−1,−0.5] (75% fewer terms).", "perturbed_explanation": "The perturbed statement is incorrect because Figure 2 actually uses degree 48 for the interval [0.1,0.3] and degree 10 for [−1,−0.5], not degrees 68 and 20 as claimed.", "claim": "Figure 1 shows degree-80 Chebyshev approximations for both [0.1,0.3] and [−1,−0.5], while Figure 2 uses degree 68 for [0.1,0.3] (15% fewer terms) and degree 20 for [−1,−0.5] (75% fewer terms).", "label": false }, { "paperid": "2411.06899v1", "paper_path": "./SciVer/papers/2411.06899v1.json", "claim_type": "parallel", "item1": "1(a)", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06899v1_figure_1(a).png", "item2_path": "./SciVer/images/2411.06899v1_figure_2.png", "section": [ "1" ], "request_id": 646, "origin_statement": "LongSafetyBench includes ten tasks across three safety categories, yet the best open-source model Llama3-1.70b-Instruct achieves only a 0.49 Safe Response score despite its 0.75 Harm Awareness.", "perturbed_statement": "LongSafetyBench includes twelve 
tasks across three safety categories, yet the best open-source model Llama3-1.70b-Instruct achieves only a 0.49 Safe Response score despite its 0.75 Harm Awareness.", "perturbed_explanation": "The perturbed statement incorrectly claims that LongSafetyBench has twelve tasks; the benchmark actually comprises ten tasks as shown in Figure 1. This misstates the number of distinct tasks in the benchmark.", "claim": "LongSafetyBench includes twelve tasks across three safety categories, yet the best open-source model Llama3-1.70b-Instruct achieves only a 0.49 Safe Response score despite its 0.75 Harm Awareness.", "label": false }, { "paperid": "2410.20320v1", "paper_path": "./SciVer/papers/2410.20320v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20320v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.20320v1-Table2-1.png", "section": [ "5.2" ], "request_id": 648, "origin_statement": "GPAM outperforms GLM-4 by approximately 5 points at NOTA rate 0.5 (92.5% vs. 87.5%, Fig.3), whereas in the zero-NOTA 10-way-1-shot setting (Table 2), GLM-4 leads GPAM by 3.9 points (97.76% vs. 93.85%).", "perturbed_statement": "GPAM outperforms GLM-4 by approximately 6 points at NOTA rate 0.5 (93.5% vs. 87.5%, Fig.3), whereas in the zero-NOTA 10-way-1-shot setting (Table 2), GLM-4 leads GPAM by 5.5 points (97.76% vs. 92.26%).", "perturbed_explanation": "The perturbed statement misstates GPAM’s NOTA-0.5 advantage—Fig.3 shows a ~5-point gap (92.5% vs. 87.5%), not 6 points at 93.5%. It also wrongly inflates the zero-NOTA 10-way-1-shot margin: Table 2 reports a 3.91-point lead (97.76% vs. 93.85%), not 5.5 points.", "claim": "GPAM outperforms GLM-4 by approximately 6 points at NOTA rate 0.5 (93.5% vs. 87.5%, Fig.3), whereas in the zero-NOTA 10-way-1-shot setting (Table 2), GLM-4 leads GPAM by 5.5 points (97.76% vs. 92.26%).", "label": false }, { "paperid": "2411.09118v1", "paper_path": "./SciVer/papers/2411.09118v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.09118v1-Table3-1.png", "item2_path": "./SciVer/images/2411.09118v1-Table4-1.png", "section": [ "4.5" ], "request_id": 650, "origin_statement": "In Table 3, raising s_max from 0.4 to 1.2 lowers the average classification error from 32.90% to 31.86%, coinciding with the best FxTS-Loss setting (μ=2.0, α1=10.0, α2=1.00) in Table 4 which also achieves a 31.86% average error.", "perturbed_statement": "In Table 3, raising s_max from 0.4 to 1.2 lowers the average classification error from 32.90% to 31.86%, coinciding with the best FxTS-Loss setting (μ=3.0, α1=10.0, α2=2.23) in Table 4 which also achieves a 31.86% average error.", "perturbed_explanation": "The statement is incorrect because in Table 4 the parameter combination μ=3.0, α1=10.0, α2=2.23 yields an average error of 32.24%, not 31.86%. 
The true best FxTS-Loss setting with 31.86% average error is μ=2.0, α1=10.0, α2=1.00.", "claim": "In Table 3, raising s_max from 0.4 to 1.2 lowers the average classification error from 32.90% to 31.86%, coinciding with the best FxTS-Loss setting (μ=3.0, α1=10.0, α2=2.23) in Table 4 which also achieves a 31.86% average error.", "label": false }, { "paperid": "2409.03769v1", "paper_path": "./SciVer/papers/2409.03769v1.json", "claim_type": "parallel", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03769v1_figure_4(a).png", "item2_path": "./SciVer/images/2409.03769v1_figure_4(b).png", "section": [ "6.2" ], "request_id": 653, "origin_statement": "At K=3, biased sampling yielded ~0.51 MRR versus random’s ~0.44 in Fig4(a), and in Fig4(b) at epoch2 (K=5), biased sampling peaks at 0.54 MRR compared to random’s 0.50.", "perturbed_statement": "At K=3, biased sampling yielded ~0.53 MRR versus random’s ~0.44 in Fig4(a), and in Fig4(b) at epoch2 (K=5), biased sampling peaks at 0.56 MRR compared to random’s 0.50.", "perturbed_explanation": "The first figure shows biased MRR at K=3 is actually 0.51 (not 0.53), and the second figure shows biased MRR at epoch2 is 0.54 (not 0.56), so both perturbed values contradict the charts.", "claim": "At K=3, biased sampling yielded ~0.53 MRR versus random’s ~0.44 in Fig4(a), and in Fig4(b) at epoch2 (K=5), biased sampling peaks at 0.56 MRR compared to random’s 0.50.", "label": false }, { "paperid": "2411.00028v2", "paper_path": "./SciVer/papers/2411.00028v2.json", "claim_type": "parallel", "item1": "2", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00028v2_figure_2.png", "item2_path": "./SciVer/images/2411.00028v2_figure_4(c).png", "section": [ "4.4" ], "request_id": 659, "origin_statement": "By leveraging the four-agent meta-path recommendation and self-update loop in Figure 2a, our full model achieves about 0.75 R² on Beijing, whereas removing semantic-guided fusion (w/o attn.) lowers it to roughly 0.68, a relative drop of 0.07 (Figure 4c).", "perturbed_statement": "By leveraging the four-agent meta-path recommendation and self-update loop in Figure 2a, our full model achieves about 0.85 R² on Beijing, whereas removing semantic-guided fusion (w/o attn.) lowers it to roughly 0.60, a relative drop of 0.25 (Figure 4c).", "perturbed_explanation": "The perturbation incorrectly states that the full model R² is 0.85 and that removing attention reduces it to 0.60. Figure 4c actually shows values around 0.75 for the full model and about 0.68 without semantic-guided fusion, a drop of 0.07, not 0.25.", "claim": "By leveraging the four-agent meta-path recommendation and self-update loop in Figure 2a, our full model achieves about 0.85 R² on Beijing, whereas removing semantic-guided fusion (w/o attn.) 
lowers it to roughly 0.60, a relative drop of 0.25 (Figure 4c).", "label": false }, { "paperid": "2409.01854v1", "paper_path": "./SciVer/papers/2409.01854v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01854v1_figure_4(b).png", "item2_path": "./SciVer/images/2409.01854v1_figure_4(c).png", "section": [ "4.4.3" ], "request_id": 660, "origin_statement": "Between 200 and 500 samples, AgentRE-wM+ recall rises from about 0.49 to 0.62 (a 0.13 increase), while its precision climbs from roughly 0.44 to 0.65 (a 0.21 increase), indicating stronger precision gains than recall as training data grows.", "perturbed_statement": "Between 200 and 500 samples, AgentRE-wM+ recall rises from about 0.49 to 0.62 (a 0.23 increase), while its precision climbs from roughly 0.44 to 0.65 (a 0.21 increase), indicating stronger precision gains than recall as training data grows.", "perturbed_explanation": "The perturbed statement misreports the recall increase as 0.23; according to Figure 4(b), recall actually grows from ~0.49 to ~0.62, which is a 0.13 increase, not 0.23.", "claim": "Between 200 and 500 samples, AgentRE-wM+ recall rises from about 0.49 to 0.62 (a 0.23 increase), while its precision climbs from roughly 0.44 to 0.65 (a 0.21 increase), indicating stronger precision gains than recall as training data grows.", "label": false }, { "paperid": "2411.18432v1", "paper_path": "./SciVer/papers/2411.18432v1.json", "claim_type": "parallel", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.18432v1_figure_6.png", "item2_path": "./SciVer/images/2411.18432v1_figure_7.png", "section": [ "5.1.1" ], "request_id": 661, "origin_statement": "Out of the 68 H3 H8 hexagon grids in the large-scale Kowloon network, grid 62 experiences peaks of 300–600 vehicles per hour, approximately five times the average grid demand range of 100–200 vehicles per hour.", "perturbed_statement": "Out of the 68 H3 H8 hexagon grids in the large-scale Kowloon network, grid 62 experiences peaks of 150–250 vehicles per hour, approximately twice the average grid demand range of 100–200 vehicles per hour.", "perturbed_explanation": "This statement is incorrect because Figure 7 shows that grid 62 actually peaks between 300 and 600 vehicles per hour, not 150–250. 
The perturbed peak range contradicts the spatio-temporal demand data for grid 62.", "claim": "Out of the 68 H3 H8 hexagon grids in the large-scale Kowloon network, grid 62 experiences peaks of 150–250 vehicles per hour, approximately twice the average grid demand range of 100–200 vehicles per hour.", "label": false }, { "paperid": "2411.03549v1", "paper_path": "./SciVer/papers/2411.03549v1.json", "claim_type": "parallel", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03549v1_figure_7.png", "item2_path": "./SciVer/images/2411.03549v1-Table3-1.png", "section": [ "5", "4" ], "request_id": 663, "origin_statement": "COCONUTS-2b’s Teff of 450 K (Table 3) places it in the 400–500 K yellow group in Figure 7, where MF444W ranges from about 18 to 20.5 mag, predicting an absolute F444W magnitude near 19 mag.", "perturbed_statement": "COCONUTS-2b’s Teff of 450 K (Table 3) places it in the 350–400 K red group in Figure 7, where MF444W ranges from about 16 to 18 mag, predicting an absolute F444W magnitude near 17 mag.", "perturbed_explanation": "The perturbed statement wrongly assigns 450 K to the 350–400 K red group, whereas Figure 7 shows 450 K lies in the 400–500 K yellow band. It also misstates the red group’s MF444W range (actually ≈19–21 mag, not 16–18 mag).", "claim": "COCONUTS-2b’s Teff of 450 K (Table 3) places it in the 350–400 K red group in Figure 7, where MF444W ranges from about 16 to 18 mag, predicting an absolute F444W magnitude near 17 mag.", "label": false }, { "paperid": "2409.11577v1", "paper_path": "./SciVer/papers/2409.11577v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11577v1_figure_4.png", "item2_path": "./SciVer/images/2409.11577v1_figure_5.png", "section": [ "2.1" ], "request_id": 671, "origin_statement": "The 90% training sample (9000 white points in Figure 4) had 10% of targets doubled, yielding boxplot values that rise to almost 3.0 compared to a prior max of about 1.5 in Figure 5.", "perturbed_statement": "The 90% training sample (9000 white points in Figure 4) had 10% of targets doubled, yielding boxplot values that rise to almost 4.0 compared to a prior max of about 1.5 in Figure 5.", "perturbed_explanation": "Figure 5’s boxplot for the ‘With Outliers’ data shows outliers reaching just under 3.0, not 4.0. 
Claiming values rise to almost 4.0 contradicts the actual maximum depicted.", "claim": "The 90% training sample (9000 white points in Figure 4) had 10% of targets doubled, yielding boxplot values that rise to almost 4.0 compared to a prior max of about 1.5 in Figure 5.", "label": false }, { "paperid": "2409.19872v3", "paper_path": "./SciVer/papers/2409.19872v3.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19872v3_figure_1.png", "item2_path": "./SciVer/images/2409.19872v3_figure_2.png", "section": [ "1" ], "request_id": 672, "origin_statement": "By representing knowledge as unified vectorized key-value pairs instead of parametric neurons, UniKE boosts generality from 0.758 to 0.946 (+0.188) and locality from 0.575 to 0.934 (+0.359) over the naive TP+IKE combination.", "perturbed_statement": "By representing knowledge as parametric neurons instead of unified vectorized key-value pairs, UniKE boosts generality from 0.758 to 0.946 (+0.188) and locality from 0.575 to 0.934 (+0.359) over the naive TP+IKE combination.", "perturbed_explanation": "The perturbed statement wrongly asserts that UniKE uses parametric neurons as its knowledge form. According to Figure 1, UniKE actually employs unified vectorized key-value pairs, not parametric neurons.", "claim": "By representing knowledge as parametric neurons instead of unified vectorized key-value pairs, UniKE boosts generality from 0.758 to 0.946 (+0.188) and locality from 0.575 to 0.934 (+0.359) over the naive TP+IKE combination.", "label": false }, { "paperid": "2409.11927v1", "paper_path": "./SciVer/papers/2409.11927v1.json", "claim_type": "parallel", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11927v1_figure_3.png", "item2_path": "./SciVer/images/2409.11927v1-Table5-1.png", "section": [ "3" ], "request_id": 675, "origin_statement": "In Figure 3 the M2B spectrum’s purple relxill component produces a reflection hump peaking near 20 keV, and Table 5 indicates that at the 6.9 kpc distance corresponding to M2B the fit yields a spin a*≈0.464±0.016 and accretion rate ≈258+5−8 Ṁ_Edd.", "perturbed_statement": "In Figure 3 the M2B spectrum’s purple relxill component produces a reflection hump peaking near 20 keV, and Table 5 indicates that at the 6.9 kpc distance corresponding to M2B the fit yields a spin a*≈0.721±0.019 and accretion rate ≈119+4−3 Ṁ_Edd.", "perturbed_explanation": "Table 5 shows that at 6.9 kpc (M2B) the best-fit spin is a*≈0.464±0.016 and the accretion rate is ≈258+5−8 Ṁ_Edd, not a*≈0.721 or ≈119 Ṁ_Edd, so the perturbed values contradict the tabulated results.", "claim": "In Figure 3 the M2B spectrum’s purple relxill component produces a reflection hump peaking near 20 keV, and Table 5 indicates that at the 6.9 kpc distance corresponding to M2B the fit yields a spin a*≈0.721±0.019 and accretion rate ≈119+4−3 Ṁ_Edd.", "label": false }, { "paperid": "2410.02010v1", "paper_path": "./SciVer/papers/2410.02010v1.json", "claim_type": "parallel", "item1": "4(b)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02010v1_figure_4(b).png", "item2_path": "./SciVer/images/2410.02010v1-Table5-1.png", "section": [ "4.2" ], "request_id": 676, "origin_statement": "Focal yields about 82% test accuracy with a 1.5% gap in final-epoch validation but still outperforms RS’s average multi-label score on CheXpert (45.7% vs. 
45.2%), despite RS achieving ~84% accuracy and a larger 2.5% gap.", "perturbed_statement": "Focal yields about 82% test accuracy with a 1.5% gap in final-epoch validation but still outperforms RS’s average multi-label score on CheXpert (44.7% vs. 45.2%), despite RS achieving ~84% accuracy and a larger 2.5% gap.", "perturbed_explanation": "The perturbed statement incorrectly lists Focal’s CheXpert average as 44.7%. According to Table 5, Focal actually attains a 45.71% average, not 44.7%, so it does outperform RS’s 45.23%, and the 44.7% figure contradicts the reported table value.", "claim": "Focal yields about 82% test accuracy with a 1.5% gap in final-epoch validation but still outperforms RS’s average multi-label score on CheXpert (44.7% vs. 45.2%), despite RS achieving ~84% accuracy and a larger 2.5% gap.", "label": false }, { "paperid": "2411.14215v1", "paper_path": "./SciVer/papers/2411.14215v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14215v1_figure_2.png", "item2_path": "./SciVer/images/2411.14215v1_figure_3.png", "section": [ "2.4" ], "request_id": 679, "origin_statement": "On zero-generalization problems, our human participants achieved 82% accuracy versus GPT-3’s 66%, a 16-point gap that narrows to just 2 points at three generalizations with 28% human and 26% GPT-3 accuracy.", "perturbed_statement": "On zero-generalization problems, our human participants achieved 82% accuracy versus GPT-3’s 66%, a 16-point gap that narrows to just 4 points at three generalizations with 28% human and 32% GPT-3 accuracy.", "perturbed_explanation": "Figure 3 shows GPT-3’s accuracy at three generalizations is approximately 26%, not 32%. The perturbed statement’s claim of 32% contradicts the chart’s reported GPT-3 performance.", "claim": "On zero-generalization problems, our human participants achieved 82% accuracy versus GPT-3’s 66%, a 16-point gap that narrows to just 4 points at three generalizations with 28% human and 32% GPT-3 accuracy.", "label": false }, { "paperid": "2411.02653v1", "paper_path": "./SciVer/papers/2411.02653v1.json", "claim_type": "parallel", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02653v1_figure_4.png", "item2_path": "./SciVer/images/2411.02653v1-Table2-1.png", "section": [ "4.2" ], "request_id": 682, "origin_statement": "For the default asteroid (d = 5.1 km, β = 0°), at Φ = 5 the spherical shape exhibits an MAE of ~0.19%, about 0.03% higher than the biaxial ellipsoid’s ~0.16% error.", "perturbed_statement": "For the default asteroid (d = 5.1 km, β = 0°), at Φ = 5 the spherical shape exhibits an MAE of ~0.17%, about 0.03% higher than the biaxial ellipsoid’s ~0.16% error.", "perturbed_explanation": "Figure 4 (left panel) shows that at Φ=5 and β=0° the spherical asteroid’s MAE is approximately 0.19%, not 0.17%; this mismatch makes the perturbed statement incorrect.", "claim": "For the default asteroid (d = 5.1 km, β = 0°), at Φ = 5 the spherical shape exhibits an MAE of ~0.17%, about 0.03% higher than the biaxial ellipsoid’s ~0.16% error.", "label": false }, { "paperid": "2411.16471v1", "paper_path": "./SciVer/papers/2411.16471v1.json", "claim_type": "parallel", "item1": "6(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16471v1_figure_6(b).png", "item2_path": "./SciVer/images/2411.16471v1-Table2-1.png", "section": [ "5.4" ], "request_id": 684, 
"origin_statement": "Figure 6 shows the zero‐baseline black points at about 19.23 fs for the 470 nm filter and 9.45 fs for the 375 nm filter, meaning the 470 nm amplitude is roughly twice that of the 375 nm case, matching Table 2’s weighted averages.", "perturbed_statement": "Figure 6 shows the zero‐baseline black points at about 19.23 fs for the 470 nm filter and 12.06 fs for the 375 nm filter, meaning the 470 nm amplitude is roughly 1.6 × that of the 375 nm case, as seen in Table 2.", "perturbed_explanation": "Table 2 gives a weighted‐average zero‐baseline amplitude of 9.45 ± 1.09 fs for the 375 nm filter, not 12.06 fs. The 12.06 fs value in Table 2 refers specifically to Mimosa’s UD fit at 375 nm, and Figure 6’s black point at zero baseline clearly lies near 9.5 fs for 375 nm, contradicting the stated 12.06 fs.", "claim": "Figure 6 shows the zero‐baseline black points at about 19.23 fs for the 470 nm filter and 12.06 fs for the 375 nm filter, meaning the 470 nm amplitude is roughly 1.6 × that of the 375 nm case, as seen in Table 2.", "label": false }, { "paperid": "2410.20609v1", "paper_path": "./SciVer/papers/2410.20609v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20609v1_figure_1(b).png", "item2_path": "./SciVer/images/2410.20609v1-Table4-1.png", "section": [ "3.1.2" ], "request_id": 686, "origin_statement": "The red LBA radio components in Fig. 1 extend over ∼4.8 kpc (comparable to the 5 kpc scale bar) and have a 1.4 GHz luminosity of 5.2×10^27 W Hz⁻¹, which is ∼3 times lower than their 178 MHz luminosity of 1.7×10^28 W Hz⁻¹.", "perturbed_statement": "The red LBA radio components in Fig. 1 extend over ∼2.4 kpc (half the 5 kpc scale bar) and have a 1.4 GHz luminosity of 5.2×10^27 W Hz⁻¹, which is ∼6 times lower than their 178 MHz luminosity of 1.7×10^28 W Hz⁻¹.", "perturbed_explanation": "The perturbed statement incorrectly halves the true largest angular extent: Table 4 gives 4.8 kpc, not 2.4 kpc. It also overstates the luminosity ratio: 1.7×10^28 W Hz⁻¹ at 178 MHz vs 5.2×10^27 W Hz⁻¹ at 1.4 GHz yields a factor of ~3, not ~6.", "claim": "The red LBA radio components in Fig. 1 extend over ∼2.4 kpc (half the 5 kpc scale bar) and have a 1.4 GHz luminosity of 5.2×10^27 W Hz⁻¹, which is ∼6 times lower than their 178 MHz luminosity of 1.7×10^28 W Hz⁻¹.", "label": false }, { "paperid": "2410.02475v1", "paper_path": "./SciVer/papers/2410.02475v1.json", "claim_type": "parallel", "item1": "2(b)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02475v1_figure_2(b).png", "item2_path": "./SciVer/images/2410.02475v1_figure_2(d).png", "section": [ "5.3" ], "request_id": 691, "origin_statement": "Our geometry-unaware policy improves success rates by 45.2 points over full observations on the toy figure (66.4% vs. 21.2%), and by 31.4 points on the video game console (43.8% vs. 12.4%).", "perturbed_statement": "Our geometry-unaware policy improves success rates by 75.2 points over full observations on the toy figure (66.4% vs. 21.2%), and by 31.4 points on the video game console (43.8% vs. 12.4%).", "perturbed_explanation": "The claimed improvement of 75.2 points on the toy figure is incorrect. The actual difference between 66.4% and 21.2% is 45.2 percentage points, not 75.2.", "claim": "Our geometry-unaware policy improves success rates by 75.2 points over full observations on the toy figure (66.4% vs. 
21.2%), and by 31.4 points on the video game console (43.8% vs. 12.4%).", "label": false }, { "paperid": "2410.01888v1", "paper_path": "./SciVer/papers/2410.01888v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01888v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.01888v1_figure_5(b).png", "section": [ "6.1", "6.2" ], "request_id": 696, "origin_statement": "Under the Conditional treatment, the Younger group, which had the highest model Top-1 accuracy (~81%), experienced a 4.7% improvement in human accuracy, while the Older group, with the lowest model accuracy (~58%), saw a 1.5% decrease.", "perturbed_statement": "Under the Conditional treatment, the Younger group, which had the highest model Top-1 accuracy (~81%), experienced a 4.7% improvement in human accuracy, while the Older group, with the lowest model accuracy (~58%), saw a 1.5% improvement.", "perturbed_explanation": "The second figure shows that the Older group actually had about a –1.5% change (a decrease) under the Conditional treatment, not a positive 1.5% improvement.", "claim": "Under the Conditional treatment, the Younger group, which had the highest model Top-1 accuracy (~81%), experienced a 4.7% improvement in human accuracy, while the Older group, with the lowest model accuracy (~58%), saw a 1.5% improvement.", "label": false }, { "paperid": "2411.08982v1", "paper_path": "./SciVer/papers/2411.08982v1.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.08982v1-Table1-1.png", "item2_path": "./SciVer/images/2411.08982v1-Table2-1.png", "section": [ "5" ], "request_id": 697, "origin_statement": "DBRX’s accuracy-driven (Lynx-Acc) configuration hits 73.16% on GSM8k with 12 experts, outpacing Mixtral’s 58.61% at 6 experts by over 14 points.", "perturbed_statement": "DBRX’s accuracy-driven (Lynx-Acc) configuration hits 75.00% on GSM8k with 12 experts, outpacing Mixtral’s 58.61% at 6 experts by over 14 points.", "perturbed_explanation": "Table 1 shows that DBRX’s Lynx-Acc score on GSM8k with 12 experts is actually 73.16%, not 75.00%, so the reported 75.00% contradicts the table.", "claim": "DBRX’s accuracy-driven (Lynx-Acc) configuration hits 75.00% on GSM8k with 12 experts, outpacing Mixtral’s 58.61% at 6 experts by over 14 points.", "label": false }, { "paperid": "2410.19986v1", "paper_path": "./SciVer/papers/2410.19986v1.json", "claim_type": "parallel", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19986v1_figure_4.png", "item2_path": "./SciVer/images/2410.19986v1-Table3-1.png", "section": [ "4.2" ], "request_id": 700, "origin_statement": "After adversarial harmonization in Fig. 4, the age softmax predictions converge to the uniform chance value of about 0.014, and the harmonized (both) method in Table 3 achieves a voicing balanced accuracy of 52.65%, surpassing the balanced control’s 52.60% by 0.05%.", "perturbed_statement": "After adversarial harmonization in Fig. 
4, the age softmax predictions converge to the uniform chance value of about 0.014, and the harmonized (both) method in Table 3 achieves a voicing balanced accuracy of 53.00%, surpassing the balanced control’s 52.60% by 0.40%.", "perturbed_explanation": "The perturbed statement is incorrect because Table 3 reports a voicing balanced accuracy of 52.65% for the harmonized (both) method, not 53.00%, and the improvement over the balanced control (52.60%) is 0.05%, not 0.40%.", "claim": "After adversarial harmonization in Fig. 4, the age softmax predictions converge to the uniform chance value of about 0.014, and the harmonized (both) method in Table 3 achieves a voicing balanced accuracy of 53.00%, surpassing the balanced control’s 52.60% by 0.40%.", "label": false }, { "paperid": "2409.14444v1", "paper_path": "./SciVer/papers/2409.14444v1.json", "claim_type": "parallel", "item1": "5(b)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14444v1_figure_5(b).png", "item2_path": "./SciVer/images/2409.14444v1-Table4-1.png", "section": [ "4.4" ], "request_id": 701, "origin_statement": "In Variant 11’s policy evolution (Figure 5b), SSBI remains at about 40% of samples across all epochs while BI and SBI split the remaining 60%; Table C1 reports Variant 11 achieves an average AUC of 90.97%, below the CDFA’s 92.36%.", "perturbed_statement": "In Variant 11’s policy evolution (Figure 5b), SSBI remains at about 60% of samples across all epochs while BI and SBI split the remaining 40%; Table C1 reports Variant 11 achieves an average AUC of 95.00%, exceeding the CDFA’s 92.36%.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5b shows SSBI holds at roughly 40%, not 60%, and Table C1 lists Variant 11’s average AUC as 90.97%, not 95.00%.", "claim": "In Variant 11’s policy evolution (Figure 5b), SSBI remains at about 60% of samples across all epochs while BI and SBI split the remaining 40%; Table C1 reports Variant 11 achieves an average AUC of 95.00%, exceeding the CDFA’s 92.36%.", "label": false }, { "paperid": "2411.10545v1", "paper_path": "./SciVer/papers/2411.10545v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10545v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.10545v1_figure_2(c).png", "section": [ "4" ], "request_id": 708, "origin_statement": "Information Sampling for Alignment (ISA) achieves an 84.93% winrate on the Anthropic Golden HH dataset—2.22 points above full-data alignment—and reaches 25.23% on the OpenAssistant dataset, reducing the gap to the full-data winrate (26.87%) to just 1.64 points.", "perturbed_statement": "Information Sampling for Alignment (ISA) achieves an 84.93% winrate on the Anthropic Golden HH dataset—2.22 points above full-data alignment—and reaches 27.23% on the OpenAssistant dataset, reducing the gap to the full-data winrate (26.87%) to just 0.36 points.", "perturbed_explanation": "The OpenAssistant winrate for ISA in the referenced figure is 25.23%, not 27.23%, so the claimed winrate and resulting 0.36-point gap contradict the chart data.", "claim": "Information Sampling for Alignment (ISA) achieves an 84.93% winrate on the Anthropic Golden HH dataset—2.22 points above full-data alignment—and reaches 27.23% on the OpenAssistant dataset, reducing the gap to the full-data winrate (26.87%) to just 0.36 points.", "label": false }, { "paperid": "2411.18473v1", "paper_path": 
"./SciVer/papers/2411.18473v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.18473v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.18473v1_figure_3(c).png", "section": [ "4.3" ], "request_id": 709, "origin_statement": "At roughly 16 MB on Mip-NeRF360, HEMGS yields 27.95 dB, outperforming Context-GS’s 27.72 dB at 20 MB by 0.23 dB; likewise on DeepBlending at 4 MB, HEMGS’s 30.30 dB exceeds HAC’s 29.98 dB at 4.5 MB by 0.32 dB.", "perturbed_statement": "At roughly 16 MB on Mip-NeRF360, HEMGS yields 28.05 dB, outperforming Context-GS’s 27.72 dB at 20 MB by 0.33 dB; likewise on DeepBlending at 4 MB, HEMGS’s 30.30 dB exceeds HAC’s 30.05 dB at 4.5 MB by 0.25 dB.", "perturbed_explanation": "According to the plot, HEMGS at 16 MB on Mip-NeRF360 actually achieves about 27.95 dB, not 28.05 dB. Similarly, HAC’s PSNR on DeepBlending at 4.5 MB is 29.98 dB, not 30.05 dB, so both cited values contradict the data.", "claim": "At roughly 16 MB on Mip-NeRF360, HEMGS yields 28.05 dB, outperforming Context-GS’s 27.72 dB at 20 MB by 0.33 dB; likewise on DeepBlending at 4 MB, HEMGS’s 30.30 dB exceeds HAC’s 30.05 dB at 4.5 MB by 0.25 dB.", "label": false }, { "paperid": "2411.18200v1", "paper_path": "./SciVer/papers/2411.18200v1.json", "claim_type": "parallel", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.18200v1-Table2-1.png", "item2_path": "./SciVer/images/2411.18200v1-Table3-1.png", "section": [ "4.4" ], "request_id": 713, "origin_statement": "The PVD CNN employs eight convolutional layers—starting with a 7×7 stride-2 layer—followed by a single 3×3 max-pooling stage, whereas the moment-map CNN uses seven convolutional layers, beginning with a 32-filter 11×11 stride-4 layer, interleaved with five 3×3 max pools.", "perturbed_statement": "The PVD CNN employs nine convolutional layers—starting with a 7×7 stride-2 layer—followed by a single 3×3 max-pooling stage, whereas the moment-map CNN uses seven convolutional layers, beginning with a 32-filter 11×11 stride-4 layer, interleaved with three 3×3 max pools.", "perturbed_explanation": "This is incorrect because Table 2 defines only eight convolutional layers in the PVD architecture, not nine, and Table 3 shows five 3×3 max-pooling layers in the moment-map network, not three.", "claim": "The PVD CNN employs nine convolutional layers—starting with a 7×7 stride-2 layer—followed by a single 3×3 max-pooling stage, whereas the moment-map CNN uses seven convolutional layers, beginning with a 32-filter 11×11 stride-4 layer, interleaved with three 3×3 max pools.", "label": false }, { "paperid": "2410.15705v1", "paper_path": "./SciVer/papers/2410.15705v1.json", "claim_type": "parallel", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.15705v1-Table1-1.png", "item2_path": "./SciVer/images/2410.15705v1-Table3-1.png", "section": [ "6" ], "request_id": 716, "origin_statement": "In Model (a), Table 1 shows that for n=5000, r=0.5, m=0.5 the 50% quantile of S (minimum model size) is 1, and Table 3 reports that using the estimated active set j** yields a median ASE of 0.018, matching the true-set performance.", "perturbed_statement": "In Model (a), Table 1 shows that for n=5000, r=0.5, m=0.5 the 50% quantile of S (minimum model size) is 2, and Table 3 reports that using the estimated active set j** yields a median ASE of 0.018, matching the true-set 
performance.", "perturbed_explanation": "The perturbation falsely states that the 50% quantile of S is 2 for n=5000, r=0.5, m=0.5 in Model (a). However, Table 1 indicates this median quantile is actually 1, not 2.", "claim": "In Model (a), Table 1 shows that for n=5000, r=0.5, m=0.5 the 50% quantile of S (minimum model size) is 2, and Table 3 reports that using the estimated active set j** yields a median ASE of 0.018, matching the true-set performance.", "label": false }, { "paperid": "2410.10880v1", "paper_path": "./SciVer/papers/2410.10880v1.json", "claim_type": "parallel", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.10880v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.10880v1_figure_3(b).png", "section": [ "3.2" ], "request_id": 722, "origin_statement": "On WikiMIA, members average ~4.2 perplexity versus ~5.0 for non-members, while members have ~5.1 Min-k% against ~5.8 for non-members, showing a 0.8-unit gap in perplexity and a 0.7-unit gap in Min-k%.", "perturbed_statement": "On WikiMIA, members average ~4.2 perplexity versus ~5.0 for non-members, while members have ~5.1 Min-k% against ~6.8 for non-members, showing a 0.8-unit gap in perplexity and a 1.7-unit gap in Min-k%.", "perturbed_explanation": "The perturbed statement claims non-members have a mean Min-k% of ~6.8 (implying a 1.7-unit gap), but the plotted blue dashed line for non-members is actually around 5.8, so both the 6.8 value and the 1.7-unit gap contradict the figure.", "claim": "On WikiMIA, members average ~4.2 perplexity versus ~5.0 for non-members, while members have ~5.1 Min-k% against ~6.8 for non-members, showing a 0.8-unit gap in perplexity and a 1.7-unit gap in Min-k%.", "label": false }, { "paperid": "2411.16725v1", "paper_path": "./SciVer/papers/2411.16725v1.json", "claim_type": "parallel", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16725v1_figure_5.png", "item2_path": "./SciVer/images/2411.16725v1-Table2-1.png", "section": [ "4.3" ], "request_id": 723, "origin_statement": "For Caltech-101 at t=25, bottleneck yields σ_label=9.35, roughly 12 points lower than up_ft1’s 21.33, reflecting Figure 5 where bottleneck activations show distinct sailing ships and elephants, while up_ft1 captures sketch-style images and objects on white backgrounds.", "perturbed_statement": "For Caltech-101 at t=25, up_ft1 yields σ_label=9.35, roughly 12 points lower than bottleneck’s 21.33, reflecting Figure 5 where bottleneck activations show distinct sailing ships and elephants, while up_ft1 captures sketch-style images and objects on white backgrounds.", "perturbed_explanation": "The perturbation swaps the σ_label values for bottleneck and up_ft1, contradicting the table where bottleneck actually has 9.35 and up_ft1 has 21.33 for Caltech-101 at t=25.", "claim": "For Caltech-101 at t=25, up_ft1 yields σ_label=9.35, roughly 12 points lower than bottleneck’s 21.33, reflecting Figure 5 where bottleneck activations show distinct sailing ships and elephants, while up_ft1 captures sketch-style images and objects on white backgrounds.", "label": false }, { "paperid": "2410.23511v1", "paper_path": "./SciVer/papers/2410.23511v1.json", "claim_type": "parallel", "item1": "7", "item2": "17", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23511v1_figure_7.png", "item2_path": "./SciVer/images/2410.23511v1-Table17-1.png", "section": [ "6.2" ], "request_id": 724, 
"origin_statement": "Combined DyPlan achieves an F1 of 48.0 on HotpotQA—0.5 points above the individual DyPlan’s 47.5—by training on 44k samples (2k more than individual’s 42k) without changing recall cost (0.76 for both).", "perturbed_statement": "Combined DyPlan achieves an F1 of 48.0 on HotpotQA—0.5 points above the individual DyPlan’s 47.5—by training on 44k samples (2k more than individual’s 42k) while increasing recall cost to 0.80.", "perturbed_explanation": "The perturbed statement is incorrect because Table 17 shows the recall cost (#R) remains 0.76 for both individual and combined DyPlan on HotpotQA; it does not increase to 0.80.", "claim": "Combined DyPlan achieves an F1 of 48.0 on HotpotQA—0.5 points above the individual DyPlan’s 47.5—by training on 44k samples (2k more than individual’s 42k) while increasing recall cost to 0.80.", "label": false }, { "paperid": "2410.22373v1", "paper_path": "./SciVer/papers/2410.22373v1.json", "claim_type": "parallel", "item1": "1", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.22373v1-Table1-1.png", "item2_path": "./SciVer/images/2410.22373v1-Table4-1.png", "section": [ "4.2" ], "request_id": 733, "origin_statement": "MDAA’s average Top-1 accuracy on Kinetics50-C is 72.75% for progressive audio corruption (Table 1) but drops to 67.95% under the interleaved modality corruption task (Table 4), a decrease of 4.80 points when corruption phases alternate.", "perturbed_statement": "MDAA’s average Top-1 accuracy on Kinetics50-C is 72.75% for progressive audio corruption (Table 1) but drops to 62.30% under the interleaved modality corruption task (Table 4), a decrease of 10.45 points when corruption phases alternate.", "perturbed_explanation": "The perturbed statement misreports the interleaved modality corruption average. Table 4 shows MDAA achieves 67.95%, not 62.30%, making the claimed 10.45-point drop incorrect.", "claim": "MDAA’s average Top-1 accuracy on Kinetics50-C is 72.75% for progressive audio corruption (Table 1) but drops to 62.30% under the interleaved modality corruption task (Table 4), a decrease of 10.45 points when corruption phases alternate.", "label": false }, { "paperid": "2409.04073v2", "paper_path": "./SciVer/papers/2409.04073v2.json", "claim_type": "parallel", "item1": "3(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.04073v2_figure_3(b).png", "item2_path": "./SciVer/images/2409.04073v2-Table1-1.png", "section": [ "6.1" ], "request_id": 735, "origin_statement": "AnyMatch achieves an average F1 score of 81.96% at roughly $0.00001 per 1K tokens—3,899× cheaper than GPT-4’s inference cost—and only 4.4% below GPT-4’s 86.36%, even on the smallest BEER dataset of just 450 samples.", "perturbed_statement": "AnyMatch achieves an average F1 score of 81.96% at roughly $0.00001 per 1K tokens—3,899× cheaper than GPT-4’s inference cost—and only 4.4% below GPT-4’s 86.36%, even on the smallest BEER dataset of 4,500 samples.", "perturbed_explanation": "The perturbed statement incorrectly claims that the BEER dataset contains 4,500 samples. 
According to Table 1, BEER actually has 450 samples, so the sample count in the perturbation contradicts the provided dataset information.", "claim": "AnyMatch achieves an average F1 score of 81.96% at roughly $0.00001 per 1K tokens—3,899× cheaper than GPT-4’s inference cost—and only 4.4% below GPT-4’s 86.36%, even on the smallest BEER dataset of 4,500 samples.", "label": false }, { "paperid": "2410.16908v1", "paper_path": "./SciVer/papers/2410.16908v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.16908v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.16908v1_figure_3.png", "section": [ "4.2" ], "request_id": 736, "origin_statement": "In the CapsNet[C=20,D=8,I=6] model, most capsule activations in layers 5 and 6 hover around 0.01, while the dead capsule rate climbs from approximately 16% in epoch 1 to about 26% by epoch 15.", "perturbed_statement": "In the CapsNet[C=20,D=8,I=6] model, most capsule activations in layers 5 and 6 hover around 0.02, while the dead capsule rate climbs from approximately 18% in epoch 1 to about 30% by epoch 15.", "perturbed_explanation": "The perturbed statement is incorrect because the heatmap shows activations in layers 5 and 6 are close to 0.01, not 0.02, and Figure 3 indicates dead capsules rise from about 16% at epoch 1 to roughly 26% at epoch 15, not from 18% to 30%.", "claim": "In the CapsNet[C=20,D=8,I=6] model, most capsule activations in layers 5 and 6 hover around 0.02, while the dead capsule rate climbs from approximately 18% in epoch 1 to about 30% by epoch 15.", "label": false }, { "paperid": "2410.12378v1", "paper_path": "./SciVer/papers/2410.12378v1.json", "claim_type": "parallel", "item1": "2(a)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.12378v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.12378v1-Table5-1.png", "section": [ "4.3" ], "request_id": 738, "origin_statement": "The diffusion length sqrt(4 D T) at T=31 min, with D=0.01 mm^2/min, is ~1.12 mm, and the semilog plot shows m(x,t)>1 for ln(x)<0.4 (x<1.5 mm), consistent with this theoretical estimate.", "perturbed_statement": "The diffusion length sqrt(4 D T) at T=31 min, with D=0.01 mm^2/min, is ~2.24 mm, and the semilog plot shows m(x,t)>1 for ln(x)<0.4 (x<1.5 mm), consistent with this theoretical estimate.", "perturbed_explanation": "The correct diffusion length is sqrt(4*0.01*31)≈1.12 mm, not 2.24 mm, so the perturbed statement miscalculates the diffusion length and contradicts the table value of D and the time T=31 min from the image.", "claim": "The diffusion length sqrt(4 D T) at T=31 min, with D=0.01 mm^2/min, is ~2.24 mm, and the semilog plot shows m(x,t)>1 for ln(x)<0.4 (x<1.5 mm), consistent with this theoretical estimate.", "label": false }, { "paperid": "2409.01700v2", "paper_path": "./SciVer/papers/2409.01700v2.json", "claim_type": "parallel", "item1": "7", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01700v2_figure_7.png", "item2_path": "./SciVer/images/2409.01700v2-Table5-1.png", "section": [ "4.1" ], "request_id": 739, "origin_statement": "In Fig. 7, the sputtering depth d_s is much smaller than the ice film thickness d_0, while Table 5 shows H2CO’s cosmic-ray sputtering rate is 0.15 ζ_17 molecules·cm⁻²·s⁻¹, about eight times lower than H2O’s rate of 1.2 ζ_17.", "perturbed_statement": "In Fig. 
7, the sputtering depth d_s is much larger than the ice film thickness d_0, while Table 5 shows H2CO’s cosmic-ray sputtering rate is 0.15 ζ_17 molecules·cm⁻²·s⁻¹, about eight times lower than H2O’s rate of 1.2 ζ_17.", "perturbed_explanation": "The perturbation claims that d_s is larger than d_0, but Fig. 7’s caption explicitly states that d_s (the sputtering depth) is inferior to d_0 (the initial ice film thickness), so the altered comparison contradicts the image.", "claim": "In Fig. 7, the sputtering depth d_s is much larger than the ice film thickness d_0, while Table 5 shows H2CO’s cosmic-ray sputtering rate is 0.15 ζ_17 molecules·cm⁻²·s⁻¹, about eight times lower than H2O’s rate of 1.2 ζ_17.", "label": false }, { "paperid": "2411.01217v1", "paper_path": "./SciVer/papers/2411.01217v1.json", "claim_type": "parallel", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.01217v1_figure_6.png", "item2_path": "./SciVer/images/2411.01217v1-Table2-1.png", "section": [ "5.2" ], "request_id": 745, "origin_statement": "In three-player play, the Aggressive AI raises to three big blinds 16.0% preflop, whereas in two-player head-to-head, as the small blind versus a Normal opponent, this strategy yields just 58.7 mBB/h, 9.9 mBB/h lower than its 68.6 mBB/h versus an Aggressive opponent.", "perturbed_statement": "In three-player play, the Aggressive AI raises to three big blinds 16.0% preflop, whereas in two-player head-to-head, as the small blind versus a Normal opponent, this strategy yields just 63.7 mBB/h, 4.9 mBB/h lower than its 68.6 mBB/h versus an Aggressive opponent.", "perturbed_explanation": "The perturbed statement misreports the two-player win rate against a Normal opponent: Table 2 shows the Aggressive small blind wins 58.7 mBB/h (not 63.7), and the actual difference from 68.6 mBB/h is 9.9 mBB/h (not 4.9).", "claim": "In three-player play, the Aggressive AI raises to three big blinds 16.0% preflop, whereas in two-player head-to-head, as the small blind versus a Normal opponent, this strategy yields just 63.7 mBB/h, 4.9 mBB/h lower than its 68.6 mBB/h versus an Aggressive opponent.", "label": false }, { "paperid": "2410.24145v1", "paper_path": "./SciVer/papers/2410.24145v1.json", "claim_type": "parallel", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.24145v1_figure_1.png", "item2_path": "./SciVer/images/2410.24145v1-Table1-1.png", "section": [ "4.3" ], "request_id": 746, "origin_statement": "At κ=5, the circular histogram shows response angles concentrated around π/4, and in Table 1 the circular forest method achieves a median arc length of 2.09 (IQR 0.48), approximately 38.5% smaller than the projected normal’s median of 3.40 (IQR 1.20).", "perturbed_statement": "At κ=5, the circular histogram shows response angles concentrated around π/4, and in Table 1 the circular forest method achieves a median arc length of 2.09 (IQR 0.48), approximately 28.5% smaller than the projected normal’s median of 3.40 (IQR 1.20).", "perturbed_explanation": "The perturbed statement claims a 28.5% reduction, but using the values 3.40 and 2.09 from Table 1, the correct reduction is (3.40–2.09)/3.40 ≈ 38.5%, so the 28.5% figure contradicts the tabulated data.", "claim": "At κ=5, the circular histogram shows response angles concentrated around π/4, and in Table 1 the circular forest method achieves a median arc length of 2.09 (IQR 0.48), approximately 28.5% smaller than the projected normal’s 
median of 3.40 (IQR 1.20).", "label": false }, { "paperid": "2411.09899v1", "paper_path": "./SciVer/papers/2411.09899v1.json", "claim_type": "parallel", "item1": "1(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09899v1_figure_1(b).png", "item2_path": "./SciVer/images/2411.09899v1-Table2-1.png", "section": [ "4.1" ], "request_id": 751, "origin_statement": "For 1/η = 0.5, the ANN weight profile oscillates around the analytic Merton ratio of approximately 0.06 over time, yielding a mean terminal utility of 0.05897 with a standard error of 8.61×10⁻⁵.", "perturbed_statement": "For 1/η = 0.5, the ANN weight profile oscillates around the analytic Merton ratio of approximately 0.06 over time, yielding a mean terminal utility of 0.05897 with a standard error of 8.61×10⁻⁴.", "perturbed_explanation": "The table lists the standard error for the ANN mean terminal utility at 1/η = 0.5 as 8.61×10⁻⁵, not 8.61×10⁻⁴, so the perturbed standard error contradicts the reported value.", "claim": "For 1/η = 0.5, the ANN weight profile oscillates around the analytic Merton ratio of approximately 0.06 over time, yielding a mean terminal utility of 0.05897 with a standard error of 8.61×10⁻⁴.", "label": false }, { "paperid": "2409.08512v1", "paper_path": "./SciVer/papers/2409.08512v1.json", "claim_type": "parallel", "item1": "5(a)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08512v1_figure_5(a).png", "item2_path": "./SciVer/images/2409.08512v1_figure_5(c).png", "section": [ "5.3.1" ], "request_id": 758, "origin_statement": "CPG achieves about 0.88 accuracy in Vulnerability Fix Identification with roughly 0.87 precision (Fig. 5a), but its Vulnerability Severity Classification accuracy in Fig. 5c drops to around 0.80, reflecting an 8% decrease.", "perturbed_statement": "CPG achieves about 0.78 accuracy in Vulnerability Fix Identification with roughly 0.87 precision (Fig. 5a), but its Vulnerability Severity Classification accuracy in Fig. 5c drops to around 0.80, reflecting an 8% decrease.", "perturbed_explanation": "The perturbed statement underreports the VFI accuracy for CPG: Figure 5a shows CPG’s VFI accuracy is approximately 0.88, not 0.78, so the claimed 0.78 contradicts the visual data.", "claim": "CPG achieves about 0.78 accuracy in Vulnerability Fix Identification with roughly 0.87 precision (Fig. 5a), but its Vulnerability Severity Classification accuracy in Fig. 
5c drops to around 0.80, reflecting an 8% decrease.", "label": false }, { "paperid": "2409.09099v2", "paper_path": "./SciVer/papers/2409.09099v2.json", "claim_type": "parallel", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.09099v2_figure_3.png", "item2_path": "./SciVer/images/2409.09099v2-Table1-1.png", "section": [ "4.1" ], "request_id": 759, "origin_statement": "Figure 3(c–d) shows that the 1:2 soft-thresholding pruning function is continuous when |a₁|=|a₂|, and Table 1 reports that γ=0 achieves the minimal validation loss of 4.007, which is 0.065 lower than the loss at γ=1.", "perturbed_statement": "Figure 3(a–b) shows that the 1:2 hard-thresholding pruning function is continuous when |a₁|=|a₂|, and Table 1 reports that γ=1 achieves the minimal validation loss of 4.072, which is 0.65 lower than the loss at γ=0.", "perturbed_explanation": "This is incorrect because Figure 3(a–b) depict hard-thresholding, which is discontinuous at |a₁|=|a₂|, not continuous, and Table 1 actually shows γ=0 yields the lowest validation loss (4.007), while γ=1 gives the highest loss (4.072), not the minimal.", "claim": "Figure 3(a–b) shows that the 1:2 hard-thresholding pruning function is continuous when |a₁|=|a₂|, and Table 1 reports that γ=1 achieves the minimal validation loss of 4.072, which is 0.65 lower than the loss at γ=0.", "label": false }, { "paperid": "2409.11724v2", "paper_path": "./SciVer/papers/2409.11724v2.json", "claim_type": "parallel", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11724v2_figure_1.png", "item2_path": "./SciVer/images/2409.11724v2_figure_2.png", "section": [ "1" ], "request_id": 770, "origin_statement": "The median daily snowfall is 5 inches, and if that snowfall were evenly distributed over the 4-hour boat trip from Ocean City at 9:45 A.M. to Surfing Beach at 1:45 P.M., it would average 1.25 inches per hour.", "perturbed_statement": "The median daily snowfall is 5 inches, and if that snowfall were evenly distributed over the 5-hour boat trip from Ocean City at 9:45 A.M. to Surfing Beach at 1:45 P.M., it would average 1 inch per hour.", "perturbed_explanation": "The boat trip from 9:45 A.M. to 1:45 P.M. actually takes 4 hours, not 5 hours, so distributing snowfall over 5 hours is incorrect according to the schedule in Figure 1.", "claim": "The median daily snowfall is 5 inches, and if that snowfall were evenly distributed over the 5-hour boat trip from Ocean City at 9:45 A.M. 
to Surfing Beach at 1:45 P.M., it would average 1 inch per hour.", "label": false }, { "paperid": "2409.12314v1", "paper_path": "./SciVer/papers/2409.12314v1.json", "claim_type": "parallel", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.12314v1_figure_7(a).png", "item2_path": "./SciVer/images/2409.12314v1_figure_7(b).png", "section": [ "5.2" ], "request_id": 775, "origin_statement": "In the benign model, cross-attention maps for “book” show two bright regions at the open pages and for “clock” a circular bright focus, and for “dog” a bright facial region with “hat” highlighting a conical spot above; in the imploded model all token maps collapse into diffuse, nearly identical noise.", "perturbed_statement": "In the benign model, cross-attention maps for “book” show two bright regions at the open pages and for “clock” a circular bright focus, and for “dog” a bright facial region with “hat” highlighting a conical spot above; in the imploded model the “hat” map still clearly shows a conical bright region above the head.", "perturbed_explanation": "The perturbed claim is wrong because in the imploded model (second row of each figure), all token-specific cross-attention maps—including the “hat” map—are uniformly noisy and lack any distinct conical bright region, contradicting the statement that the “hat” map remains clear.", "claim": "In the benign model, cross-attention maps for “book” show two bright regions at the open pages and for “clock” a circular bright focus, and for “dog” a bright facial region with “hat” highlighting a conical spot above; in the imploded model the “hat” map still clearly shows a conical bright region above the head.", "label": false }, { "paperid": "2411.11000v1", "paper_path": "./SciVer/papers/2411.11000v1.json", "claim_type": "parallel", "item1": "6", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11000v1_figure_6.png", "item2_path": "./SciVer/images/2411.11000v1_figure_8.png", "section": [ "3.4", "3.5" ], "request_id": 777, "origin_statement": "In the 120 M⊙ progenitor model, the spectral time series shows log flux ~37.3 erg s⁻¹ Å⁻¹ at the He I 1.083 μm line by 60 days post-explosion (Fig. 6), and the 60 day SDEC plot indicates He accounts for over 60% of emergent emission at ~11000 Å (Fig. 8).", "perturbed_statement": "In the 120 M⊙ progenitor model, the spectral time series shows log flux ~36.3 erg s⁻¹ Å⁻¹ at the He I 1.083 μm line by 60 days post-explosion (Fig. 6), and the 60 day SDEC plot indicates He accounts for over 60% of emergent emission at ~11000 Å (Fig. 8).", "perturbed_explanation": "The perturbed statement incorrectly lowers the log flux at the He I 1.083 μm line from the actual ~37.3 erg s⁻¹ Å⁻¹ (seen in the 120 M⊙ model at 60 days in Fig. 6) to 36.3, contradicting the spectral time series data.", "claim": "In the 120 M⊙ progenitor model, the spectral time series shows log flux ~36.3 erg s⁻¹ Å⁻¹ at the He I 1.083 μm line by 60 days post-explosion (Fig. 6), and the 60 day SDEC plot indicates He accounts for over 60% of emergent emission at ~11000 Å (Fig. 
8).", "label": false }, { "paperid": "2409.14201v1", "paper_path": "./SciVer/papers/2409.14201v1.json", "claim_type": "parallel", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14201v1_figure_7(a).png", "item2_path": "./SciVer/images/2409.14201v1_figure_7(b).png", "section": [ "4.3" ], "request_id": 779, "origin_statement": "On formulae, delta-view feedback boosts average localization accuracy to 60.53%, while correct fault localization yields a 51.99% average refinement rate, which is 14.97 percentage points higher than the 37.02% when localization is wrong.", "perturbed_statement": "On formulae, delta-view feedback boosts average localization accuracy to 65.53%, while correct fault localization yields a 51.99% average refinement rate, which is 14.97 percentage points higher than the 37.02% when localization is wrong.", "perturbed_explanation": "The perturbed statement incorrectly claims the average localization accuracy with delta-view is 65.53%, but Figure 7(a) reports it as 60.53% for formulae with delta-view feedback.", "claim": "On formulae, delta-view feedback boosts average localization accuracy to 65.53%, while correct fault localization yields a 51.99% average refinement rate, which is 14.97 percentage points higher than the 37.02% when localization is wrong.", "label": false }, { "paperid": "2410.16597v1", "paper_path": "./SciVer/papers/2410.16597v1.json", "claim_type": "parallel", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.16597v1_figure_5.png", "item2_path": "./SciVer/images/2410.16597v1-Table4-1.png", "section": [ "7.5" ], "request_id": 780, "origin_statement": "LLM-based reranking of graph retrieval boosts Hits@10 by 6.28 points (from 66.5% to 72.78%), compared with only a 0.2-point gain in Hits@10 (from 80.9% to 81.1%) for the Distill-SynthKG-7b model when switching KG synthesis from GPT-4o to Llama-3.", "perturbed_statement": "LLM-based reranking of graph retrieval boosts Hits@10 by 6.28 points (from 66.5% to 72.78%), compared with only a 2.0-point gain in Hits@10 (from 80.9% to 81.1%) for the Distill-SynthKG-7b model when switching KG synthesis from GPT-4o to Llama-3.", "perturbed_explanation": "The perturbed statement incorrectly claims a 2.0-point increase in Hits@10 for Distill-SynthKG-7b when switching KG synthesis, but Table 4 shows the Hits@10 only rises from 0.809 (80.9%) to 0.811 (81.1%), a 0.2-point gain, not 2.0 points.", "claim": "LLM-based reranking of graph retrieval boosts Hits@10 by 6.28 points (from 66.5% to 72.78%), compared with only a 2.0-point gain in Hits@10 (from 80.9% to 81.1%) for the Distill-SynthKG-7b model when switching KG synthesis from GPT-4o to Llama-3.", "label": false }, { "paperid": "2411.17987v1", "paper_path": "./SciVer/papers/2411.17987v1.json", "claim_type": "parallel", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17987v1_figure_4.png", "item2_path": "./SciVer/images/2411.17987v1_figure_5.png", "section": [ "5.1.2" ], "request_id": 782, "origin_statement": "At 5 NetFlow fields, P4-NIDS sustains about 77 Mbit/s with a throughput drop of ~3.8%, whereas FlowStalker delivers ~15 Mbit/s but suffers ~9% drop.", "perturbed_statement": "At 5 NetFlow fields, P4-NIDS sustains about 77 Mbit/s with a throughput drop of ~3.8%, whereas FlowStalker delivers ~15 Mbit/s but suffers ~2% drop.", "perturbed_explanation": "Figure 5 shows that FlowStalker’s 
throughput drop at 5 NetFlow fields is approximately 9%, not 2%, so the perturbed drop value contradicts the data.", "claim": "At 5 NetFlow fields, P4-NIDS sustains about 77 Mbit/s with a throughput drop of ~3.8%, whereas FlowStalker delivers ~15 Mbit/s but suffers ~2% drop.", "label": false }, { "paperid": "2411.16421v1", "paper_path": "./SciVer/papers/2411.16421v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16421v1_figure_3.png", "item2_path": "./SciVer/images/2411.16421v1_figure_4.png", "section": [ "5.2.1" ], "request_id": 784, "origin_statement": "Between grade 2 and grade 5, the typhoon mean distance error with rotation decreases by 93.12 km (from 122.66 to 29.54 km), while the AU (not flipped) test basin’s error falls by 38.81 pixels (from 58.17 to 19.36 px).", "perturbed_statement": "Between grade 2 and grade 5, the typhoon mean distance error with rotation decreases by 93.12 km (from 122.66 to 29.54 km), while the AU (not flipped) test basin’s error falls by 40.81 pixels (from 58.17 to 19.36 px).", "perturbed_explanation": "The AU (not flipped) error reduction is misreported as 40.81 pixels. According to Figure 4, it actually decreases from 58.17 to 19.36 pixels, a difference of 38.81 pixels, not 40.81.", "claim": "Between grade 2 and grade 5, the typhoon mean distance error with rotation decreases by 93.12 km (from 122.66 to 29.54 km), while the AU (not flipped) test basin’s error falls by 40.81 pixels (from 58.17 to 19.36 px).", "label": false }, { "paperid": "2411.05253v1", "paper_path": "./SciVer/papers/2411.05253v1.json", "claim_type": "parallel", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.05253v1_figure_2.png", "item2_path": "./SciVer/images/2411.05253v1-Table1-1.png", "section": [ "5.1" ], "request_id": 800, "origin_statement": "Mistral-7B flags 42% of Malay gold sentences as Singlish while mislabeling the Indonesian phrase “matter pata nai tha” as Malay.", "perturbed_statement": "Mistral-7B flags 22% of Malay gold sentences as Singlish while mislabeling the Indonesian phrase “matter pata nai tha” as Malay.", "perturbed_explanation": "The confusion matrix in Figure 2 shows that Mistral-7B misdetects 42% (not 22%) of Malay gold sentences as Singlish. 
Changing 42% to 22% contradicts the cell value for Malay→Singlish (0.42) in the matrix.", "claim": "Mistral-7B flags 22% of Malay gold sentences as Singlish while mislabeling the Indonesian phrase “matter pata nai tha” as Malay.", "label": false }, { "paperid": "2411.12248v2", "paper_path": "./SciVer/papers/2411.12248v2.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.12248v2-Table3-1.png", "item2_path": "./SciVer/images/2411.12248v2-Table4-1.png", "section": [ "5.3.1" ], "request_id": 804, "origin_statement": "Incorporating both static and dynamic signals with our neural aggregator raises object-type top-1 classification accuracy from 5.44% to 5.91% (Table 3) and improves the average 2-way top-1 reconstruction performance from 51.64% to 55.81% (Table 4).", "perturbed_statement": "Incorporating both static and dynamic signals with our neural aggregator raises object-type top-1 classification accuracy from 5.44% to 6.11% (Table 3) and improves the average 2-way top-1 reconstruction performance from 51.64% to 55.81% (Table 4).", "perturbed_explanation": "The perturbed statement incorrectly reports the aggregated object-type top-1 classification accuracy as 6.11%, whereas Table 3 shows it is 5.91%, creating a contradiction with the original data.", "claim": "Incorporating both static and dynamic signals with our neural aggregator raises object-type top-1 classification accuracy from 5.44% to 6.11% (Table 3) and improves the average 2-way top-1 reconstruction performance from 51.64% to 55.81% (Table 4).", "label": false }, { "paperid": "2411.00690v1", "paper_path": "./SciVer/papers/2411.00690v1.json", "claim_type": "parallel", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00690v1_figure_3.png", "item2_path": "./SciVer/images/2411.00690v1_figure_4.png", "section": [ "3.4" ], "request_id": 811, "origin_statement": "At R ≈ 5 Å the CCSD reference weight W0 falls to about –0.2 (Fig. 3), whereas QCCSD preserves a positive W0 of roughly 0.12 (Fig. 4), demonstrating that QCCSD avoids the unphysical negative reference amplitude observed in CCSD at large separations.", "perturbed_statement": "At R ≈ 5 Å the CCSD reference weight W0 rises to about 0.2 (Fig. 3), whereas QCCSD preserves a positive W0 of roughly 0.12 (Fig. 4), demonstrating that QCCSD avoids the unphysical negative reference amplitude observed in CCSD at large separations.", "perturbed_explanation": "The perturbed claim falsely states that CCSD W0 is +0.2 at R≈5 Å, but Fig. 3 shows CCSD W0 actually drops to about –0.2 at that separation, contradicting the +0.2 value.", "claim": "At R ≈ 5 Å the CCSD reference weight W0 rises to about 0.2 (Fig. 3), whereas QCCSD preserves a positive W0 of roughly 0.12 (Fig. 4), demonstrating that QCCSD avoids the unphysical negative reference amplitude observed in CCSD at large separations.", "label": false }, { "paperid": "2409.16060v1", "paper_path": "./SciVer/papers/2409.16060v1.json", "claim_type": "parallel", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.16060v1_figure_2.png", "item2_path": "./SciVer/images/2409.16060v1-Table2-1.png", "section": [ "3.1" ], "request_id": 813, "origin_statement": "The soft X-ray photon index Γ_SX decreased by 0.60 from 2.79 to 2.19 (Table 2), while the χ²/d.o.f. 
of the double power-law model dropped from 1630.75/1631 to 958.66/1014 (Figure 2 panels e and f), indicating a hardened soft excess and improved fit.", "perturbed_statement": "The soft X-ray photon index Γ_SX decreased by 0.80 from 2.99 to 2.19 (Table 2), while the χ²/d.o.f. of the double power-law model dropped from 1630.75/1631 to 958.66/1014 (Figure 2 panels e and f), indicating a hardened soft excess and improved fit.", "perturbed_explanation": "Table 2 actually lists the soft X-ray photon index Γ_SX as 2.79±0.04, not 2.99, and its change to 2.19 is a decrease of 0.60, not 0.80. These incorrect values contradict the tabulated fit parameters.", "claim": "The soft X-ray photon index Γ_SX decreased by 0.80 from 2.99 to 2.19 (Table 2), while the χ²/d.o.f. of the double power-law model dropped from 1630.75/1631 to 958.66/1014 (Figure 2 panels e and f), indicating a hardened soft excess and improved fit.", "label": false }, { "paperid": "2410.11385v1", "paper_path": "./SciVer/papers/2410.11385v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.11385v1_figure_1.png", "item2_path": "./SciVer/images/2410.11385v1_figure_2.png", "section": [ "4.2" ], "request_id": 33, "origin_statement": "GPT-4o’s zero-shot BA accuracy on a 1×5 graph at I=3 is 56%, and Llama-3.1-405B’s one-shot ICL CI accuracy on the same 1×5 graph at I=3 is 52%, showing Llama-3.1 performs slightly lower on the CI task than GPT-4o on BA.", "perturbed_statement": "GPT-4o’s zero-shot BA accuracy on a 1×5 graph at I=3 is 50%, and Llama-3.1-405B’s one-shot ICL CI accuracy on the same 1×5 graph at I=3 is 52%, implying Llama-3.1 outperforms GPT-4o in this comparison.", "perturbed_explanation": "Figure 1 shows GPT-4o’s zero-shot BA accuracy at I=3 for a 1×5 graph is actually 56%, not 50%. Therefore, Llama-3.1’s 52% is still lower than GPT-4o’s 56%, contradicting the perturbed claim that it outperforms GPT-4o.", "claim": "GPT-4o’s zero-shot BA accuracy on a 1×5 graph at I=3 is 56%, and Llama-3.1-405B’s one-shot ICL CI accuracy on the same 1×5 graph at I=3 is 52%, showing Llama-3.1 performs slightly lower on the CI task than GPT-4o on BA.", "label": true }, { "paperid": "2410.09356v1", "paper_path": "./SciVer/papers/2410.09356v1.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09356v1_figure_6.png", "item2_path": "./SciVer/images/2410.09356v1_figure_7.png", "section": [ "5.5" ], "request_id": 35, "origin_statement": "Figure 6 shows that Taxi-Pick achieves its lowest MSE at kernel size [4,4]. Then, Figure 7 indicates Taxi-Pick reaches its lowest MAPE at diffusion step 2. Therefore, the configuration (kernel [4,4], diffusion=2) optimizes both MSE and MAPE for Taxi-Pick.", "perturbed_statement": "Figure 6 shows that Taxi-Pick achieves its lowest MSE at kernel size [5,3]. Then, Figure 7 indicates Taxi-Pick reaches its lowest MAPE at diffusion step 2. Therefore, the configuration (kernel [5,3], diffusion=2) optimizes both MSE and MAPE for Taxi-Pick.", "perturbed_explanation": "The perturbed statement wrongly claims Taxi-Pick’s lowest MSE occurs at kernel size [5,3]. In Figure 6, Taxi-Pick’s MSE is actually lowest at [4,4], not [5,3], so the supposed optimal kernel [5,3] contradicts the data.", "claim": "Figure 6 shows that Taxi-Pick achieves its lowest MSE at kernel size [4,4]. 
Then, Figure 7 indicates Taxi-Pick reaches its lowest MAPE at diffusion step 2. Therefore, the configuration (kernel [4,4], diffusion=2) optimizes both MSE and MAPE for Taxi-Pick.", "label": true }, { "paperid": "2409.15727v1", "paper_path": "./SciVer/papers/2409.15727v1.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15727v1_figure_4.png", "item2_path": "./SciVer/images/2409.15727v1-Table3-1.png", "section": [ "4.1" ], "request_id": 37, "origin_statement": "Figure 4 indicates σ²_conv around the mug rim is notably lower than σ²_dino; leveraging these low-variance regions, LaPose achieves 42.4% accuracy at 10°0.2d on NOCS-CAMERA25 (Table 3), demonstrating variance-guided pose precision.", "perturbed_statement": "Figure 4 indicates σ²_dino around the mug rim is notably lower than σ²_conv; leveraging these low-variance regions, LaPose achieves 39.9% accuracy at 10°0.2d on NOCS-CAMERA25 (Table 3).", "perturbed_explanation": "In Figure 4, the σ²_conv heatmap (third column) shows cooler (blue) colors at the mug rim than the σ²_dino map (sixth column), so σ²_dino is actually higher, not lower. Moreover, Table 3 reports 42.4% at 10°0.2d for LaPose, not 39.9%.", "claim": "Figure 4 indicates σ²_conv around the mug rim is notably lower than σ²_dino; leveraging these low-variance regions, LaPose achieves 42.4% accuracy at 10°0.2d on NOCS-CAMERA25 (Table 3), demonstrating variance-guided pose precision.", "label": true }, { "paperid": "2411.08909v1", "paper_path": "./SciVer/papers/2411.08909v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.08909v1-Table3-1.png", "item2_path": "./SciVer/images/2411.08909v1-Table4-1.png", "section": [ "5.6" ], "request_id": 49, "origin_statement": "ESM-2-650M achieves 26.57% remote homology accuracy (Table 3) and a 0.295 Spearman correlation (Table 4), implying approximately 0.0111 Spearman points gained per 1% increase in remote homology accuracy.", "perturbed_statement": "ESM-2-650M achieves 27.57% remote homology accuracy (Table 3) and a 0.295 Spearman correlation (Table 4), implying approximately 0.0107 Spearman points gained per 1% increase in remote homology accuracy.", "perturbed_explanation": "Table 3 reports the remote homology accuracy for ESM-2-650M as 26.57%, not 27.57%. 
Changing this value to 27.57% misstates the actual accuracy and thus the derived ratio is incorrect.", "claim": "ESM-2-650M achieves 26.57% remote homology accuracy (Table 3) and a 0.295 Spearman correlation (Table 4), implying approximately 0.0111 Spearman points gained per 1% increase in remote homology accuracy.", "label": true }, { "paperid": "2410.10442v1", "paper_path": "./SciVer/papers/2410.10442v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.10442v1_figure_2.png", "item2_path": "./SciVer/images/2410.10442v1_figure_3.png", "section": [ "3.1" ], "request_id": 71, "origin_statement": "Figure 2 shows that at the sixth Transformer layer, defocus blur level 5 increases mean attention distance by ≈3 tokens compared to clean images, and after DCT adaptation this gap shrinks to under 0.2 tokens, implying Φ^6 effectively corrects blur-induced attention spread.", "perturbed_statement": "Figure 2 shows that at the third Transformer layer, defocus blur level 5 increases mean attention distance by ≈4 tokens compared to clean images, and after DCT adaptation this gap shrinks to under 0.1 tokens, implying Φ^3 perfectly corrects blur-induced attention spread.", "perturbed_explanation": "In Figure 2, at layer 3 the mean attention distance increases by only about 1 token (clean ≈7, blur 5 ≈8), not ≈4 as claimed, and the DCT-adapted profile is around 7.5 (≈0.5 token gap), not within 0.1 tokens. Therefore, Φ^3 cannot produce the asserted full correction.", "claim": "Figure 2 shows that at the sixth Transformer layer, defocus blur level 5 increases mean attention distance by ≈3 tokens compared to clean images, and after DCT adaptation this gap shrinks to under 0.2 tokens, implying Φ^6 effectively corrects blur-induced attention spread.", "label": true }, { "paperid": "2409.14762v1", "paper_path": "./SciVer/papers/2409.14762v1.json", "claim_type": "sequential", "item1": "5", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14762v1_figure_5.png", "item2_path": "./SciVer/images/2409.14762v1_figure_8.png", "section": [ "3.3" ], "request_id": 72, "origin_statement": "Fig5 shows GPT4 achieves about 83% accuracy with only ~5 rounds in easy mode; Fig8 shows GPT4’s accuracy–rethink correlation is 75 (the highest among models), indicating its efficient reasoning aligns with strong rethink performance.", "perturbed_statement": "Fig5 shows GPT4 achieves about 83% accuracy with only ~5 rounds in easy mode; Fig8 shows GPT4’s accuracy–rethink correlation is only 68.4, so it is not the highest, implying its efficient reasoning is less tied to rethink performance.", "perturbed_explanation": "The perturbation is incorrect because Fig8 actually reports GPT4’s accuracy–rethink correlation as 75.0, not 68.4, and 75.0 is the highest value among all models, directly contradicting the perturbed claim.", "claim": "Fig5 shows GPT4 achieves about 83% accuracy with only ~5 rounds in easy mode; Fig8 shows GPT4’s accuracy–rethink correlation is 75 (the highest among models), indicating its efficient reasoning aligns with strong rethink performance.", "label": true }, { "paperid": "2411.10018v1", "paper_path": "./SciVer/papers/2411.10018v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.10018v1_figure_3.png", "item2_path": "./SciVer/images/2411.10018v1-Table3-1.png", "section": [ "3.3" ], 
"request_id": 89, "origin_statement": "Since family films have the highest average emotional range (≈ -9.98) in Figure 3, and Table 3 shows “You’re alive.” among the highest entropies (–8.32), we can infer that “You’re alive.” is often used in family films to exploit their broad emotional register.", "perturbed_statement": "Since thrillers have the highest average emotional range (≈ -11.2) in Figure 3, and Table 3 shows “You’re alive.” among the highest entropies (–8.32), we can infer that “You’re alive.” is often used in thrillers to exploit their broad emotional register.", "perturbed_explanation": "Figure 3 actually shows thrillers have the lowest average emotional range (around –11.2), not the highest, so the premise is factually incorrect and the inference about frequent use in thrillers is unsupported.", "claim": "Since family films have the highest average emotional range (≈ -9.98) in Figure 3, and Table 3 shows “You’re alive.” among the highest entropies (–8.32), we can infer that “You’re alive.” is often used in family films to exploit their broad emotional register.", "label": true }, { "paperid": "2410.14202v1", "paper_path": "./SciVer/papers/2410.14202v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.14202v1-Table2-1.png", "item2_path": "./SciVer/images/2410.14202v1-Table3-1.png", "section": [ "5.2.1" ], "request_id": 203, "origin_statement": "Pegasus with GPT-generated rationales increases its overall QWK score from 0.536 to 0.711, a 17.5% improvement (Table 2). Consequently, in Table 3 for prompt 3, Pegasus+RMTS(G) improves its QWK from 0.518 to 0.683, a 16.5% gain, slightly below its overall improvement rate.", "perturbed_statement": "Pegasus with GPT-generated rationales increases its overall QWK score from 0.536 to 0.713, a 17.7% improvement (Table 2). Consequently, in Table 3 for prompt 3, Pegasus+RMTS(G) improves its QWK from 0.518 to 0.725, a 20.0% gain, roughly matching its overall improvement.", "perturbed_explanation": "The perturbed statement misreports two values. Table 2 shows Pegasus+RMTS(G) reaches 0.711 overall (+17.5%), not 0.713 (+17.7%). Table 3 shows prompt 3 QWK of 0.683 (+16.5%), not 0.725 (+20.0%). These discrepancies contradict the original data.", "claim": "Pegasus with GPT-generated rationales increases its overall QWK score from 0.536 to 0.711, a 17.5% improvement (Table 2). 
Consequently, in Table 3 for prompt 3, Pegasus+RMTS(G) improves its QWK from 0.518 to 0.683, a 16.5% gain, slightly below its overall improvement rate.", "label": true }, { "paperid": "2409.04043v1", "paper_path": "./SciVer/papers/2409.04043v1.json", "claim_type": "sequential", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.04043v1_figure_4.png", "item2_path": "./SciVer/images/2409.04043v1-Table2-1.png", "section": [ "3.2" ], "request_id": 240, "origin_statement": "Since Reddit comments’ largest median sentiment boost is 0.281 from modeling civility (Table 2), and Figure 4 shows the modeling civility box for Reddit comments above 0.7, modeling civility elevates average sentiment above 0.7 in that community.", "perturbed_statement": "Since Reddit comments’ largest median sentiment boost is 0.049 from support (Table 2), and Figure 4 shows the support box for Reddit comments above 0.8, support interventions elevate average sentiment above 0.8 in that community.", "perturbed_explanation": "This is incorrect because Table 2 actually reports that modeling civility yields the highest median boost for Reddit comments at 0.281, not support (which is only 0.049). Moreover, in Figure 4 the support (pink) box for Reddit comments centers around ~0.65–0.75, well below 0.8.", "claim": "Since Reddit comments’ largest median sentiment boost is 0.281 from modeling civility (Table 2), and Figure 4 shows the modeling civility box for Reddit comments above 0.7, modeling civility elevates average sentiment above 0.7 in that community.", "label": true }, { "paperid": "2409.15155v1", "paper_path": "./SciVer/papers/2409.15155v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15155v1_figure_4(a).png", "item2_path": "./SciVer/images/2409.15155v1_figure_4(b).png", "section": [ "3.1.2" ], "request_id": 241, "origin_statement": "The peak PSNR of 29.40 dB occurs at α=0.5, β=1 in Fig. 4(a). At these settings, Fig. 4(b) shows the highest SSIM of 0.65. Thus α=0.5, β=1 optimize both metrics.", "perturbed_statement": "The maximum PSNR of 27.98 dB occurs at α=1, β=1 in Fig. 4(a). At these settings, Fig. 4(b) shows SSIM of 0.67. Therefore, α=1, β=1 optimize both metrics.", "perturbed_explanation": "The PSNR of 27.98 dB at α=1, β=1 is not the highest value; Fig. 4(a) actually shows a maximum PSNR of 29.40 dB at α=0.5, β=1. Hence α=1, β=1 cannot optimize PSNR (nor both metrics).", "claim": "The peak PSNR of 29.40 dB occurs at α=0.5, β=1 in Fig. 4(a). At these settings, Fig. 4(b) shows the highest SSIM of 0.65. 
Thus α=0.5, β=1 optimize both metrics.", "label": true }, { "paperid": "2410.14875v1", "paper_path": "./SciVer/papers/2410.14875v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.14875v1-Table1-1.png", "item2_path": "./SciVer/images/2410.14875v1-Table2-1.png", "section": [ "2.1" ], "request_id": 281, "origin_statement": "EleutherAI’s GPT-J-6B has 6B parameters, classifying it as an under-10B model; since the Scigen test set contains 2,251 AI-generated texts, all AI-generated scientific samples from models under 10B fall within these 2,251 entries.", "perturbed_statement": "EleutherAI’s GPT-J-6B has 16B parameters, classifying it as an under-10B model; since the Scigen test set contains 2,351 AI-generated texts, all AI-generated scientific samples from models under 10B fall within these 2,351 entries.", "perturbed_explanation": "The perturbed statement misreports GPT-J-6B’s size. Table 1 shows GPT-J-6B has 6B parameters (not 16B), so it truly qualifies as under-10B. It also misstates the Scigen test count: Table 2 lists 2,251 AI-generated samples, not 2,351, making both premises incorrect.", "claim": "EleutherAI’s GPT-J-6B has 6B parameters, classifying it as an under-10B model; since the Scigen test set contains 2,251 AI-generated texts, all AI-generated scientific samples from models under 10B fall within these 2,251 entries.", "label": true }, { "paperid": "2410.04068v1", "paper_path": "./SciVer/papers/2410.04068v1.json", "claim_type": "sequential", "item1": "4", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.04068v1_figure_4.png", "item2_path": "./SciVer/images/2410.04068v1-Table6-1.png", "section": [ "4.2" ], "request_id": 328, "origin_statement": "Figure 4 shows haiku resolves by chance most frequently at 38.1%. Table 6 reports that with internal belief, haiku’s resolve-by-chance rate increases by 10.8 points. Therefore, when holding internal belief, haiku resolves by chance around 48.9% of the time.", "perturbed_statement": "Figure 4 shows haiku resolves by chance at 24.6%. Table 6 reports that with internal belief, haiku’s resolve-by-chance rate increases by 10.8 points. Therefore, when holding internal belief, haiku resolves by chance around 35.4% of the time.", "perturbed_explanation": "The perturbation misreads Figure 4: the green bar for haiku’s “Resolve by chance” is labeled 38.1%, not 24.6% (24.6% is for integration). Moreover, Table 6 shows haiku’s chance resolution with belief at 45.7%, not 35.4%.", "claim": "Figure 4 shows haiku resolves by chance most frequently at 38.1%. Table 6 reports that with internal belief, haiku’s resolve-by-chance rate increases by 10.8 points. Therefore, when holding internal belief, haiku resolves by chance around 48.9% of the time.", "label": true }, { "paperid": "2409.04766v1", "paper_path": "./SciVer/papers/2409.04766v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04766v1_figure_1.png", "item2_path": "./SciVer/images/2409.04766v1_figure_2.png", "section": [ "1" ], "request_id": 350, "origin_statement": "Figure 1 labels Gaze360 as a source domain. 
From Figure 2, training on both Gaze360 and ETH-Gaze yields an 11.45° gaze error on Gaze360, which is 0.03° higher than the 11.42° error when trained solely on Gaze360, indicating slight source-domain degradation.", "perturbed_statement": "Figure 1 labels Gaze360 as a source domain. From Figure 2, training on both Gaze360 and ETH-Gaze yields an 11.30° gaze error on Gaze360, which is 0.12° lower than the 11.42° error when trained solely on Gaze360, indicating slight source-domain improvement.", "perturbed_explanation": "This is incorrect because Figure 2 actually reports an 11.45° error (not 11.30°) for the Gaze360+ETH-Gaze combination on Gaze360, which is 0.03° higher than the 11.42° error for training only on Gaze360, reflecting degradation rather than improvement.", "claim": "Figure 1 labels Gaze360 as a source domain. From Figure 2, training on both Gaze360 and ETH-Gaze yields an 11.45° gaze error on Gaze360, which is 0.03° higher than the 11.42° error when trained solely on Gaze360, indicating slight source-domain degradation.", "label": true }, { "paperid": "2411.09726v2", "paper_path": "./SciVer/papers/2411.09726v2.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.09726v2-Table5-1.png", "item2_path": "./SciVer/images/2411.09726v2-Table6-1.png", "section": [ "4" ], "request_id": 352, "origin_statement": "Table 5 shows a positive correlation between wind speed (WS) and UTCI (r = 0.20). Table 6 reveals station S8 recorded the highest WS at 6.97 m/s, implying S8 likely experienced the highest UTCI among the 14 stations.", "perturbed_statement": "Table 5 shows a positive correlation between wind speed (WS) and UTCI (r = 0.20). Table 6 reveals station S9 recorded the highest WS at 6.01 m/s, implying S9 likely experienced the highest UTCI among the 14 stations.", "perturbed_explanation": "Table 6 actually lists station S8 with the highest WS (6.97 m/s) and S9 second at 6.01 m/s. The perturbed claim misidentifies S9 as having the highest wind speed, so the inference that S9 experienced the highest UTCI is invalid.", "claim": "Table 5 shows a positive correlation between wind speed (WS) and UTCI (r = 0.20). Table 6 reveals station S8 recorded the highest WS at 6.97 m/s, implying S8 likely experienced the highest UTCI among the 14 stations.", "label": true }, { "paperid": "2411.10703v1", "paper_path": "./SciVer/papers/2411.10703v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10703v1_figure_5.png", "item2_path": "./SciVer/images/2411.10703v1_figure_6.png", "section": [ "4.2" ], "request_id": 388, "origin_statement": "From Fig. 6, at the 60-minute horizon, the KD [ST] {(64,32)} configuration yields the lowest RMSE (~7.5) among KD [ST] models. Fig. 5(c) shows its predicted glucose curve closely tracks the actual blood glucose over 500 time stamps at PH = 60, confirming accurate long-term forecasting.", "perturbed_statement": "From Fig. 6, at the 60-minute horizon, the smallest KD [ST] {(16,8)} configuration achieves the lowest RMSE (~5.1) among KD [ST] models. Fig. 5(c) shows its predicted curve aligns closely with actual blood glucose over 500 time stamps at PH = 60, proving the tiny model is most accurate.", "perturbed_explanation": "Fig. 6 actually shows that KD [ST] {(16,8)} has the highest RMSE (~11.4) at 60 min, not the lowest (~5.1). 
Moreover, the ~5.1 RMSE for {16,8} appears at the 30-minute horizon, not 60 minutes, so the perturbed claim misreads both the RMSE value and its prediction horizon.", "claim": "From Fig. 6, at the 60-minute horizon, the KD [ST] {(64,32)} configuration yields the lowest RMSE (~7.5) among KD [ST] models. Fig. 5(c) shows its predicted glucose curve closely tracks the actual blood glucose over 500 time stamps at PH = 60, confirming accurate long-term forecasting.", "label": true }, { "paperid": "2411.00154v1", "paper_path": "./SciVer/papers/2411.00154v1.json", "claim_type": "sequential", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00154v1_figure_3.png", "item2_path": "./SciVer/images/2411.00154v1-Table2-1.png", "section": [ "5.1" ], "request_id": 427, "origin_statement": "Figure 3 shows that for arXiv documents of ~15 K tokens, the 6.9 B model with a 1 024-token context achieves ~0.74 AUROC. Table 2 reports a document-level MIA AUROC of 0.697 for arXiv. Therefore, increasing context length to 1 024 tokens improves document MIA by about 4.3%.", "perturbed_statement": "Figure 3 shows that for arXiv documents of ~15 K tokens, the 6.9 B model with a 1 024-token context achieves ~0.74 AUROC. Table 2 reports a document-level MIA AUROC of 0.560 for arXiv. Therefore, increasing context length to 1 024 tokens improves document MIA by about 18%.", "perturbed_explanation": "The perturbation misreads Table 2: the document-level AUROC for arXiv is actually 0.697 ± 0.060, not 0.560. A value of 0.560 appears for the Wiki dataset’s document scale, so the claimed 18% improvement is incorrect.", "claim": "Figure 3 shows that for arXiv documents of ~15 K tokens, the 6.9 B model with a 1 024-token context achieves ~0.74 AUROC. Table 2 reports a document-level MIA AUROC of 0.697 for arXiv. 
Therefore, increasing context length to 1 024 tokens improves document MIA by about 4.3%.", "label": true }, { "paperid": "2409.06224v1", "paper_path": "./SciVer/papers/2409.06224v1.json", "claim_type": "sequential", "item1": "2(d)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06224v1_figure_2(d).png", "item2_path": "./SciVer/images/2409.06224v1_figure_4(b).png", "section": [ "3" ], "request_id": 428, "origin_statement": "Since over half of MIP-GAF images contain more than 10 detected faces (Figure 2), human participants rated MIP-GAF as difficult in 55% of trials (Figure 4), showing that high face counts correspond to higher spotting difficulty.", "perturbed_statement": "Since over half of MIP-GAF images contain more than 20 detected faces (Figure 2), human participants rated MIP-GAF as difficult in 55% of trials (Figure 4).", "perturbed_explanation": "Figure 2’s face‐count histogram shows that most images have between 3 and 10 faces and only a small fraction exceed 20 faces, so it is incorrect to claim that over half of the images contain more than 20 faces.", "claim": "Since over half of MIP-GAF images contain more than 10 detected faces (Figure 2), human participants rated MIP-GAF as difficult in 55% of trials (Figure 4), showing that high face counts correspond to higher spotting difficulty.", "label": true }, { "paperid": "2409.17476v1", "paper_path": "./SciVer/papers/2409.17476v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17476v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.17476v1_figure_2(b).png", "section": [ "4.1" ], "request_id": 461, "origin_statement": "Under LightGCN with Bandwagon Attack, the most common number of times users are affected is 2 (Fig 2a), and these users tend to undergo 3 attack status changes (Fig 2b).", "perturbed_statement": "Under LightGCN with Bandwagon Attack, the most common number of times users are affected is 4 (Fig 2a), and these users tend to undergo 3 attack status changes (Fig 2b).", "perturbed_explanation": "The perturbation misreads Fig 2(a): the tallest bar for LightGCN under Bandwagon Attack occurs at 2 times affected, not at 4. Since the premise ‘‘most common times = 4’’ contradicts the actual histogram peak at 2, the claim is false.", "claim": "Under LightGCN with Bandwagon Attack, the most common number of times users are affected is 2 (Fig 2a), and these users tend to undergo 3 attack status changes (Fig 2b).", "label": true }, { "paperid": "2409.11074v2", "paper_path": "./SciVer/papers/2409.11074v2.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11074v2_figure_1.png", "item2_path": "./SciVer/images/2409.11074v2_figure_2.png", "section": [ "3.1" ], "request_id": 542, "origin_statement": "Figure1’s PDF OCR stage extracts LaTeX from all PDFs. Figure2 shows about 17,000 'Numbers' problems and 10,000 'Arithmetic' problems in the synthetic set. Therefore, the OCR stage processed roughly 7,000 more 'Numbers' problems than 'Arithmetic' ones.", "perturbed_statement": "Figure1’s PDF OCR stage extracts LaTeX from all PDFs. Figure2 shows about 8,000 'Numbers' problems and 10,000 'Arithmetic' problems in the synthetic set. 
Therefore, the OCR stage processed roughly 2,000 more 'Arithmetic' problems than 'Numbers' ones.", "perturbed_explanation": "Figure2 actually indicates about 17,000 'Numbers' problems (not 8,000) and about 10,400 'Arithmetic' problems. Thus, 'Numbers' problems exceed 'Arithmetic' by roughly 7,000, contradicting the perturbed claim that 'Arithmetic' problems are more numerous.", "claim": "Figure1’s PDF OCR stage extracts LaTeX from all PDFs. Figure2 shows about 17,000 'Numbers' problems and 10,000 'Arithmetic' problems in the synthetic set. Therefore, the OCR stage processed roughly 7,000 more 'Numbers' problems than 'Arithmetic' ones.", "label": true }, { "paperid": "2409.04941v1", "paper_path": "./SciVer/papers/2409.04941v1.json", "claim_type": "sequential", "item1": "2(b)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04941v1_figure_2(b).png", "item2_path": "./SciVer/images/2409.04941v1_figure_3(b).png", "section": [ "4" ], "request_id": 544, "origin_statement": "Cuda-tensor workloads consist of about 40% vector_arithmetic instructions (Fig.2), and Fig.3 shows their predicted vs. actual power points lie within ±5 W of the bisector, so the GPU model predicts cuda-tensor power within 5 W error.", "perturbed_statement": "Cuda-tensor workloads consist of about 40% vector_arithmetic instructions (Fig.2), and Fig.3 shows their predicted vs. actual power points lie exactly on the bisector line, so the GPU model perfectly predicts cuda-tensor power.", "perturbed_explanation": "This is incorrect because in Fig.3 the red cuda-tensor points clearly deviate from the bisector by more than a few watts; they do not lie exactly on the bisector line.", "claim": "Cuda-tensor workloads consist of about 40% vector_arithmetic instructions (Fig.2), and Fig.3 shows their predicted vs. actual power points lie within ±5 W of the bisector, so the GPU model predicts cuda-tensor power within 5 W error.", "label": true }, { "paperid": "2409.02076v6", "paper_path": "./SciVer/papers/2409.02076v6.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.02076v6_figure_3(a).png", "item2_path": "./SciVer/images/2409.02076v6_figure_3(b).png", "section": [ "3.5", "3.4" ], "request_id": 546, "origin_statement": "Mixtral-8x7B achieved the highest periodic task accuracy (~0.21) in Figure 3a. In Figure 3b, it scored 91 on Ruler and 23 on LongGenBench at 16K, showing its superior long-context output. This suggests models excelling at periodic instructions also perform better on long-output generation.", "perturbed_statement": "Qwen2-72B achieved the highest periodic task accuracy (~0.21). In Figure 3b, it scored 90 on Ruler and 18 on LongGenBench at 16K, showing its superior long-context output. This suggests models excelling at periodic instructions also perform better on long-output generation.", "perturbed_explanation": "The perturbed statement misidentifies Qwen2-72B as having the top periodic accuracy (~0.21), but Figure 3a shows Mixtral-8x7B at ~0.21 and Qwen2-72B at only ~0.15. It also misreports Qwen2-72B’s Ruler (16K) score (actually ~94, not 90) and its LongGenBench (16K) score (actually ~22, not 18), directly contradicting Figure 3b.", "claim": "Mixtral-8x7B achieved the highest periodic task accuracy (~0.21) in Figure 3a. In Figure 3b, it scored 91 on Ruler and 23 on LongGenBench at 16K, showing its superior long-context output. 
This suggests models excelling at periodic instructions also perform better on long-output generation.", "label": true }, { "paperid": "2411.09118v1", "paper_path": "./SciVer/papers/2411.09118v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.09118v1-Table3-1.png", "item2_path": "./SciVer/images/2411.09118v1-Table4-1.png", "section": [ "4.5" ], "request_id": 646, "origin_statement": "Table 3 shows s_max=1.2 yields the lowest average error of 31.86%. Table 4 shows the configuration μ=2.0, α₁=10.0, α₂=1.00 also yields 31.86% average error. Thus, combining s_max=1.2 during training and these loss parameters achieves optimal robustness.", "perturbed_statement": "Table 3 shows s_max=0.8 yields the lowest average error of 31.86%. Table 4 shows the configuration μ=2.0, α₁=10.0, α₂=1.00 also yields 31.86% average error. Thus, combining s_max=0.8 during training and these loss parameters achieves optimal robustness.", "perturbed_explanation": "This is incorrect because in Table 3 the entry for s_max=0.8 lists an average error of 32.36%, not 31.86%. The lowest average error of 31.86% actually occurs at s_max=1.2, so the premise about s_max=0.8 is false.", "claim": "Table 3 shows s_max=1.2 yields the lowest average error of 31.86%. Table 4 shows the configuration μ=2.0, α₁=10.0, α₂=1.00 also yields 31.86% average error. Thus, combining s_max=1.2 during training and these loss parameters achieves optimal robustness.", "label": true }, { "paperid": "2411.14215v1", "paper_path": "./SciVer/papers/2411.14215v1.json", "claim_type": "sequential", "item1": "5(b)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14215v1_figure_5(b).png", "item2_path": "./SciVer/images/2411.14215v1_figure_5(c).png", "section": [ "3.2", "3.3" ], "request_id": 675, "origin_statement": "The symbol-variant matrix (Fig 5b) shows the missing element in the bottom-right. The attention check (Fig 5c) instructs to respond with '000'. Therefore, participants encountering the symbol matrix’s blank in the bottom-right would input '000' at that position when taking the attention check.", "perturbed_statement": "The symbol-variant matrix (Fig 5b) shows the missing element in the top-left. The attention check (Fig 5c) instructs to respond with '000'. Therefore, participants encountering the symbol matrix’s blank in the top-left would input '000' at that position when taking the attention check.", "perturbed_explanation": "This is incorrect because Fig 5b clearly displays the blank cell in the bottom-right corner—not the top-left. The perturbation mislocates the missing element, contradicting the actual position shown in the symbol-variant example.", "claim": "The symbol-variant matrix (Fig 5b) shows the missing element in the bottom-right. The attention check (Fig 5c) instructs to respond with '000'. 
Therefore, participants encountering the symbol matrix’s blank in the bottom-right would input '000' at that position when taking the attention check.", "label": true }, { "paperid": "2409.04723v1", "paper_path": "./SciVer/papers/2409.04723v1.json", "claim_type": "sequential", "item1": "4(b)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.04723v1_figure_4(b).png", "item2_path": "./SciVer/images/2409.04723v1-Table4-1.png", "section": [ "5" ], "request_id": 681, "origin_statement": "From the PPG+Sleep chart, fatigue classification achieves an F1 of about 0.9. Table 4 reports the overall PPG+Sleep finetuned F1 as 0.75. Therefore, fatigue F1 outperforms the finetuned average by 0.15.", "perturbed_statement": "From the PPG+Sleep chart, fatigue classification achieves an F1 of about 0.8. Table 4 reports the overall PPG+Sleep finetuned F1 as 0.75. Therefore, fatigue F1 outperforms the finetuned average by 0.15.", "perturbed_explanation": "The first premise is wrong: the chart actually shows fatigue F1 after adding sleep is around 0.9, not 0.8. Consequently, the claimed 0.15 improvement (0.8 − 0.75) is incorrect and the arithmetic does not match the true values.", "claim": "From the PPG+Sleep chart, fatigue classification achieves an F1 of about 0.9. Table 4 reports the overall PPG+Sleep finetuned F1 as 0.75. Therefore, fatigue F1 outperforms the finetuned average by 0.15.", "label": true }, { "paperid": "2410.13650v1", "paper_path": "./SciVer/papers/2410.13650v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13650v1_figure_4.png", "item2_path": "./SciVer/images/2410.13650v1_figure_5.png", "section": [ "2.3.2", "2.3" ], "request_id": 736, "origin_statement": "Since yellow nodes mark tMSPs in Fig 4, and Fig 5 left panel shows J1630+3550 as a yellow node, J1630+3550 is a transitional millisecond pulsar.", "perturbed_statement": "Since green nodes mark redback (RB) pulsars in Fig 4, and Fig 5 left panel shows J1630+3550 as a green node, J1630+3550 is a redback pulsar.", "perturbed_explanation": "In Fig 5 left panel, the node for J1630+3550 is actually colored yellow (not green), and yellow nodes represent tMSPs. Therefore it cannot be a redback pulsar as claimed.", "claim": "Since yellow nodes mark tMSPs in Fig 4, and Fig 5 left panel shows J1630+3550 as a yellow node, J1630+3550 is a transitional millisecond pulsar.", "label": true }, { "paperid": "2409.16492v1", "paper_path": "./SciVer/papers/2409.16492v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.16492v1_figure_3.png", "item2_path": "./SciVer/images/2409.16492v1-Table3-1.png", "section": [ "3" ], "request_id": 768, "origin_statement": "The C6H rotational diagram (Fig.3c) yields T_rot = 8.3 ± 0.7 K, and the FWHM values for C6H lines in the survey (Fig.4 upper panel) cluster around 1.1 km s⁻¹, indicating cold gas with low turbulent broadening.", "perturbed_statement": "The C6H rotational diagram yields T_rot ≈ 13.5 K, and its observed line widths exceed 1.6 km s⁻¹, implying warmer gas with high turbulence.", "perturbed_explanation": "Fig.3c explicitly reports T_rot = 8.3±0.7 K for C6H, not ≈13.5 K. 
Moreover, Table 3 and Fig.4 show C6H line widths around 1.1 km s⁻¹ (maximum ~1.17 km s⁻¹), not >1.6 km s⁻¹, so the perturbed values contradict these data.", "claim": "The C6H rotational diagram (Fig.3c) yields T_rot = 8.3 ± 0.7 K, and the FWHM values for C6H lines in the survey (Fig.4 upper panel) cluster around 1.1 km s⁻¹, indicating cold gas with low turbulent broadening.", "label": true }, { "paperid": "2410.17226v2", "paper_path": "./SciVer/papers/2410.17226v2.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17226v2_figure_4.png", "item2_path": "./SciVer/images/2410.17226v2_figure_5.png", "section": [ "5.1" ], "request_id": 782, "origin_statement": "On 48 cores, C-BFS achieves about 32× self-speedup (Fig.4), and at cluster diameter d=5 its relative running time is roughly 2.5× (Fig.5), indicating that increasing diameter to 5 more than doubles execution time despite high core parallelism.", "perturbed_statement": "On 48 cores, C-BFS achieves about 16× self-speedup, and at cluster diameter d=5 its relative running time increases to only 1.5×, indicating a modest performance drop with diameter growth compared to parallel gains.", "perturbed_explanation": "The perturbation is incorrect because Figure 4 shows self-speedup at 48 cores is about 32× (not 16×), and Figure 5 shows relative running time at d=5 is around 2.5× (not 1.5×). These mismatches contradict the actual data.", "claim": "On 48 cores, C-BFS achieves about 32× self-speedup (Fig.4), and at cluster diameter d=5 its relative running time is roughly 2.5× (Fig.5), indicating that increasing diameter to 5 more than doubles execution time despite high core parallelism.", "label": true }, { "paperid": "2411.08494v1", "paper_path": "./SciVer/papers/2411.08494v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.08494v1-Table2-1.png", "item2_path": "./SciVer/images/2411.08494v1-Table3-1.png", "section": [ "2.2" ], "request_id": 809, "origin_statement": "CPU A has 56 hardware threads (14 cores/socket ×2 threads/core ×2 sockets), matching the 507.cactuBSSN_r workload’s optimal O3-56 configuration; thus CPU A achieves its highest SPEC score on 507.cactuBSSN_r under its maximum-thread setting.", "perturbed_statement": "CPU A has 64 hardware threads (16 cores/socket ×2 threads/core ×2 sockets), matching the 507.cactuBSSN_r workload’s optimal O3-64 configuration; thus CPU A achieves its highest SPEC score on 507.cactuBSSN_r under its maximum-thread setting.", "perturbed_explanation": "This statement is incorrect because Table 3 shows CPU A has 14 cores per socket, 2 sockets, and 2 threads per core, for a total of 56 threads, not 64. 
Moreover, Table 2 lists the optimal configuration for 507.cactuBSSN_r as O3-56, not O3-64.", "claim": "CPU A has 56 hardware threads (14 cores/socket ×2 threads/core ×2 sockets), matching the 507.cactuBSSN_r workload’s optimal O3-56 configuration; thus CPU A achieves its highest SPEC score on 507.cactuBSSN_r under its maximum-thread setting.", "label": true }, { "paperid": "2410.15126v1", "paper_path": "./SciVer/papers/2410.15126v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.15126v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.15126v1_figure_5(b).png", "section": [ "4.8" ], "request_id": 855, "origin_statement": "At 80K pre-training steps, MELT achieves 82.2 Macro F1 on SOFC-NER versus Random’s 80.8, then on SOFC-Filling at the same step MELT scores 61.5 against Random’s 62.0, lagging by 0.5 points.", "perturbed_statement": "At 80K pre-training steps, MELT achieves 83.0 Macro F1 on SOFC-NER versus Random’s 81.0, then on SOFC-Filling at the same step MELT scores 60.0 against Random’s 61.0, lagging by 1.0 point.", "perturbed_explanation": "The perturbed claim misreports both figures: in (a) at step 8 MELT actually scores 82.2 not 83.0 and Random 80.8 not 81.0; in (b) MELT scores 61.5 not 60.0 and Random 62.0 not 61.0. These discrepancies render the claim false.", "claim": "At 80K pre-training steps, MELT achieves 82.2 Macro F1 on SOFC-NER versus Random’s 80.8, then on SOFC-Filling at the same step MELT scores 61.5 against Random’s 62.0, lagging by 0.5 points.", "label": true }, { "paperid": "2409.17504v1", "paper_path": "./SciVer/papers/2409.17504v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17504v1_figure_3.png", "item2_path": "./SciVer/images/2409.17504v1_figure_4.png", "section": [ "4.4" ], "request_id": 868, "origin_statement": "Figure 3(b) shows that k=6 yields the highest AUROC (≈77.8%). Using that subspace, HaloScope in Figure 4 achieves 94.4% AUROC on TydiQA-GP, outperforming direct projection’s 90.5%.", "perturbed_statement": "Figure 3(b) shows that k=4 yields the highest AUROC (≈73.4%). Using that subspace, HaloScope in Figure 4 achieves 96.5% AUROC on TydiQA-GP, outperforming direct projection’s 90.5%.", "perturbed_explanation": "The perturbation is wrong because Figure 3(b) actually peaks at k=6 with ≈77.8% AUROC, not at k=4 (≈71.5%). Moreover, Figure 4 reports HaloScope’s TydiQA-GP performance as 94.4% AUROC, not 96.5%.", "claim": "Figure 3(b) shows that k=6 yields the highest AUROC (≈77.8%). 
Using that subspace, HaloScope in Figure 4 achieves 94.4% AUROC on TydiQA-GP, outperforming direct projection’s 90.5%.", "label": true }, { "paperid": "2410.18321v1", "paper_path": "./SciVer/papers/2410.18321v1.json", "claim_type": "sequential", "item1": "3", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.18321v1-Table3-1.png", "item2_path": "./SciVer/images/2410.18321v1-Table6-1.png", "section": [ "5.5.3" ], "request_id": 891, "origin_statement": "Table 3 shows Focal Calibration on CIFAR-100 ResNet-50 reduces smCE by only 1.1% after temperature scaling; Table 6 shows its AdaECE also drops by 1.1%, indicating FCL training yields minimal post-processing gap in both metrics.", "perturbed_statement": "Table 3 shows Focal Calibration on CIFAR-100 ResNet-50 reduces smCE by 2.0% after temperature scaling; Table 6 shows its AdaECE drops by only 0.5%, indicating FCL training yields minimal post-processing gap in both metrics.", "perturbed_explanation": "In Table 3, smCE for Focal Calibration on CIFAR-100 ResNet-50 actually decreases from 3.68% to 2.29%, a gap of 1.1%, not 2.0%. In Table 6, AdaECE for the same setting drops from 3.74% to 2.11%, a gap of 1.1%, not 0.5%. Therefore, the perturbed gaps contradict the presented data.", "claim": "Table 3 shows Focal Calibration on CIFAR-100 ResNet-50 reduces smCE by only 1.1% after temperature scaling; Table 6 shows its AdaECE also drops by 1.1%, indicating FCL training yields minimal post-processing gap in both metrics.", "label": true }, { "paperid": "2409.15299v1", "paper_path": "./SciVer/papers/2409.15299v1.json", "claim_type": "sequential", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15299v1_figure_4.png", "item2_path": "./SciVer/images/2409.15299v1-Table2-1.png", "section": [ "4.1" ], "request_id": 1075, "origin_statement": "Table 2 shows the decoy has 5 years of experience in Qualification 2 versus the target’s 3. From Figure 4, GPT-4’s P(TARGET) for nurse rises from 0.56 (control) to 0.89 (treatment). Hence, GPT-4 strongly favors the target when the decoy is inferior in experience.", "perturbed_statement": "Table 2 shows the decoy has 2 years of experience in Qualification 2 versus the target’s 3. From Figure 4, GPT-4’s P(TARGET) for nurse rises from 0.67 (control) to 0.77 (treatment). Hence, GPT-4 moderately favors the target when the decoy is inferior in experience.", "perturbed_explanation": "This is incorrect because Table 2 actually lists the decoy’s second qualification as 5 years, not 2. Moreover, Figure 4 shows GPT-4’s control probability for nurse is about 0.56 (not 0.67) and the treatment probability is about 0.89 (not 0.77).", "claim": "Table 2 shows the decoy has 5 years of experience in Qualification 2 versus the target’s 3. From Figure 4, GPT-4’s P(TARGET) for nurse rises from 0.56 (control) to 0.89 (treatment). Hence, GPT-4 strongly favors the target when the decoy is inferior in experience.", "label": true }, { "paperid": "2409.17587v1", "paper_path": "./SciVer/papers/2409.17587v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17587v1_figure_4.png", "item2_path": "./SciVer/images/2409.17587v1_figure_5.png", "section": [ "2.1" ], "request_id": 1084, "origin_statement": "The histogram in Fig.4 shows that most clients have exactly 1 dialogue. The sample geostream table lists 5 geo-coordinates for a single client. 
Therefore, this client has five times more mobility data points than dialogue interactions.", "perturbed_statement": "The histogram in Fig.4 shows that most clients have exactly 3 dialogues. The sample geostream table lists 5 geo-coordinates for a single client. Therefore, this client has only 1.67 times more mobility data points than dialogue interactions.", "perturbed_explanation": "The perturbation wrongly claims the modal dialogue count is 3, but Fig.4 actually peaks at 1 dialogue per client. Because the premise is incorrect, the resulting ratio of 5:3 (1.67×) is not supported by the data.", "claim": "The histogram in Fig.4 shows that most clients have exactly 1 dialogue. The sample geostream table lists 5 geo-coordinates for a single client. Therefore, this client has five times more mobility data points than dialogue interactions.", "label": true }, { "paperid": "2411.03769v1", "paper_path": "./SciVer/papers/2411.03769v1.json", "claim_type": "sequential", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03769v1_figure_2.png", "item2_path": "./SciVer/images/2411.03769v1_figure_4.png", "section": [ "3.1" ], "request_id": 1143, "origin_statement": "Figure 3 shows Arabic had over 400 annotators, so Arabic appears among the seen languages in the top row of Figure 5's zero-shot generations.", "perturbed_statement": "Figure 3 shows Spanish had over 300 annotators, so Spanish appears among the unseen languages in the second row of Figure 5's zero-shot generations.", "perturbed_explanation": "This is wrong because Figure 3 actually shows Spanish has fewer than 100 annotators (one of the smallest bars), and Figure 5 places Spanish in the top row of seen languages, not the unseen second row.", "claim": "Figure 3 shows Arabic had over 400 annotators, so Arabic appears among the seen languages in the top row of Figure 5's zero-shot generations.", "label": true }, { "paperid": "2409.11064v1", "paper_path": "./SciVer/papers/2409.11064v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11064v1_figure_5(a).png", "item2_path": "./SciVer/images/2409.11064v1-Table2-1.png", "section": [ "4.3" ], "request_id": 1167, "origin_statement": "Figure 5 shows S=10 yields peak AUC of ~0.780. Table 2 shows removing sequence decomposition lowers AUC by 0.1663 (from 0.7413 to 0.5750). Thus retaining sequence decomposition at S=10 yields an expected AUC around 0.9463.", "perturbed_statement": "Figure 5 shows S=25 yields peak AUC of ~0.770. Table 2 shows removing sequence decomposition lowers AUC by 0.1663 (from 0.7413 to 0.5750). Thus retaining sequence decomposition at S=25 yields an expected AUC around 0.9363.", "perturbed_explanation": "The perturbation incorrectly claims that S=25 has the highest AUC in Figure 5. In fact, Figure 5’s tallest bar is at S=10 (≈0.780), while S=25 is lower (≈0.769). Hence the premise about S=25 peak AUC contradicts the actual figure data.", "claim": "Figure 5 shows S=10 yields peak AUC of ~0.780. Table 2 shows removing sequence decomposition lowers AUC by 0.1663 (from 0.7413 to 0.5750). 
Thus retaining sequence decomposition at S=10 yields an expected AUC around 0.9463.", "label": true }, { "paperid": "2409.11676v1", "paper_path": "./SciVer/papers/2409.11676v1.json", "claim_type": "sequential", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11676v1_figure_8.png", "item2_path": "./SciVer/images/2409.11676v1_figure_9.png", "section": [ "3.3.2", "3.3" ], "request_id": 1175, "origin_statement": "At scale 2 (Fig.8), each hyperedge connects 7 agents; thus in Fig.9’s node-to-hyperedge mapping, the per-category function F() at scale 2 aggregates embeddings from these 7 agents before computing interaction strength, producing a combined 7-agent representation.", "perturbed_statement": "At scale 2 (Fig.8), each hyperedge connects 5 agents; thus in Fig.9’s node-to-hyperedge mapping, the per-category function F() at scale 2 aggregates embeddings from these 5 agents before computing interaction strength, producing a combined 5-agent representation.", "perturbed_explanation": "Figure 8 clearly shows each hyperedge at scale 2 encompassing 7 nodes (the orange center plus six blue agents), not 5. Therefore, F() must aggregate embeddings from 7 agents, making the claim of 5-agent aggregation incorrect.", "claim": "At scale 2 (Fig.8), each hyperedge connects 7 agents; thus in Fig.9’s node-to-hyperedge mapping, the per-category function F() at scale 2 aggregates embeddings from these 7 agents before computing interaction strength, producing a combined 7-agent representation.", "label": true }, { "paperid": "2410.15135v1", "paper_path": "./SciVer/papers/2410.15135v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.15135v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.15135v1_figure_4(b).png", "section": [ "5.3" ], "request_id": 1178, "origin_statement": "After one ISR iteration, the BLEU-4 score rises from 0.429 to 0.442, and using that iteration, the ROUGE-L score simultaneously improves from 0.683 to 0.685, indicating that self-revision notably enhances both surface similarity and recall performance.", "perturbed_statement": "After one ISR iteration, the BLEU-4 score rises from 0.429 to 0.447, and using that iteration, the ROUGE-L score simultaneously improves from 0.683 to 0.690, indicating that self-revision notably enhances both surface similarity and recall performance.", "perturbed_explanation": "This statement is incorrect because Figure 4(a) shows the BLEU-4 score at iteration 1 reaches only 0.442 (not 0.447), and Figure 4(b) shows the ROUGE-L score at iteration 1 is 0.685 (not 0.690).", "claim": "After one ISR iteration, the BLEU-4 score rises from 0.429 to 0.442, and using that iteration, the ROUGE-L score simultaneously improves from 0.683 to 0.685, indicating that self-revision notably enhances both surface similarity and recall performance.", "label": true }, { "paperid": "2410.22895v1", "paper_path": "./SciVer/papers/2410.22895v1.json", "claim_type": "sequential", "item1": "1(b)", "item2": "1(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.22895v1_figure_1(b).png", "item2_path": "./SciVer/images/2410.22895v1_figure_1(d).png", "section": [ "4.2" ], "request_id": 1228, "origin_statement": "Figure 2 shows that the combination ('approval', 'curiosity', 'excitement') has a relation intensity of 0.75 with the analyst discourse; Figure 4 shows that the single emotion ‘neutral’ 
peaks at 0.78 with that same discourse, slightly surpassing the triple set.", "perturbed_statement": "Figure 2 shows that the combination ('approval', 'curiosity', 'excitement') has a relation intensity of 0.65 with the analyst discourse; Figure 4 shows that the single emotion ‘neutral’ peaks at 0.75 with that same discourse, thus neutral outranks the triple set by 0.10.", "perturbed_explanation": "Both intermediate values are incorrect. In Figure 2, the heatmap cell for ('approval','curiosity','excitement') under analyst is labeled 0.75, not 0.65. In Figure 4, the cell for ‘neutral’ under analyst reads 0.78, not 0.75. Hence the final comparison is invalid.", "claim": "Figure 2 shows that the combination ('approval', 'curiosity', 'excitement') has a relation intensity of 0.75 with the analyst discourse; Figure 4 shows that the single emotion ‘neutral’ peaks at 0.78 with that same discourse, slightly surpassing the triple set.", "label": true }, { "paperid": "2411.04397v1", "paper_path": "./SciVer/papers/2411.04397v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04397v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.04397v1_figure_3(b).png", "section": [ "5.1" ], "request_id": 1229, "origin_statement": "The DMHP purity at δ=0.3 falls from about 0.72 in the 4-cluster setting to around 0.60 in the 5-cluster setting, indicating that increasing the ground truth cluster number degrades DMHP’s clustering performance.", "perturbed_statement": "The DMHP purity at δ=0.3 rises from about 0.72 in the 4-cluster setting to around 0.78 in the 5-cluster setting, indicating that increasing the ground truth cluster number improves DMHP’s clustering performance.", "perturbed_explanation": "The right-panel image for K_G_T=5 shows the DMHP purity at δ=0.3 is actually approximately 0.60 (blue line), not 0.78 as claimed. 
This concrete value contradicts the perturbed intermediate premise.", "claim": "The DMHP purity at δ=0.3 falls from about 0.72 in the 4-cluster setting to around 0.60 in the 5-cluster setting, indicating that increasing the ground truth cluster number degrades DMHP’s clustering performance.", "label": true }, { "paperid": "2410.00049v2", "paper_path": "./SciVer/papers/2410.00049v2.json", "claim_type": "sequential", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.00049v2_figure_4.png", "item2_path": "./SciVer/images/2410.00049v2-Table1-1.png", "section": [ "5.2" ], "request_id": 1269, "origin_statement": "Table 1 shows EARTH achieves the lowest RMSE of 243.2 for US-States at h=5; Figure 4 reveals a top-weight edge (~0.9) between California and Texas, suggesting EARTH leverages this CA–TX link to reduce error for short-term state forecasts.", "perturbed_statement": "Table 1 shows EARTH achieves the lowest RMSE of 243.2 for US-States at h=5; Figure 4 reveals a top-weight edge (~0.9) between California and Florida, suggesting EARTH leverages this CA–FL link to reduce error for short-term state forecasts.", "perturbed_explanation": "The perturbed premise is false because Figure 4 visualizes a strongest connection between California and Texas—not between California and Florida—so EARTH cannot be leveraging a CA–FL link that does not appear among the top-3 weighted edges.", "claim": "Table 1 shows EARTH achieves the lowest RMSE of 243.2 for US-States at h=5; Figure 4 reveals a top-weight edge (~0.9) between California and Texas, suggesting EARTH leverages this CA–TX link to reduce error for short-term state forecasts.", "label": true }, { "paperid": "2410.19050v1", "paper_path": "./SciVer/papers/2410.19050v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19050v1_figure_1.png", "item2_path": "./SciVer/images/2410.19050v1-Table3-1.png", "section": [ "7.1" ], "request_id": 1276, "origin_statement": "Figure 1 shows HD 104438 (green point) located on the subgiant branch. Table 3 gives its mass function f(M)=0.22 M⊙. For a ∼1.5 M⊙ subgiant primary, this yields a minimum companion mass ≈0.7 M⊙, consistent with a late-type main-sequence companion.", "perturbed_statement": "Figure 1 shows HD 104438 (green point) on the subgiant branch. Table 3 gives its mass function f(M)=2.2 M⊙. For a ∼1.5 M⊙ subgiant primary, this implies a minimum companion mass ≈3.5 M⊙, indicating a compact object, possibly a black hole.", "perturbed_explanation": "The perturbation misstates the mass function: Table 3 actually reports f(M)=0.22 M⊙, not 2.2 M⊙. With the correct f(M)=0.22 M⊙, the inferred companion mass is ≈0.7 M⊙, not ≈3.5 M⊙, so there is no indication of a black hole.", "claim": "Figure 1 shows HD 104438 (green point) located on the subgiant branch. Table 3 gives its mass function f(M)=0.22 M⊙. 
For a ∼1.5 M⊙ subgiant primary, this yields a minimum companion mass ≈0.7 M⊙, consistent with a late-type main-sequence companion.", "label": true }, { "paperid": "2411.09315v1", "paper_path": "./SciVer/papers/2411.09315v1.json", "claim_type": "sequential", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09315v1_figure_4.png", "item2_path": "./SciVer/images/2411.09315v1-Table1-1.png", "section": [ "3.2" ], "request_id": 1354, "origin_statement": "Since the CGRA comprises an 8×8 PE array (Figure 4), and referenced applications use at most 3 concurrent kernels (Table 1), at most 3 PEs are active, leaving 61 PEs idle that can be clock-gated.", "perturbed_statement": "Since the CGRA comprises a 4×4 PE array (Figure 4), and referenced applications use at most 3 concurrent kernels (Table 1), at most 3 PEs are active, leaving 13 PEs idle that can be clock-gated.", "perturbed_explanation": "The perturbed statement misreads the architecture: Figure 4 clearly shows an 8×8 PE grid (64 PEs), not a 4×4 grid (16 PEs). Therefore, the idle-PE count of 13 is incorrect, as there are actually 61 idle PEs when only 3 are active.", "claim": "Since the CGRA comprises an 8×8 PE array (Figure 4), and referenced applications use at most 3 concurrent kernels (Table 1), at most 3 PEs are active, leaving 61 PEs idle that can be clock-gated.", "label": true }, { "paperid": "2409.17836v1", "paper_path": "./SciVer/papers/2409.17836v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17836v1_figure_3.png", "item2_path": "./SciVer/images/2409.17836v1-Table3-1.png", "section": [ "5.3" ], "request_id": 1361, "origin_statement": "Since grouping three bytes yields the highest compression rate (≈52%) in TinyLLAMA (Fig 3), and LM-GC shows the greatest improvement on TinyImageNet (17.2%) in Table 3, selecting BPG=3 likely enhances LM-GC’s compression performance on complex datasets.", "perturbed_statement": "Since grouping eight bytes yields the highest compression rate (≈45%) in TinyLLAMA (Fig 3), and LM-GC shows the greatest improvement on TinyImageNet (17.2%) in Table 3, selecting BPG=8 likely enhances LM-GC’s compression performance on complex datasets.", "perturbed_explanation": "Fig 3 clearly indicates that BPG=3 achieves the top compression rate (~52%) for TinyLLAMA, while BPG=8 only reaches about 45%. 
Therefore, the claim that grouping eight bytes yields the highest rate contradicts the actual data.", "claim": "Since grouping three bytes yields the highest compression rate (≈52%) in TinyLLAMA (Fig 3), and LM-GC shows the greatest improvement on TinyImageNet (17.2%) in Table 3, selecting BPG=3 likely enhances LM-GC’s compression performance on complex datasets.", "label": true }, { "paperid": "2409.04880v1", "paper_path": "./SciVer/papers/2409.04880v1.json", "claim_type": "sequential", "item1": "8", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.04880v1_figure_8.png", "item2_path": "./SciVer/images/2409.04880v1-Table6-1.png", "section": [ "6" ], "request_id": 1429, "origin_statement": "Figure 8 shows S8 had only one press-release post but eleven leak posts, and Table 6 shows its first leak on January 31, 2017, preceding its February 25, 2017 press-release by 25 days.", "perturbed_statement": "Figure 8 shows S8 had eleven press-release posts but only one leak post, and Table 6 shows its first leak on January 31, 2017, implying official coverage outweighed rumors.", "perturbed_explanation": "Figure 8’s green bar for S8 (press-releases) is at 1 and the orange bar (leaks) is at about 11. Thus, S8 actually had one press-release post and eleven leak posts—not the other way around as the perturbed statement claims.", "claim": "Figure 8 shows S8 had only one press-release post but eleven leak posts, and Table 6 shows its first leak on January 31, 2017, preceding its February 25, 2017 press-release by 25 days.", "label": true }, { "paperid": "2409.13983v1", "paper_path": "./SciVer/papers/2409.13983v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.13983v1-Table3-1.png", "item2_path": "./SciVer/images/2409.13983v1-Table4-1.png", "section": [ "4.4" ], "request_id": 1431, "origin_statement": "Since bikes are the smallest class in SensatUrban (2,029 test points) and MCNet achieved the highest mIoU of 74.0% on S3DIS, this high overall mIoU implies MCNet’s robustness extends to underrepresented categories like bikes.", "perturbed_statement": "Since cars are the smallest class in SensatUrban (2,029 test points) and MCNet achieved only 68.0% mIoU on S3DIS, this moderate mIoU suggests MCNet may struggle with underrepresented classes like cars.", "perturbed_explanation": "This statement is incorrect because Table 3 shows that 'bikes'—not 'cars'—have only 2,029 test points; 'cars' have 747,183 test points. 
Moreover, Table 4 reports MCNet’s mIoU as 74.0%, not 68.0%, so the premises contradict the context.", "claim": "Since bikes are the smallest class in SensatUrban (2,029 test points) and MCNet achieved the highest mIoU of 74.0% on S3DIS, this high overall mIoU implies MCNet’s robustness extends to underrepresented categories like bikes.", "label": true }, { "paperid": "2410.17787v1", "paper_path": "./SciVer/papers/2410.17787v1.json", "claim_type": "sequential", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.17787v1_figure_4.png", "item2_path": "./SciVer/images/2410.17787v1-Table1-1.png", "section": [ "5" ], "request_id": 1451, "origin_statement": "Table 1 shows Gemini-1.5-flash’s most frequent operator is divide at 26.87%, which Figure 4 confirms by showing Divide at about 29% on the phoneme dataset.", "perturbed_statement": "Table 1 shows Gemini-1.5-flash’s most frequent operator is subtract at 26.87%, which Figure 4 confirms by showing Subtract at about 29% on the phoneme dataset.", "perturbed_explanation": "This is incorrect because Table 1 actually lists “divide” (not “subtract”) as Gemini-1.5-flash’s top operator at 26.87%, and Figure 4 shows the Subtract frequency around 9–10%, not 29%, contradicting both the table and the chart.", "claim": "Table 1 shows Gemini-1.5-flash’s most frequent operator is divide at 26.87%, which Figure 4 confirms by showing Divide at about 29% on the phoneme dataset.", "label": true }, { "paperid": "2410.19464v2", "paper_path": "./SciVer/papers/2410.19464v2.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.19464v2-Table2-1.png", "item2_path": "./SciVer/images/2410.19464v2-Table3-1.png", "section": [ "4.2" ], "request_id": 1470, "origin_statement": "Table 2 shows LOCAL’s SHD is approximately 1.00 at d=10. In Table 3, at d=100, LOCAL’s SHD rises to about 234.33, indicating a roughly 233-fold increase when scaling from 10 to 100 dimensions.", "perturbed_statement": "Table 2 shows LOCAL’s SHD is approximately 1.00 at d=10. In Table 3, at d=100, LOCAL’s SHD rises to about 143.00, indicating a roughly 142-fold increase when scaling from 10 to 100 dimensions.", "perturbed_explanation": "The perturbation is wrong because Table 3 reports LOCAL’s SHD at d=100 as 234.33±22.51 (row for LOCAL), not 143.00. The value 143.00±6.38 actually belongs to LOCAL-CNN, so attributing it to LOCAL contradicts the context.", "claim": "Table 2 shows LOCAL’s SHD is approximately 1.00 at d=10. 
In Table 3, at d=100, LOCAL’s SHD rises to about 234.33, indicating a roughly 233-fold increase when scaling from 10 to 100 dimensions.", "label": true },
{ "paperid": "2411.08884v1", "paper_path": "./SciVer/papers/2411.08884v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.08884v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.08884v1_figure_3(b).png", "section": [ "3.3" ], "request_id": 1477, "origin_statement": "GPT-4o mini has the highest Recreation score of 80 on the radar chart; this Recreation score accounts for about 30% of its total risk profile in the percentage stacked bar chart.", "perturbed_statement": "GPT-4o mini has a Recreation score of 78 on the radar chart; this Recreation score constitutes only 25% of its total risk profile in the percentage stacked bar chart.", "perturbed_explanation": "The perturbation is incorrect because the radar chart shows GPT-4o mini’s Recreation score is 80 (not 78), and the stacked bar chart indicates Recreation makes up roughly 30% of its total risk profile (not 25%).", "claim": "GPT-4o mini has the highest Recreation score of 80 on the radar chart; this Recreation score accounts for about 30% of its total risk profile in the percentage stacked bar chart.", "label": true },
{ "paperid": "2411.14623v2", "paper_path": "./SciVer/papers/2411.14623v2.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14623v2_figure_3.png", "item2_path": "./SciVer/images/2411.14623v2_figure_4.png", "section": [ "4.2" ], "request_id": 1489, "origin_statement": "Figure 3 shows experimental nodes endured a median of ~10^3 RPC port scans vs ~10^2 for control; Figure 4 reveals a cloud credentials targeting ratio of ~1.15. Thus, nodes with intensified RPC reconnaissance experience about 15% more cloud credential harvesting attempts.", "perturbed_statement": "Figure 3 shows experimental nodes endured a median of ~10^3 RPC port scans vs ~10^2 for control; Figure 4 reveals a cloud credentials targeting ratio of ~0.85. Thus, nodes with intensified RPC reconnaissance experience about 15% fewer cloud credential harvesting attempts.", "perturbed_explanation": "The perturbation is wrong because Figure 4 actually shows the cloud credentials bar above the red 1.0 line (targeting ratio ≈1.15), not below it. The ratio is greater than one, indicating more—not fewer—harvesting attempts on experimental nodes.", "claim": "Figure 3 shows experimental nodes endured a median of ~10^3 RPC port scans vs ~10^2 for control; Figure 4 reveals a cloud credentials targeting ratio of ~1.15. Thus, nodes with intensified RPC reconnaissance experience about 15% more cloud credential harvesting attempts.", "label": true },
{ "paperid": "2411.12449v2", "paper_path": "./SciVer/papers/2411.12449v2.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.12449v2_figure_4.png", "item2_path": "./SciVer/images/2411.12449v2-Table5-1.png", "section": [ "4" ], "request_id": 1503, "origin_statement": "At top-k=20, NEON(M2)’s helpfulness peaks at 1.57 in Figure 4; Table 5 then shows its average few-shot temporal score is 1.55—higher than WebRAG’s 1.52—establishing NEON(M2) as the top-performing method overall.", "perturbed_statement": "At top-k=20, NEON(M2)’s helpfulness peaks at 1.57 in Figure 4; Table 5 then shows its average few-shot temporal score is only 1.45—below WebRAG’s 1.52—suggesting NEON(M2) underperforms overall.", "perturbed_explanation": "The perturbation misreports NEON(M2)’s average score. Table 5 actually lists NEON(M2) at 1.55, not 1.45, so the claim that its average is below WebRAG’s 1.52 is factually incorrect.", "claim": "At top-k=20, NEON(M2)’s helpfulness peaks at 1.57 in Figure 4; Table 5 then shows its average few-shot temporal score is 1.55—higher than WebRAG’s 1.52—establishing NEON(M2) as the top-performing method overall.", "label": true },
{ "paperid": "2409.14628v1", "paper_path": "./SciVer/papers/2409.14628v1.json", "claim_type": "sequential", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14628v1_figure_4.png", "item2_path": "./SciVer/images/2409.14628v1-Table2-1.png", "section": [ "6.1" ], "request_id": 1512, "origin_statement": "Figure4 shows cycle 1 ckb accuracy is ~87% for Exp3 versus ~93% for Exp2. Table 2 reports final ckb accuracies of 91.4% for Exp3 and 97.7% for Exp2, confirming Exp2's uniform sampling outperforms Exp3 consistently.", "perturbed_statement": "Figure4 shows cycle 1 ckb accuracy is ~87% for Exp3 versus ~93% for Exp2. Table 2 reports final ckb accuracies of 97.7% for Exp3 and 91.4% for Exp2, suggesting Exp3 outperforms Exp2 at both start and end.", "perturbed_explanation": "The perturbation is wrong because Table 2 actually lists Exp2 at 97.7% and Exp3 at 91.4%, so Exp3 does not outperform Exp2 as claimed.", "claim": "Figure4 shows cycle 1 ckb accuracy is ~87% for Exp3 versus ~93% for Exp2. Table 2 reports final ckb accuracies of 91.4% for Exp3 and 97.7% for Exp2, confirming Exp2's uniform sampling outperforms Exp3 consistently.", "label": true },
{ "paperid": "2410.07331v2", "paper_path": "./SciVer/papers/2410.07331v2.json", "claim_type": "sequential", "item1": "5", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07331v2_figure_5.png", "item2_path": "./SciVer/images/2410.07331v2-Table5-1.png", "section": [ "5.4" ], "request_id": 1569, "origin_statement": "Table 5 shows File Viewing is the primary initial action. Figure 5 shows GPT-4’s success rate rises from near 0% at step 2 to about 60% by step 6. Thus, initial file viewing likely drives GPT-4’s rapid early success.", "perturbed_statement": "Table 5 shows SQL Query is the primary initial action. Figure 5 shows GPT-4’s success rate rises from near 0% at step 2 to about 60% by step 6. Thus, initial SQL querying likely drives GPT-4’s rapid early success.", "perturbed_explanation": "The perturbed statement is wrong because Table 5 explicitly lists File Viewing—not SQL Query—as the first action used for environment exploration. Therefore, attributing GPT-4’s early success to initial SQL querying contradicts the action ordering defined in the table.", "claim": "Table 5 shows File Viewing is the primary initial action. Figure 5 shows GPT-4’s success rate rises from near 0% at step 2 to about 60% by step 6. Thus, initial file viewing likely drives GPT-4’s rapid early success.", "label": true },
{ "paperid": "2410.18666v2", "paper_path": "./SciVer/papers/2410.18666v2.json", "claim_type": "sequential", "item1": "4(g)", "item2": "4(l)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.18666v2_figure_4(g).png", "item2_path": "./SciVer/images/2410.18666v2_figure_4(l).png", "section": [ "5.2" ], "request_id": 1576, "origin_statement": "The heavily distorted balcony railings in the first image are sharply restored by DreamClear, implying its strong structural inference; consequently, in the night city scene, it accurately reconstructs tree textures within the red box, showing consistent detail recovery on real-world inputs.", "perturbed_statement": "The first image's balcony railings remain blurred after DreamClear restoration, implying weak structural inference; consequently, in the night city scene, tree textures within the red box are oversmoothed, showing inconsistent detail recovery on real-world inputs.", "perturbed_explanation": "This is incorrect because in the first image DreamClear clearly reconstructs the balcony railings with crisp lines (not blurred), and in the second image the leaves and branches inside the red box are rendered with fine detail (not oversmoothed), contradicting the perturbed claims.", "claim": "The heavily distorted balcony railings in the first image are sharply restored by DreamClear, implying its strong structural inference; consequently, in the night city scene, it accurately reconstructs tree textures within the red box, showing consistent detail recovery on real-world inputs.", "label": true },
{ "paperid": "2410.20766v1", "paper_path": "./SciVer/papers/2410.20766v1.json", "claim_type": "sequential", "item1": "3", "item2": "20", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20766v1_figure_3.png", "item2_path": "./SciVer/images/2410.20766v1-Table20-1.png", "section": [ "4.2.3" ], "request_id": 1615, "origin_statement": "Figure 3 shows static outperforms dynamic by about 5.19% on Ubuntu average at context length 3. Therefore, static’s suggestion “You might try to ping the gateway address.” in Ubuntu Case 2 (Table 20) is more contextually relevant than dynamic’s generic “I don’t think it’s possible.”", "perturbed_statement": "Figure 3 shows dynamic outperforms static by about 5.19% on Ubuntu average at context length 4. Therefore, dynamic’s response “I don’t think it’s possible.” in Ubuntu Case 2 (Table 20) is more contextually relevant than static’s “You might try to ping the gateway address.”", "perturbed_explanation": "This is wrong because at context length 4 the dynamic model’s average difference on Ubuntu in Figure 3 is –0.000772 (slightly underperforming), not +5.19%, so dynamic does not outperform static at length 4, invalidating the comparison.", "claim": "Figure 3 shows static outperforms dynamic by about 5.19% on Ubuntu average at context length 3. Therefore, static’s suggestion “You might try to ping the gateway address.” in Ubuntu Case 2 (Table 20) is more contextually relevant than dynamic’s generic “I don’t think it’s possible.”", "label": true },
{ "paperid": "2409.17416v1", "paper_path": "./SciVer/papers/2409.17416v1.json", "claim_type": "sequential", "item1": "4", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17416v1_figure_4.png", "item2_path": "./SciVer/images/2409.17416v1_figure_6.png", "section": [ "5.2.2" ], "request_id": 1686, "origin_statement": "Figure 4 shows Phi-3 identifies only ~30% of LLM-generated fake news; as Figure 6 indicates Phi-3 misclassifies 60% of its own generated fake news, Phi-3 is worse at detecting self-produced content than external fake news.", "perturbed_statement": "Figure 4 shows Phi-3 identifies 45% of LLM-generated fake news; since Figure 6 indicates Phi-3 misclassifies only 38% of its own generated fake news, Phi-3 is equally proficient in detecting self-generated and external fake news.", "perturbed_explanation": "The perturbation misstates both values: in Figure 4 Phi-3’s detection rate is about 32%, not 45%, and in Figure 6 Phi-3’s misclassification rate is 60%, not 38%, so the claim contradicts the actual bars in both figures.", "claim": "Figure 4 shows Phi-3 identifies only ~30% of LLM-generated fake news; as Figure 6 indicates Phi-3 misclassifies 60% of its own generated fake news, Phi-3 is worse at detecting self-produced content than external fake news.", "label": true },
{ "paperid": "2409.03782v1", "paper_path": "./SciVer/papers/2409.03782v1.json", "claim_type": "sequential", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.03782v1_figure_5.png", "item2_path": "./SciVer/images/2409.03782v1-Table3-1.png", "section": [ "5.3" ], "request_id": 1723, "origin_statement": "Figure 5 shows that on stableImg at dropout 0.5, the RS_mAP of M2 remains above 0.8; Table 3 lists M2 as the best RS_mAP model for stableImg, confirming M2's robustness under high dropout.", "perturbed_statement": "Figure 5 shows that on stableImg at dropout 0.5, the RS_mAP of M2 remains above 0.9; Table 3 lists M2 as the best RS_mAP model for origImg, confirming M2's robustness under high dropout.", "perturbed_explanation": "The perturbed claim is incorrect on two counts: Figure 5 indicates M2's RS_mAP at dropout 0.5 on stableImg is about 0.88, not above 0.9, and Table 3 names M1 and M4 (not M2) as the best RS_mAP models for origImg.", "claim": "Figure 5 shows that on stableImg at dropout 0.5, the RS_mAP of M2 remains above 0.8; Table 3 lists M2 as the best RS_mAP model for stableImg, confirming M2's robustness under high dropout.", "label": true },
{ "paperid": "2411.11925v1", "paper_path": "./SciVer/papers/2411.11925v1.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11925v1_figure_6.png", "item2_path": "./SciVer/images/2411.11925v1_figure_7.png", "section": [ "4.2" ], "request_id": 1757, "origin_statement": "Figure 6 includes a golden retriever image, and Figure 7’s repeated second-row portrait of a llama across all draft lengths demonstrates continuous speculative decoding consistently preserves different animal class appearances.", "perturbed_statement": "Figure 6 includes a golden retriever image, and Figure 7’s repeated second-row portrait of a fox across all draft lengths demonstrates continuous speculative decoding consistently preserves different animal class appearances.", "perturbed_explanation": "The perturbation is wrong because in Figure 7 the animal in the second row is clearly a llama (long ears, tufted muzzle) rather than a fox, so the premise about a 'fox' contradicts the actual image.", "claim": "Figure 6 includes a golden retriever image, and Figure 7’s repeated second-row portrait of a llama across all draft lengths demonstrates continuous speculative decoding consistently preserves different animal class appearances.", "label": true },
{ "paperid": "2409.05620v1", "paper_path": "./SciVer/papers/2409.05620v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05620v1_figure_4(a).png", "item2_path": "./SciVer/images/2409.05620v1_figure_4(c).png", "section": [ "4.3" ], "request_id": 1764, "origin_statement": "SSIL_OC boosts average task accuracy by ~3.8% in CIFAR100-LT with ResNet18 (Fig4a) compared to ~2.5% in CIFAR100 with ResNet32 (Fig4c), indicating a 1.3% larger gain under class imbalance.", "perturbed_statement": "SSIL_OC boosts average task accuracy by ~5.0% in CIFAR100-LT with ResNet18 (Fig4a) compared to ~3.8% in CIFAR100 with ResNet32 (Fig4c), indicating a 1.2% larger gain under class imbalance.", "perturbed_explanation": "The claimed 5.0% average improvement on CIFAR100-LT contradicts Fig4a’s actual per-task gains (ranging from 2.14% to 6.17%), which sum to about 37.96% over ten tasks (≈3.8%). Likewise, Fig4c’s improvements sum to roughly 25.0% (≈2.5%), not 3.8%.", "claim": "SSIL_OC boosts average task accuracy by ~3.8% in CIFAR100-LT with ResNet18 (Fig4a) compared to ~2.5% in CIFAR100 with ResNet32 (Fig4c), indicating a 1.3% larger gain under class imbalance.", "label": true },
{ "paperid": "2410.13805v1", "paper_path": "./SciVer/papers/2410.13805v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "5(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13805v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.13805v1_figure_5(a).png", "section": [ "4.5" ], "request_id": 1779, "origin_statement": "From Figure 4 (protein task), detection efficiency peaks at m=5. From Figure 5, the pLDDT score at transition probability a11=0.5 is highest (~0.88). Hence, configuring the watermark with m=5 and a11=0.5 maximizes detectability while preserving optimal protein generation quality.", "perturbed_statement": "From Figure 4 (protein task), detection efficiency peaks at m=6. From Figure 5, the pLDDT score at transition probability a11=0.5 is lowest (~0.84). Hence, configuring the watermark with m=6 and a11=0.5 maximizes detectability while preserving optimal protein generation quality.", "perturbed_explanation": "This statement is incorrect because Figure 4 shows the highest detection efficiency for protein generation at pattern length m=5, not m=6. Moreover, Figure 5 indicates that pLDDT at a11=0.5 is the highest (~0.88), not the lowest (~0.84), contradicting the perturbed claim.", "claim": "From Figure 4 (protein task), detection efficiency peaks at m=5. From Figure 5, the pLDDT score at transition probability a11=0.5 is highest (~0.88). Hence, configuring the watermark with m=5 and a11=0.5 maximizes detectability while preserving optimal protein generation quality.", "label": true },
{ "paperid": "2410.22184v1", "paper_path": "./SciVer/papers/2410.22184v1.json", "claim_type": "sequential", "item1": "5(b)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22184v1_figure_5(b).png", "item2_path": "./SciVer/images/2410.22184v1-Table5-1.png", "section": [ "7.3" ], "request_id": 1813, "origin_statement": "Figure 5 shows Caltech-101 accuracy increases from 79.4% with three datasets to 81.7% with four. Table 5 shows the student model’s Caltech-101 accuracy actually decreases from 80.32% (three) to 80.04% (four).", "perturbed_statement": "Figure 5 shows Caltech-101 accuracy drops from 80.8% with two datasets to 79.4% with three. Table 5 shows the student model’s Caltech-101 accuracy increasing from 80.32% (three) to 80.04% (four).", "perturbed_explanation": "The perturbed claim incorrectly asserts that the student’s accuracy increases from 80.32% to 80.04%, but Table 5 actually reports a decrease (80.04% is lower than 80.32%), contradicting the stated trend.", "claim": "Figure 5 shows Caltech-101 accuracy increases from 79.4% with three datasets to 81.7% with four. Table 5 shows the student model’s Caltech-101 accuracy actually decreases from 80.32% (three) to 80.04% (four).", "label": true },
{ "paperid": "2411.04554v1", "paper_path": "./SciVer/papers/2411.04554v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04554v1_figure_7(a).png", "item2_path": "./SciVer/images/2411.04554v1_figure_7(b).png", "section": [ "6" ], "request_id": 41, "origin_statement": "Peri-midFormer uses only ~0.2 M parameters on the Heartbeat classification task (the lowest of all models), so in ETTh2 long-term forecasting it likewise employs under 1 M parameters and achieves the lowest MSE (~0.35).", "perturbed_statement": "Peri-midFormer uses only ~2 M parameters on the Heartbeat classification task (the lowest of all models), so in ETTh2 long-term forecasting it likewise employs under 1 M parameters and achieves the lowest MSE (~0.35).", "perturbed_explanation": "This is incorrect because in the left plot of Figure 7, the Peri-midFormer marker sits just above 0 on the x-axis (around 0.15 M parameters), not at 2 M. Thus the claim of 2 M parameters contradicts the actual ~0.2 M shown.", "claim": "Peri-midFormer uses only ~0.2 M parameters on the Heartbeat classification task (the lowest of all models), so in ETTh2 long-term forecasting it likewise employs under 1 M parameters and achieves the lowest MSE (~0.35).", "label": true },
{ "paperid": "2409.07355v1", "paper_path": "./SciVer/papers/2409.07355v1.json", "claim_type": "sequential", "item1": "5", "item2": "6(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07355v1_figure_5.png", "item2_path": "./SciVer/images/2409.07355v1_figure_6(a).png", "section": [ "5.3.1" ], "request_id": 53, "origin_statement": "Comb-TA attains the lowest average self-ROUGE-L among conditions (Fig.6a), and because it also exhibits the highest average Spearman correlation (0.67) in Fig.5, increased attribute diversity directly drives evaluation performance.", "perturbed_statement": "ML-TA attains the lowest average self-ROUGE-L among conditions (Fig.6a), and because it also exhibits the highest average Spearman correlation (0.62) in Fig.5, increased attribute diversity directly drives evaluation performance.", "perturbed_explanation": "This is incorrect because Fig.5 shows that Comb-TA—not ML-TA—has the highest average Spearman correlation (0.67 vs. ML-TA’s 0.62). Therefore ML-TA does not hold the top correlation, invalidating the claim.", "claim": "Comb-TA attains the lowest average self-ROUGE-L among conditions (Fig.6a), and because it also exhibits the highest average Spearman correlation (0.67) in Fig.5, increased attribute diversity directly drives evaluation performance.", "label": true },
{ "paperid": "2409.10955v1", "paper_path": "./SciVer/papers/2409.10955v1.json", "claim_type": "sequential", "item1": "1", "item2": "7", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.10955v1-Table1-1.png", "item2_path": "./SciVer/images/2409.10955v1-Table7-1.png", "section": [ "4.3" ], "request_id": 60, "origin_statement": "From Table 1, GPT-4 has 918 popQA questions with indirect evidence; Table 7 shows its intersection of 2- and 3-sentence evidence is 918; thus, every indirect evidence instance for popQA GPT-4 appears in both 2- and 3-sentence sets.", "perturbed_statement": "From Table 1, GPT-4 has 918 popQA questions with indirect evidence; Table 7 shows its intersection of 2- and 3-sentence evidence is 920; thus, GPT-4 has 2 more intersection instances than total indirect evidence.", "perturbed_explanation": "The perturbed statement misreports the intersection count as 920, but the actual value in Table 7 is 918, so you cannot have more intersection instances than total indirect evidence.", "claim": "From Table 1, GPT-4 has 918 popQA questions with indirect evidence; Table 7 shows its intersection of 2- and 3-sentence evidence is 918; thus, every indirect evidence instance for popQA GPT-4 appears in both 2- and 3-sentence sets.", "label": true },
{ "paperid": "2409.13587v1", "paper_path": "./SciVer/papers/2409.13587v1.json", "claim_type": "sequential", "item1": "2", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.13587v1-Table2-1.png", "item2_path": "./SciVer/images/2409.13587v1-Table5-1.png", "section": [ "6.3" ], "request_id": 62, "origin_statement": "For system 20qubits05, Table 2 shows the optimal ham_cutoff increased from 1e-9 to approximately 8.34526e-3, leading QCELS to require 26 iterations instead of 9 with default settings.", "perturbed_statement": "For system 20qubits05, Table 2 shows the optimal ham_cutoff increased from 1e-9 to approximately 4.31098e-4, leading QCELS to require 26 iterations instead of 9 with default settings.", "perturbed_explanation": "This is incorrect because Table 2 (col. D) lists the ham_cutoff for 20qubits05 as 8.34526e-3. The value 4.31098e-4 actually corresponds to system 20qubits02, so the stated change is wrong.", "claim": "For system 20qubits05, Table 2 shows the optimal ham_cutoff increased from 1e-9 to approximately 8.34526e-3, leading QCELS to require 26 iterations instead of 9 with default settings.", "label": true },
{ "paperid": "2409.16400v1", "paper_path": "./SciVer/papers/2409.16400v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16400v1_figure_2.png", "item2_path": "./SciVer/images/2409.16400v1_figure_3.png", "section": [ "4.1.2" ], "request_id": 81, "origin_statement": "Fig.2’s table shows Turla’s 2020 TTP list includes T1036, T1029, T1110, but not T1071. In Fig.3, the kill-chain example’s seventh TTP is T1071 (Application Layer Protocol). Thus, Turla’s 2020 campaign did not employ the sequence’s seventh C2 action.", "perturbed_statement": "Fig.2’s table shows Turla’s 2020 TTP list includes T1071. In Fig.3, the kill-chain example’s seventh TTP is T1071 (Application Layer Protocol). Therefore, Turla’s 2020 campaign employed the sequence’s seventh C2 action.", "perturbed_explanation": "The perturbed claim falsely asserts that Turla’s 2020 entry lists T1071. In the actual Fig.2 table, Turla’s TTP list contains T1036, T1029, T1110, etc., but does not include T1071. Hence Turla could not have employed the seventh TTP from Fig.3.", "claim": "Fig.2’s table shows Turla’s 2020 TTP list includes T1036, T1029, T1110, but not T1071. In Fig.3, the kill-chain example’s seventh TTP is T1071 (Application Layer Protocol). Thus, Turla’s 2020 campaign did not employ the sequence’s seventh C2 action.", "label": true },
{ "paperid": "2410.02052v3", "paper_path": "./SciVer/papers/2410.02052v3.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02052v3_figure_1(a).png", "item2_path": "./SciVer/images/2410.02052v3_figure_1(b).png", "section": [ "4.3" ], "request_id": 103, "origin_statement": "ReAct's constant 28.6% success rate (fig a) vastly exceeds the 3.1% peak of the no-SFT model on unseen tasks with 15 actions (fig b), meaning the baseline reactive approach outperforms the untrained GPT-4o by 25.5%.", "perturbed_statement": "ReAct's constant 35% success rate (fig a) vastly exceeds the 3.1% peak of the no-SFT model on unseen tasks with 15 actions (fig b), meaning the baseline reactive approach outperforms the untrained GPT-4o by 31.9%.", "perturbed_explanation": "Figure 1a actually shows the ReAct baseline at 28.6%, not 35%, so the starting premise is incorrect and the computed performance gap is invalid.", "claim": "ReAct's constant 28.6% success rate (fig a) vastly exceeds the 3.1% peak of the no-SFT model on unseen tasks with 15 actions (fig b), meaning the baseline reactive approach outperforms the untrained GPT-4o by 25.5%.", "label": true },
{ "paperid": "2409.05061v2", "paper_path": "./SciVer/papers/2409.05061v2.json", "claim_type": "sequential", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05061v2_figure_3.png", "item2_path": "./SciVer/images/2409.05061v2-Table2-1.png", "section": [ "4.1" ], "request_id": 130, "origin_statement": "Because Table 2 indicates that VFA features are generated from sampled pickups and Figure 3 places feature design before VFA training, the resulting VFA weights inherently incorporate stochastic pickup behavior.", "perturbed_statement": "Because Table 2 indicates that VFA features are generated using maximum storage time and Figure 3 places feature design before VFA training, the resulting VFA weights inherently incorporate stochastic pickup behavior.", "perturbed_explanation": "Table 2 explicitly states that VFA features use sampled pickups—not maximum storage time. By mislabeling the pickup assumption for VFA features, the perturbed premise contradicts the context, so the VFA weights would not actually reflect stochastic pickup behavior.", "claim": "Because Table 2 indicates that VFA features are generated from sampled pickups and Figure 3 places feature design before VFA training, the resulting VFA weights inherently incorporate stochastic pickup behavior.", "label": true },
{ "paperid": "2409.16016v2", "paper_path": "./SciVer/papers/2409.16016v2.json", "claim_type": "sequential", "item1": "1", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16016v2_figure_1.png", "item2_path": "./SciVer/images/2409.16016v2_figure_4.png", "section": [ "3.1" ], "request_id": 133, "origin_statement": "From Fig. 1’s device distribution, 3D OCT-2000 FA Plus has about 20 CFIs annotated for vessel segmentation (Step 1) and about 30 for A/V segmentation (Step 2), implying roughly 10 CFIs have A/V labels but lack vessel annotations (Step 3).", "perturbed_statement": "From Fig. 1’s device distribution, 3D OCT-2000 FA Plus has about 60 CFIs annotated for vessel segmentation and about 30 for A/V segmentation, implying roughly 30 CFIs have vessel labels but lack A/V annotations.", "perturbed_explanation": "The perturbation is wrong because the bar for 3D OCT-2000 FA Plus in the vessel segmentation chart is approximately 20 CFIs, not 60. Using 60 instead of the actual ~20 contradicts the device count shown in Fig. 1, invalidating the inference of 30 missing A/V annotations.", "claim": "From Fig. 1’s device distribution, 3D OCT-2000 FA Plus has about 20 CFIs annotated for vessel segmentation (Step 1) and about 30 for A/V segmentation (Step 2), implying roughly 10 CFIs have A/V labels but lack vessel annotations (Step 3).", "label": true },
{ "paperid": "2410.04203v1", "paper_path": "./SciVer/papers/2410.04203v1.json", "claim_type": "sequential", "item1": "1", "item2": "7", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04203v1-Table1-1.png", "item2_path": "./SciVer/images/2410.04203v1-Table7-1.png", "section": [ "2.2" ], "request_id": 135, "origin_statement": "Since Table1 shows that CPO lacks length normalization, and Table7's CPO objective omits any division by output length |y|, CPO will favor longer responses due to unnormalized log-probabilities.", "perturbed_statement": "Since Table1 shows that CPO includes length normalization, and Table7's CPO objective divides log-probabilities by output length |y|, CPO avoids length bias by normalizing scores across responses.", "perturbed_explanation": "This is wrong because in Table 1 the entry under \"Length Norm.\" for CPO is marked × (no length normalization), and in Table 7 the CPO objective does not include any term dividing by |y|—there is no per-token normalization in its formula.", "claim": "Since Table1 shows that CPO lacks length normalization, and Table7's CPO objective omits any division by output length |y|, CPO will favor longer responses due to unnormalized log-probabilities.", "label": true },
{ "paperid": "2410.21131v1", "paper_path": "./SciVer/papers/2410.21131v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.21131v1-Table1-1.png", "item2_path": "./SciVer/images/2410.21131v1-Table2-1.png", "section": [ "3.3" ], "request_id": 138, "origin_statement": "Given Complexity is rated -2 to 2 with 0 as ideal, and the mean Complexity score was -0.26, participants perceived explanations as slightly too simple.", "perturbed_statement": "Given Complexity is rated -1 to 1 with 0 as ideal, and the mean Complexity score was +0.26, participants perceived explanations as slightly too complex.", "perturbed_explanation": "Table 1 specifies Complexity on a –2 to 2 scale (with 0 as desired), and Table 2 shows a mean of –0.26. The perturbation incorrectly uses a –1 to 1 scale and +0.26 mean, contradicting both the actual scale and reported average.", "claim": "Given Complexity is rated -2 to 2 with 0 as ideal, and the mean Complexity score was -0.26, participants perceived explanations as slightly too simple.", "label": true },
{ "paperid": "2409.00718v1", "paper_path": "./SciVer/papers/2409.00718v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.00718v1-Table2-1.png", "item2_path": "./SciVer/images/2409.00718v1-Table3-1.png", "section": [ "3.3" ], "request_id": 147, "origin_statement": "MCGAEc achieves 0.972 specificity on the training set (Table 2) and 0.973 specificity on the test set (Table 3), demonstrating stable false-positive discrimination across datasets.", "perturbed_statement": "MCGAEc achieves 0.972 specificity on the training set (Table 2) and 0.972 specificity on the test set (Table 3), demonstrating stable false-positive discrimination across datasets.", "perturbed_explanation": "The perturbed claim wrongly states the test-set specificity as 0.972. According to Table 3, the correct specificity for MCGAEc on the test set is 0.973, not 0.972, so the perturbed statement contradicts the reported value.", "claim": "MCGAEc achieves 0.972 specificity on the training set (Table 2) and 0.973 specificity on the test set (Table 3), demonstrating stable false-positive discrimination across datasets.", "label": true },
{ "paperid": "2410.07196v1", "paper_path": "./SciVer/papers/2410.07196v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.07196v1_figure_4.png", "item2_path": "./SciVer/images/2410.07196v1_figure_5.png", "section": [ "3.2" ], "request_id": 148, "origin_statement": "In Fig.4’s gamma band, a single sample shows a ~1800 amplitude spike at 20 Hz; Fig.5’s Subj37_S02_Exp3.mat correlation heatmap is uniformly red, indicating high inter-channel correlation in that sample.", "perturbed_statement": "In Fig.4’s gamma band, a single sample shows a ~200 amplitude spike at 20 Hz; Fig.5’s Subj37_S02_Exp3.mat correlation heatmap is uniformly red, indicating high inter-channel correlation in that sample.", "perturbed_explanation": "The gamma band plot’s y-axis runs up to 1800 and the spike clearly reaches near that maximum (~1800). It does not peak at ~200, so the stated amplitude contradicts the actual scale in Fig.4.", "claim": "In Fig.4’s gamma band, a single sample shows a ~1800 amplitude spike at 20 Hz; Fig.5’s Subj37_S02_Exp3.mat correlation heatmap is uniformly red, indicating high inter-channel correlation in that sample.", "label": true },
{ "paperid": "2411.10213v1", "paper_path": "./SciVer/papers/2411.10213v1.json", "claim_type": "sequential", "item1": "1(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.10213v1_figure_1(b).png", "item2_path": "./SciVer/images/2411.10213v1-Table2-1.png", "section": [ "4.1" ], "request_id": 152, "origin_statement": "Figure 1 shows MarsCode Agent resolves 239 cases, more than any other tool. Table 2 assigns 'Stacktrace' a maximum score of 10 for bug location quality. This implies MarsCode Agent’s superior fault localization performance likely leverages stacktrace information more effectively than other tools.", "perturbed_statement": "Figure 1 shows MarsCode Agent resolves 239 cases, more than any other tool. Table 2 assigns 'Keyword' a maximum score of 10 for bug location quality. This implies MarsCode Agent’s superior fault localization performance likely leverages keyword-based bug locations more effectively than other tools.", "perturbed_explanation": "The perturbation incorrectly states that 'Keyword' receives the highest score in Table 2. In reality, Table 2 gives 'Stacktrace' a score of 10 (highest) and 'Keyword' only 6.67. Hence the premise about 'Keyword' being the top bug-location indicator contradicts the table.", "claim": "Figure 1 shows MarsCode Agent resolves 239 cases, more than any other tool. Table 2 assigns 'Stacktrace' a maximum score of 10 for bug location quality. This implies MarsCode Agent’s superior fault localization performance likely leverages stacktrace information more effectively than other tools.", "label": true },
{ "paperid": "2410.18529v2", "paper_path": "./SciVer/papers/2410.18529v2.json", "claim_type": "sequential", "item1": "6(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.18529v2_figure_6(a).png", "item2_path": "./SciVer/images/2410.18529v2-Table3-1.png", "section": [ "4.3" ], "request_id": 154, "origin_statement": "The ALCHEMY example displays five beakers; SCONE supports fewer than six entities; therefore SCONE can represent all five beakers in that scenario.", "perturbed_statement": "The ALCHEMY example displays six beakers; SCONE supports fewer than six entities; therefore SCONE can represent all six beakers in that scenario.", "perturbed_explanation": "The perturbed statement is wrong because the ALCHEMY example actually contains only five beakers (not six), and SCONE allows at most five entities (fewer than six), so it cannot model six beakers.", "claim": "The ALCHEMY example displays five beakers; SCONE supports fewer than six entities; therefore SCONE can represent all five beakers in that scenario.", "label": true },
{ "paperid": "2411.03025v1", "paper_path": "./SciVer/papers/2411.03025v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.03025v1-Table1-1.png", "item2_path": "./SciVer/images/2411.03025v1-Table2-1.png", "section": [ "5.2" ], "request_id": 162, "origin_statement": "DA-MoE improves GIN’s IMDB-M accuracy by 2.72% (Table 1), and reduces RMSE on ogbg-molfreesolv by 9.06% (Table 2), showing DA-MoE yields larger relative gains on molecular regression tasks than on social graph classification.", "perturbed_statement": "DA-MoE improves GIN’s IMDB-M accuracy by 0.57% (Table 1), and reduces RMSE on ogbg-molfreesolv by 3.63% (Table 2), showing DA-MoE yields smaller gains on molecular regression tasks than on social graph classification.", "perturbed_explanation": "The IMDB-M improvement is actually 2.72% (not 0.57%), as shown under GIN+DA-MoE in Table 1. The 3.63% figure is the classification gain on ogbg-molbbbp, not the 9.06% RMSE reduction on ogbg-molfreesolv (Table 2).", "claim": "DA-MoE improves GIN’s IMDB-M accuracy by 2.72% (Table 1), and reduces RMSE on ogbg-molfreesolv by 9.06% (Table 2), showing DA-MoE yields larger relative gains on molecular regression tasks than on social graph classification.", "label": true },
{ "paperid": "2411.15865v1", "paper_path": "./SciVer/papers/2411.15865v1.json", "claim_type": "sequential", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15865v1_figure_7.png", "item2_path": "./SciVer/images/2411.15865v1_figure_8.png", "section": [ "4.2.2" ], "request_id": 180, "origin_statement": "Approximately 103.5 of the 104 target |0⟩ qubits (104×99.520%) and 105.5 of the 106 target |1⟩ qubits (106×99.628%) were correctly measured by Bob.", "perturbed_statement": "Approximately 105.5 of the 106 target |0⟩ qubits (106×99.520%) and 103.5 of the 104 target |1⟩ qubits (104×99.628%) were correctly measured by Bob.", "perturbed_explanation": "The perturbed claim swaps the qubit counts and fidelities: in the actual experiment 104 qubits were teleported for |0⟩ with 99.520% fidelity and 106 qubits for |1⟩ with 99.628%. Applying 99.520% to 106 qubits and 99.628% to 104 qubits contradicts the context provided by Fig. 8 and Section IV.2.2.", "claim": "Approximately 103.5 of the 104 target |0⟩ qubits (104×99.520%) and 105.5 of the 106 target |1⟩ qubits (106×99.628%) were correctly measured by Bob.", "label": true },
{ "paperid": "2411.07050v1", "paper_path": "./SciVer/papers/2411.07050v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07050v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.07050v1_figure_2(b).png", "section": [ "3.2" ], "request_id": 184, "origin_statement": "Client3’s median age exceeds Client1’s (Fig2a), and STACH is the most frequent label at Client3 (Fig2b). Therefore, a model trained solely on Client3 data will bias towards detecting STACH, potentially underperforming on NORM-heavy, younger cohorts.", "perturbed_statement": "Client1’s median age exceeds Client3’s (Fig2a), and STACH is the most frequent label at Client1 (Fig2b). Therefore, a model trained solely on Client1 data will bias towards detecting STACH, potentially underperforming on NORM-heavy, younger cohorts.", "perturbed_explanation": "The perturbed statement wrongly claims Client1’s median age exceeds Client3’s, but Fig2a shows Client1’s age distribution is younger than Client3’s. It also misstates that STACH is most frequent at Client1, whereas Fig2b indicates NORM is most common at Client1. These incorrect premises invalidate the conclusion.", "claim": "Client3’s median age exceeds Client1’s (Fig2a), and STACH is the most frequent label at Client3 (Fig2b). Therefore, a model trained solely on Client3 data will bias towards detecting STACH, potentially underperforming on NORM-heavy, younger cohorts.", "label": true },
{ "paperid": "2410.19218v1", "paper_path": "./SciVer/papers/2410.19218v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "5(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19218v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.19218v1_figure_5(a).png", "section": [ "5.3" ], "request_id": 185, "origin_statement": "From Figure 4, at a 25% retention ratio, recall@100 reaches approximately 0.745. Building on this reduced set, Figure 5(a) shows that LLM-based topic filtering at a 50% ratio achieves an NDCG@5 of roughly 0.445, outperforming FFT’s ~0.365 baseline.", "perturbed_statement": "From Figure 4, at a 25% retention ratio, recall@100 only reaches 0.712. Building on this reduced set, Figure 5(a) shows that LLM-based topic filtering at a 50% ratio achieves an NDCG@5 of just 0.375, barely exceeding FFT’s ~0.365 baseline.", "perturbed_explanation": "The recall@100 at 25% retention is actually about 0.745, not 0.712 (Figure 4). Moreover, LLM-based filtering at 50% achieves NDCG@5 of ~0.445, not ~0.375; the latter value misrepresents Figure 5(a) and is near the FFT baseline, making the perturbed claim false.", "claim": "From Figure 4, at a 25% retention ratio, recall@100 reaches approximately 0.745. Building on this reduced set, Figure 5(a) shows that LLM-based topic filtering at a 50% ratio achieves an NDCG@5 of roughly 0.445, outperforming FFT’s ~0.365 baseline.", "label": true },
{ "paperid": "2411.14711v1", "paper_path": "./SciVer/papers/2411.14711v1.json", "claim_type": "sequential", "item1": "6", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.14711v1_figure_6.png", "item2_path": "./SciVer/images/2411.14711v1-Table4-1.png", "section": [ "8.3" ], "request_id": 187, "origin_statement": "Fig. 6 shows HE(all) achieves Hits@50 of 52.58% on ogbl-collab. Table 4 shows GCN(X)+HE attains 56.11% on ogbl-collab. Therefore, adding heuristic encoding to GCN with node attributes outperforms encoding all heuristics alone.", "perturbed_statement": "Fig. 6 shows HE(all) achieves Hits@50 of 52.58% on ogbl-collab. Table 4 shows GCN(X)+HE attains only 50.76% on ogbl-collab. Therefore, integrating heuristic encoding with node attributes underperforms compared to encoding all heuristics.", "perturbed_explanation": "The perturbation misreads the Table 4 entry: GCN(X)+HE actually achieves 56.11% on ogbl-collab, not 50.76% (50.76% is GCN(X) alone). Thus the premise and conclusion in the perturbed statement contradict the true data.", "claim": "Fig. 6 shows HE(all) achieves Hits@50 of 52.58% on ogbl-collab. Table 4 shows GCN(X)+HE attains 56.11% on ogbl-collab. Therefore, adding heuristic encoding to GCN with node attributes outperforms encoding all heuristics alone.", "label": true },
{ "paperid": "2409.07124v1", "paper_path": "./SciVer/papers/2409.07124v1.json", "claim_type": "sequential", "item1": "5", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07124v1_figure_5.png", "item2_path": "./SciVer/images/2409.07124v1_figure_8.png", "section": [ "3.3" ], "request_id": 202, "origin_statement": "Figure5 shows the Perseus arm produces 35% of total N II emission. Fig8 reveals only ≈10% of OB associations exceed 1000 M⊙. Thus the Perseus arm must concentrate most massive clusters to drive its dominant ionisation.", "perturbed_statement": "Figure5 shows the Perseus arm produces 35% of total N II emission. Fig8 reveals ≈40% of OB associations exceed 1000 M⊙. Thus the Perseus arm cannot host enough massive clusters to account for its dominant ionisation.", "perturbed_explanation": "The perturbation misquotes Figure 8: the histogram’s tail above 1000 M⊙ corresponds to only about 10% of OB associations, not 40%. Claiming ≈40% thus contradicts the actual distribution shown in the context.", "claim": "Figure5 shows the Perseus arm produces 35% of total N II emission. Fig8 reveals only ≈10% of OB associations exceed 1000 M⊙. Thus the Perseus arm must concentrate most massive clusters to drive its dominant ionisation.", "label": true },
{ "paperid": "2411.15583v1", "paper_path": "./SciVer/papers/2411.15583v1.json", "claim_type": "sequential", "item1": "3", "item2": "5(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15583v1_figure_3.png", "item2_path": "./SciVer/images/2411.15583v1_figure_5(a).png", "section": [ "4.2" ], "request_id": 207, "origin_statement": "In Modality 2’s forest plot, Cao et al. (2020) show the highest effect size (Hedge’s g=1.16 [0.20, 2.22]) with N=19. Table data confirm their sample of 19 is the second smallest in Modality 2, indicating even small cohorts can yield strong implicit diegetic guidance effects.", "perturbed_statement": "In Modality 2’s forest plot, Cao et al. (2020) show the highest effect size (Hedge’s g=1.16 [0.20, 2.22]) with N=29. Table data confirm their sample of 29 is the median size in Modality 2, indicating that moderate cohorts can yield strong implicit diegetic guidance effects.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 5 (the study table) lists Cao et al. (2020) with N=19 participants for Modality 2, not 29. Thus the claimed sample size of 29 contradicts the actual table entry.", "claim": "In Modality 2’s forest plot, Cao et al. (2020) show the highest effect size (Hedge’s g=1.16 [0.20, 2.22]) with N=19. Table data confirm their sample of 19 is the second smallest in Modality 2, indicating even small cohorts can yield strong implicit diegetic guidance effects.", "label": true },
{ "paperid": "2411.01370v1", "paper_path": "./SciVer/papers/2411.01370v1.json", "claim_type": "sequential", "item1": "5(b)", "item2": "5(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01370v1_figure_5(b).png", "item2_path": "./SciVer/images/2411.01370v1_figure_5(f).png", "section": [ "5.1.3" ], "request_id": 228, "origin_statement": "The SD scenario trees histogram shows an RGAP_LB1 mean of 1.98% (image 1), and the SI trees histogram shows an RGAP_UB mean of 9.82% (image 2). Thus, the analytical lower bound is roughly one-fifth the analytical upper bound under SI trees.", "perturbed_statement": "The SD scenario trees histogram shows an RGAP_LB1 mean of 2.98% (image 1), and the SI trees histogram shows an RGAP_UB mean of 9.82% (image 2). Thus, the analytical lower bound is roughly one-third the analytical upper bound under SI trees.", "perturbed_explanation": "The perturbation misreads the RGAP_LB1 mean as 2.98% when the first histogram explicitly labels its mean as 1.98%. This incorrect premise invalidates the one-third comparison to the 9.82% RGAP_UB value.", "claim": "The SD scenario trees histogram shows an RGAP_LB1 mean of 1.98% (image 1), and the SI trees histogram shows an RGAP_UB mean of 9.82% (image 2). Thus, the analytical lower bound is roughly one-fifth the analytical upper bound under SI trees.", "label": true },
{ "paperid": "2409.07135v1", "paper_path": "./SciVer/papers/2409.07135v1.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07135v1_figure_6.png", "item2_path": "./SciVer/images/2409.07135v1_figure_7.png", "section": [ "4.5" ], "request_id": 258, "origin_statement": "Since nuSVM achieves the minimal inference time (~10⁻⁶ s) at the lowest feature count in Fig6, and Fig7 shows its normalized novelty metric saturates near 1 for all samples, nuSVM rapidly flags even slight deviations as anomalies.", "perturbed_statement": "Since nuSVM has the highest inference time (~10⁻⁴ s) at minimal features in Fig6, and its novelty metric hovers around 0.5 for all samples in Fig7, nuSVM poorly detects anomalies and is too slow for real-time monitoring.", "perturbed_explanation": "The perturbed statement is wrong on two counts. In Fig6, the nuSVM curve (orange) is the fastest (lowest) at about 10⁻⁶ s, not the slowest at 10⁻⁴ s. In Fig7, the nuSVM novelty metric saturates close to 1, not around 0.5 as claimed.", "claim": "Since nuSVM achieves the minimal inference time (~10⁻⁶ s) at the lowest feature count in Fig6, and Fig7 shows its normalized novelty metric saturates near 1 for all samples, nuSVM rapidly flags even slight deviations as anomalies.", "label": true },
{ "paperid": "2409.13441v1", "paper_path": "./SciVer/papers/2409.13441v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13441v1_figure_3(b).png", "item2_path": "./SciVer/images/2409.13441v1-Table1-1.png", "section": [ "5" ], "request_id": 288, "origin_statement": "Figure 3 shows seven LRDs with AGN fraction >80%. Table 1 lists four JWST programs. Thus at least one program must have uncovered two such high-AGN LRDs.", "perturbed_statement": "Figure 3 shows nine LRDs with AGN fraction >80%. Table 1 lists four JWST programs. Thus at least one program must have uncovered three such high-AGN LRDs.", "perturbed_explanation": "The perturbed claim misstates Figure 3: the histogram and summary specify seven LRDs exceed an AGN fraction of 0.8, not nine. With only seven high-AGN sources and four programs, pigeonholing yields at least two per program, not three, so the conclusion is unsupported.", "claim": "Figure 3 shows seven LRDs with AGN fraction >80%. Table 1 lists four JWST programs. Thus at least one program must have uncovered two such high-AGN LRDs.", "label": true },
{ "paperid": "2411.06208v2", "paper_path": "./SciVer/papers/2411.06208v2.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.06208v2_figure_4.png", "item2_path": "./SciVer/images/2411.06208v2-Table3-1.png", "section": [ "6.2" ], "request_id": 290, "origin_statement": "Table 3 shows IOPO achieves 68.9% on IF-M under Qwen2-7B, 1.7% higher than DPO’s 67.2%. Figure 4 indicates DPO* scores about 67% on IF-M. Therefore, IOPO maintains roughly a 2% absolute advantage over DPO* when training with equal token counts.", "perturbed_statement": "Table 3 shows IOPO achieves 68.9% on IF-M under Qwen2-7B. Figure 4 indicates DPO* scores about 69% on IF-M. Therefore, IOPO slightly underperforms DPO* by 0.1% under equal token training.", "perturbed_explanation": "The perturbation misreads DPO*’s IF-M score as 69%. In Figure 4, the DPO* IF-M bar is actually around 67%, not 69%, so IOPO still outperforms DPO* by about 2%, contradicting the perturbed claim.", "claim": "Table 3 shows IOPO achieves 68.9% on IF-M under Qwen2-7B, 1.7% higher than DPO’s 67.2%. Figure 4 indicates DPO* scores about 67% on IF-M. Therefore, IOPO maintains roughly a 2% absolute advantage over DPO* when training with equal token counts.", "label": true },
{ "paperid": "2411.11129v1", "paper_path": "./SciVer/papers/2411.11129v1.json", "claim_type": "sequential", "item1": "3", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.11129v1-Table3-1.png", "item2_path": "./SciVer/images/2411.11129v1-Table5-1.png", "section": [ "3.1" ], "request_id": 303, "origin_statement": "Table 3 lists Ghiara mortar’s diffusion coefficient as 1.95×10⁻² cm²/s, nearly four times Azolo’s 4.8×10⁻³. Table 5 then shows Ghiara’s error E₁ (2.20×10⁻⁴) exceeds Azolo’s E₁ (1.44×10⁻⁴), linking higher diffusion to larger simple-model error.", "perturbed_statement": "Since Table 3 shows Ghiara’s diffusion coefficient (1.95×10⁻² cm²/s) is less than Azolo’s (4.8×10⁻³), Table 5 therefore shows Ghiara’s E₁ (2.20×10⁻⁴) is lower than Azolo’s (1.44×10⁻⁴), confirming lower diffusion yields lower error.", "perturbed_explanation": "The perturbation wrongly states that Ghiara’s diffusion coefficient (1.95×10⁻²) is less than Azolo’s (4.8×10⁻³), but Table 3 clearly gives 1.95×10⁻² cm²/s > 4.8×10⁻³ cm²/s. Consequently, E₁ for Ghiara (2.20×10⁻⁴) is actually higher than Azolo’s (1.44×10⁻⁴) per Table 5, contradicting the claim.", "claim": "Table 3 lists Ghiara mortar’s diffusion coefficient as 1.95×10⁻² cm²/s, nearly four times Azolo’s 4.8×10⁻³. Table 5 then shows Ghiara’s error E₁ (2.20×10⁻⁴) exceeds Azolo’s E₁ (1.44×10⁻⁴), linking higher diffusion to larger simple-model error.", "label": true },
{ "paperid": "2411.03401v1", "paper_path": "./SciVer/papers/2411.03401v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03401v1_figure_2.png", "item2_path": "./SciVer/images/2411.03401v1-Table3-1.png", "section": [ "2.2" ], "request_id": 322, "origin_statement": "Finished 4PB specimens have 5 mm thickness (Fig. 2a). With 30 µm layer height (Table 3), each specimen requires ~167 layers. Scanning velocities ranged over 23 values, so a full velocity sweep for one 4PB build uses ~3833 layers.", "perturbed_statement": "Finished 4PB specimens have 6 mm thickness (Fig. 2a). With 30 µm layer height (Table 3), each specimen requires ~200 layers. Scanning velocities ranged over 23 values, so a full velocity sweep for one 4PB build uses ~4600 layers.", "perturbed_explanation": "Figure 2a shows the finished specimen thickness is 5 mm, not 6 mm. Using 6 mm inflates the layer count; the true layer count is 5 mm / 30 µm ≈ 167 layers, so the 200-layer and 4600-layer figures are incorrect.", "claim": "Finished 4PB specimens have 5 mm thickness (Fig. 2a). With 30 µm layer height (Table 3), each specimen requires ~167 layers. Scanning velocities ranged over 23 values, so a full velocity sweep for one 4PB build uses ~3833 layers.", "label": true },
{ "paperid": "2410.02320v2", "paper_path": "./SciVer/papers/2410.02320v2.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02320v2_figure_5.png", "item2_path": "./SciVer/images/2410.02320v2-Table4-1.png", "section": [ "6.2" ], "request_id": 334, "origin_statement": "Figure 5 shows that on the EnRu training set, SFT reduces preference for post-edits from 51.9% (Base) to 49.6%. Table 4 reveals that SFT nonetheless raises the EnDe training log-probability gap from 0.038 to 0.060, indicating divergent effects of SFT across language pairs.", "perturbed_statement": "Figure 5 shows that on the EnRu training set, SFT increases preference for post-edits from 51.9% (Base) to 52.6%. Table 4 reveals that SFT simultaneously raises the EnDe training log-probability gap from 0.048 to 0.073, implying that SFT uniformly enhances both preference and probability gap across language pairs.", "perturbed_explanation": "The perturbed claim is wrong on two counts: (1) Figure 5 actually shows SFT drops the EnRu train preference from 51.9% down to 49.6%, not increasing it to 52.6%. (2) Table 4 reports the EnDe training gap rising from 0.038 (Base) to 0.060 under SFT, not from 0.048 to 0.073.", "claim": "Figure 5 shows that on the EnRu training set, SFT reduces preference for post-edits from 51.9% (Base) to 49.6%. Table 4 reveals that SFT nonetheless raises the EnDe training log-probability gap from 0.038 to 0.060, indicating divergent effects of SFT across language pairs.", "label": true },
{ "paperid": "2409.06801v1", "paper_path": "./SciVer/papers/2409.06801v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.06801v1-Table5-1.png", "item2_path": "./SciVer/images/2409.06801v1-Table6-1.png", "section": [ "7.1" ], "request_id": 345, "origin_statement": "CT’s State House plans exhibit a 97.6% block‐level no‐offset discrepancy rate (Table 5), yet Table 6 reports CT’s congressional short‐burst ensemble has a mean MMD discrepancy of 0.0 and a 0% discrepancy rate, implying that high block‐unit noise does not induce any MMD errors.", "perturbed_statement": "CT’s State House plans exhibit a 97.6% block‐level no‐offset discrepancy rate (Table 5), yet Table 6 reports CT’s congressional short‐burst ensemble has a mean MMD discrepancy of 0.05 and a 5% discrepancy rate, implying that high block‐unit noise does not induce any MMD errors.", "perturbed_explanation": "The perturbed claim contradicts Table 6, which shows CT’s short‐burst ensemble has a mean MMD discrepancy of 0.0 and a 0% discrepancy rate, not 0.05 or 5%. Thus the assertion of a 5% discrepancy rate is incorrect.", "claim": "CT’s State House plans exhibit a 97.6% block‐level no‐offset discrepancy rate (Table 5), yet Table 6 reports CT’s congressional short‐burst ensemble has a mean MMD discrepancy of 0.0 and a 0% discrepancy rate, implying that high block‐unit noise does not induce any MMD errors.", "label": true },
{ "paperid": "2410.05468v2", "paper_path": "./SciVer/papers/2410.05468v2.json", "claim_type": "sequential", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.05468v2_figure_5.png", "item2_path": "./SciVer/images/2410.05468v2-Table1-1.png", "section": [ "5.3" ], "request_id": 348, "origin_statement": "PH-Dropout's average Spearman correlation on NeRFacto is 0.337 (Table 1). In T&T unbounded settings (Figure 5), 3DGS at 128 views achieves ~0.39 Spearman correlation. Therefore, 3DGS yields stronger UQ-error correlation in unbounded scenes than PH-Dropout on NeRFacto.", "perturbed_statement": "PH-Dropout's average Spearman correlation on NeRFacto is 0.337 (Table 1). In T&T unbounded settings (Figure 5), 2DGS at 256 views achieves a Spearman correlation of ~0.30. Therefore, 2DGS yields stronger UQ-error correlation in unbounded scenes than PH-Dropout on NeRFacto.", "perturbed_explanation": "The premise that 2DGS at 256 views has a Spearman correlation of ~0.30 is incorrect. Figure 5 (T&T Spearman panel) shows 2DGS at 256 views actually reaches about 0.41. Since 0.30 is both misreported and lower than PH-Dropout's 0.337, the conclusion that 2DGS is stronger than PH-Dropout is false.", "claim": "PH-Dropout's average Spearman correlation on NeRFacto is 0.337 (Table 1). In T&T unbounded settings (Figure 5), 3DGS at 128 views achieves ~0.39 Spearman correlation. Therefore, 3DGS yields stronger UQ-error correlation in unbounded scenes than PH-Dropout on NeRFacto.", "label": true },
{ "paperid": "2409.17137v3", "paper_path": "./SciVer/papers/2409.17137v3.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17137v3_figure_6.png", "item2_path": "./SciVer/images/2409.17137v3_figure_7.png", "section": [ "4.3" ], "request_id": 364, "origin_statement": "Fig 6 shows that applying PACE among two networks (M=2) gives the lowest error, and Fig 7 shows 1-shot OxfordPets accuracy peaks at λ=1.0 and σ=0.1; thus, using M=2 with λ=1.0 and σ=0.1 maximizes 1-shot performance.", "perturbed_statement": "Fig 6 shows that applying PACE among four networks (M=4) gives the lowest error, and Fig 7 shows 1-shot OxfordPets accuracy peaks at λ=0.5; thus, using M=4 with λ=0.5 maximizes 1-shot performance.", "perturbed_explanation": "The perturbed statement is wrong because Fig 6 actually reports its lowest error at M=2, not M=4, and Fig 7’s 1-shot heatmap clearly indicates the highest accuracy occurs at λ=1.0 (with σ=0.1), not at λ=0.5.", "claim": "Fig 6 shows that applying PACE among two networks (M=2) gives the lowest error, and Fig 7 shows 1-shot OxfordPets accuracy peaks at λ=1.0 and σ=0.1; thus, using M=2 with λ=1.0 and σ=0.1 maximizes 1-shot performance.", "label": true },
{ "paperid": "2409.13175v1", "paper_path": "./SciVer/papers/2409.13175v1.json", "claim_type": "sequential", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.13175v1_figure_7.png", "item2_path": "./SciVer/images/2409.13175v1_figure_8.png", "section": [ "5.3" ], "request_id": 369, "origin_statement": "From Figure 7, the policy function without penalty (red) stabilizes at 1.0, indicating all requests use real‐time recommendations. Then Figure 8 shows the red RPAF‐TD3(w/o RLA) line yields consistently lower total watch time than the blue (MSE) and yellow (KL) lines across hours.", "perturbed_statement": "In Figure 7, the policy function with penalty (blue) stabilizes at 1.0, forcing all requests through real‐time. Consequently, Figure 8 reveals that the blue RPAF‐TD3‐MSE results in the lowest total watch time at peak hour 13, underperforming both KL and w/o RLA variants.", "perturbed_explanation": "This is incorrect because in Figure 7 the blue curve actually plateaus around 0.75 (not 1.0), while the red curve reaches 1.0. Moreover, in Figure 8 at hour 13 the blue RPAF‐TD3‐MSE line is the highest (~135 000 s), and the red w/o RLA line is the lowest (~131 000 s), contradicting the claim.", "claim": "From Figure 7, the policy function without penalty (red) stabilizes at 1.0, indicating all requests use real‐time recommendations. Then Figure 8 shows the red RPAF‐TD3(w/o RLA) line yields consistently lower total watch time than the blue (MSE) and yellow (KL) lines across hours.", "label": true },
{ "paperid": "2410.21769v1", "paper_path": "./SciVer/papers/2410.21769v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21769v1_figure_5.png", "item2_path": "./SciVer/images/2410.21769v1_figure_6.png", "section": [ "2.5" ], "request_id": 375, "origin_statement": "In the 2H phase (Fig.5), ZrSH has λ=2.68 while ZrSeH has λ=1.48. In the 1T phase (Fig.6), ZrSeH has λ=2.44 and ZrTeH has λ=1.18. Thus λ consistently decreases with X from S→Se→Te for ZrMXH across both phases.", "perturbed_statement": "In the 2H phase (Fig.5), ZrSH has λ=2.68 while ZrSeH has λ=1.48. In the 1T phase (Fig.6), ZrSeH has λ=1.18 and ZrTeH has λ=2.44. Thus λ consistently decreases with X from S→Se→Te for ZrMXH across both phases.", "perturbed_explanation": "The perturbed statement misreports the 1T-phase λ values from Fig.6: ZrSeH actually has λ=2.44 and ZrTeH has λ=1.18. Swapping these values invalidates the claimed monotonic decrease from Se to Te, making the conclusion false.", "claim": "In the 2H phase (Fig.5), ZrSH has λ=2.68 while ZrSeH has λ=1.48. In the 1T phase (Fig.6), ZrSeH has λ=2.44 and ZrTeH has λ=1.18. Thus λ consistently decreases with X from S→Se→Te for ZrMXH across both phases.", "label": true },
{ "paperid": "2409.14940v1", "paper_path": "./SciVer/papers/2409.14940v1.json", "claim_type": "sequential", "item1": "5", "item2": "7", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.14940v1-Table5-1.png", "item2_path": "./SciVer/images/2409.14940v1-Table7-1.png", "section": [ "4.2" ], "request_id": 392, "origin_statement": "From Table 5, PointDP+PST achieves 77.1% mixed accuracy on ModelNet40 using the PCT backbone under adaptive l∞ attacks; Table 7 then reports 79.6% mixed accuracy when adversarial examples are generated on PCT, indicating strong transfer robustness.", "perturbed_statement": "From Table 5, PointDP+PST achieves 87.1% mixed accuracy on ModelNet40 using the PCT backbone under adaptive l∞ attacks; Table 7 then reports 89.6% mixed accuracy when adversarial examples are generated on PCT, indicating exceptionally strong transfer robustness.", "perturbed_explanation": "The perturbed claim is incorrect because Table 5 actually shows 77.1% mixed accuracy (not 87.1%) for PointDP+PST on ModelNet40 with PCT under adaptive l∞ attacks. Likewise, Table 7 reports 79.6% (not 89.6%) mixed accuracy for PCT→PCT transfer.", "claim": "From Table 5, PointDP+PST achieves 77.1% mixed accuracy on ModelNet40 using the PCT backbone under adaptive l∞ attacks; Table 7 then reports 79.6% mixed accuracy when adversarial examples are generated on PCT, indicating strong transfer robustness.", "label": true },
{ "paperid": "2410.05782v1", "paper_path": "./SciVer/papers/2410.05782v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.05782v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.05782v1_figure_3(b).png", "section": [ "5.3" ], "request_id": 435, "origin_statement": "ICoPro’s average highway distance under DiffRand decreases from about 1,150 at 1% noise to 600 at 50%, and similarly in Boxing the average raw reward declines from roughly 66 to 36, indicating performance degrades with rising corrective-action randomness.", "perturbed_statement": "ICoPro’s average highway distance under DiffRand decreases from about 1,150 at 1% noise to 280 at 50%, and similarly in Boxing the average raw reward declines from roughly 66 to 16, indicating performance degrades with rising corrective-action randomness.", "perturbed_explanation": "The perturbed statement is false because the highway plot shows ICoPro’s distance at 50% noise remains around 600, not 280, and the Boxing curve at 50% noise is approximately 36 raw reward, not 16.", "claim": "ICoPro’s average highway distance under DiffRand decreases from about 1,150 at 1% noise to 600 at 50%, and similarly in Boxing the average raw reward declines from roughly 66 to 36, indicating performance degrades with rising corrective-action randomness.", "label": true },
{ "paperid": "2411.03862v1", "paper_path": "./SciVer/papers/2411.03862v1.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03862v1_figure_6.png", "item2_path": "./SciVer/images/2411.03862v1_figure_7.png", "section": [ "4.2" ], "request_id": 438, "origin_statement": "Figure 6’s Noise panel reveals pronounced speckled distortion from Gaussian noise. Figure 7’s single-attack sample (1) also shows similar speckles. Therefore, sample 1 in Figure 7 corresponds to the Gaussian noise attack (intensity 10%).", "perturbed_statement": "Figure 6’s JPEG panel shows pronounced speckled distortion. Figure 7’s single-attack sample (1) also shows similar speckles. Therefore, sample 1 in Figure 7 corresponds to the JPEG compression attack (quality 25).", "perturbed_explanation": "This is incorrect because the Noise panel in Figure 6—not the JPEG panel—exhibits the characteristic speckled artifacts seen in Figure 7’s sample 1. The JPEG panel instead shows blocky compression artifacts, so sample 1 actually matches Gaussian noise, not JPEG compression.", "claim": "Figure 6’s Noise panel reveals pronounced speckled distortion from Gaussian noise. Figure 7’s single-attack sample (1) also shows similar speckles. Therefore, sample 1 in Figure 7 corresponds to the Gaussian noise attack (intensity 10%).", "label": true },
{ "paperid": "2411.00997v1", "paper_path": "./SciVer/papers/2411.00997v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00997v1_figure_3.png", "item2_path": "./SciVer/images/2411.00997v1_figure_4.png", "section": [ "5.1" ], "request_id": 450, "origin_statement": "Figure 3 shows DebiasCLIP’s occupation category has the highest normalized gender entropy (~0.7), indicating minimal gender bias in occupations; then Figure 4 shows DebiasCLIP’s religion category has the lowest normalized race entropy (~0.65), indicating maximal racial bias in religion.", "perturbed_statement": "Figure 3 shows DebiasCLIP’s occupation category has the lowest normalized gender entropy (~0.6), indicating maximal gender bias in occupations; then Figure 4 shows DebiasCLIP’s religion category has the lowest normalized race entropy (~0.65), indicating maximal racial bias in religion.", "perturbed_explanation": "The perturbation is wrong because Figure 3 actually depicts DebiasCLIP’s occupation normalized gender entropy as the highest among models (around 0.7), not the lowest (0.6). Therefore it does not indicate maximal gender bias in occupations.", "claim": "Figure 3 shows DebiasCLIP’s occupation category has the highest normalized gender entropy (~0.7), indicating minimal gender bias in occupations; then Figure 4 shows DebiasCLIP’s religion category has the lowest normalized race entropy (~0.65), indicating maximal racial bias in religion.", "label": true },
{ "paperid": "2410.01463v2", "paper_path": "./SciVer/papers/2410.01463v2.json", "claim_type": "sequential", "item1": "2(c)", "item2": "2(f)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01463v2_figure_2(c).png", "item2_path": "./SciVer/images/2410.01463v2_figure_2(f).png", "section": [ "1" ], "request_id": 456, "origin_statement": "Since LoRA-A value matrices exhibit near-perfect cosine similarity (>0.98) across all 24 layers (Fig. 2c) and LoRA-B query matrices dip below zero at layer 8 (Fig. 2f), we infer that layer 8’s query adaptations diverge most across clients while value adaptations stay uniform.", "perturbed_statement": "Since LoRA-A value matrices exhibit near-perfect cosine similarity (>0.98) across all 24 layers and LoRA-B query matrices reach a moderate positive similarity (0.15) at layer 8, we infer that layer 8’s query adaptations remain moderately aligned across clients.", "perturbed_explanation": "The perturbation incorrectly states that the LoRA-B query cosine similarity at layer 8 is 0.15, but Fig. 2f shows it falls slightly below zero. Because the actual value is negative, the claim of moderate positive alignment at layer 8 is false.", "claim": "Since LoRA-A value matrices exhibit near-perfect cosine similarity (>0.98) across all 24 layers (Fig. 2c) and LoRA-B query matrices dip below zero at layer 8 (Fig.
2f), we infer that layer 8’s query adaptations diverge most across clients while value adaptations stay uniform.", "label": true }, { "paperid": "2411.00653v1", "paper_path": "./SciVer/papers/2411.00653v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00653v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.00653v1_figure_5(b).png", "section": [ "5.5" ], "request_id": 472, "origin_statement": "Fig.5(a) reports a 0.826 Pearson correlation between coherence score and node classification accuracy; using this predictive power and the 0.725 correlation in Fig.5(b) for link prediction, a model with 0.85 coherence score is expected to achieve link prediction accuracy above 0.75.", "perturbed_statement": "Fig.5(a) reports a 0.75 correlation between coherence score and node classification accuracy; combined with a 0.82 correlation in Fig.5(b) for link prediction, a model with 0.8 coherence score will likely achieve link prediction accuracy above 0.8.", "perturbed_explanation": "The perturbed statement misquotes both correlations: Fig.5(a) actually shows a Pearson correlation of 0.826 (not 0.75), and Fig.5(b) shows 0.725 (not 0.82). These exact values are printed in the figure captions, so the premises are contradicted by the context.", "claim": "Fig.5(a) reports a 0.826 Pearson correlation between coherence score and node classification accuracy; using this predictive power and the 0.725 correlation in Fig.5(b) for link prediction, a model with 0.85 coherence score is expected to achieve link prediction accuracy above 0.75.", "label": true }, { "paperid": "2409.07365v1", "paper_path": "./SciVer/papers/2409.07365v1.json", "claim_type": "sequential", "item1": "6(c)", "item2": "6(g)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07365v1_figure_6(c).png", "item2_path": "./SciVer/images/2409.07365v1_figure_6(g).png", "section": [ "3.2", "4.3" ], "request_id": 475, "origin_statement": "EMBA reduces the photometric error by 60%, thereby producing maps where HDR ceiling lights become visible in the refined bottom-row map compared to the blurred GT-rotations top-row map.", "perturbed_statement": "EMBA reduces the photometric error by only 10%, hence the refined map still fails to reveal the HDR ceiling lights, remaining as blurred as the initial GT-rotations map.", "perturbed_explanation": "This is incorrect because EMBA actually achieves about a 50–70% reduction in photometric error (not 10%), and the refined bottom-row map clearly recovers the HDR ceiling lights, as reported in the text and visible in Fig. 6.", "claim": "EMBA reduces the photometric error by 60%, thereby producing maps where HDR ceiling lights become visible in the refined bottom-row map compared to the blurred GT-rotations top-row map.", "label": true }, { "paperid": "2410.16928v2", "paper_path": "./SciVer/papers/2410.16928v2.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.16928v2_figure_3.png", "item2_path": "./SciVer/images/2410.16928v2-Table3-1.png", "section": [ "4.2" ], "request_id": 481, "origin_statement": "Figure 3’s initial embedding tokens for Weather at horizon 96 display distinct colored trajectories, reflecting dataset-specific conditioning. 
In Table 3, removing the init token (#3) increases Weather MSE at horizon 96 from 0.143 to 0.145, showing that initial tokens improve short-term forecasts.", "perturbed_statement": "Figure 3’s initial embedding tokens for Weather at horizon 96 display distinct colored trajectories, reflecting dataset-specific conditioning. In Table 3, removing the init token (#3) reduces Weather MSE at horizon 96 from 0.143 to 0.142, suggesting initial tokens degrade short-term forecasts.", "perturbed_explanation": "This is incorrect because Table 3 actually shows that configuration #3 (without the init token) yields an MSE of 0.145 at horizon 96, not 0.142, indicating removal of the init token worsens rather than improves accuracy.", "claim": "Figure 3’s initial embedding tokens for Weather at horizon 96 display distinct colored trajectories, reflecting dataset-specific conditioning. In Table 3, removing the init token (#3) increases Weather MSE at horizon 96 from 0.143 to 0.145, showing that initial tokens improve short-term forecasts.", "label": true }, { "paperid": "2409.13844v1", "paper_path": "./SciVer/papers/2409.13844v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "8", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13844v1_figure_5(a).png", "item2_path": "./SciVer/images/2409.13844v1-Table8-1.png", "section": [ "3.5" ], "request_id": 482, "origin_statement": "Figure 5 shows the SW potential attains its global minimum in about three CG iterations; consequently, Table 8 reports that optimized SW C₁₁ rises from 151.4 GPa to 153.4 GPa, shrinking its error versus the DFT value of 153.2 GPa from ∼1.8 GPa to ∼0.2 GPa.", "perturbed_statement": "Figure 5 shows the SW potential attains its global minimum in about three CG iterations; consequently, Table 8 reports that optimized SW C₁₁ rises from 151.4 GPa to 155.4 GPa, shrinking its error versus the DFT value of 153.2 GPa from ∼1.8 GPa to ∼0.2 GPa.", "perturbed_explanation": "This is incorrect because Table 8 lists the optimized SW C₁₁ as 153.4054 GPa, not 155.4 GPa. With the true optimized value, the deviation from the DFT reference (153.2141 GPa) is only about 0.19 GPa, not increased or unchanged as the perturbed claim implies.", "claim": "Figure 5 shows the SW potential attains its global minimum in about three CG iterations; consequently, Table 8 reports that optimized SW C₁₁ rises from 151.4 GPa to 153.4 GPa, shrinking its error versus the DFT value of 153.2 GPa from ∼1.8 GPa to ∼0.2 GPa.", "label": true }, { "paperid": "2410.08695v2", "paper_path": "./SciVer/papers/2410.08695v2.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.08695v2_figure_5.png", "item2_path": "./SciVer/images/2410.08695v2_figure_6.png", "section": [ "5.3" ], "request_id": 486, "origin_statement": "Yi_VL’s accuracy in MMBench drops 11.97 points from 84.87% (V1/L3) to 72.9% (V1/L4); GPT-4o’s accuracy in SEEDBench falls 44.5 points from 79.5% (Vanilla) to 35% (V1+V3+L4).", "perturbed_statement": "Yi_VL’s accuracy in MMBench drops 15.17 points from 86.07% (V2/L3) to 70.9% (V1/L4); GPT-4o’s accuracy in SEEDBench falls 31.5 points from 78.6% (Vanilla) to 47.1% (V1+V3+L4).", "perturbed_explanation": "The perturbed claim misreads the heatmap and line chart. Yi_VL’s correct values are 84.87% at V1/L3 (not 86.07% at V2/L3) and 72.9% at V1/L4 (not 70.9%), so the drop is 11.97 points, not 15.17. 
Likewise, GPT-4o’s SEEDBench accuracy is ~79.5% (Vanilla) to 35% (V1+V3+L4), not 78.6% to 47.1%, making the true drop ~44.5 points, not 31.5.", "claim": "Yi_VL’s accuracy in MMBench drops 11.97 points from 84.87% (V1/L3) to 72.9% (V1/L4); GPT-4o’s accuracy in SEEDBench falls 44.5 points from 79.5% (Vanilla) to 35% (V1+V3+L4).", "label": true }, { "paperid": "2411.07954v2", "paper_path": "./SciVer/papers/2411.07954v2.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.07954v2-Table1-1.png", "item2_path": "./SciVer/images/2411.07954v2-Table2-1.png", "section": [ "4.2" ], "request_id": 500, "origin_statement": "AttentionTuner achieves 99.9% success on Hallway (Table 1). When applying memory loss only to the middle-layer heads, success increases to 100%, but applying it to last-layer heads drops success to 89.4% (Table 2), indicating that middle-layer memory loss is more critical for Hallway.", "perturbed_statement": "AttentionTuner achieves 99.9% success on Hallway (Table 1). When applying memory loss only to the middle-layer heads, success drops to 89.4%, but applying it to last-layer heads increases success to 100% (Table 2), indicating that last-layer memory loss is more critical for Hallway.", "perturbed_explanation": "This is wrong because Table 2 shows that the middle-layer all-heads ablation yields 100% success on Hallway, while the last-layer all-heads ablation yields only 89.4%. The perturbed statement swaps these two values, contradicting the actual data.", "claim": "AttentionTuner achieves 99.9% success on Hallway (Table 1). When applying memory loss only to the middle-layer heads, success increases to 100%, but applying it to last-layer heads drops success to 89.4% (Table 2), indicating that middle-layer memory loss is more critical for Hallway.", "label": true }, { "paperid": "2410.17292v1", "paper_path": "./SciVer/papers/2410.17292v1.json", "claim_type": "sequential", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17292v1_figure_2.png", "item2_path": "./SciVer/images/2410.17292v1_figure_4.png", "section": [ "4.2.5", "4.2.3" ], "request_id": 504, "origin_statement": "Since Fig. 2 has ρ₂<ρ₁, the chameleon field φ climbs from φ₁ to φ₂, so Δ₁₂φ>0; Fig. 4 then shows the red ε_rel=10⁻²⁰ contour lies right of the orange ε_rel=10⁻¹⁵, meaning improving precision from 10⁻¹⁵ to 10⁻²⁰ raises the Λ bound by about a decade at M=10⁶ GeV.", "perturbed_statement": "Since Fig. 2 has ρ₂>ρ₁, the chameleon field φ climbs from φ₁ to φ₂, so Δ₁₂φ>0; Fig. 4 then shows the red ε_rel=10⁻²⁰ contour lies left of the orange ε_rel=10⁻¹⁵, meaning improving precision from 10⁻¹⁵ to 10⁻²⁰ lowers the Λ bound by about a decade at M=10⁶ GeV.", "perturbed_explanation": "The perturbation incorrectly states ρ₂>ρ₁, whereas Fig. 2 actually shows ρ₁ (gold box) > ρ₂ (blue box). It also misreads Fig. 4: the red (10⁻²⁰) contour is to the right of the orange (10⁻¹⁵), not to the left. Both errors invert the true shift of the Λ bound.", "claim": "Since Fig. 2 has ρ₂<ρ₁, the chameleon field φ climbs from φ₁ to φ₂, so Δ₁₂φ>0; Fig. 
4 then shows the red ε_rel=10⁻²⁰ contour lies right of the orange ε_rel=10⁻¹⁵, meaning improving precision from 10⁻¹⁵ to 10⁻²⁰ raises the Λ bound by about a decade at M=10⁶ GeV.", "label": true }, { "paperid": "2409.16217v1", "paper_path": "./SciVer/papers/2409.16217v1.json", "claim_type": "sequential", "item1": "1", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.16217v1-Table1-1.png", "item2_path": "./SciVer/images/2409.16217v1-Table4-1.png", "section": [ "5" ], "request_id": 529, "origin_statement": "Under slicing_1, which allocates 41 PRBs to the URLLC slice, UEs satisfy the 5 ms AGV control latency requirement only 62.5% of the time.", "perturbed_statement": "Under slicing_4, which allocates 41 PRBs to the URLLC slice, UEs satisfy the 5 ms AGV control latency requirement 96.4% of the time.", "perturbed_explanation": "The perturbed claim is incorrect because Table 1 shows slicing_4 allocates only 11 URLLC PRBs (not 41), and Table 4 reports a 62.5% success rate for AGV control at 41 PRBs (0.625), not 96.4% (0.964 corresponds to the AR use case at 41 PRBs).", "claim": "Under slicing_1, which allocates 41 PRBs to the URLLC slice, UEs satisfy the 5 ms AGV control latency requirement only 62.5% of the time.", "label": true }, { "paperid": "2410.20597v1", "paper_path": "./SciVer/papers/2410.20597v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20597v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.20597v1_figure_4(b).png", "section": [ "4.3" ], "request_id": 540, "origin_statement": "In Figure 4(a), the original GAT_analysts model has the strongest anti‐correlation with the market at –0.21. Figure 4(b) shows every ablated GAT variant with market correlations above –0.21. Therefore, the original GAT_analysts yields the best diversification of all the studied strategies.", "perturbed_statement": "In Figure 4(a), the original GAT_analysts model has a market correlation of –0.21, and in Figure 4(b) the GAT_edge_delete ablation has an even lower market correlation of –0.31; hence GAT_edge_delete is the most diversifying strategy.", "perturbed_explanation": "The perturbation misstates GAT_edge_delete’s market correlation. Figure 4(b) actually reports GAT_edge_delete at +0.01, not –0.31, so it is not more anti‐correlated than the original GAT_analysts model.", "claim": "In Figure 4(a), the original GAT_analysts model has the strongest anti‐correlation with the market at –0.21. Figure 4(b) shows every ablated GAT variant with market correlations above –0.21. Therefore, the original GAT_analysts yields the best diversification of all the studied strategies.", "label": true }, { "paperid": "2409.03466v1", "paper_path": "./SciVer/papers/2409.03466v1.json", "claim_type": "sequential", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.03466v1_figure_3.png", "item2_path": "./SciVer/images/2409.03466v1-Table2-1.png", "section": [ "3" ], "request_id": 543, "origin_statement": "The planetary radius histogram shows a spike of ≈320 erroneous simulations at 11 R⊕. Table 2 lists 3 593 simulated and 2 677 truncated planetary transits (i.e. 916 removed), so beyond the 320 bug cases, an additional 596 planetary signals were filtered out for other reasons.", "perturbed_statement": "The planetary radius histogram shows a spike of ≈200 erroneous simulations at 11 R⊕. 
Table 2 lists 3 593 simulated and 2 677 truncated planetary transits (i.e. 916 removed), so beyond the 200 bug cases, an additional 716 planetary signals were filtered out for other reasons.", "perturbed_explanation": "The perturbation is wrong because the histogram (Figure 3, top-left) clearly shows about 320 counts in the 11 R⊕ bin, not 200. Using the correct 320 yields 916−320=596 extra removals, not 716.", "claim": "The planetary radius histogram shows a spike of ≈320 erroneous simulations at 11 R⊕. Table 2 lists 3 593 simulated and 2 677 truncated planetary transits (i.e. 916 removed), so beyond the 320 bug cases, an additional 596 planetary signals were filtered out for other reasons.", "label": true }, { "paperid": "2410.20335v1", "paper_path": "./SciVer/papers/2410.20335v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20335v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.20335v1_figure_2(b).png", "section": [ "5.4" ], "request_id": 569, "origin_statement": "Figure (a) shows RUTSVM-CIL achieved the highest sensitivity at ~93%. In Figure (b), RUTSVM-CIL’s sensitivity (~70%) is 5 points below IFUTSVM-ID’s (~75%), indicating RUTSVM-CIL is less sensitive than IFUTSVM-ID in the CN vs MCI scenario.", "perturbed_statement": "Figure (a) shows RUTSVM-CIL achieved a sensitivity of only ~70%. In Figure (b), IFUTSVM-ID’s sensitivity is just ~65%, making RUTSVM-CIL about 5 points more sensitive than IFUTSVM-ID in the CN vs MCI scenario.", "perturbed_explanation": "This is wrong because in Figure (a) RUTSVM-CIL’s sensitivity bar is actually around 93%, not 70%, and in Figure (b) IFUTSVM-ID’s sensitivity is about 75%, not 65%. The misread values contradict the actual graph data.", "claim": "Figure (a) shows RUTSVM-CIL achieved the highest sensitivity at ~93%. In Figure (b), RUTSVM-CIL’s sensitivity (~70%) is 5 points below IFUTSVM-ID’s (~75%), indicating RUTSVM-CIL is less sensitive than IFUTSVM-ID in the CN vs MCI scenario.", "label": true }, { "paperid": "2411.05733v1", "paper_path": "./SciVer/papers/2411.05733v1.json", "claim_type": "sequential", "item1": "2(f)", "item2": "2(h)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05733v1_figure_2(f).png", "item2_path": "./SciVer/images/2411.05733v1_figure_2(h).png", "section": [ "5.2" ], "request_id": 571, "origin_statement": "In the Priv. LogReg plot, the red decision region covers only a small corner of the positive-class ellipse (step 1). The Priv. Weighted FTT plot shows no red region (step 2), so it labels all points negative and misclassifies nearly 100% of minority-class samples (step 3).", "perturbed_statement": "In the Priv. LogReg plot, the red decision region covers over 90% of the positive-class ellipse. Since the Priv. Weighted FTT plot shows no red region, it only misclassifies the few minority samples outside that major ellipse coverage—around 10% false negatives.", "perturbed_explanation": "This is incorrect because in the Priv. LogReg visualization the red decision area clearly occupies only a small corner of the dotted positive-class ellipse (not over 90%). Moreover, the Priv. Weighted FTT decision boundary is entirely blue, so it actually misclassifies nearly all minority points (approaching 100%), not about 10%.", "claim": "In the Priv. LogReg plot, the red decision region covers only a small corner of the positive-class ellipse (step 1). The Priv. 
Weighted FTT plot shows no red region (step 2), so it labels all points negative and misclassifies nearly 100% of minority-class samples (step 3).", "label": true }, { "paperid": "2410.19055v1", "paper_path": "./SciVer/papers/2410.19055v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19055v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.19055v1_figure_2(b).png", "section": [ "4.2" ], "request_id": 579, "origin_statement": "The input terrain map has a small lake in the bottom right. In the ground truth cost embedding, that area is light green (lowest movement cost), so the shortest path goes straight across the lake to minimize overall cost.", "perturbed_statement": "The ground truth cost embedding colors the dense forest in the top-left the lightest shade, indicating lowest movement cost, so the shortest path cuts diagonally through the forest region to reduce path length.", "perturbed_explanation": "This is incorrect because the cost embedding actually shows the forest region in the darkest green shade, indicating high movement cost, not in the lightest shade. Therefore, the shortest path would avoid the forest rather than traverse it.", "claim": "The input terrain map has a small lake in the bottom right. In the ground truth cost embedding, that area is light green (lowest movement cost), so the shortest path goes straight across the lake to minimize overall cost.", "label": true }, { "paperid": "2410.01239v1", "paper_path": "./SciVer/papers/2410.01239v1.json", "claim_type": "sequential", "item1": "3(c)", "item2": "3(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01239v1_figure_3(c).png", "item2_path": "./SciVer/images/2410.01239v1_figure_3(d).png", "section": [ "4.4.1" ], "request_id": 583, "origin_statement": "According to the caption, (a) uses end-to-end training, and image (a) indeed shows a red hotspot centered near (15,15); image (b), from preceding-layer updates, shifts this hotspot to about (20,15), implying that preceding-layer updates push feature activations rightward.", "perturbed_statement": "According to the caption, (a) uses end-to-end training, and image (a) indeed shows a red hotspot centered near (15,15); image (b), from preceding-layer updates, shifts this hotspot to about (10,15), implying that preceding-layer updates push feature activations leftward.", "perturbed_explanation": "The perturbation errs by locating the image (b) hotspot at (10,15) and claiming a leftward shift. 
In reality, image (b)’s red hotspot appears around (20,15), showing a rightward rather than leftward movement relative to (a).", "claim": "According to the caption, (a) uses end-to-end training, and image (a) indeed shows a red hotspot centered near (15,15); image (b), from preceding-layer updates, shifts this hotspot to about (20,15), implying that preceding-layer updates push feature activations rightward.", "label": true }, { "paperid": "2411.09046v1", "paper_path": "./SciVer/papers/2411.09046v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.09046v1-Table2-1.png", "item2_path": "./SciVer/images/2411.09046v1-Table3-1.png", "section": [ "3.2.1" ], "request_id": 593, "origin_statement": "Case 4 adopts weak coupling and Δt=2.5e-4 s (Table 2), leading to a relative wall-clock time of 20.56% (Table 3), while the mean luminal area RMSRE remains below 1e-4, so weak coupling preserves accuracy with a fourfold speed-up.", "perturbed_statement": "Case 4 adopts strong coupling and Δt=2e-4 s (Table 2), leading to a relative wall-clock time of 53.35% (Table 3), while the mean luminal area RMSRE remains below 1e-4, so strong coupling preserves accuracy with a twofold speed-up.", "perturbed_explanation": "The perturbed claim misstates case 4’s parameters and timings. Table 2 shows case 4 uses weak coupling with Δt=2.5e-4 s, not strong coupling and 2e-4 s. Moreover, the 53.35% relative WCT corresponds to case 3 (Table 3), whereas case 4’s actual WCT is 20.56%.", "claim": "Case 4 adopts weak coupling and Δt=2.5e-4 s (Table 2), leading to a relative wall-clock time of 20.56% (Table 3), while the mean luminal area RMSRE remains below 1e-4, so weak coupling preserves accuracy with a fourfold speed-up.", "label": true }, { "paperid": "2409.07770v1", "paper_path": "./SciVer/papers/2409.07770v1.json", "claim_type": "sequential", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.07770v1_figure_5.png", "item2_path": "./SciVer/images/2409.07770v1-Table3-1.png", "section": [ "5.1" ], "request_id": 595, "origin_statement": "For VoxCeleb2 with HuBERT, figure 5 shows avg. pool FRR* highest; table 3 lists its EER* as 32.72%, versus our Uni.pool EER* at 2.13%, yielding a 30.59-point EER* reduction.", "perturbed_statement": "For VoxCeleb2 with HuBERT, figure 5 shows avg. pool FRR* highest; table 3 lists its EER* as 20.72%, versus our Uni.pool EER* at 2.13%, yielding an 18.59-point EER* reduction.", "perturbed_explanation": "Table 3 actually reports the avg. pool EER* for HuBERT on VoxCeleb2 as 32.72%, not 20.72%. Therefore, the claimed 18.59-point reduction is incorrect; the true reduction is about 30.59 points.", "claim": "For VoxCeleb2 with HuBERT, figure 5 shows avg. 
pool FRR* highest; table 3 lists its EER* as 32.72%, versus our Uni.pool EER* at 2.13%, yielding a 30.59-point EER* reduction.", "label": true }, { "paperid": "2410.18555v1", "paper_path": "./SciVer/papers/2410.18555v1.json", "claim_type": "sequential", "item1": "8(a)", "item2": "8(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.18555v1_figure_8(a).png", "item2_path": "./SciVer/images/2410.18555v1_figure_8(c).png", "section": [ "4.3.2" ], "request_id": 599, "origin_statement": "The baseline model’s correct expression rate drops from 11% at stroke counts above 30 to 7% at symbol counts above 25, indicating a 4% performance gap in the most complex expressions across metrics.", "perturbed_statement": "The baseline model’s correct expression rate drops from 14% at stroke counts above 30 to 17% at symbol counts above 25, indicating a 3% performance gap in the most complex expressions across metrics.", "perturbed_explanation": "This is incorrect because the charts show the baseline rate at stroke counts above 30 is actually 11%, not 14%, and at symbol counts above 25 it is 7%, not 17%.", "claim": "The baseline model’s correct expression rate drops from 11% at stroke counts above 30 to 7% at symbol counts above 25, indicating a 4% performance gap in the most complex expressions across metrics.", "label": true }, { "paperid": "2410.21259v2", "paper_path": "./SciVer/papers/2410.21259v2.json", "claim_type": "sequential", "item1": "7", "item2": "13", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21259v2_figure_7.png", "item2_path": "./SciVer/images/2410.21259v2_figure_13.png", "section": [ "4.3" ], "request_id": 610, "origin_statement": "Figure 7 shows Qwen2-VL’s Hard–Easy drop in Spatial tasks is about 26%; thus its hard-level Spatial accuracy falls by roughly 26%, corresponding to around 7 incorrect out of 25 in Figure 13.", "perturbed_statement": "Figure 7 shows Qwen2-VL’s Hard–Easy drop in Spatial tasks is about 18%; thus its hard-level Spatial accuracy falls by roughly 18%, corresponding to around 4 incorrect out of 25 in Figure 13.", "perturbed_explanation": "The perturbation is wrong because in Figure 7 the blue (Hard–Easy) bar for Qwen2-VL under Spatial is approximately –26%, not –18%. 
Accordingly, its error rate at the hard Spatial level in Figure 13 is about 7 wrong answers, not 4.", "claim": "Figure 7 shows Qwen2-VL’s Hard–Easy drop in Spatial tasks is about 26%; thus its hard-level Spatial accuracy falls by roughly 26%, corresponding to around 7 incorrect out of 25 in Figure 13.", "label": true }, { "paperid": "2410.13246v1", "paper_path": "./SciVer/papers/2410.13246v1.json", "claim_type": "sequential", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13246v1_figure_4.png", "item2_path": "./SciVer/images/2410.13246v1-Table1-1.png", "section": [ "5.2" ], "request_id": 618, "origin_statement": "Figure 4 shows that Mistral-7B-Instruct’s atomic-level weighted-average calibration score on WildHallu is 0.58, and Table 1 reports its Gen-Binary ECE on WildHallu as 12.7%, indicating that even the best fusion strategy yields errors well above the 1% calibration target.", "perturbed_statement": "Figure 4 shows that Mistral-7B-Instruct’s atomic-level weighted-average calibration score on WildHallu is 0.58, and Table 1 reports its Gen-Binary ECE on WildHallu as 0.80%, indicating that even the best fusion strategy yields errors well above the 1% calibration target.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 actually lists the Gen-Binary ECE for Mistral-7B-Instruct on WildHallu as 12.7%, not 0.80%. Thus the claim of 0.80% ECE contradicts the context.", "claim": "Figure 4 shows that Mistral-7B-Instruct’s atomic-level weighted-average calibration score on WildHallu is 0.58, and Table 1 reports its Gen-Binary ECE on WildHallu as 12.7%, indicating that even the best fusion strategy yields errors well above the 1% calibration target.", "label": true }, { "paperid": "2409.11395v1", "paper_path": "./SciVer/papers/2409.11395v1.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11395v1_figure_1.png", "item2_path": "./SciVer/images/2409.11395v1-Table1-1.png", "section": [ "3.3" ], "request_id": 630, "origin_statement": "The spectroscopic light curves in Fig. 1 show ingress at –0.05 days and egress at +0.05 days (0.10 days total). Table 1 gives an orbital period of 2.218575 days, so the eclipse spans ≃4.5% of the orbit.", "perturbed_statement": "The spectroscopic light curves in Fig. 1 show ingress at –0.06 days and egress at +0.06 days (0.12 days total). Table 1 gives an orbital period of 2.218575 days, so the eclipse spans ≃5.4% of the orbit.", "perturbed_explanation": "Fig. 1’s model curves actually mark ingress and egress at ±0.05 days, not ±0.06 days, so the eclipse duration is 0.10 days rather than 0.12 days. Hence the true fraction is ≃4.5%, not 5.4%.", "claim": "The spectroscopic light curves in Fig. 1 show ingress at –0.05 days and egress at +0.05 days (0.10 days total). 
Table 1 gives an orbital period of 2.218575 days, so the eclipse spans ≃4.5% of the orbit.", "label": true }, { "paperid": "2411.13919v1", "paper_path": "./SciVer/papers/2411.13919v1.json", "claim_type": "sequential", "item1": "6", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.13919v1_figure_6.png", "item2_path": "./SciVer/images/2411.13919v1-Table1-1.png", "section": [ "3.3" ], "request_id": 632, "origin_statement": "Figure 6 shows HDBSCAN clusters events into six distinct temporal segments with minimal overlap; Table 1 then confirms HDBSCAN achieved the highest ARI (0.55) and NMI (0.64), validating its superior separation quality among algorithms.", "perturbed_statement": "Figure 6 shows HDBSCAN clusters events into six distinct temporal segments with minimal overlap; Table 1 then confirms HDBSCAN achieved the highest ARI (0.64) and NMI (0.55), validating its superior separation quality among algorithms.", "perturbed_explanation": "The perturbed statement swaps HDBSCAN’s ARI and NMI values. According to Table 1, HDBSCAN’s ARI is actually 0.55 (not 0.64) and its NMI is 0.64 (not 0.55), so the claim contradicts the presented metrics.", "claim": "Figure 6 shows HDBSCAN clusters events into six distinct temporal segments with minimal overlap; Table 1 then confirms HDBSCAN achieved the highest ARI (0.55) and NMI (0.64), validating its superior separation quality among algorithms.", "label": true }, { "paperid": "2409.01393v1", "paper_path": "./SciVer/papers/2409.01393v1.json", "claim_type": "sequential", "item1": "2", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01393v1_figure_2.png", "item2_path": "./SciVer/images/2409.01393v1_figure_5.png", "section": [ "3.3" ], "request_id": 634, "origin_statement": "Figure 2(c) shows the neutron gate accepting pulses up to ~228 keVee, but neutrons of 285 keV (as in Fig 5(a)) only produce ≤73 keVee. Therefore, the counts above 80 keVee in the blue histogram of Fig 5(a) must be background rather than genuine 22Ne(α,n)25Mg events.", "perturbed_statement": "Figure 2(c) shows the neutron gate accepting pulses only up to ~100 keVee, but neutrons of 285 keV produce ≤73 keVee. Therefore, the counts above 80 keVee in the blue histogram of Fig 5(a) must be background.", "perturbed_explanation": "This statement is incorrect because Fig 2(c)’s red dashed neutron gate clearly extends to about 228 keVee on the light‐output axis, not 100 keVee. Thus the premise that the gate cuts off at 100 keVee contradicts the actual gate boundary in the first image.", "claim": "Figure 2(c) shows the neutron gate accepting pulses up to ~228 keVee, but neutrons of 285 keV (as in Fig 5(a)) only produce ≤73 keVee. 
Therefore, the counts above 80 keVee in the blue histogram of Fig 5(a) must be background rather than genuine 22Ne(α,n)25Mg events.", "label": true }, { "paperid": "2411.03940v1", "paper_path": "./SciVer/papers/2411.03940v1.json", "claim_type": "sequential", "item1": "2", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03940v1_figure_2.png", "item2_path": "./SciVer/images/2411.03940v1_figure_8.png", "section": [ "4.2" ], "request_id": 647, "origin_statement": "The s3 shear mode exhibits three zero crossings within the crust in both full GR and the Cowling approximation, whereas the i1 interface mode’s radial displacement shows a single node only in full GR and remains node-free in Cowling.", "perturbed_statement": "The s3 shear mode exhibits four zero crossings within the crust in both full GR and the Cowling approximation, and the i1 interface mode’s radial displacement shows a single node in both full GR and Cowling.", "perturbed_explanation": "This is incorrect because Fig. 2 demonstrates that the shear mode sₙ has exactly n zero crossings, so s₃ must have three nodes, not four. Moreover, Fig. 8 clearly shows that the i₁ interface mode’s radial displacement changes sign (node) only in full GR and does not change sign in the Cowling approximation.", "claim": "The s3 shear mode exhibits three zero crossings within the crust in both full GR and the Cowling approximation, whereas the i1 interface mode’s radial displacement shows a single node only in full GR and remains node-free in Cowling.", "label": true }, { "paperid": "2411.02653v1", "paper_path": "./SciVer/papers/2411.02653v1.json", "claim_type": "sequential", "item1": "9", "item2": "10", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02653v1_figure_9.png", "item2_path": "./SciVer/images/2411.02653v1_figure_10.png", "section": [ "5.2" ], "request_id": 678, "origin_statement": "Figure 9 shows Phaethon’s subsolar temperature exceeds 400 K. This high heating induces strong Yarkovsky acceleration. Consequently, in Figure 10, the semi-major axis under the DeepONet model decreases by roughly 0.009 au over 30 000 years, exceeding the ~0.006 au inward drift predicted by the linear model.", "perturbed_statement": "Figure 9 shows Phaethon’s subsolar temperature exceeds 200 K. This moderate heating induces weak Yarkovsky acceleration. Consequently, in Figure 10, the semi-major axis under the DeepONet model increases by roughly 0.009 au over 30 000 years, exceeding the ~0.006 au outward drift of the linear model.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 9’s color bar and surface map show temperatures above 400 K at Phaethon’s subsolar point, not just 200 K. Moreover, Figure 10’s Δa panel for DeepONet clearly shows a negative change (inward migration), not an increase (outward drift) of 0.009 au.", "claim": "Figure 9 shows Phaethon’s subsolar temperature exceeds 400 K. This high heating induces strong Yarkovsky acceleration. 
Consequently, in Figure 10, the semi-major axis under the DeepONet model decreases by roughly 0.009 au over 30 000 years, exceeding the ~0.006 au inward drift predicted by the linear model.", "label": true }, { "paperid": "2410.23511v1", "paper_path": "./SciVer/papers/2410.23511v1.json", "claim_type": "sequential", "item1": "6", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23511v1_figure_6.png", "item2_path": "./SciVer/images/2410.23511v1-Table4-1.png", "section": [ "6.1.3" ], "request_id": 719, "origin_statement": "Figure 6 shows 0-shot DyPlan selects retrieval about 97% of the time; Table 4 lists #R = 0.97 for 0-shot DyPlan, confirming it issues nearly one retrieval call per question.", "perturbed_statement": "Figure 6 shows 0-shot DyPlan selects direct reasoning about 97% of the time; Table 4 lists #R = 0.97 for 0-shot DyPlan, confirming it issues nearly one retrieval call per question.", "perturbed_explanation": "The perturbed claim misreads Figure 6: the blue ‘Direct’ bar for 0-shot DyPlan is only about 3%, not 97%. In fact, retrieval (green) is ~97%, so it is wrong to state direct reasoning is selected 97% of the time.", "claim": "Figure 6 shows 0-shot DyPlan selects retrieval about 97% of the time; Table 4 lists #R = 0.97 for 0-shot DyPlan, confirming it issues nearly one retrieval call per question.", "label": true }, { "paperid": "2410.16908v1", "paper_path": "./SciVer/papers/2410.16908v1.json", "claim_type": "sequential", "item1": "2(b)", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.16908v1_figure_2(b).png", "item2_path": "./SciVer/images/2410.16908v1_figure_3.png", "section": [ "4.2" ], "request_id": 731, "origin_statement": "Figure 2 shows that all capsules in the 7-layer CapsNet have zero activations (100% dead). Figure 3 shows the 6-layer CapsNet’s dead capsule rate rising from ~15% to ~26% while accuracy stays ~76%. Therefore, adding the 7th layer increases dead capsules to 100% and drops accuracy to 10%.", "perturbed_statement": "Figure 2 shows that only about 50% of capsules in the 7-layer CapsNet vanish, while Figure 3 shows the 6-layer CapsNet achieves 86% validation accuracy with a 15% initial dead capsule rate. Thus adding the 7th layer halves dead capsules yet lowers accuracy from 86% to 10%.", "perturbed_explanation": "In reality, Figure 2 indicates all capsule activations for the 7-layer model are zero (100% dead), not 50%. Also, Figure 3 reports the 6-layer model’s accuracy plateau at ~76%, not 86%. These errors invalidate the perturbed conclusion.", "claim": "Figure 2 shows that all capsules in the 7-layer CapsNet have zero activations (100% dead). Figure 3 shows the 6-layer CapsNet’s dead capsule rate rising from ~15% to ~26% while accuracy stays ~76%. 
Therefore, adding the 7th layer increases dead capsules to 100% and drops accuracy to 10%.", "label": true }, { "paperid": "2409.10502v1", "paper_path": "./SciVer/papers/2409.10502v1.json", "claim_type": "sequential", "item1": "8(a)", "item2": "8(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10502v1_figure_8(a).png", "item2_path": "./SciVer/images/2409.10502v1_figure_8(b).png", "section": [ "4.3" ], "request_id": 743, "origin_statement": "In the bottom-left puzzle state, the green cell is the only empty in its 3×3 block, so block constraint yields its value; the solution confirms it as 1.", "perturbed_statement": "In the bottom-left puzzle state, the green cell is the only empty in its column, so column constraint yields its value; the solution confirms it as 2.", "perturbed_explanation": "This is incorrect because in the left image that green cell shares its column with other empty cells (so it isn’t uniquely determined by the column), and the right solution image shows its value is 1, not 2.", "claim": "In the bottom-left puzzle state, the green cell is the only empty in its 3×3 block, so block constraint yields its value; the solution confirms it as 1.", "label": true }, { "paperid": "2411.17927v1", "paper_path": "./SciVer/papers/2411.17927v1.json", "claim_type": "sequential", "item1": "9", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17927v1_figure_9.png", "item2_path": "./SciVer/images/2411.17927v1-Table3-1.png", "section": [ "5.2" ], "request_id": 748, "origin_statement": "Fig.9 shows prompt1’s BLEU falls to around 0.07 at 1B parameters (Step 1). Table 3 reports prompt1 has the highest BLEU RMSD of 0.0269 (Step 2). Thus, prompt1 exhibits the greatest BLEU variability across model scales.", "perturbed_statement": "Fig.9 shows prompt1’s BLEU remains at about 0.15 at 1B parameters. Table 3 reports prompt2 has the highest BLEU RMSD of 0.0245. Thus, prompt2 exhibits the greatest BLEU variability across model scales.", "perturbed_explanation": "This is wrong because Fig.9 actually plots prompt1’s BLEU at roughly 0.07 (not 0.15) for the 1B parameter model. Moreover, Table 3 shows prompt1’s BLEU RMSD is 0.0269—higher than prompt2’s 0.0245—so prompt2 does not have the greatest BLEU variability.", "claim": "Fig.9 shows prompt1’s BLEU falls to around 0.07 at 1B parameters (Step 1). Table 3 reports prompt1 has the highest BLEU RMSD of 0.0269 (Step 2). 
Thus, prompt1 exhibits the greatest BLEU variability across model scales.", "label": true }, { "paperid": "2411.06214v1", "paper_path": "./SciVer/papers/2411.06214v1.json", "claim_type": "sequential", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06214v1_figure_3.png", "item2_path": "./SciVer/images/2411.06214v1_figure_5.png", "section": [ "3.5" ], "request_id": 761, "origin_statement": "Since PCA’s first principal component derives 74.56% of variance from features 6 and 12, the MKTCN model using the top three PCs classifies normal samples with 99.7% accuracy.", "perturbed_statement": "Since PCA’s first principal component derives only 5.29% of variance from features 4 and 11, the MKTCN model using the top three PCs classifies normal samples with 99.7% accuracy.", "perturbed_explanation": "The perturbation incorrectly swaps PC1 with PC3: in the context, PC1 actually captures 74.56% variance from features 6 and 12, whereas the 5.29% contribution from features 4 and 11 refers to PC3, not PC1.", "claim": "Since PCA’s first principal component derives 74.56% of variance from features 6 and 12, the MKTCN model using the top three PCs classifies normal samples with 99.7% accuracy.", "label": true }, { "paperid": "2411.16421v1", "paper_path": "./SciVer/papers/2411.16421v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16421v1_figure_4.png", "item2_path": "./SciVer/images/2411.16421v1_figure_5.png", "section": [ "5.3" ], "request_id": 777, "origin_statement": "At grade 4 in Figure 4, flipping reduces AU center error from 35.17 px to 21.71 px. However, Figure 5 shows AU regression error at grade 4 (23.84 hPa) is over fourteen times WP’s 1.68 hPa. Thus, although flipping improves center estimation, it does not homogenize regression performance.", "perturbed_statement": "At grade 4 in Figure 4, flipping increases AU center error from 21.71 px to 35.17 px. However, Figure 5 shows AU regression error at grade 4 (23.84 hPa) is over fourteen times WP’s 1.68 hPa. Thus, although flipping degrades center estimation, it does not homogenize regression performance.", "perturbed_explanation": "Figure 4’s red curve (flipped AU) at grade 4 is labeled 21.71 px and the green curve (not flipped) is 35.17 px, so flipping actually reduces the error rather than increasing it as stated.", "claim": "At grade 4 in Figure 4, flipping reduces AU center error from 35.17 px to 21.71 px. However, Figure 5 shows AU regression error at grade 4 (23.84 hPa) is over fourteen times WP’s 1.68 hPa. 
Thus, although flipping improves center estimation, it does not homogenize regression performance.", "label": true }, { "paperid": "2410.07242v1", "paper_path": "./SciVer/papers/2410.07242v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07242v1_figure_3.png", "item2_path": "./SciVer/images/2410.07242v1-Table3-1.png", "section": [ "5" ], "request_id": 805, "origin_statement": "Table 3 shows an average 31.4% response rate among MTX trials; Figure 3A then shows that at a 31.4% interim observed rate after 75 patients, the regression submodel receives its maximum posterior weight, which corresponds in Panel C to the smallest Stage 2 sample size (~15).", "perturbed_statement": "Table 3 shows an average 26.4% response rate among MTX trials; Figure 3A then shows that at a 26.4% interim observed rate after 75 patients, the regression submodel receives its maximum posterior weight, which corresponds in Panel C to the smallest Stage 2 sample size (~15).", "perturbed_explanation": "The perturbed statement misreports the MTX trial mean as 26.4%, but Table 3’s seven MTX trials actually average 31.4%. In Figure 3A the regression submodel peaks at 31.4%, not 26.4%, so the claimed maximum at 26.4% contradicts both the table and the figure.", "claim": "Table 3 shows an average 31.4% response rate among MTX trials; Figure 3A then shows that at a 31.4% interim observed rate after 75 patients, the regression submodel receives its maximum posterior weight, which corresponds in Panel C to the smallest Stage 2 sample size (~15).", "label": true }, { "paperid": "2411.06070v1", "paper_path": "./SciVer/papers/2411.06070v1.json", "claim_type": "sequential", "item1": "1", "item2": "9", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.06070v1-Table1-1.png", "item2_path": "./SciVer/images/2411.06070v1-Table9-1.png", "section": [ "2.2" ], "request_id": 808, "origin_statement": "Table1 shows that for the homophily USA target, source Europe has the highest tree similarity (88.7%) and accuracy (57.9%). In Table9 under random features, Europe→USA also has the highest tree similarity (76.1%) and accuracy (46.1%), confirming that higher tree similarity drives transferability.", "perturbed_statement": "Table1 shows that for the homophily USA target, source Europe has the highest tree similarity (88.7%) and accuracy (57.9%). In Table9 under random features, Brazil→USA has the highest tree similarity (58.9%) and accuracy (45.4%), confirming that higher tree similarity drives transferability.", "perturbed_explanation": "This perturbed statement is false because in Table9 the Europe→USA pair actually has a tree similarity of 76.1% and accuracy of 46.1%, both exceeding Brazil→USA’s 58.9% tree similarity and 45.4% accuracy. Therefore, Brazil is not the top source under random features.", "claim": "Table1 shows that for the homophily USA target, source Europe has the highest tree similarity (88.7%) and accuracy (57.9%). 
In Table9 under random features, Europe→USA also has the highest tree similarity (76.1%) and accuracy (46.1%), confirming that higher tree similarity drives transferability.", "label": true }, { "paperid": "2411.03122v2", "paper_path": "./SciVer/papers/2411.03122v2.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03122v2_figure_4(a).png", "item2_path": "./SciVer/images/2411.03122v2_figure_4(c).png", "section": [ "7", "7.4" ], "request_id": 812, "origin_statement": "The scattering Feynman diagram shows a photon exchanged with a green electron/nucleus line, so process 5 (neutrino scattering on e/N) requires background matter and cannot occur in vacuum.", "perturbed_statement": "The scattering Feynman diagram shows only a neutrino line and no electron or nucleus, so neutrino scattering can occur in vacuum.", "perturbed_explanation": "In reality, the second image clearly depicts a green line labeled e/N representing an electron or nucleus. This explicitly shows that neutrino scattering (process 5) involves exchange with background electrons or nuclei, contradicting the claim that no such line is present and that the process can occur in vacuum.", "claim": "The scattering Feynman diagram shows a photon exchanged with a green electron/nucleus line, so process 5 (neutrino scattering on e/N) requires background matter and cannot occur in vacuum.", "label": true }, { "paperid": "2410.04386v1", "paper_path": "./SciVer/papers/2410.04386v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.04386v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.04386v1_figure_1(b).png", "section": [ "5.1" ], "request_id": 814, "origin_statement": "From Fig.1 left, at 10% sample size, LAVA’s ℓ2 error is ~0.1. From Fig.1 right, at 10%, its ℓ∞ error is ~0.25. Hence, at 10% samples, LAVA’s maximum error exceeds twice its root‐mean‐square error.", "perturbed_statement": "From Fig.1 left, at 10% sample size, LAVA’s ℓ2 error is ~0.1. From Fig.1 right, at 10%, its ℓ∞ error is ~0.18. Hence, at 10% samples, LAVA’s maximum error is less than twice its root‐mean‐square error.", "perturbed_explanation": "The ℓ∞ error of LAVA at 10% samples is actually about 0.25 (Fig.1 right), which is more than twice its ℓ2 error (~0.1) from Fig.1 left. Thus the claim that the maximum error is less than twice the RMS error contradicts the plotted value.", "claim": "From Fig.1 left, at 10% sample size, LAVA’s ℓ2 error is ~0.1. From Fig.1 right, at 10%, its ℓ∞ error is ~0.25. 
Hence, at 10% samples, LAVA’s maximum error exceeds twice its root‐mean‐square error.", "label": true }, { "paperid": "2411.10013v1", "paper_path": "./SciVer/papers/2411.10013v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10013v1_figure_7(a).png", "item2_path": "./SciVer/images/2411.10013v1_figure_7(b).png", "section": [ "7.5" ], "request_id": 823, "origin_statement": "Figure 7a shows homography error variance ~0.0033; Fig. 7b’s red fit indicates smooth loss increases by ~10 units per unit variance; thus at variance 0.0033, smooth loss increases only ~0.033, negligibly affecting depth estimation.", "perturbed_statement": "Figure 7a shows homography error variance ~0.03; Fig. 7b’s red fit indicates smooth loss increases by ~10 units per unit variance; thus at variance 0.03, smooth loss increases ~0.3, noticeably affecting depth estimation.", "perturbed_explanation": "The premise is incorrect: Fig. 7a’s caption specifies the homography error variance as 0.003300, not 0.03. Hence the calculation of a 0.3-unit loss increase is based on the wrong variance value.", "claim": "Figure 7a shows homography error variance ~0.0033; Fig. 7b’s red fit indicates smooth loss increases by ~10 units per unit variance; thus at variance 0.0033, smooth loss increases only ~0.033, negligibly affecting depth estimation.", "label": true }, { "paperid": "2409.00210v1", "paper_path": "./SciVer/papers/2409.00210v1.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.00210v1_figure_3.png", "item2_path": "./SciVer/images/2409.00210v1-Table1-1.png", "section": [ "2.2" ], "request_id": 834, "origin_statement": "At ~10 pN, the LPGS undecamer extends to ≈1.7 nm (Fig. 3a), yielding a stretching free energy of ≈2 kBT (Fig. 3b). Using ΔF≈5.6 kBT from Table 1, this implies a standard binding free energy ΔF⁰_b≈−9.6 kBT (Table 1).", "perturbed_statement": "At ~10 pN, the LPGS undecamer extends to ≈2.2 nm (Fig. 3a), yielding a stretching free energy of ≈4 kBT (Fig. 3b). Using ΔF≈5.6 kBT from Table 1, this implies a standard binding free energy ΔF⁰_b≈−1.6 kBT.", "perturbed_explanation": "The perturbed statement misreads Fig. 3a and Fig. 3b: at 10 pN the actual extension is ≈1.7 nm (not 2.2 nm) and the stretching free energy is ≈2 kBT (not 4 kBT). Those correct values yield ΔF⁰_b≈−9.6 kBT, not −1.6 kBT.", "claim": "At ~10 pN, the LPGS undecamer extends to ≈1.7 nm (Fig. 3a), yielding a stretching free energy of ≈2 kBT (Fig. 3b). Using ΔF≈5.6 kBT from Table 1, this implies a standard binding free energy ΔF⁰_b≈−9.6 kBT (Table 1).", "label": true }, { "paperid": "2411.13247v1", "paper_path": "./SciVer/papers/2411.13247v1.json", "claim_type": "sequential", "item1": "1", "item2": "2(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13247v1_figure_1.png", "item2_path": "./SciVer/images/2411.13247v1_figure_2(a).png", "section": [ "3.1" ], "request_id": 840, "origin_statement": "From Figure 1’s bottom-right panel, the retrieved simulation-extinction is Av≈0.6 and Rv≈2.75. In Figure 2, the black (Rv=2.2, Av=0.5) and red (Rv=3.1, Av=1.0) curves at 1400 Å lie at ~0.80 and ~0.85, so for Rv=2.75 and Av=0.6, transmission at 1400 Å is ≈0.83.", "perturbed_statement": "From Figure 1’s bottom-right panel, the retrieved simulation-extinction is Av≈0.6 and Rv≈3.5. 
In Figure 2, the black (Rv=2.2, Av=0.5) and red (Rv=3.1, Av=1.0) curves at 1400 Å lie at ~0.80 and ~0.85, so for Rv=3.5 and Av=0.6, transmission at 1400 Å is ≈0.90.", "perturbed_explanation": "The perturbed claim misreads Figure 1: the black dot in the bottom-right panel marks Rv≈2.75, not 3.5. Moreover, Figure 2 shows no transmission curve for Rv=3.5, so deriving ≈0.90 transmission at 1400 Å from an Rv=3.5 premise is unsupported.", "claim": "From Figure 1’s bottom-right panel, the retrieved simulation-extinction is Av≈0.6 and Rv≈2.75. In Figure 2, the black (Rv=2.2, Av=0.5) and red (Rv=3.1, Av=1.0) curves at 1400 Å lie at ~0.80 and ~0.85, so for Rv=2.75 and Av=0.6, transmission at 1400 Å is ≈0.83.", "label": true }, { "paperid": "2410.04465v2", "paper_path": "./SciVer/papers/2410.04465v2.json", "claim_type": "sequential", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.04465v2_figure_5.png", "item2_path": "./SciVer/images/2410.04465v2_figure_7.png", "section": [ "5" ], "request_id": 842, "origin_statement": "Figure 5 shows that 99% of events with an Hbb score above 0.8 are true Hbb, yet Figure 7 reports only 84.30% of true Hbb events are correctly reconstructed as Hbb.", "perturbed_statement": "Figure 5 shows that 95% of events with an Hbb score above 0.8 are true Hbb, but Figure 7 reports only 90% of true Hbb events are correctly reconstructed as Hbb.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 5 actually indicates 99% purity for Hbb events above score 0.8 (not 95%), and Figure 7 shows 84.30% correct reconstruction for Hbb (not 90%).", "claim": "Figure 5 shows that 99% of events with an Hbb score above 0.8 are true Hbb, yet Figure 7 reports only 84.30% of true Hbb events are correctly reconstructed as Hbb.", "label": true }, { "paperid": "2409.16316v1", "paper_path": "./SciVer/papers/2409.16316v1.json", "claim_type": "sequential", "item1": "4(b)", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16316v1_figure_4(b).png", "item2_path": "./SciVer/images/2409.16316v1_figure_5.png", "section": [ "5.3.1" ], "request_id": 847, "origin_statement": "Fig4(b) shows mean RMSD in the Alpine region is approximately 120 W/m². In Fig5(B), model B at Alpine stations exhibits MBE greater than 150 W/m². Therefore, model B’s positive bias surpasses the typical regional RMSD by at least 30 W/m².", "perturbed_statement": "Fig4(b) shows mean RMSD in the Alpine region is approximately 150 W/m². In Fig5(B), model B at Alpine stations exhibits MBE around 120 W/m². Therefore, model B’s positive bias is lower than the typical regional RMSD by about 30 W/m².", "perturbed_explanation": "This is incorrect because Fig 4(b) indicates the Alpine RMSD peaks near 120 W/m², not 150 W/m², and Fig 5(B) shows model B’s MBE exceeds 150 W/m² rather than being around 120 W/m². Thus model B’s bias actually exceeds, rather than falls below, the regional RMSD.", "claim": "Fig4(b) shows mean RMSD in the Alpine region is approximately 120 W/m². In Fig5(B), model B at Alpine stations exhibits MBE greater than 150 W/m². 
Therefore, model B’s positive bias surpasses the typical regional RMSD by at least 30 W/m².", "label": true }, { "paperid": "2411.16662v1", "paper_path": "./SciVer/papers/2411.16662v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16662v1_figure_5.png", "item2_path": "./SciVer/images/2411.16662v1_figure_6.png", "section": [ "3.3" ], "request_id": 864, "origin_statement": "The binary classifier achieves its highest macro F1 (~0.92) on Track Record, and Track Record has the lowest perceived difficulty (~1.3), indicating that the binary classifier excels on the easiest category.", "perturbed_statement": "The binary classifier achieves its highest macro F1 (~0.92) on Track Record, and Track Record has a high perceived difficulty (~4.3), indicating that the binary classifier excels on the most challenging category.", "perturbed_explanation": "The perturbed claim wrongly assigns Track Record a difficulty of ~4.3. In Figure 6 (left panel), Track Record’s actual perceived difficulty is about 1.3 (the lowest), so it is not a challenging category, contradicting the perturbation.", "claim": "The binary classifier achieves its highest macro F1 (~0.92) on Track Record, and Track Record has the lowest perceived difficulty (~1.3), indicating that the binary classifier excels on the easiest category.", "label": true }, { "paperid": "2411.15645v1", "paper_path": "./SciVer/papers/2411.15645v1.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15645v1_figure_4.png", "item2_path": "./SciVer/images/2411.15645v1-Table3-1.png", "section": [ "4.7" ], "request_id": 867, "origin_statement": "Because GPT-4o solved 40 Number Theory problems using MC-NEST (Fig.4) and its AIME 16-rollout score with MC-NEST under the Importance Sampling Policy is 39 (Table 3), its domain-specific Number Theory performance slightly exceeds its overall AIME MC-NEST benchmark by one point.", "perturbed_statement": "Because GPT-4o solved 40 Number Theory problems using MC-NEST (Fig.4) and its AIME 16-rollout score with MC-NEST under the Importance Sampling Policy is 41 (Table 3), its domain-specific Number Theory performance slightly trails its overall AIME MC-NEST benchmark by one point.", "perturbed_explanation": "The perturbed claim is false because Table 3 reports GPT-4o’s MC-NEST Importance Sampling score at 16 rollouts as 39—not 41—so the overall AIME benchmark is 39, not 41.", "claim": "Because GPT-4o solved 40 Number Theory problems using MC-NEST (Fig.4) and its AIME 16-rollout score with MC-NEST under the Importance Sampling Policy is 39 (Table 3), its domain-specific Number Theory performance slightly exceeds its overall AIME MC-NEST benchmark by one point.", "label": true }, { "paperid": "2409.03843v1", "paper_path": "./SciVer/papers/2409.03843v1.json", "claim_type": "sequential", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03843v1_figure_2.png", "item2_path": "./SciVer/images/2409.03843v1_figure_4.png", "section": [ "4.4" ], "request_id": 870, "origin_statement": "Figure 2B shows assigning a Republican identity yields an ingroup bias of +2.43 on Republican statements. Figure 4A shows explicit debiasing shifts responses by roughly 0.1. 
Therefore, Republican identity assignment is over 20 times more effective at boosting agreement than explicit debiasing.", "perturbed_statement": "Figure 2B shows assigning a Republican identity yields an ingroup bias of +1.50 on Republican statements. Figure 4A shows explicit debiasing shifts responses by roughly 0.5. Therefore, Republican identity assignment is 3 times more effective at boosting agreement than explicit debiasing.", "perturbed_explanation": "This statement is incorrect because Figure 2B actually reports an ingroup bias of +2.43, not +1.50, and Figure 4A shows explicit debiasing shifts of about +0.1, not +0.5. Both altered premises conflict directly with the data, invalidating the 3× effectiveness claim.", "claim": "Figure 2B shows assigning a Republican identity yields an ingroup bias of +2.43 on Republican statements. Figure 4A shows explicit debiasing shifts responses by roughly 0.1. Therefore, Republican identity assignment is over 20 times more effective at boosting agreement than explicit debiasing.", "label": true }, { "paperid": "2410.16343v1", "paper_path": "./SciVer/papers/2410.16343v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.16343v1-Table1-1.png", "item2_path": "./SciVer/images/2410.16343v1-Table2-1.png", "section": [ "4" ], "request_id": 874, "origin_statement": "The Pecos River near Pecos, NM has a mean evaporation of 1.50 mm/day, matching the maximum catchment mean evaporation of 1.5 mm/day reported in Table 1.", "perturbed_statement": "The Arkansas River above Pueblo, CO has a mean evaporation of 1.50 mm/day, matching the maximum catchment mean evaporation of 1.5 mm/day reported in Table 1.", "perturbed_explanation": "This is incorrect because Table 2 lists the Arkansas River above Pueblo, CO with a mean evaporation of only 1.28 mm/day, not 1.50 mm/day, so it does not equal the maximum of 1.5 mm/day from Table 1.", "claim": "The Pecos River near Pecos, NM has a mean evaporation of 1.50 mm/day, matching the maximum catchment mean evaporation of 1.5 mm/day reported in Table 1.", "label": true }, { "paperid": "2409.07335v1", "paper_path": "./SciVer/papers/2409.07335v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.07335v1_figure_3.png", "item2_path": "./SciVer/images/2409.07335v1-Table3-1.png", "section": [ "4.3.1" ], "request_id": 890, "origin_statement": "Bootstrapping exhibits the largest agreement decline (0.16) from small to large models and the lowest PGR p-value (0.002), indicating it most effectively fosters generalization among the weak-to-strong methods.", "perturbed_statement": "Auxiliary Confidence exhibits the largest agreement decline (0.11) from small to large models and the lowest PGR p-value (0.01), indicating it most effectively fosters generalization among the weak-to-strong methods.", "perturbed_explanation": "Figure 3 shows that Auxiliary Confidence’s agreement drops from 0.89 to 0.78 (a decline of 0.11), which is smaller than Bootstrapping’s drop of 0.88 to 0.72 (0.16). 
Table 3 reports Auxiliary Confidence’s PGR p-value as 0.01, whereas Bootstrapping’s is 0.002, making Bootstrapping, not Auxiliary Confidence, the method with the lowest p-value and largest decline.", "claim": "Bootstrapping exhibits the largest agreement decline (0.16) from small to large models and the lowest PGR p-value (0.002), indicating it most effectively fosters generalization among the weak-to-strong methods.", "label": true }, { "paperid": "2410.18630v1", "paper_path": "./SciVer/papers/2410.18630v1.json", "claim_type": "sequential", "item1": "13", "item2": "15", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.18630v1_figure_13.png", "item2_path": "./SciVer/images/2410.18630v1_figure_15.png", "section": [ "3.2.2" ], "request_id": 1080, "origin_statement": "Figure13 reveals significant noise in the colored Cloud B segmentation, implying color-based registration alone is prone to disturbances; consequently, Figure15 shows that the R+Co method has a rotational error spanning 2.42°–179.94°, while R+Cs+Co narrows errors to 1.96°–5.24°, demonstrating combined constraints improve robustness.", "perturbed_statement": "Figure13 shows minimal noise in the colored Cloud B segmentation, implying color-based registration alone is highly reliable; consequently, Figure15 indicates the R+Co method’s rotational errors range from 3.31°–34.20°, outperforming R+Cs+Co’s 1.96°–5.24° deviation, confirming color-only yields best precision.", "perturbed_explanation": "The perturbation is wrong because Figure13(e) explicitly highlights multiple noise clusters around Cloud B (labeled “Noise”), contradicting the claim of minimal noise. Furthermore, Figure15 shows R+Co’s rotational errors actually span 2.42° to 179.94°, far exceeding R+Cs+Co’s narrow 1.96°–5.24° range, so R+Co does not outperform the combined method.", "claim": "Figure13 reveals significant noise in the colored Cloud B segmentation, implying color-based registration alone is prone to disturbances; consequently, Figure15 shows that the R+Co method has a rotational error spanning 2.42°–179.94°, while R+Cs+Co narrows errors to 1.96°–5.24°, demonstrating combined constraints improve robustness.", "label": true }, { "paperid": "2411.14713v1", "paper_path": "./SciVer/papers/2411.14713v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14713v1_figure_1(a).png", "item2_path": "./SciVer/images/2411.14713v1_figure_1(b).png", "section": [ "1" ], "request_id": 1089, "origin_statement": "The Movielens-100k chart shows DIEN’s AUC rises from 0.793 at history length 20 to 0.798 at length 100; the Amazon-books chart shows DIEN+Llama2’s AUC falls from 0.842 to 0.839. This confirms longer histories boost DIEN but hinder DIEN+Llama2.", "perturbed_statement": "The Movielens-100k chart shows DIEN’s AUC drops from 0.793 at history length 20 to 0.789 at length 100; the Amazon-books chart shows DIEN+Llama2’s AUC rises from 0.842 to 0.845. This suggests longer histories harm DIEN but benefit DIEN+Llama2.", "perturbed_explanation": "In fact, on Movielens-100k DIEN’s AUC increases from 0.793 (K=20) to 0.798 (K=100), not drops to 0.789. 
And on Amazon-books DIEN+Llama2’s AUC decreases from 0.842 to 0.839, not rises to 0.845, contradicting the perturbed claim.", "claim": "The Movielens-100k chart shows DIEN’s AUC rises from 0.793 at history length 20 to 0.798 at length 100; the Amazon-books chart shows DIEN+Llama2’s AUC falls from 0.842 to 0.839. This confirms longer histories boost DIEN but hinder DIEN+Llama2.", "label": true }, { "paperid": "2411.02775v2", "paper_path": "./SciVer/papers/2411.02775v2.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02775v2_figure_5(a).png", "item2_path": "./SciVer/images/2411.02775v2_figure_5(b).png", "section": [ "4.4" ], "request_id": 1095, "origin_statement": "At 8 dimensions, CADETS achieves only about 40% ACC (Figure 5a); adding five labeled nodes per batch raises its ACC to around 63% (Figure 5b), showing that supervised signals offset weak embeddings.", "perturbed_statement": "At 8 dimensions, CADETS achieves only about 40% ACC (Figure 5a); adding five labeled nodes per batch raises its ACC to around 80% (Figure 5b), showing that supervised signals offset weak embeddings.", "perturbed_explanation": "Figure 5b shows that with 5 labeled nodes per batch, CADETS's ACC is approximately 63%, not 80%. The claim of 80% is therefore contradicted by the plotted value.", "claim": "At 8 dimensions, CADETS achieves only about 40% ACC (Figure 5a); adding five labeled nodes per batch raises its ACC to around 63% (Figure 5b), showing that supervised signals offset weak embeddings.", "label": true }, { "paperid": "2411.04165v1", "paper_path": "./SciVer/papers/2411.04165v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04165v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.04165v1_figure_2(b).png", "section": [ "4.1" ], "request_id": 1129, "origin_statement": "At 10 B tokens in the CLM plot, DNA-xLSTM-2M has the lowest loss (~1.075) versus Mamba (~1.083); the MLM plot also shows DNA-xLSTM-2M with the lowest loss (~0.953) compared to Transformer++ (~0.977), proving DNA-xLSTM’s top pre-training performance.", "perturbed_statement": "At 10 B tokens in the CLM plot, DNA-xLSTM-2M has the lowest loss (~1.075) versus Mamba (~1.083); the MLM plot shows Transformer++ with the lowest loss (~0.977) versus DNA-xLSTM (~0.953), proving Transformer++’s top pre-training performance.", "perturbed_explanation": "This statement is wrong because in the MLM plot DNA-xLSTM-2M’s validation loss (~0.953) is lower than Transformer++’s (~0.977). 
Transformer++ does not achieve the lowest MLM loss, so it cannot be the top performer.", "claim": "At 10 B tokens in the CLM plot, DNA-xLSTM-2M has the lowest loss (~1.075) versus Mamba (~1.083); the MLM plot also shows DNA-xLSTM-2M with the lowest loss (~0.953) compared to Transformer++ (~0.977), proving DNA-xLSTM’s top pre-training performance.", "label": true }, { "paperid": "2410.18126v1", "paper_path": "./SciVer/papers/2410.18126v1.json", "claim_type": "sequential", "item1": "6(b)", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.18126v1_figure_6(b).png", "item2_path": "./SciVer/images/2410.18126v1-Table6-1.png", "section": [ "5.4" ], "request_id": 1146, "origin_statement": "The proposed approach’s confusion matrix shows 10 actual high-interference instances (3 false negatives + 7 true positives), yielding a recall of 7/10=0.70, which exactly matches the 0.70 recall reported for the High class in Table 6(b).", "perturbed_statement": "The proposed approach’s confusion matrix shows 11 actual high-interference instances (3 false negatives + 8 true positives), yielding a recall of 8/11≈0.73, which matches the 0.73 recall reported for the High class in Table 6(b).", "perturbed_explanation": "This is incorrect because the confusion matrix in Figure 6(b) actually records 7 true positives and 3 false negatives for the High class, totaling 10 instances and a recall of 7/10=0.70, not 8/11≈0.73.", "claim": "The proposed approach’s confusion matrix shows 10 actual high-interference instances (3 false negatives + 7 true positives), yielding a recall of 7/10=0.70, which exactly matches the 0.70 recall reported for the High class in Table 6(b).", "label": true }, { "paperid": "2410.06134v1", "paper_path": "./SciVer/papers/2410.06134v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.06134v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.06134v1_figure_1(c).png", "section": [ "4.2", "1", "3.3" ], "request_id": 1151, "origin_statement": "Baseline exhibits ID max logits peaking near 15 and OOD near 7 (Fig.1a). With LS 0.3, peaks shrink to ~2.0 (ID) and ~1.7 (OOD) (Fig.1c), reducing the logit gap from ~8 to ~0.3 and increasing their overlap.", "perturbed_statement": "Baseline exhibits ID max logits peaking near 15 and OOD near 7 (Fig.1a). With LS 0.3, peaks shrink to ~2.0 (ID) and ~0.5 (OOD) (Fig.1c), increasing the logit gap from ~8 to ~1.5 and reducing their overlap.", "perturbed_explanation": "The perturbation misreads the OOD peak in Fig.1c as ~0.5. In reality, the OOD distribution peak is around ~1.7, making the actual gap ~0.3, not ~1.5, so LS 0.3 actually reduces separation and increases overlap.", "claim": "Baseline exhibits ID max logits peaking near 15 and OOD near 7 (Fig.1a). With LS 0.3, peaks shrink to ~2.0 (ID) and ~1.7 (OOD) (Fig.1c), reducing the logit gap from ~8 to ~0.3 and increasing their overlap.", "label": true }, { "paperid": "2411.08224v2", "paper_path": "./SciVer/papers/2411.08224v2.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.08224v2_figure_3.png", "item2_path": "./SciVer/images/2411.08224v2-Table1-1.png", "section": [ "5.2" ], "request_id": 1152, "origin_statement": "Figure 3 shows JDCL attains 90.15% accuracy on task 1 immediately after training. Table 1 reports its average forgetting on CIFAR-100 T=5 as 37.20%. 
Hence, JDCL preserves about 53% of its initial task 1 accuracy by the end.", "perturbed_statement": "Figure 3 shows JDCL attains 82.85% accuracy on task 1 immediately after training. Table 1 reports its average forgetting on CIFAR-100 T=5 as 19.16%. Hence, JDCL preserves about 53% of its initial task 1 accuracy by the end.", "perturbed_explanation": "The perturbed statement misreads two values: Figure 3’s cell for JDCL on task 1 after task 1 is 90.15%, not 82.85% (82.85% is after task 2). Table 1 lists JDCL’s forgetting as 37.20%, not 19.16% (19.16% is GFR’s CIFAR-100 forgetting). These incorrect premises invalidate the conclusion.", "claim": "Figure 3 shows JDCL attains 90.15% accuracy on task 1 immediately after training. Table 1 reports its average forgetting on CIFAR-100 T=5 as 37.20%. Hence, JDCL preserves about 53% of its initial task 1 accuracy by the end.", "label": true }, { "paperid": "2411.09263v2", "paper_path": "./SciVer/papers/2411.09263v2.json", "claim_type": "sequential", "item1": "12", "item2": "13", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09263v2_figure_12.png", "item2_path": "./SciVer/images/2411.09263v2_figure_13.png", "section": [ "9" ], "request_id": 1168, "origin_statement": "ResNet50 kernels 5 and 9 share similar diagonal patterns; but VGG19 kernels 5 and 9 exhibit differently oriented features, suggesting ResNet50 clusters similar edge detectors more tightly than VGG19.", "perturbed_statement": "ResNet50 kernels 5 and 9 exhibit perpendicular orientations; while VGG19 kernels 5 and 9 share identical diagonal features, implying ResNet50 disperses diagonal detectors more than VGG19.", "perturbed_explanation": "This is incorrect because in Fig. 12 ResNet50’s kernels 5 and 9 actually display similar diagonal patterns (not perpendicular). Moreover, in Fig. 13 VGG19’s kernels 5 and 9 do not share identical diagonal features but differ in orientation, contradicting the perturbed claim.", "claim": "ResNet50 kernels 5 and 9 share similar diagonal patterns; but VGG19 kernels 5 and 9 exhibit differently oriented features, suggesting ResNet50 clusters similar edge detectors more tightly than VGG19.", "label": true }, { "paperid": "2410.08875v1", "paper_path": "./SciVer/papers/2410.08875v1.json", "claim_type": "sequential", "item1": "5(b)", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.08875v1_figure_5(b).png", "item2_path": "./SciVer/images/2410.08875v1_figure_8.png", "section": [ "5.2" ], "request_id": 1169, "origin_statement": "ECDF (Fig. 8) indicates about 90% of consecutive stop spacings are ≤10 km. In Fig. 5, the 30-bus fleet (yellow) drops to roughly 25% unserved by 10:30. Therefore, with most stops within 10 km and 30 buses, the system serves about 75% of requests by 10:30.", "perturbed_statement": "ECDF (Fig. 8) indicates only about 50% of consecutive stop spacings are ≤10 km. In Fig. 5, the 30-bus fleet (yellow) drops to roughly 15% unserved by 10:30. Therefore, with half the stops within 10 km and 30 buses, the system serves about 85% of requests by 10:30.", "perturbed_explanation": "This is incorrect because Fig. 8 actually shows around 85–90% of stop spacings are ≤10 km (not 50%), and in Fig. 5 the yellow line for 30 buses is about 25% unserved at 10:30 (not 15%). Both misread values contradict the original plots.", "claim": "ECDF (Fig. 8) indicates about 90% of consecutive stop spacings are ≤10 km. In Fig. 5, the 30-bus fleet (yellow) drops to roughly 25% unserved by 10:30. 
Therefore, with most stops within 10 km and 30 buses, the system serves about 75% of requests by 10:30.", "label": true }, { "paperid": "2410.11591v1", "paper_path": "./SciVer/papers/2410.11591v1.json", "claim_type": "sequential", "item1": "2", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.11591v1_figure_2.png", "item2_path": "./SciVer/images/2410.11591v1_figure_3(a).png", "section": [ "1" ], "request_id": 1185, "origin_statement": "Figure 2 shows four object categories (Cable, Hazelnut, Transistor, Metal nut). In the cloud scheme of Figure 3a, each category’s sample is passed through a heavyweight extractor (WideResNet50), requiring four separate high-memory feature extraction passes.", "perturbed_statement": "Figure 2 shows five object categories (Cable, Hazelnut, Transistor, Metal nut, and Bottle). In the edge scheme of Figure 3b, each category’s sample is passed through a lightweight ResNet34 extractor, requiring five low-memory feature extraction passes.", "perturbed_explanation": "The perturbed claim miscounts the categories: Figure 2 only depicts four objects (Cable, Hazelnut, Transistor, Metal nut), not five. It also asserts the use of a ResNet34 extractor in Figure 3b, but the text and diagram only specify a generic less-expensive feature extractor—not ResNet34.", "claim": "Figure 2 shows four object categories (Cable, Hazelnut, Transistor, Metal nut). In the cloud scheme of Figure 3a, each category’s sample is passed through a heavyweight extractor (WideResNet50), requiring four separate high-memory feature extraction passes.", "label": true }, { "paperid": "2410.23609v1", "paper_path": "./SciVer/papers/2410.23609v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.23609v1-Table3-1.png", "item2_path": "./SciVer/images/2410.23609v1-Table4-1.png", "section": [ "3.3" ], "request_id": 1198, "origin_statement": "GPT-4o full-context yields 80.1% BACC on PubMed (Table 3). Table 4 shows that ordering the most important PubMed document at the bottom raises BACC to 85.5%, a 5.4-point gain when placing it last.", "perturbed_statement": "GPT-4o full-context yields 83.2% BACC on PubMed (Table 3). Table 4 shows that ordering the most important PubMed document at the bottom raises BACC to 83.9%, a 0.7-point gain when placing it last.", "perturbed_explanation": "Table 3 actually reports 80.1% for GPT-4o full-context on PubMed, not 83.2%. Table 4 reports a bottom-order BACC of 85.5%, not 83.9%. Thus both quoted values and the 0.7-point improvement are incorrect.", "claim": "GPT-4o full-context yields 80.1% BACC on PubMed (Table 3). Table 4 shows that ordering the most important PubMed document at the bottom raises BACC to 85.5%, a 5.4-point gain when placing it last.", "label": true }, { "paperid": "2410.20464v1", "paper_path": "./SciVer/papers/2410.20464v1.json", "claim_type": "sequential", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20464v1_figure_2.png", "item2_path": "./SciVer/images/2410.20464v1-Table1-1.png", "section": [ "5.2" ], "request_id": 1206, "origin_statement": "Fig 2 shows an AGN flux density fν≈10 mJy at rest-frame 6 µm. Table 1 gives an observed X-ray luminosity of log LX(2–10 keV)=44.11. Thus the νL6 µm/LX ratio ≳10, consistent with significant X-ray absorption in J1324+4501.", "perturbed_statement": "Fig 2 shows an AGN flux density fν≈1 mJy at rest-frame 6 µm. 
Table 1 gives an observed X-ray luminosity of log LX(2–10 keV)=44.11. Thus the νL6 µm/LX ratio ≈1, implying no significant X-ray absorption in J1324+4501.", "perturbed_explanation": "The perturbation misreads the SED in Fig 2: the green AGN photometry point at 6 µm is ≈10 mJy, not 1 mJy. Using the true fν≈10 mJy with log LX=44.11 yields νL6 µm/LX≫1, indicating heavy absorption—contradicting the perturbed ratio≈1.", "claim": "Fig 2 shows an AGN flux density fν≈10 mJy at rest-frame 6 µm. Table 1 gives an observed X-ray luminosity of log LX(2–10 keV)=44.11. Thus the νL6 µm/LX ratio ≳10, consistent with significant X-ray absorption in J1324+4501.", "label": true }, { "paperid": "2411.06805v1", "paper_path": "./SciVer/papers/2411.06805v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06805v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.06805v1_figure_5(c).png", "section": [ "5.2" ], "request_id": 1209, "origin_statement": "At 50k training samples, AssistRAG’s F1 outperforms the w/o DPO variant by 0.023 on HotpotQA and by 0.010 on Bamboogle, indicating a stronger benefit from DPO training on HotpotQA.", "perturbed_statement": "At 50k training samples, AssistRAG’s F1 outperforms the w/o DPO variant by 0.020 on HotpotQA and by 0.010 on Bamboogle, indicating a stronger benefit from DPO training on HotpotQA.", "perturbed_explanation": "The perturbation misstates the HotpotQA performance gap. According to the HotpotQA plot, AssistRAG at 50k is ~0.448 and w/o DPO is ~0.425, yielding a 0.023 gap, not the claimed 0.020.", "claim": "At 50k training samples, AssistRAG’s F1 outperforms the w/o DPO variant by 0.023 on HotpotQA and by 0.010 on Bamboogle, indicating a stronger benefit from DPO training on HotpotQA.", "label": true }, { "paperid": "2410.13339v1", "paper_path": "./SciVer/papers/2410.13339v1.json", "claim_type": "sequential", "item1": "8", "item2": "9", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.13339v1-Table8-1.png", "item2_path": "./SciVer/images/2410.13339v1-Table9-1.png", "section": [ "4.4" ], "request_id": 1215, "origin_statement": "Table 8 shows threshold 1 yields the highest TriviaQA accuracy (51.50%). Table 9 shows the best TriviaQA accuracy at probing layer 6 is 50.70%. Thus, tuning the threshold achieves a 0.8% higher TriviaQA performance than optimal layer placement.", "perturbed_statement": "Table 8 shows threshold 1 yields the highest TriviaQA accuracy (52.50%). Table 9 shows the best TriviaQA accuracy at probing layer 6 is 50.70%. Thus, tuning the threshold achieves a 1.8% higher TriviaQA performance than optimal layer placement.", "perturbed_explanation": "The perturbed statement misreports the TriviaQA accuracy at threshold 1 as 52.50%. In fact, Table 8 lists the TriviaQA score for threshold 1 as 51.50%, not 52.50%, so the calculated 1.8% improvement is incorrect.", "claim": "Table 8 shows threshold 1 yields the highest TriviaQA accuracy (51.50%). Table 9 shows the best TriviaQA accuracy at probing layer 6 is 50.70%. 
Thus, tuning the threshold achieves a 0.8% higher TriviaQA performance than optimal layer placement.", "label": true }, { "paperid": "2409.05103v1", "paper_path": "./SciVer/papers/2409.05103v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05103v1_figure_2.png", "item2_path": "./SciVer/images/2409.05103v1_figure_3.png", "section": [ "4.2" ], "request_id": 1230, "origin_statement": "Figure 2 shows Texas’s centralized retention R₃*(X₃) flattens at about $5×10⁷, but Figure 3 shows its peer-to-peer share g₃*(S) is zero for all S. Thus, Texas offloads all its flood risk in the decentralized scheme.", "perturbed_statement": "Figure 2 shows Texas’s centralized retention R₃*(X₃) flattens at about $5×10⁷, and Figure 3 shows its peer-to-peer share g₃*(S) also holds at $5×10⁷ for all S. Thus, Texas retains a constant $5×10⁷ flood risk in both schemes.", "perturbed_explanation": "The perturbation is incorrect because in Figure 3 the red curve for Texas (g₃*(S)) lies along the horizontal axis at zero, not at $5×10⁷. The plot’s y-values for TX are effectively zero across all aggregate losses S.", "claim": "Figure 2 shows Texas’s centralized retention R₃*(X₃) flattens at about $5×10⁷, but Figure 3 shows its peer-to-peer share g₃*(S) is zero for all S. Thus, Texas offloads all its flood risk in the decentralized scheme.", "label": true }, { "paperid": "2411.17771v1", "paper_path": "./SciVer/papers/2411.17771v1.json", "claim_type": "sequential", "item1": "7", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17771v1_figure_7.png", "item2_path": "./SciVer/images/2411.17771v1-Table2-1.png", "section": [ "5.4" ], "request_id": 1233, "origin_statement": "Figure 7 shows a mean BLEU-1 score of about 0.59, and Table 2 lists HKI-DQG (Qwen2-VL 7B, T5-Large) at 61.63; thus, HKI-DQG exceeds the average concept-wise BLEU-1 by about 2.6 points.", "perturbed_statement": "Since Figure 7 depicts the mean BLEU-1 at roughly 0.65 and Table 2 shows HKI-DQG (Qwen2-VL 7B, T5-Large) achieves 61.63, HKI-DQG falls about 3.4 points below the average concept-wise BLEU-1.", "perturbed_explanation": "This is incorrect because Figure 7’s dashed line for mean BLEU-1 is at approximately 0.59, not 0.65, so HKI-DQG’s 61.63 BLEU-1 actually exceeds—not falls below—the true average by around 2.6 points.", "claim": "Figure 7 shows a mean BLEU-1 score of about 0.59, and Table 2 lists HKI-DQG (Qwen2-VL 7B, T5-Large) at 61.63; thus, HKI-DQG exceeds the average concept-wise BLEU-1 by about 2.6 points.", "label": true }, { "paperid": "2409.05546v1", "paper_path": "./SciVer/papers/2409.05546v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05546v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.05546v1_figure_3(b).png", "section": [ "3.4.2" ], "request_id": 1235, "origin_statement": "Both datasets reach their highest Recall@10 at μ=3e-4 (Fig.3a). However, Fig.3b shows that the optimal λ for Instrument is 1e-4 and for Baby is 5e-5.", "perturbed_statement": "Both datasets reach their highest Recall@10 at μ=3e-4 (Fig.3a). However, Fig.3b shows that the optimal λ for both Instrument and Baby is 1e-4 for preference-semantic alignment.", "perturbed_explanation": "In Fig.3b, Baby's highest Recall@10 (0.0389) and NDCG@10 (0.0394) occur at λ=5e-5, not at 1e-4. 
Therefore stating that Baby peaks at λ=1e-4 contradicts the graph, which shows a lower recall (0.0382) at 1e-4 compared to 5e-5.", "claim": "Both datasets reach their highest Recall@10 at μ=3e-4 (Fig.3a). However, Fig.3b shows that the optimal λ for Instrument is 1e-4 and for Baby is 5e-5.", "label": true }, { "paperid": "2410.10323v1", "paper_path": "./SciVer/papers/2410.10323v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.10323v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.10323v1_figure_4.png", "section": [ "5.4" ], "request_id": 1243, "origin_statement": "GPT-4’s lowest expert-agreed rate was 95% on SOS-HL-1K. MentalGLM-chat’s professionality median is 2/3 (~67%). Hence MentalGLM-chat’s professionality satisfaction is 28 percentage points lower than GPT-4’s lowest agreement rate.", "perturbed_statement": "GPT-4’s expert agreement was at least 99% on all datasets, and MentalGLM-chat’s professionality median is 3/3 (~100%). Hence MentalGLM-chat’s professionality satisfaction exceeds GPT-4’s lowest agreement by 1 percentage point.", "perturbed_explanation": "The perturbation is wrong because the first figure shows GPT-4’s agreement on SOS-HL-1K was 95%, not 99%, and the second figure shows MentalGLM-chat’s professionality median score is 2, not 3. These values contradict the context.", "claim": "GPT-4’s lowest expert-agreed rate was 95% on SOS-HL-1K. MentalGLM-chat’s professionality median is 2/3 (~67%). Hence MentalGLM-chat’s professionality satisfaction is 28 percentage points lower than GPT-4’s lowest agreement rate.", "label": true }, { "paperid": "2410.17357v1", "paper_path": "./SciVer/papers/2410.17357v1.json", "claim_type": "sequential", "item1": "2(c)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17357v1_figure_2(c).png", "item2_path": "./SciVer/images/2410.17357v1_figure_2(d).png", "section": [ "4.2" ], "request_id": 1246, "origin_statement": "Figure 2’s BERTScore scatter (modality1) shows most VLScores above 0.75. Using this subset in the CheXbert plot (modality2) reveals CheXbert scores clustered around 0.5–0.7, signifying strong image-text alignment yet only moderate clinical entity agreement for normal report pairs.", "perturbed_statement": "Figure 2’s BERTScore scatter shows most VLScores above 0.9. Using this subset in the CheXbert plot reveals CheXbert scores clustered around 0.8–0.9, signifying both high image-text alignment and high clinical entity agreement for normal report pairs.", "perturbed_explanation": "The perturbation is wrong because in the BERTScore vs. VLSscatter plot most VLScores lie between 0.75 and 0.9, not above 0.9. Moreover, in the CheXbert vs. VLS plot the CheXbert scores cluster around 0.5–0.7, not 0.8–0.9, so the claimed high CheXbert range contradicts the actual data.", "claim": "Figure 2’s BERTScore scatter (modality1) shows most VLScores above 0.75. 
Using this subset in the CheXbert plot (modality2) reveals CheXbert scores clustered around 0.5–0.7, signifying strong image-text alignment yet only moderate clinical entity agreement for normal report pairs.", "label": true }, { "paperid": "2409.06994v2", "paper_path": "./SciVer/papers/2409.06994v2.json", "claim_type": "sequential", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06994v2_figure_3.png", "item2_path": "./SciVer/images/2409.06994v2-Table5-1.png", "section": [ "5.2" ], "request_id": 2, "origin_statement": "For the Twitch network, DN yields the lowest BE metric (0.004) in Table 5, and Figure 3 shows DN’s core set has the darkest (lowest) Jaccard similarities with all other methods, indicating it both underperforms and deviates most in core composition.", "perturbed_statement": "For the Twitch network, DFS produces the lowest BE metric (0.066) in Table 5, and Figure 3 shows the DFS core set has the darkest (lowest) Jaccard similarities with all other methods, indicating it both underperforms and deviates most in core composition.", "perturbed_explanation": "The perturbation is incorrect because Table 5 actually reports DN with the lowest BE metric of 0.004 (not DFS’s 0.066), and in Figure 3 the darkest (purple) row belongs to DN, whereas DFS’s row is not the darkest but instead shows moderate overlap (blue-green).", "claim": "For the Twitch network, DN yields the lowest BE metric (0.004) in Table 5, and Figure 3 shows DN’s core set has the darkest (lowest) Jaccard similarities with all other methods, indicating it both underperforms and deviates most in core composition.", "label": true }, { "paperid": "2409.06906v1", "paper_path": "./SciVer/papers/2409.06906v1.json", "claim_type": "sequential", "item1": "2", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06906v1_figure_2.png", "item2_path": "./SciVer/images/2409.06906v1_figure_4(b).png", "section": [ "4" ], "request_id": 3, "origin_statement": "From the low-metallicity (z=0.002) STIS fit in Figure 2, the companion radius is 2.7 R⊙; applying the UV-sensitive range in Figure 4 at z=0.006 yields a companion radius of 4.2 R⊙, a 56% increase under higher metallicity.", "perturbed_statement": "From the low-metallicity (z=0.002) STIS fit in Figure 2, the companion radius is 2.7 R⊙; applying the UV-sensitive range in Figure 4 at z=0.006 yields a companion radius of 1.8 R⊙, a 33% reduction under higher metallicity.", "perturbed_explanation": "Figure 4’s lower panel caption explicitly states the best-fit companion radius at z=0.006 is 4.2 R⊙, not 1.8 R⊙, so the perturbed statement contradicts the labeled model fit.", "claim": "From the low-metallicity (z=0.002) STIS fit in Figure 2, the companion radius is 2.7 R⊙; applying the UV-sensitive range in Figure 4 at z=0.006 yields a companion radius of 4.2 R⊙, a 56% increase under higher metallicity.", "label": true }, { "paperid": "2410.09504v1", "paper_path": "./SciVer/papers/2410.09504v1.json", "claim_type": "sequential", "item1": "6", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.09504v1_figure_6.png", "item2_path": "./SciVer/images/2410.09504v1-Table2-1.png", "section": [ "4.2" ], "request_id": 6, "origin_statement": "Because Σ3,3’s 95% credible interval in Fig6 excludes its true value, the highly misspecified model yields the highest average RMSPE of 1.236 in Table2, reflecting the impact of spatial variance misspecification 
on predictive performance.", "perturbed_statement": "Since Σ2,2’s credible interval fails to include the true value in Fig6, the misspecified model achieves the lowest average RMSPE of 0.942 in Table2, indicating that misestimated spatial covariance improved prediction.", "perturbed_explanation": "This is incorrect because in Fig6 the 95% interval for Σ2,2 actually contains the true value (the yellow triangle lies within the blue interval), and Table 2 shows the 0.942 RMSPE belongs to the well-specified model, not the misspecified one.", "claim": "Because Σ3,3’s 95% credible interval in Fig6 excludes its true value, the highly misspecified model yields the highest average RMSPE of 1.236 in Table2, reflecting the impact of spatial variance misspecification on predictive performance.", "label": true }, { "paperid": "2411.14880v1", "paper_path": "./SciVer/papers/2411.14880v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14880v1_figure_3.png", "item2_path": "./SciVer/images/2411.14880v1_figure_4.png", "section": [ "5.2" ], "request_id": 8, "origin_statement": "The Synchrony prototype in PDTB-2 exhibits the highest mean cosine distance (~1.2, Figure 3), and Figure 4 shows over 60% of its ten nearest neighbors bear the Conjunction label, suggesting the Synchrony prototype poorly captures its own class.", "perturbed_statement": "The Synchrony prototype in PDTB-2 exhibits one of the lowest mean cosine distances (~0.7), and Figure 4 shows over 60% of its ten nearest neighbors bear the Synchrony label, suggesting the Synchrony prototype effectively captures its own class.", "perturbed_explanation": "This is incorrect because Figure 3 actually shows the Synchrony prototype has the highest average cosine distance (~1.2), not a low distance (~0.7), and Figure 4 indicates that about 70% of its nearest neighbors are labeled Conjunction, not Synchrony.", "claim": "The Synchrony prototype in PDTB-2 exhibits the highest mean cosine distance (~1.2, Figure 3), and Figure 4 shows over 60% of its ten nearest neighbors bear the Conjunction label, suggesting the Synchrony prototype poorly captures its own class.", "label": true }, { "paperid": "2411.15223v1", "paper_path": "./SciVer/papers/2411.15223v1.json", "claim_type": "sequential", "item1": "6", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15223v1_figure_6.png", "item2_path": "./SciVer/images/2411.15223v1-Table1-1.png", "section": [ "3.3.2" ], "request_id": 11, "origin_statement": "Figure 6 shows logloss drops from about 0.4641 at embedding size 4 to about 0.4635 at size 6. Table 1 confirms these values. Thus, increasing embedding dimensions from 4 to 6 reduces logloss by 0.0006.", "perturbed_statement": "Figure 6 indicates logloss drops from about 0.4641 at embedding size 4 to roughly 0.4629 at size 6. Table 1 confirms these values. Therefore, increasing embedding dimensions from 4 to 6 reduces logloss by 0.0012.", "perturbed_explanation": "The perturbed claim misstates the logloss at embedding size 6. Both Figure 6 and Table 1 report logloss 0.4635 for size 6, not 0.4629, so the actual reduction from size 4’s 0.4641 is 0.0006, not 0.0012.", "claim": "Figure 6 shows logloss drops from about 0.4641 at embedding size 4 to about 0.4635 at size 6. Table 1 confirms these values. 
Thus, increasing embedding dimensions from 4 to 6 reduces logloss by 0.0006.", "label": true }, { "paperid": "2409.10066v1", "paper_path": "./SciVer/papers/2409.10066v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10066v1_figure_3(b).png", "item2_path": "./SciVer/images/2409.10066v1_figure_4.png", "section": [ "6.1.1" ], "request_id": 16, "origin_statement": "The DSL code assigns v1 to lane 2 and v2 to lane 1. In Fig. 4, vehicle B is shown in the middle lane (lane 2), so B corresponds to v1 and A corresponds to v2.", "perturbed_statement": "The DSL code assigns v1 to lane 1 and v2 to lane 2. In Fig. 4, vehicle B is shown in the middle lane (lane 2), so B corresponds to v2 and A corresponds to v1.", "perturbed_explanation": "This is wrong because Fig. 3’s DSL snippet defines v1 = NPC(lane_id=[2,2],…) and v2 = NPC(lane_id=[1,1],…). v1 is actually in lane 2 and v2 in lane 1, so B (middle lane) maps to v1, not v2.", "claim": "The DSL code assigns v1 to lane 2 and v2 to lane 1. In Fig. 4, vehicle B is shown in the middle lane (lane 2), so B corresponds to v1 and A corresponds to v2.", "label": true }, { "paperid": "2409.16902v1", "paper_path": "./SciVer/papers/2409.16902v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.16902v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.16902v1-Table2-1.png", "section": [ "3" ], "request_id": 17, "origin_statement": "Table 2 shows SAM2-large with center point prompting has 58.7% AUC. Figure 2 lists its 0.587 AUC as highest among trackers. Therefore, center point prompting drives SAM2-large’s top performance on UW-COT.", "perturbed_statement": "Table 2 shows SAM2-large with random point prompting has 58.7% AUC. Figure 2 lists its 0.587 AUC as highest among trackers. Therefore, random point prompting drives SAM2-large’s top performance on UW-COT.", "perturbed_explanation": "This claim is wrong because Table 2 actually reports a 43.8% AUC for SAM2-large when using a random point, not 58.7%. The 58.7% AUC in Table 2 corresponds to the center point prompt, so random prompting cannot account for the top performance.", "claim": "Table 2 shows SAM2-large with center point prompting has 58.7% AUC. Figure 2 lists its 0.587 AUC as highest among trackers. Therefore, center point prompting drives SAM2-large’s top performance on UW-COT.", "label": true }, { "paperid": "2411.15098v2", "paper_path": "./SciVer/papers/2411.15098v2.json", "claim_type": "sequential", "item1": "8", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15098v2_figure_8.png", "item2_path": "./SciVer/images/2411.15098v2-Table1-1.png", "section": [ "4.2" ], "request_id": 18, "origin_statement": "Table 1 shows our method achieves the lowest colorization MSE of 24. Building on this, Figure 8 shows our method’s average color fidelity (~60%) surpasses IP-Adapter (FLUX) (~32%), indicating that reduced colorization error directly yields improved color accuracy in subject-driven edits.", "perturbed_statement": "Table 1 shows our method achieves the lowest colorization MSE of 351. 
Building on this, Figure 8 shows our method’s average color fidelity (~60%) surpasses IP-Adapter (FLUX) (~32%), indicating that reduced colorization error directly yields improved color accuracy in subject-driven edits.", "perturbed_explanation": "This is incorrect because Table 1 reports our method’s colorization MSE as 24 (not 351). The value 351 actually corresponds to ControlNet, not our method. Misstating the MSE thus breaks the premise and invalidates the claim.", "claim": "Table 1 shows our method achieves the lowest colorization MSE of 24. Building on this, Figure 8 shows our method’s average color fidelity (~60%) surpasses IP-Adapter (FLUX) (~32%), indicating that reduced colorization error directly yields improved color accuracy in subject-driven edits.", "label": true }, { "paperid": "2409.14672v1", "paper_path": "./SciVer/papers/2409.14672v1.json", "claim_type": "sequential", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14672v1_figure_3.png", "item2_path": "./SciVer/images/2409.14672v1_figure_5.png", "section": [ "5.1" ], "request_id": 19, "origin_statement": "By step 4000, Figure 5 shows DPO-ICL margin reaches about 8 log-prob units, roughly double DPO Base (~3.5); correspondingly, Figure 3 reports Falcon DPO-ICL wins 75% of comparisons against original Falcon, indicating margin improvements align with higher human preference.", "perturbed_statement": "Figure 5 shows DPO-ICL margin at step 4000 below DPO Base; correspondingly, Figure 3 indicates Falcon DPO-ICL only wins 40% against the original Falcon, suggesting lower margins correlate with weaker preference.", "perturbed_explanation": "This is incorrect because in Figure 5 the purple DPO-ICL curve actually attains the highest margin (~8 Δlog prob), exceeding DPO Base (~3.5). Also, Figure 3 shows Falcon DPO-ICL wins 75% (not 40%) of head-to-head comparisons against the original Falcon.", "claim": "By step 4000, Figure 5 shows DPO-ICL margin reaches about 8 log-prob units, roughly double DPO Base (~3.5); correspondingly, Figure 3 reports Falcon DPO-ICL wins 75% of comparisons against original Falcon, indicating margin improvements align with higher human preference.", "label": true }, { "paperid": "2411.10322v1", "paper_path": "./SciVer/papers/2411.10322v1.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.10322v1_figure_4.png", "item2_path": "./SciVer/images/2411.10322v1-Table3-1.png", "section": [ "4" ], "request_id": 21, "origin_statement": "Fig. 4’s right panel shows combined-dataset calibration curves align more closely with the ideal diagonal, and Table 3 reports that EfficientNetB6 trained on combined sets [A–C,E,G] achieves the lowest post-rejection ECE of 0.0091 on the Kaggle test set.", "perturbed_statement": "Fig. 4’s right panel shows combined-dataset calibration curves align more closely with the ideal diagonal, and Table 3 reports that VGG16 trained on combined sets [A–C,E,G] achieves the lowest post-rejection ECE of 0.0091 on the Kaggle test set.", "perturbed_explanation": "Table 3 actually shows that the 0.0091 post-rejection ECE on the Kaggle test set belongs to EfficientNetB6 [A–C,E,G], not VGG16, whose post-rejection ECE is 0.0221.", "claim": "Fig. 
4’s right panel shows combined-dataset calibration curves align more closely with the ideal diagonal, and Table 3 reports that EfficientNetB6 trained on combined sets [A–C,E,G] achieves the lowest post-rejection ECE of 0.0091 on the Kaggle test set.", "label": true }, { "paperid": "2409.06367v1", "paper_path": "./SciVer/papers/2409.06367v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06367v1_figure_5.png", "item2_path": "./SciVer/images/2409.06367v1_figure_6.png", "section": [ "5.3" ], "request_id": 23, "origin_statement": "SimpleNet’s average AUROC on Texture-AD is 59.12% (Fig. 5). In Fig. 6, its heatmaps for cloth defects precisely highlight the annotated regions without spurious activations. Thus, even with moderate average performance, SimpleNet excels at localizing cloth anomalies.", "perturbed_statement": "SimpleNet’s average AUROC on Texture-AD is 65% (Fig. 5). Fig. 6’s heatmaps for cloth defects show widespread spurious activations around the edges. Therefore, despite its high average performance, SimpleNet fails to localize cloth anomalies accurately.", "perturbed_explanation": "The statement is wrong because Fig. 5 actually reports SimpleNet’s Texture-AD AUROC as 59.12%, not 65%. Moreover, Fig. 6’s bottom-row heatmaps for cloth show focused activations around the white defect masks and minimal edge noise, contradicting the claim of widespread spurious activations.", "claim": "SimpleNet’s average AUROC on Texture-AD is 59.12% (Fig. 5). In Fig. 6, its heatmaps for cloth defects precisely highlight the annotated regions without spurious activations. Thus, even with moderate average performance, SimpleNet excels at localizing cloth anomalies.", "label": true }, { "paperid": "2409.18042v2", "paper_path": "./SciVer/papers/2409.18042v2.json", "claim_type": "sequential", "item1": "10", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.18042v2_figure_10.png", "item2_path": "./SciVer/images/2409.18042v2-Table2-1.png", "section": [ "5.1" ], "request_id": 25, "origin_statement": "Table 2 shows Stage-2 uses 7.4M EMOVA-Alignment samples; Fig. 10 indicates OCR accounts for 17.9% of that dataset; thus in Stage-2 the model trains on about 1.33M OCR image-text pairs.", "perturbed_statement": "In Stage-2 the model actually trains on about 2.71M OCR pairs, since 36.6% of the 7.4M alignment samples are assumed to be OCR data.", "perturbed_explanation": "The perturbed claim wrongly uses 36.6% (the share of the General category in Fig. 10) for OCR. Fig. 10 clearly states OCR makes up only 17.9% of the 7.4M samples, which corresponds to about 1.33M, not 2.71M.", "claim": "Table 2 shows Stage-2 uses 7.4M EMOVA-Alignment samples; Fig. 10 indicates OCR accounts for 17.9% of that dataset; thus in Stage-2 the model trains on about 1.33M OCR image-text pairs.", "label": true }, { "paperid": "2411.14321v1", "paper_path": "./SciVer/papers/2411.14321v1.json", "claim_type": "sequential", "item1": "8(d)", "item2": "8(e)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14321v1_figure_8(d).png", "item2_path": "./SciVer/images/2411.14321v1_figure_8(e).png", "section": [ "4.1" ], "request_id": 26, "origin_statement": "Image 1 shows a bipedal Unitree-H1 humanoid (two legs vs four for quadrupeds), and image 2 shows another bipedal Unitree-G1. 
Therefore, both H1 and G1 are humanoids and include the optional 4D quaternion in their system state representations.", "perturbed_statement": "Image 1 shows a bipedal Unitree-H1 (two legs), but image 2 shows a quadruped Unitree-G1 (four legs). Thus, only Unitree-H1, not G1, includes the optional 4D quaternion orientation in its state representation.", "perturbed_explanation": "The perturbed claim misidentifies Unitree-G1 as having four legs. In the context and the second image, G1 is clearly a bipedal humanoid with only two legs and is listed alongside H1 as a humanoid. Therefore, G1 also includes the optional 4D quaternion in its state, contradicting the quadruped premise.", "claim": "Image 1 shows a bipedal Unitree-H1 humanoid (two legs vs four for quadrupeds), and image 2 shows another bipedal Unitree-G1. Therefore, both H1 and G1 are humanoids and include the optional 4D quaternion in their system state representations.", "label": true }, { "paperid": "2410.07110v1", "paper_path": "./SciVer/papers/2410.07110v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07110v1_figure_3.png", "item2_path": "./SciVer/images/2410.07110v1-Table3-1.png", "section": [ "4.2", "1" ], "request_id": 27, "origin_statement": "Table3 indicates Ours has 0.00 class CV, while GEM has 28.91%. Consequently, in Figure3(a) i.i.d. scenario, Ours achieves ~28% ACC on task1 compared to GEM's ~14%, demonstrating that perfect class balance correlates with roughly double early-task stability.", "perturbed_statement": "Table3 indicates Ours has a class CV of 29.87% and GEM has 28.91%. Consequently, in Figure3(a) i.i.d. scenario, Ours achieves ~28% ACC on task1 compared to GEM's ~14%, demonstrating that higher class imbalance corresponds to greater early-task stability.", "perturbed_explanation": "The perturb incorrectly states that Ours has a class CV of 29.87%. In fact, Table 3 shows Ours has a class CV of 0.00. Hence the premise about high imbalance is false, invalidating the claim about imbalance improving early-task stability.", "claim": "Table3 indicates Ours has 0.00 class CV, while GEM has 28.91%. Consequently, in Figure3(a) i.i.d. scenario, Ours achieves ~28% ACC on task1 compared to GEM's ~14%, demonstrating that perfect class balance correlates with roughly double early-task stability.", "label": true }, { "paperid": "2411.10431v1", "paper_path": "./SciVer/papers/2411.10431v1.json", "claim_type": "sequential", "item1": "6", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10431v1_figure_6.png", "item2_path": "./SciVer/images/2411.10431v1_figure_8.png", "section": [ "4.2" ], "request_id": 28, "origin_statement": "From Fig6, CDI’s RMSE under FT3 is 0.5503, indicating poor handling of motor stalling. Fig8 shows CDI’s estimates of motor reactance X and time constant T have wide dispersion, while JCDI’s are tightly aligned, demonstrating that JCDI captures the X–T correlation and achieves the low FT3 RMSE.", "perturbed_statement": "From Fig6, CDI’s RMSE under FT3 is 0.0061, indicating good handling of motor stalling. 
Fig8 shows CDI’s estimates of motor reactance X and time constant T are tightly aligned, while JCDI’s are widely dispersed, demonstrating that JCDI fails to capture the X–T correlation and thus incurs high FT3 RMSE.", "perturbed_explanation": "This statement is false because Fig6 reports CDI’s FT3 RMSE as 0.5503 (not 0.0061), and Fig8 actually shows CDI’s estimates are widely dispersed while JCDI’s are tightly clustered along the X–T diagonal. The perturbation directly contradicts these details.", "claim": "From Fig6, CDI’s RMSE under FT3 is 0.5503, indicating poor handling of motor stalling. Fig8 shows CDI’s estimates of motor reactance X and time constant T have wide dispersion, while JCDI’s are tightly aligned, demonstrating that JCDI captures the X–T correlation and achieves the low FT3 RMSE.", "label": true }, { "paperid": "2411.10948v1", "paper_path": "./SciVer/papers/2411.10948v1.json", "claim_type": "sequential", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.10948v1_figure_6.png", "item2_path": "./SciVer/images/2411.10948v1-Table3-1.png", "section": [ "5.3" ], "request_id": 29, "origin_statement": "On an (8,256,512) layer, 6-bit ShiftMM reaches 15.5 GFLOPS (Fig. 6) and uses 29,177 LUTs in LUT‐priority mode, a 43.5% reduction from FP16’s 52,080 LUTs (Table 3), thus improving throughput‐per‐LUT efficiency.", "perturbed_statement": "On an (8,256,512) layer, 6-bit ShiftMM reaches 16.5 GFLOPS (Fig. 6) and uses 23,000 LUTs in LUT‐priority mode, a 55% reduction from FP16’s 52,080 LUTs (Table 3), thus tripling throughput‐per‐LUT efficiency.", "perturbed_explanation": "Fig. 6 shows the actual 6-bit ShiftMM throughput is about 15.5 GFLOPS (not 16.5), and Table 3 reports 29,177 LUTs (not 23,000), so both the throughput value and LUT‐reduction percentage are incorrect.", "claim": "On an (8,256,512) layer, 6-bit ShiftMM reaches 15.5 GFLOPS (Fig. 6) and uses 29,177 LUTs in LUT‐priority mode, a 43.5% reduction from FP16’s 52,080 LUTs (Table 3), thus improving throughput‐per‐LUT efficiency.", "label": true },
{ "paperid": "2409.02664v1", "paper_path": "./SciVer/papers/2409.02664v1.json", "claim_type": "sequential", "item1": "1", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.02664v1-Table1-1.png", "item2_path": "./SciVer/images/2409.02664v1-Table5-1.png", "section": [ "4.3.2" ], "request_id": 31, "origin_statement": "Since T3 uses identity tokens in the fake prompt (Table 1), combining it with the generic real template T0 ({T0,T3}) yields the highest CDF AUC of 88.41 (Table 5), outperforming {T2,T3} (85.98) and {T0,T1} (82.88).", "perturbed_statement": "Since T2 includes identity tokens in the real prompt (Table 1), pairing it with T3 ({T2,T3}) yields the highest CDF AUC of 88.41 (Table 5), surpassing the {T0,T3} configuration.", "perturbed_explanation": "This is incorrect because Table 5 reports that {T2,T3} actually achieves a CDF AUC of 85.98, whereas the maximum CDF AUC of 88.41 is reached by the {T0,T3} setting.", "claim": "Since T3 uses identity tokens in the fake prompt (Table 1), combining it with the generic real template T0 ({T0,T3}) yields the highest CDF AUC of 88.41 (Table 5), outperforming {T2,T3} (85.98) and {T0,T1} (82.88).", "label": true }, { "paperid": "2411.08298v1", "paper_path": "./SciVer/papers/2411.08298v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.08298v1_figure_3.png", "item2_path": "./SciVer/images/2411.08298v1_figure_4.png", "section": [ "5" ], "request_id": 36, "origin_statement": "Fig.3(a) shows TM theory’s first g(r) peak for repulsive PE24r at ~5.0 Å. In Fig.4, TM’s first peak for attractive PE24 remains at ~5.0 Å, demonstrating that turning on attractions does not shift the primary interchain packing distance.", "perturbed_statement": "Fig.3(a) shows TM theory’s first g(r) peak for repulsive PE24r at ~5.0 Å. In Fig.4, TM’s first peak for attractive PE24 shifts to ~5.8 Å, implying that adding attractions increases the primary interchain packing distance by ~0.8 Å.", "perturbed_explanation": "The perturbed claim is false because in Fig. 4 the TM curves for PE24 (attractive) and PE24r (repulsive) overlap with their first peaks both at ~5.0 Å. There is no ~0.8 Å shift, so attractions do not increase the primary packing distance as stated.", "claim": "Fig.3(a) shows TM theory’s first g(r) peak for repulsive PE24r at ~5.0 Å. 
In Fig.4, TM’s first peak for attractive PE24 remains at ~5.0 Å, demonstrating that turning on attractions does not shift the primary interchain packing distance.", "label": true }, { "paperid": "2411.11340v1", "paper_path": "./SciVer/papers/2411.11340v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.11340v1_figure_1(c).png", "item2_path": "./SciVer/images/2411.11340v1_figure_2(c).png", "section": [ "4.2" ], "request_id": 38, "origin_statement": "In Fig 1, DLinear’s Trend forecast (yellow) undershoots the true Trend (green) by about 0.1; in Fig 2, the hybrid-loss Trend forecast (yellow) aligns within a 0.02 range of the true Trend; thus hybrid loss reduces DLinear’s Trend forecasting bias on ETTm1.", "perturbed_statement": "In Fig 1, DLinear’s Trend forecast (yellow) undershoots the true Trend (green) by about 0.5; in Fig 2, the hybrid-loss Trend forecast (yellow) undershoots the true Trend by 0.3; thus hybrid loss increases DLinear’s Trend forecasting bias by 60% on ETTm1.", "perturbed_explanation": "The perturbed claim misreads the actual offsets. In Fig 1 the yellow Trend line deviates from the green by only ~0.1 on the –2 to 0 axis, not 0.5. In Fig 2 the deviation is under 0.02, not 0.3. Therefore the premises are false and the conclusion fails.", "claim": "In Fig 1, DLinear’s Trend forecast (yellow) undershoots the true Trend (green) by about 0.1; in Fig 2, the hybrid-loss Trend forecast (yellow) aligns within a 0.02 range of the true Trend; thus hybrid loss reduces DLinear’s Trend forecasting bias on ETTm1.", "label": true }, { "paperid": "2411.00157v1", "paper_path": "./SciVer/papers/2411.00157v1.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00157v1_figure_3.png", "item2_path": "./SciVer/images/2411.00157v1-Table1-1.png", "section": [ "3.1" ], "request_id": 39, "origin_statement": "Table 1 lists f* = 0.10 as the lowest reduced frequency; in Figure 3’s LEV+TEV regime (αT/4 > 0.5 rad), blockage-corrected blue markers peak at η≈0.23, which is lower than the ≈0.29 peak of the green markers (f* = 0.15).", "perturbed_statement": "Table 1 lists f* = 0.10 as the lowest reduced frequency; in Figure 3’s LEV+TEV regime (αT/4 > 0.5 rad), blockage-corrected blue markers peak at η≈0.29, exceeding the ≈0.23 peak of the green markers (f* = 0.15).", "perturbed_explanation": "Figure 3 shows that in the LEV+TEV regime (αT/4 > 0.5 rad), the blue markers (f* = 0.10) reach only about η≈0.23, whereas the green markers (f* = 0.15) reach about η≈0.29. 
Thus claiming blue peaks at 0.29 and green at 0.23 directly contradicts the plotted data.", "claim": "Table 1 lists f* = 0.10 as the lowest reduced frequency; in Figure 3’s LEV+TEV regime (αT/4 > 0.5 rad), blockage-corrected blue markers peak at η≈0.23, which is lower than the ≈0.29 peak of the green markers (f* = 0.15).", "label": true }, { "paperid": "2411.09289v1", "paper_path": "./SciVer/papers/2411.09289v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09289v1_figure_3.png", "item2_path": "./SciVer/images/2411.09289v1_figure_4.png", "section": [ "4.2" ], "request_id": 40, "origin_statement": "StreamAdapter’s accuracy on unseen ARC-E climbs from about 80% with 1 demonstration to about 82% with 30 demonstrations, and in the PG19 test its perplexity gap over sliding window on LLaMA-3-8B grows from ~0.03 at 2K to ~0.30 at 256K, showing scalability across tasks and modalities.", "perturbed_statement": "StreamAdapter’s accuracy on unseen ARC-E climbs from about 80% with 1 demonstration to about 84% with 30 demonstrations, and in the PG19 test its perplexity gap over sliding window on LLaMA-3-8B grows from ~0.03 at 2K to ~0.40 at 256K, showing scalability across tasks and modalities.", "perturbed_explanation": "The ARC-E unseen task accuracy for StreamAdapter with 30 demonstrations in Figure 3 is approximately 82%, not 84%. Similarly, Figure 4 shows the perplexity gap for LLaMA-3-8B at 256K is about 0.30, not 0.40, so both figures contradict the perturbed values.", "claim": "StreamAdapter’s accuracy on unseen ARC-E climbs from about 80% with 1 demonstration to about 82% with 30 demonstrations, and in the PG19 test its perplexity gap over sliding window on LLaMA-3-8B grows from ~0.03 at 2K to ~0.30 at 256K, showing scalability across tasks and modalities.", "label": true }, { "paperid": "2409.14067v1", "paper_path": "./SciVer/papers/2409.14067v1.json", "claim_type": "sequential", "item1": "4(g)", "item2": "4(n)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14067v1_figure_4(g).png", "item2_path": "./SciVer/images/2409.14067v1_figure_4(n).png", "section": [ "3.4" ], "request_id": 42, "origin_statement": "From the first provided image, PNeRFLoc produces significant blur on shoe edges, indicating poor detail preservation. Table 3 shows our method reduces LPIPS by 0.12 over PNeRFLoc. Therefore, our lower LPIPS corresponds to finer high-frequency shoe detail reconstruction.", "perturbed_statement": "From the first provided image, PNeRFLoc produces clear shoe edges, indicating strong detail preservation. Table 3 shows our method only reduces LPIPS by 0.02 over PNeRFLoc. Therefore, our lower LPIPS does not significantly affect high-frequency shoe detail reconstruction.", "perturbed_explanation": "The perturbed first premise is wrong because in the image (Figure 4) the PNeRFLoc rendering of shoes is visibly blurred, not clear. The perturbed table value is also incorrect: the paper reports a substantial LPIPS reduction (≈0.12), not just 0.02, so the conclusion about insignificant detail impact is false.", "claim": "From the first provided image, PNeRFLoc produces significant blur on shoe edges, indicating poor detail preservation. Table 3 shows our method reduces LPIPS by 0.12 over PNeRFLoc. 
Therefore, our lower LPIPS corresponds to finer high-frequency shoe detail reconstruction.", "label": true }, { "paperid": "2411.03966v1", "paper_path": "./SciVer/papers/2411.03966v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.03966v1-Table1-1.png", "item2_path": "./SciVer/images/2411.03966v1-Table2-1.png", "section": [ "4" ], "request_id": 44, "origin_statement": "Table 1 shows a Pearson split-half reliability (SHR(r)) of 0.89. Using this high continuous reliability, Table 2 reports a 0.78 SHCMP for 5 bins, far above the 20% random baseline.", "perturbed_statement": "Table 1 shows a Spearman split-half reliability (SHR(ρ)) of 0.89. Using this high correlation, Table 2 reports a 0.78 SHCMP for 7 bins, far above the 20% random baseline.", "perturbed_explanation": "This statement is wrong in two ways: (1) Table 1’s 0.89 value is the Pearson SHR(r), not the Spearman SHR(ρ) (Spearman is 0.82). (2) Table 2’s SHCMP of 0.78 applies to 5 bins, not 7 bins (7 bins yield 0.71).", "claim": "Table 1 shows a Pearson split-half reliability (SHR(r)) of 0.89. Using this high continuous reliability, Table 2 reports a 0.78 SHCMP for 5 bins, far above the 20% random baseline.", "label": true }, { "paperid": "2409.05755v1", "paper_path": "./SciVer/papers/2409.05755v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "1(e)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05755v1_figure_1(c).png", "item2_path": "./SciVer/images/2409.05755v1_figure_1(e).png", "section": [ "4.2", "4.1" ], "request_id": 45, "origin_statement": "At β=7, SGC accuracy falls to about 0.3 (Figure 1). At μ=0.2, the node homophily metric H_node is about 0.25 (Figure 2). Thus, when H_node ≤0.3, SGC accuracy drops below 0.4 in both settings.", "perturbed_statement": "At β=7, SGC accuracy falls to about 0.3 (Figure 1). At μ=0.2, the node homophily metric H_node is about 0.45 (Figure 2). Thus, when H_node ≤0.3, SGC accuracy drops below 0.4 in both settings.", "perturbed_explanation": "The perturbation misstates H_node at μ=0.2. In Figure 2, the homophily curve at μ=0.2 is approximately 0.25, not 0.45. Therefore, H_node≤0.3 does not correspond to the stated value of 0.45, making the final inference false.", "claim": "At β=7, SGC accuracy falls to about 0.3 (Figure 1). At μ=0.2, the node homophily metric H_node is about 0.25 (Figure 2). Thus, when H_node ≤0.3, SGC accuracy drops below 0.4 in both settings.", "label": true }, { "paperid": "2410.15355v1", "paper_path": "./SciVer/papers/2410.15355v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.15355v1-Table5-1.png", "item2_path": "./SciVer/images/2410.15355v1-Table6-1.png", "section": [ "6.4" ], "request_id": 46, "origin_statement": "Table5 shows LAC achieves 86.02% accuracy on Cora; Table6 shows InfoBal also scores 86.02% on Cora, implying InfoBal effectively retains LAC’s continuous view augmentation advantages in node classification.", "perturbed_statement": "Table5 shows LAC achieves 85.80% accuracy on Cora; Table6 shows InfoBal also scores 85.80% on Cora, implying InfoBal effectively retains LAC’s continuous view augmentation advantages in node classification.", "perturbed_explanation": "The perturbed claim misreads LAC’s Cora accuracy as 85.80%, but Table5 actually lists LAC at 86.02% on Cora (85.80% is the LAC-FtMsk variant). 
Likewise, Table6 reports InfoBal at 86.02% on Cora, not 85.80%, so the assertion is incorrect.", "claim": "Table5 shows LAC achieves 86.02% accuracy on Cora; Table6 shows InfoBal also scores 86.02% on Cora, implying InfoBal effectively retains LAC’s continuous view augmentation advantages in node classification.", "label": true }, { "paperid": "2410.09729v2", "paper_path": "./SciVer/papers/2410.09729v2.json", "claim_type": "sequential", "item1": "7", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09729v2_figure_7.png", "item2_path": "./SciVer/images/2410.09729v2_figure_9.png", "section": [ "4" ], "request_id": 47, "origin_statement": "From Fig.7, LLaVA’s validation F1 climbs from about 75.6% at epoch 1 to 80.0% by epoch 3. Yet in Fig.9, purple points for target frequencies 1–2 lie well below the diagonal, showing that despite high overall F1, LLaVA still underpredicts very rare medications.", "perturbed_statement": "From Fig.7, LLaVA’s validation F1 climbs from about 75.6% at epoch 1 to 80.0% by epoch 3. Yet in Fig.9, purple points for target frequencies 1–2 lie above the diagonal, showing that despite high overall F1, LLaVA overpredicts very rare medications.", "perturbed_explanation": "This is incorrect because in Fig.9 all purple points corresponding to target frequencies of 1–2 actually lie below the diagonal line (prediction < target), indicating underprediction rather than overprediction of very rare medications.", "claim": "From Fig.7, LLaVA’s validation F1 climbs from about 75.6% at epoch 1 to 80.0% by epoch 3. Yet in Fig.9, purple points for target frequencies 1–2 lie well below the diagonal, showing that despite high overall F1, LLaVA still underpredicts very rare medications.", "label": true }, { "paperid": "2411.15665v1", "paper_path": "./SciVer/papers/2411.15665v1.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15665v1_figure_6.png", "item2_path": "./SciVer/images/2411.15665v1_figure_7.png", "section": [ "4" ], "request_id": 54, "origin_statement": "Quantum hydrogen’s H2 bond peak height falls significantly between 175 and 180 GPa at 1570 K (Fig. 6), indicating molecular dissociation around 178 GPa, which coincides with the pronounced drop in the quantum melting slope above ∼172 GPa in Fig. 7.", "perturbed_statement": "Quantum hydrogen’s H2 bond peak height rises significantly between 175 and 180 GPa at 1570 K (Fig. 6), indicating enhanced molecular association around 178 GPa, which coincides with the pronounced drop in the quantum melting slope above ∼172 GPa in Fig. 7.", "perturbed_explanation": "Figure 6 (right panel) shows that the H2 bond peak at 0.75 Å actually decreases from the 175 GPa curve (solid blue) to the 180 GPa curve (dashed orange), not increases. The claim of a rising peak and increased molecular fraction contradicts this observed drop.", "claim": "Quantum hydrogen’s H2 bond peak height falls significantly between 175 and 180 GPa at 1570 K (Fig. 6), indicating molecular dissociation around 178 GPa, which coincides with the pronounced drop in the quantum melting slope above ∼172 GPa in Fig. 7.", "label": true },
{ "paperid": "2411.12007v2", "paper_path": "./SciVer/papers/2411.12007v2.json", "claim_type": "sequential", "item1": "2(c)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.12007v2_figure_2(c).png", "item2_path": "./SciVer/images/2411.12007v2_figure_2(d).png", "section": [ "4.1", "4" ], "request_id": 58, "origin_statement": "Figure 2(c) shows the fitted spike radius \tilde r_sp consistently lies above the G&S (1999) curve. Then Figure 2(d) shows that for μ≳0.1 the fitted spike slope γ_sp converges to −7/3. Hence at high μ only the slope matches the classical prediction.", "perturbed_statement": "Figure 2(c) shows the fitted spike radius \tilde r_sp lies below the G&S (1999) curve. Then Figure 2(d) shows that for μ≲0.1 the fitted spike slope γ_sp converges to −7/3. Hence at low μ only the slope matches the classical prediction.", "perturbed_explanation": "The perturbation is incorrect because in Figure 2(c) the red best-fit line always sits above the orange G&S (1999) dotted line, so r_sp never lies below the G&S curve. Moreover, Figure 2(d) only shows convergence of γ_sp to −7/3 at high μ (μ≳0.1), not at low μ.", "claim": "Figure 2(c) shows the fitted spike radius \tilde r_sp consistently lies above the G&S (1999) curve. Then Figure 2(d) shows that for μ≳0.1 the fitted spike slope γ_sp converges to −7/3. Hence at high μ only the slope matches the classical prediction.", "label": true }, { "paperid": "2409.15218v1", "paper_path": "./SciVer/papers/2409.15218v1.json", "claim_type": "sequential", "item1": "10(b)", "item2": "10(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15218v1_figure_10(b).png", "item2_path": "./SciVer/images/2409.15218v1_figure_10(d).png", "section": [ "4.2" ], "request_id": 61, "origin_statement": "Step1: The left diagram shows μ→e conversion via WR± exchange (image1). Step2: The right diagram shows H2± exchange (image2). Step3: Charged scalar loops give suppressed scalar coefficients due to small nucleon form factors, so WR-mediated diagrams dominate photonic μ→e conversion.", "perturbed_statement": "Charged scalar loops give unsuppressed vector contributions, so H2± loops dominate photonic μ→e conversion, overshadowing WR-mediated diagrams.", "perturbed_explanation": "The H2± line in the second image is a charged scalar, which cannot induce vector currents; indeed, the text states the vector coefficient C_V is exactly zero, so H2± loops cannot produce dominant vector contributions, contradicting the perturbed claim.", "claim": "Step1: The left diagram shows μ→e conversion via WR± exchange (image1). Step2: The right diagram shows H2± exchange (image2). Step3: Charged scalar loops give suppressed scalar coefficients due to small nucleon form factors, so WR-mediated diagrams dominate photonic μ→e conversion.", "label": true }, { "paperid": "2409.01239v1", "paper_path": "./SciVer/papers/2409.01239v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01239v1_figure_7(a).png", "item2_path": "./SciVer/images/2409.01239v1_figure_7(b).png", "section": [ "3" ], "request_id": 67, "origin_statement": "TOI-2379 b’s transit depth is ~0.17 in TESS, Z*, Y, and J bands (left panel), showing no chromatic dependence and excluding blends. 
TOI-2384 b’s depth falls from ~0.16 in TESS to ~0.14 in H* (right panel), consistent with flux dilution by its red neighbor.", "perturbed_statement": "TOI-2379 b’s transit depth falls from ~0.17 in TESS to ~0.15 in J band (left panel), indicating wavelength-dependent dilution. TOI-2384 b’s depth falls from ~0.16 in TESS to ~0.14 in H* (right panel), consistent with flux dilution by its red neighbor.", "perturbed_explanation": "The left panel for TOI-2379 b actually shows nearly constant depths (~0.17) across TESS, Z*, Y, and J bands, with no drop to ~0.15 in J. Therefore the claimed decrease to ~0.15 contradicts the measured Rₚ/R_* values in the context.", "claim": "TOI-2379 b’s transit depth is ~0.17 in TESS, Z*, Y, and J bands (left panel), showing no chromatic dependence and excluding blends. TOI-2384 b’s depth falls from ~0.16 in TESS to ~0.14 in H* (right panel), consistent with flux dilution by its red neighbor.", "label": true }, { "paperid": "2410.17406v1", "paper_path": "./SciVer/papers/2410.17406v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17406v1_figure_3.png", "item2_path": "./SciVer/images/2410.17406v1_figure_4.png", "section": [ "5.4" ], "request_id": 68, "origin_statement": "Mitigation TPs have a higher median cosine similarity (~0.75) than FPs (~0.55), and Fig 4b shows CWE attribution > 0.9 for mitigation, so correct mitigation outputs rely mainly on CWE summaries.", "perturbed_statement": "Mitigation TPs have a higher median cosine similarity (~0.75) than FPs (~0.55), and Fig 4b shows NVD attribution > 0.9 for mitigation, so correct mitigation outputs rely mainly on NVD summaries.", "perturbed_explanation": "Fig 4b actually shows NVD attribution for mitigation is near 0.05 (not >0.9), whereas CWE attribution is >0.9. The statement misreads the NVD box as the highest source, contradicting the figure’s values.", "claim": "Mitigation TPs have a higher median cosine similarity (~0.75) than FPs (~0.55), and Fig 4b shows CWE attribution > 0.9 for mitigation, so correct mitigation outputs rely mainly on CWE summaries.", "label": true }, { "paperid": "2411.07200v1", "paper_path": "./SciVer/papers/2411.07200v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.07200v1-Table1-1.png", "item2_path": "./SciVer/images/2411.07200v1-Table3-1.png", "section": [ "4.1" ], "request_id": 69, "origin_statement": "Table1 reports the original policy’s E[V(s0)] of 0.307, exceeding every cluster-removed policy (highest 0.305). Table3 shows the mean cluster E[V(s0)] of 0.3029. Because 0.3029<0.307, removing trajectories consistently lowers the initial state value.", "perturbed_statement": "Table1 reports the original policy’s E[V(s0)] of 0.307, exceeding every cluster-removed policy (highest 0.305). Table3 shows the mean cluster E[V(s0)] of 0.3129. Because 0.3129>0.307, removing trajectories increases the initial state value.", "perturbed_explanation": "The perturbed claim incorrectly cites Table3’s mean cluster E[V(s0)] as 0.3129, but the actual reproduced mean is 0.3029. Since 0.3029<0.307, removing trajectories still lowers the initial state value; there is no evidence of 0.3129 in the table.", "claim": "Table1 reports the original policy’s E[V(s0)] of 0.307, exceeding every cluster-removed policy (highest 0.305). Table3 shows the mean cluster E[V(s0)] of 0.3029. 
Because 0.3029<0.307, removing trajectories consistently lowers the initial state value.", "perturbed_statement": "Table1 reports the original policy’s E[V(s0)] of 0.307, exceeding every cluster-removed policy (highest 0.305). Table3 shows the mean cluster E[V(s0)] of 0.3129. Because 0.3129>0.307, removing trajectories increases the initial state value.", "perturbed_explanation": "The perturbed claim incorrectly cites Table3’s mean cluster E[V(s0)] as 0.3129, but the actual reported mean is 0.3029. Since 0.3029<0.307, removing trajectories still lowers the initial state value; there is no evidence of 0.3129 in the table.", "claim": "Table1 reports the original policy’s E[V(s0)] of 0.307, exceeding every cluster-removed policy (highest 0.305). Table3 shows the mean cluster E[V(s0)] of 0.3029. Because 0.3029<0.307, removing trajectories consistently lowers the initial state value.", "label": true }, { "paperid": "2411.18383v1", "paper_path": "./SciVer/papers/2411.18383v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.18383v1-Table2-1.png", "item2_path": "./SciVer/images/2411.18383v1-Table3-1.png", "section": [ "4.1" ], "request_id": 70, "origin_statement": "Topic No.2 has top keywords like “operation,” “duration,” and “extension” (Table 2), which explains the December 2022 spike when the regulatory framework for extending plant operations was approved (Table 3).", "perturbed_statement": "Topic No.2 has top keywords like “operation,” “duration,” and “extension” (Table 2), which explains the December 2022 spike when the official decision for reactor operation extension was announced (Table 3).", "perturbed_explanation": "Table 3 shows that in December 2022 the event was the approval of the regulatory framework, not the official decision for extension—which actually occurred in February 2023.", "claim": "Topic No.2 has top keywords like “operation,” “duration,” and “extension” (Table 2), which explains the December 2022 spike when the regulatory framework for extending plant operations was approved (Table 3).", "label": true }, { "paperid": "2409.14396v1", "paper_path": "./SciVer/papers/2409.14396v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14396v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.14396v1_figure_2(b).png", "section": [ "4.3" ], "request_id": 78, "origin_statement": "Because LoRA accuracy on MRPC drops by ~1.2% from rank 16 to 64, while on CIFAR-100 it rises by ~0.6%, text tasks suffer more overfitting at high LoRA ranks than image tasks.", "perturbed_statement": "Because LoRA accuracy on MRPC increases by ~0.4% from rank 16 to 64, while on CIFAR-100 it decreases by ~0.3%, image tasks suffer more overfitting at high LoRA ranks than text tasks.", "perturbed_explanation": "The perturbation is incorrect because in the MRPC plot LoRA accuracy actually falls from about 88.9% at rank 16 to about 87.7% at rank 64 (a drop, not an increase), and in the CIFAR-100 plot LoRA accuracy climbs from about 88.1% to about 88.7% (a rise, not a decrease).", "claim": "Because LoRA accuracy on MRPC drops by ~1.2% from rank 16 to 64, while on CIFAR-100 it rises by ~0.6%, text tasks suffer more overfitting at high LoRA ranks than image tasks.", "label": true }, { "paperid": "2410.02401v5", "paper_path": "./SciVer/papers/2410.02401v5.json", "claim_type": "sequential", "item1": "5(b)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02401v5_figure_5(b).png", "item2_path": "./SciVer/images/2410.02401v5_figure_5(c).png", "section": [ "4.3", "5.5" ], "request_id": 80, "origin_statement": "SynCo’s full configuration yields a 0.68% Top-1 increase over MoCo-v2 in standard queue settings (Figure 5(a)), and at queue size 131,072, SynCo shows a 0.90% Top-1 advantage over MoCo-v2 (Figure 5(c)), indicating larger relative gains with increased queue size.", "perturbed_statement": "SynCo’s full configuration yields a 0.88% Top-1 increase over MoCo-v2 in standard queue settings (Figure 5(a)), and at queue size 131,072, SynCo shows a 0.50% Top-1 advantage over MoCo-v2 (Figure 5(c)), indicating larger relative gains with increased queue size.", 
"perturbed_explanation": "Figure 5(a) actually reports a 0.68% Top-1 gain for full SynCo over MoCo-v2 (48.42% vs. 47.74%), not 0.88%. And in Figure 5(c), at queue size 131,072, SynCo’s Top-1 advantage is 0.90% (48.50% vs. 47.60%), not 0.50%. These mismatched percentages contradict the data.", "claim": "SynCo’s full configuration yields a 0.68% Top-1 increase over MoCo-v2 in standard queue settings (Figure 5(a)), and at queue size 131,072, SynCo shows a 0.90% Top-1 advantage over MoCo-v2 (Figure 5(c)), indicating larger relative gains with increased queue size.", "label": true }, { "paperid": "2410.09343v1", "paper_path": "./SciVer/papers/2410.09343v1.json", "claim_type": "sequential", "item1": "4(b)", "item2": "6(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09343v1_figure_4(b).png", "item2_path": "./SciVer/images/2410.09343v1_figure_6(a).png", "section": [ "5.1" ], "request_id": 83, "origin_statement": "Fig6 shows similarity-based retrieval AUC=0.28, implying precision rarely exceeds ≈0.42. Therefore, switching to our trained retriever and setting recall to 0.8 yields 49.2% accuracy (Fig4b), surpassing the ~48.45% zero-shot baseline.", "perturbed_statement": "Fig6 shows similarity-based retrieval AUC=0.42, implying precision rarely drops below ≈0.5. Therefore, switching to our trained retriever and setting recall to 0.6 yields 49.2% accuracy (Fig4b), surpassing the ~48.45% zero-shot baseline.", "perturbed_explanation": "This is incorrect because Figure 6’s caption explicitly reports an AUC of 0.28 for similarity-based methods, not 0.42. Additionally, Figure 4(b) shows the intervene accuracy at recall 0.6 is about 48.43%, not 49.2%, so both premises contradict the context.", "claim": "Fig6 shows similarity-based retrieval AUC=0.28, implying precision rarely exceeds ≈0.42. 
Therefore, switching to our trained retriever and setting recall to 0.8 yields 49.2% accuracy (Fig4b), surpassing the ~48.45% zero-shot baseline.", "label": true }, { "paperid": "2409.03735v1", "paper_path": "./SciVer/papers/2409.03735v1.json", "claim_type": "sequential", "item1": "6(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.03735v1_figure_6(b).png", "item2_path": "./SciVer/images/2409.03735v1-Table1-1.png", "section": [ "6.2" ], "request_id": 84, "origin_statement": "In Figure 6, the ‘serve contextual ads’ cell shows red triangles for all tulu-2 models, and Table 1 confirms these base and DPO tulu-2 variants are sourced from HuggingFace, so all HuggingFace–sourced COPPA LLMs encode strong disapproval of contextual ads.", "perturbed_statement": "In Figure 6, the ‘serve contextual ads’ cell shows green triangles for all tulu-2 models, and Table 1 confirms these base and DPO tulu-2 variants are sourced from OpenAI, so all OpenAI–sourced COPPA LLMs encode strong approval of contextual ads.", "perturbed_explanation": "This is incorrect because Figure 6 actually shows red (strongly unacceptable) triangles, not green, for the ‘serve contextual ads’ cell, and Table 1 lists the tulu-2 models as sourced from HuggingFace, not OpenAI.", "claim": "In Figure 6, the ‘serve contextual ads’ cell shows red triangles for all tulu-2 models, and Table 1 confirms these base and DPO tulu-2 variants are sourced from HuggingFace, so all HuggingFace–sourced COPPA LLMs encode strong disapproval of contextual ads.", "label": true }, { "paperid": "2410.19886v1", "paper_path": "./SciVer/papers/2410.19886v1.json", "claim_type": "sequential", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19886v1_figure_2.png", "item2_path": "./SciVer/images/2410.19886v1-Table1-1.png", "section": [ "2" ], "request_id": 86, "origin_statement": "In Fig.2 the red current plateau at about 30 A/m² implies CC charging at 1 C (since 1 C = 29.23 A/m²). Table 1 lists 1.0 C as an experiment C-rate. Thus, the cycle shown corresponds to the 1.0 C CC condition specified in Table 1.", "perturbed_statement": "In Fig.2 the red current plateau at about 60 A/m² implies CC charging at 2 C (since 1 C = 29.23 A/m²). Table 1 lists 2.0 C as an experiment C-rate. Thus, the cycle shown corresponds to the 2.0 C CC condition specified in Table 1.", "perturbed_explanation": "The red current plateau in Fig.2 is actually around 30 A/m², not 60 A/m², so it indicates a 1 C charging rate rather than 2 C. Therefore, the cycle corresponds to the 1.0 C condition in Table 1, not the 2.0 C condition.", "claim": "In Fig.2 the red current plateau at about 30 A/m² implies CC charging at 1 C (since 1 C = 29.23 A/m²). Table 1 lists 1.0 C as an experiment C-rate. Thus, the cycle shown corresponds to the 1.0 C CC condition specified in Table 1.", "label": true }, { "paperid": "2411.07133v2", "paper_path": "./SciVer/papers/2411.07133v2.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07133v2_figure_3.png", "item2_path": "./SciVer/images/2411.07133v2-Table3-1.png", "section": [ "3.3" ], "request_id": 88, "origin_statement": "Figure 3 shows Gemma-2-9b-it peaks at temperature 1.2 and top-p 0.9 with average performance 14.56. 
Table 3 then shows the Best-of-N method yields the highest AP (13.92 for Llama-3.1-Minitron-4B and 17.415 for Qwen2.5-3B), implying that diverse sampling combined with selecting top responses enhances instruction-tuning.", "perturbed_statement": "Figure 3 shows Gemma-2-9b-it peaks at temperature 1.2 and top-p 0.9 with average performance 14.56. Table 3 then shows the Worst-of-N method yields the highest AP (12.01 for Llama-3.1-Minitron-4B and 15.135 for Qwen2.5-3B), implying that high-variance sampling combined with selecting low-rated responses boosts instruction-tuning.", "perturbed_explanation": "This is wrong because Table 3 clearly lists Best-of-N as having the highest AP scores (13.92 and 17.415). The Worst-of-N scores are 12.01 and 15.135, both lower than Best-of-N (and lower than the sampling baseline), so Worst-of-N does not yield the highest AP.", "claim": "Figure 3 shows Gemma-2-9b-it peaks at temperature 1.2 and top-p 0.9 with average performance 14.56. Table 3 then shows the Best-of-N method yields the highest AP (13.92 for Llama-3.1-Minitron-4B and 17.415 for Qwen2.5-3B), implying that diverse sampling combined with selecting top responses enhances instruction-tuning.", "label": true }, { "paperid": "2411.01703v1", "paper_path": "./SciVer/papers/2411.01703v1.json", "claim_type": "sequential", "item1": "3(d)", "item2": "3(e)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01703v1_figure_3(d).png", "item2_path": "./SciVer/images/2411.01703v1_figure_3(e).png", "section": [ "3.3" ], "request_id": 92, "origin_statement": "The chart’s dashed halo marker aligns with the 'Ours+P' bar, which has the lowest attack success ratio (~0.56). The halo emoji in the second image reinforces this alignment, indicating that pre-defined text guardrails achieve the best defense on Gemini Pro.", "perturbed_statement": "The chart’s halo marker aligns with the 'Ours+O' bar, which achieves the lowest attack success ratio (~0.56). The halo emoji thus indicates that the optimized text guardrails provide the best defense on Gemini Pro.", "perturbed_explanation": "This is incorrect because the dashed halo marker in the chart is placed at the second bar labeled 'Ours+P', not at 'Ours+O'. Moreover, the 'Ours+O' bar actually shows an attack success ratio around 0.73, not 0.56, so optimized guardrails do not yield the lowest ratio.", "claim": "The chart’s dashed halo marker aligns with the 'Ours+P' bar, which has the lowest attack success ratio (~0.56). The halo emoji in the second image reinforces this alignment, indicating that pre-defined text guardrails achieve the best defense on Gemini Pro.", "label": true }, { "paperid": "2409.01672v2", "paper_path": "./SciVer/papers/2409.01672v2.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01672v2_figure_4.png", "item2_path": "./SciVer/images/2409.01672v2_figure_5.png", "section": [ "4.4.2" ], "request_id": 93, "origin_statement": "Figure 4 shows FMR heat maps concentrate on object regions more than the fine-tuning baseline. Figure 5 shows FMR maintains about 8% top-50 feature overlap versus roughly 2% for fine-tuning. Therefore, FMR’s object-focused attention yields more consistent feature reuse across train and test sets.", "perturbed_statement": "Figure 4 shows FMR heat maps concentrate on object regions more than the fine-tuning baseline. 
Figure 5 shows FMR maintains only about 1% top-50 feature overlap versus roughly 2% for fine-tuning. Therefore, FMR’s object-focused attention yields less consistent feature reuse across train and test sets.", "perturbed_explanation": "The perturbation misreports Figure 5: the actual overlap at k=50 is about 8% for FMR and 2% for fine-tuning, not 1% versus 2%. Because FMR’s true top-50 overlap is higher, the claim that it yields less consistent feature reuse is contradicted by the data.", "claim": "Figure 4 shows FMR heat maps concentrate on object regions more than the fine-tuning baseline. Figure 5 shows FMR maintains about 8% top-50 feature overlap versus roughly 2% for fine-tuning. Therefore, FMR’s object-focused attention yields more consistent feature reuse across train and test sets.", "label": true }, { "paperid": "2410.04803v2", "paper_path": "./SciVer/papers/2410.04803v2.json", "claim_type": "sequential", "item1": "7", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.04803v2_figure_7.png", "item2_path": "./SciVer/images/2410.04803v2-Table6-1.png", "section": [ "4.5" ], "request_id": 95, "origin_statement": "Figure 7’s averaged block shows attention weight Attn(6,7) is higher than Attn(6,2). Table 6 shows adding the variable embedding reduces Traffic MSE from 0.361 to 0.340. Thus, variable embeddings enable the model to focus on strongly auto-correlated variables and improve accuracy.", "perturbed_statement": "Figure 7’s averaged block shows attention weight Attn(6,2) is higher than Attn(6,7). Table 6 shows adding the variable embedding reduces Traffic MSE from 0.361 to 0.340. Thus, variable embeddings enable the model to focus on strongly auto-correlated variables and improve accuracy.", "perturbed_explanation": "The perturbed claim incorrectly states that Attn(6,2) exceeds Attn(6,7). In the averaged block of Figure 7, the cell at (6,7) is highlighted in bright yellow—indicating a larger attention weight—whereas the cell at (6,2) is much darker. This contradicts the perturbed premise.", "claim": "Figure 7’s averaged block shows attention weight Attn(6,7) is higher than Attn(6,2). Table 6 shows adding the variable embedding reduces Traffic MSE from 0.361 to 0.340. 
Thus, variable embeddings enable the model to focus on strongly auto-correlated variables and improve accuracy.", "label": true }, { "paperid": "2409.11491v1", "paper_path": "./SciVer/papers/2409.11491v1.json", "claim_type": "sequential", "item1": "5(b)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11491v1_figure_5(b).png", "item2_path": "./SciVer/images/2409.11491v1_figure_5(c).png", "section": [ "5.2" ], "request_id": 97, "origin_statement": "GPT-4 and Claude 3 Haiku exhibit a 0.82 nationality agreement on Wikipedia, and this alignment increases to 0.98 for ethnicity predictions on the HK SFC dataset.", "perturbed_statement": "GPT-4 and Claude 3 Haiku exhibit a 0.92 nationality agreement on Wikipedia, which drops to 0.85 for ethnicity predictions on the HK SFC dataset.", "perturbed_explanation": "The perturbation is incorrect because the Wikipedia nationality agreement between GPT-4 and Claude 3 Haiku is actually 0.82 (not 0.92), and their ethnicity agreement on HK SFC is 0.98 (not 0.85), as shown in the respective heatmaps.", "claim": "GPT-4 and Claude 3 Haiku exhibit a 0.82 nationality agreement on Wikipedia, and this alignment increases to 0.98 for ethnicity predictions on the HK SFC dataset.", "label": true }, { "paperid": "2411.12812v1", "paper_path": "./SciVer/papers/2411.12812v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.12812v1_figure_1.png", "item2_path": "./SciVer/images/2411.12812v1_figure_2.png", "section": [ "1" ], "request_id": 98, "origin_statement": "Figure 1 shows high-carbohydrate meals lead to rapid glucose spikes requiring fast insulin boluses, and Figure 2 shows DIETS’s dietary analysis estimates carbohydrate content to inform its insulin delivery determination, enabling automated rapid bolus recommendations for carb-heavy meals.", "perturbed_statement": "Figure 1 shows high-fat meals lead to rapid glucose spikes requiring fast insulin boluses, and Figure 2 shows DIETS’s dietary analysis estimates fat content to inform its insulin delivery determination, enabling automated rapid bolus recommendations for fat-heavy meals.", "perturbed_explanation": "The perturbed claim is wrong because Figure 1 (and the accompanying text) indicates meals rich in fats require slower, prolonged insulin infusions due to delayed glucose metabolism—not rapid boluses. 
Thus fast bolus dosing for fat-heavy meals contradicts the documented physiology.", "claim": "Figure 1 shows high-carbohydrate meals lead to rapid glucose spikes requiring fast insulin boluses, and Figure 2 shows DIETS’s dietary analysis estimates carbohydrate content to inform its insulin delivery determination, enabling automated rapid bolus recommendations for carb-heavy meals.", "label": true }, { "paperid": "2411.16868v1", "paper_path": "./SciVer/papers/2411.16868v1.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16868v1_figure_1.png", "item2_path": "./SciVer/images/2411.16868v1-Table1-1.png", "section": [ "5", "2.1" ], "request_id": 104, "origin_statement": "Figure 1 shows the X-ray flux peaks at MJD 60095; thus the “Peak” interval (MJD 60094–60096) in Table 1 under the scenario Ψ_X ⟂ Ψ_O yields a 99.73% upper limit on polarization degree of <13%.", "perturbed_statement": "Figure 1 shows the X-ray flux peaks at MJD 60096; thus the “Peak” interval (MJD 60094–60096) in Table 1 with Ψ_X ⟂ Ψ_O yields a 99.73% upper limit on polarization degree of <15%.", "perturbed_explanation": "This is incorrect because Figure 1’s light curve clearly peaks at MJD 60095, not 60096. Moreover, Table 1 reports a 99.73% upper limit of <13% for the Ψ_X ⟂ Ψ_O case during the Peak interval, not <15%.", "claim": "Figure 1 shows the X-ray flux peaks at MJD 60095; thus the “Peak” interval (MJD 60094–60096) in Table 1 under the scenario Ψ_X ⟂ Ψ_O yields a 99.73% upper limit on polarization degree of <13%.", "label": true }, { "paperid": "2410.04088v1", "paper_path": "./SciVer/papers/2410.04088v1.json", "claim_type": "sequential", "item1": "5(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.04088v1_figure_5(b).png", "item2_path": "./SciVer/images/2410.04088v1-Table1-1.png", "section": [ "4.1" ], "request_id": 105, "origin_statement": "Figure 5 shows DN-DETR-R18 + CRED reaches ~28 AP by epoch 6 vs ~24 AP for the vanilla model. Table 1 reports DN-DETR-R50 + CRED achieves 46.2 AP vs 44.1 AP baseline in 50 epochs. Hence, CRED yields ~4 AP early and ~2 AP at convergence gains across backbones.", "perturbed_statement": "Figure 5 shows DN-DETR-R18 + CRED reaches ~28 AP by epoch 6 vs ~23 AP for the vanilla model. Table 1 reports DN-DETR-R50 + CRED achieves 47.2 AP vs 44.2 AP baseline in 50 epochs. Hence, CRED yields ~5 AP early and ~3 AP at convergence gains across backbones.", "perturbed_explanation": "The perturbed claim is wrong because Figure 5’s baseline curve is ~24 AP at epoch 6 (not ~23), so the early gain is ~4 AP, not 5 AP. Also, Table 1 shows DN-DETR-R50 + CRED delivers 46.2 AP vs 44.1 AP (a 2.1 AP gain), not 47.2 AP vs 44.2 AP (3 AP gain).", "claim": "Figure 5 shows DN-DETR-R18 + CRED reaches ~28 AP by epoch 6 vs ~24 AP for the vanilla model. Table 1 reports DN-DETR-R50 + CRED achieves 46.2 AP vs 44.1 AP baseline in 50 epochs. 
Hence, CRED yields ~4 AP early and ~2 AP at convergence gains across backbones.", "label": true }, { "paperid": "2411.01299v1", "paper_path": "./SciVer/papers/2411.01299v1.json", "claim_type": "sequential", "item1": "10", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.01299v1_figure_10.png", "item2_path": "./SciVer/images/2411.01299v1-Table1-1.png", "section": [ "3.3" ], "request_id": 106, "origin_statement": "From the JSON snippet, the DTDL model includes three Angle_Left properties (Angle_Left_1 to Angle_Left_3); Table 1, however, lists only Angle_Left_1, implying two angle measurements are configured in Azure but unrepresented in the example schema.", "perturbed_statement": "From the JSON snippet, the DTDL model includes three Angle_Left properties (Angle_Left_1 to Angle_Left_3); Table 1 lists two Angle_Left properties, implying one measurement is unaccounted for in the example schema.", "perturbed_explanation": "The perturbation is incorrect because Table 1 actually lists only a single Angle_Left_1 property—there is no second Angle_Left entry in the schema example.", "claim": "From the JSON snippet, the DTDL model includes three Angle_Left properties (Angle_Left_1 to Angle_Left_3); Table 1, however, lists only Angle_Left_1, implying two angle measurements are configured in Azure but unrepresented in the example schema.", "label": true }, { "paperid": "2410.23992v1", "paper_path": "./SciVer/papers/2410.23992v1.json", "claim_type": "sequential", "item1": "3(c)", "item2": "3(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23992v1_figure_3(c).png", "item2_path": "./SciVer/images/2410.23992v1_figure_3(d).png", "section": [ "5.3" ], "request_id": 107, "origin_statement": "Figure 3(c) shows two black nodes at indices ~30 and ~90, indicating -w/o HC clusters distant peaks. Figure 3(d) shows only one black node around ~30, implying Ada-MSHyper’s NHC mechanism filters out the later peak to capture temporal variation.", "perturbed_statement": "Figure 3(c) shows only one black node at index ~30, suggesting -w/o HC cannot cluster distant peaks. Yet Figure 3(d) shows two black nodes at ~30 and ~90, implying Ada-MSHyper’s NHC mechanism clusters both peaks to capture temporal variations.", "perturbed_explanation": "This statement is incorrect because the actual visualization in Figure 3(c) highlights two black nodes (around indices ~30 and ~90), not one, and Figure 3(d) only highlights a single black node (~30), not two. Both counts directly contradict the images.", "claim": "Figure 3(c) shows two black nodes at indices ~30 and ~90, indicating -w/o HC clusters distant peaks. Figure 3(d) shows only one black node around ~30, implying Ada-MSHyper’s NHC mechanism filters out the later peak to capture temporal variation.", "label": true }, { "paperid": "2409.17730v1", "paper_path": "./SciVer/papers/2409.17730v1.json", "claim_type": "sequential", "item1": "1(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17730v1_figure_1(b).png", "item2_path": "./SciVer/images/2409.17730v1-Table2-1.png", "section": [ "4.2.2" ], "request_id": 110, "origin_statement": "Figure1(b) shows that when using four beams on Yelp, beam search’s NDCG@10 (~0.0311) falls just below the top-K baseline (~0.0312). Table2 reveals beam search’s first-step HitRate (0.076) is far lower than greedy’s (0.101). 
Thus, initial HitRate losses cause the net NDCG drop at four beams.", "perturbed_statement": "Figure1(b) shows that when using four beams on Yelp, beam search’s NDCG@10 (~0.0313) slightly exceeds the top-K baseline (~0.0312). Table2 reveals beam search’s first-step HitRate (0.076) is far lower than greedy’s (0.101). Thus, initial HitRate losses cause the net NDCG gain at four beams.", "perturbed_explanation": "This is wrong because Figure1(b) actually plots beam search’s NDCG@10 at four beams around 0.03115, which is below the dashed top-K line at ~0.03120. The claim that it exceeds the baseline (and thus yields a net gain) contradicts the plotted values.", "claim": "Figure1(b) shows that when using four beams on Yelp, beam search’s NDCG@10 (~0.0311) falls just below the top-K baseline (~0.0312). Table2 reveals beam search’s first-step HitRate (0.076) is far lower than greedy’s (0.101). Thus, initial HitRate losses cause the net NDCG drop at four beams.", "label": true }, { "paperid": "2410.16845v1", "paper_path": "./SciVer/papers/2410.16845v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.16845v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.16845v1_figure_3(a).png", "section": [ "3.2" ], "request_id": 112, "origin_statement": "Because Fig3(b) shows g_v and g_G vary much slower than g_s and g_h, FGSAM reuses these gradients in its optimization, resulting in a noticeably flatter GNN loss landscape in Fig2(a) compared to standard FSNC.", "perturbed_statement": "Because Fig3(b) shows g_s and g_h vary much slower than g_v and g_G, FGSAM reuses these gradients in its optimization, resulting in a noticeably flatter GNN loss landscape in Fig2(a) compared to standard FSNC.", "perturbed_explanation": "The perturbed statement is wrong because Fig3(b) explicitly demonstrates that g_s and g_h change much faster than g_v and g_G. Only g_v and g_G are slow-changing and reusable, so reusing g_s and g_h is not supported by the context.", "claim": "Because Fig3(b) shows g_v and g_G vary much slower than g_s and g_h, FGSAM reuses these gradients in its optimization, resulting in a noticeably flatter GNN loss landscape in Fig2(a) compared to standard FSNC.", "label": true }, { "paperid": "2411.00915v1", "paper_path": "./SciVer/papers/2411.00915v1.json", "claim_type": "sequential", "item1": "23", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.00915v1_figure_23.png", "item2_path": "./SciVer/images/2411.00915v1-Table3-1.png", "section": [ "6.4" ], "request_id": 113, "origin_statement": "V-LoRA’s average token latency at 32 adapters is ~0.03 s (Fig. 23). Table 3 shows 2 GPUs achieve 11.48 req/s (~0.087 s per request). Thus, V-LoRA handles single-request inference at 32 adapters ~65% faster than the per-request time under a 2-GPU setup.", "perturbed_statement": "V-LoRA’s average token latency at 32 adapters is ~0.12 s (Fig. 23). Table 3 shows 2 GPUs achieve 11.48 req/s (~0.065 s per request). Thus, V-LoRA’s single-request inference at 32 adapters is ~85% slower than the per-request time under a 2-GPU setup.", "perturbed_explanation": "The perturbed claim misreads two facts: Fig. 23 actually shows V-LoRA’s latency at 32 adapters is ~0.03 s, not 0.12 s. And 2 GPUs at 11.48 req/s correspond to ~0.087 s per request, not 0.065 s. 
Hence the computed ‘85% slower’ conclusion contradicts the true values.", "claim": "V-LoRA’s average token latency at 32 adapters is ~0.03 s (Fig. 23). Table 3 shows 2 GPUs achieve 11.48 req/s (~0.087 s per request). Thus, V-LoRA handles single-request inference at 32 adapters ~65% faster than the per-request time under a 2-GPU setup.", "label": true }, { "paperid": "2411.16535v1", "paper_path": "./SciVer/papers/2411.16535v1.json", "claim_type": "sequential", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16535v1_figure_3.png", "item2_path": "./SciVer/images/2411.16535v1-Table2-1.png", "section": [ "4.2" ], "request_id": 115, "origin_statement": "Figure 3 shows DDS reaches about 32 dB PSNR at 128 NFEs in the noise-free 8× acceleration setting; Table 2 reports DDS at 31.58 dB PSNR in the noisy 8× case, so adding noise reduces DDS PSNR by ~0.4 dB.", "perturbed_statement": "Figure 3 shows DDS reaches about 34 dB PSNR at 128 NFEs in the noise-free 8× acceleration setting; Table 2 reports DDS at 31.58 dB PSNR in the noisy 8× case, so adding noise reduces DDS PSNR by ~2.4 dB.", "perturbed_explanation": "The perturbation falsely inflates the noise-free PSNR: Figure 3 actually plots DDS at around 32 dB PSNR for ~128 NFEs, not 34 dB. Therefore the claimed 2.4 dB drop contradicts the image data.", "claim": "Figure 3 shows DDS reaches about 32 dB PSNR at 128 NFEs in the noise-free 8× acceleration setting; Table 2 reports DDS at 31.58 dB PSNR in the noisy 8× case, so adding noise reduces DDS PSNR by ~0.4 dB.", "label": true }, { "paperid": "2410.04797v1", "paper_path": "./SciVer/papers/2410.04797v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04797v1-Table2-1.png", "item2_path": "./SciVer/images/2410.04797v1-Table3-1.png", "section": [ "4.5" ], "request_id": 116, "origin_statement": "Because Fusion raises FEMH-sen accuracy from 85.71% (Wav2vec2.0) to 90.51% (Table 2), and Table 3 shows attentive fusion yields 90.51% versus Add’s 88.23%, attentive fusion delivers a 2.28% improvement over additive fusion on FEMH sentences.", "perturbed_statement": "Because Fusion raises FEMH-sen accuracy from 85.71% (Wav2vec2.0) to 90.51% (Table 2), and Table 3 shows attentive fusion yields 90.51% versus Add’s 89.11%, attentive fusion delivers a 1.40% improvement over additive fusion on FEMH sentences.", "perturbed_explanation": "The inaccuracy lies in misreporting the Add fusion accuracy. Table 3 actually lists Add accuracy on FEMH-sen as 88.23%, not 89.11%. Thus the true improvement (90.51% − 88.23%) is 2.28%, not 1.40%.", "claim": "Because Fusion raises FEMH-sen accuracy from 85.71% (Wav2vec2.0) to 90.51% (Table 2), and Table 3 shows attentive fusion yields 90.51% versus Add’s 88.23%, attentive fusion delivers a 2.28% improvement over additive fusion on FEMH sentences.", "label": true }, { "paperid": "2410.06423v1", "paper_path": "./SciVer/papers/2410.06423v1.json", "claim_type": "sequential", "item1": "3", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.06423v1-Table3-1.png", "item2_path": "./SciVer/images/2410.06423v1-Table6-1.png", "section": [ "5.3" ], "request_id": 118, "origin_statement": "Table 3 shows the Adult dataset has 32 650 male and 16 192 female instances (~66.8% male). Table 6 reports FAIREDU’s SPD for Adult Sex as 0.019. 
Therefore, after FAIREDU intervention, outcomes for male and female are nearly balanced in the Adult dataset.", "perturbed_statement": "Table 3 shows the Adult dataset has 32 650 male and 16 192 female instances (~40% male). Table 6 reports FAIREDU’s SPD for Adult Sex as 0.019. Therefore, after FAIREDU intervention, outcomes for male and female are nearly balanced in the Adult dataset.", "perturbed_explanation": "The perturbed statement misreports the Adult dataset’s male proportion. Table 3 actually indicates 32 650 of 48 842 instances are male (≈66.8%), not 40%. This incorrect premise makes the subsequent fairness claim based on dataset composition false.", "claim": "Table 3 shows the Adult dataset has 32 650 male and 16 192 female instances (~66.8% male). Table 6 reports FAIREDU’s SPD for Adult Sex as 0.019. Therefore, after FAIREDU intervention, outcomes for male and female are nearly balanced in the Adult dataset.", "label": true },
{ "paperid": "2411.12078v1", "paper_path": "./SciVer/papers/2411.12078v1.json", "claim_type": "sequential", "item1": "8", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.12078v1_figure_8.png", "item2_path": "./SciVer/images/2411.12078v1-Table3-1.png", "section": [ "4.2" ], "request_id": 120, "origin_statement": "Figure 8 shows an f-RAG-generated parp1 molecule with docking score −14.1 kcal/mol. Table 3 reports the mean novel top 5 % docking score for parp1 as −12.945 kcal/mol. Thus, f-RAG yields at least one parp1 hit ∼1.2 kcal/mol better than its average top 5 % performance.", "perturbed_statement": "Figure 8 shows an f-RAG-generated parp1 molecule with docking score −12.6 kcal/mol. Table 3 reports the mean novel top 5 % docking score for parp1 as −12.945 kcal/mol. Thus, f-RAG yields its best parp1 hits ~0.35 kcal/mol worse than its average top 5 % performance.", "perturbed_explanation": "The perturbation is wrong because Figure 8 actually displays parp1 molecules with docking scores as low as −14.1 kcal/mol (second molecule in the top row), not −12.6 kcal/mol. Hence the best hit is ~1.16 kcal/mol better than the −12.945 mean, not worse by ~0.35.", "claim": "Figure 8 shows an f-RAG-generated parp1 molecule with docking score −14.1 kcal/mol. Table 3 reports the mean novel top 5 % docking score for parp1 as −12.945 kcal/mol. Thus, f-RAG yields at least one parp1 hit ∼1.2 kcal/mol better than its average top 5 % performance.", "label": true },
{ "paperid": "2411.02542v1", "paper_path": "./SciVer/papers/2411.02542v1.json", "claim_type": "sequential", "item1": "6", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02542v1_figure_6.png", "item2_path": "./SciVer/images/2411.02542v1-Table4-1.png", "section": [ "6.2" ], "request_id": 121, "origin_statement": "Figure 6 shows the average AUC on state datasets peaks at 85.5% with a mask rate of 0.2; Table 4 reports GEN-CP’s mean AUC is 89.81%, which is 4.31% higher, demonstrating Concurrency Prior outperforms the optimal masking hyperparameter.", "perturbed_statement": "Figure 6 shows the average AUC on state datasets peaks at 85.6% with a mask rate of 0.3; Table 4 reports GEN-CP’s mean AUC is 90.96%, which is 5.36% higher, demonstrating Concurrency Prior outperforms the optimal masking hyperparameter.", "perturbed_explanation": "The perturbed statement misreads two key details: (1) Figure 6’s highest AUC occurs at mask rate 0.2 (about 85.5%), not at 0.3, and (2) Table 4 lists GEN-CP’s mean AUC as 89.81%, not 90.96% (that value belongs to TRAVEL-CP). Hence, both premises are incorrect.", "claim": "Figure 6 shows the average AUC on state datasets peaks at 85.5% with a mask rate of 0.2; Table 4 reports GEN-CP’s mean AUC is 89.81%, which is 4.31% higher, demonstrating Concurrency Prior outperforms the optimal masking hyperparameter.", "label": true },
{ "paperid": "2411.16459v1", "paper_path": "./SciVer/papers/2411.16459v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16459v1_figure_4.png", "item2_path": "./SciVer/images/2411.16459v1_figure_5.png", "section": [ "4" ], "request_id": 122, "origin_statement": "Figure 4’s FWHM distribution peaks near 0.5 km s⁻¹, indicating very narrow lines; thus in Figure 5 the 38–41 km s⁻¹ emission—with similarly narrow widths—appears as detached core-like fragments along the dense ridge.", "perturbed_statement": "Figure 4’s FWHM distribution peaks near 2.5 km s⁻¹, indicating broad lines; thus in Figure 5 the 38–41 km s⁻¹ emission—with similarly broad widths—appears as a continuous, widespread filament along the ridge.", "perturbed_explanation": "The perturbation misreads Figure 4: the FWHM histogram actually peaks around 0.5–0.7 km s⁻¹, not 2.5 km s⁻¹. Moreover, Figure 5 shows the 38–41 km s⁻¹ component as isolated fragments, not as a continuous filament.", "claim": "Figure 4’s FWHM distribution peaks near 0.5 km s⁻¹, indicating very narrow lines; thus in Figure 5 the 38–41 km s⁻¹ emission—with similarly narrow widths—appears as detached core-like fragments along the dense ridge.", "label": true },
{ "paperid": "2409.04384v1", "paper_path": "./SciVer/papers/2409.04384v1.json", "claim_type": "sequential", "item1": "4(l)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04384v1_figure_4(l).png", "item2_path": "./SciVer/images/2409.04384v1_figure_5(c).png", "section": [ "4.2", "4.3" ], "request_id": 123, "origin_statement": "The first image depicts a furred animal with whiskers, indicating a mammal; the second shows human facial features, also a mammal; thus both subjects are mammals.", "perturbed_statement": "The first image depicts a scaly animal with feathers, indicating a bird; the second shows human facial features, a mammal; thus one subject is a bird and the other a mammal.", "perturbed_explanation": "The perturbed statement contradicts the first image: the animal clearly has fur and whiskers (not scales or feathers), so it is a mammal rather than a bird.", "claim": "The first image depicts a furred animal with whiskers, indicating a mammal; the second shows human facial features, also a mammal; thus both subjects are mammals.", "label": true },
{ "paperid": "2410.04422v5", "paper_path": "./SciVer/papers/2410.04422v5.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.04422v5_figure_1(a).png", "item2_path": "./SciVer/images/2410.04422v5_figure_1(b).png", "section": [ "1" ], "request_id": 125, "origin_statement": "The table lists keys 103 and 105 with Value 3, and the multi-matching accuracy in Figure 1b falls from 100% at 0.1k tokens to 45% at 10k tokens, so GPT-4o perfectly retrieves both keys in short contexts but fails over half the time in longer ones.", "perturbed_statement": "The table lists keys 104 and 105 with Value 3, and the multi-matching accuracy in Figure 1b falls from 100% at 0.1k tokens to 45% at 10k tokens, so GPT-4o perfectly retrieves both keys in short contexts but fails over half the time in longer ones.", "perturbed_explanation": "This is incorrect because the table actually shows keys 103 and 105 have Value 3; key 104 has Value 8. Therefore the premise that key 104 matches Value 3 is false, invalidating the final claim about retrieving keys 104 and 105.", "claim": "The table lists keys 103 and 105 with Value 3, and the multi-matching accuracy in Figure 1b falls from 100% at 0.1k tokens to 45% at 10k tokens, so GPT-4o perfectly retrieves both keys in short contexts but fails over half the time in longer ones.", "label": true },
{ "paperid": "2410.13627v1", "paper_path": "./SciVer/papers/2410.13627v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "6(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13627v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.13627v1_figure_6(a).png", "section": [ "4" ], "request_id": 127, "origin_statement": "From Fig. 5, non-CMB data alone in the flat CDM model yields w₀+ wₐ ≃−1.3; then in Fig. 6, allowing A_L variation with P18+non-CMB shifts w₀+ wₐ to ≃−1.9; hence including A_L makes the combined equation-of-state sum more negative by ≃0.6.", "perturbed_statement": "From Fig. 5, non-CMB data alone in the flat CDM model yields w₀+ wₐ ≃−1.8; then in Fig. 6, allowing A_L variation with P18+non-CMB shifts w₀+ wₐ to ≃−1.9; hence including A_L makes the combined equation-of-state sum more negative by ≃0.1.", "perturbed_explanation": "Figure 5’s grey non-CMB contour for w₀+ wₐ actually peaks near −1.3 (not −1.8), so the first premise is incorrect. The true shift when moving from −1.3 to −1.9 is ≃0.6, not ≃0.1, making the perturbed claim false.", "claim": "From Fig. 5, non-CMB data alone in the flat CDM model yields w₀+ wₐ ≃−1.3; then in Fig. 6, allowing A_L variation with P18+non-CMB shifts w₀+ wₐ to ≃−1.9; hence including A_L makes the combined equation-of-state sum more negative by ≃0.6.", "label": true },
{ "paperid": "2409.12479v1", "paper_path": "./SciVer/papers/2409.12479v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.12479v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.12479v1_figure_2(b).png", "section": [ "4.5.1" ], "request_id": 128, "origin_statement": "From the FPR95 bar chart, iSUN FPR95 falls from about 75% at N=1 to ~47% at N=2; using this two-sample premise, the AUC bar chart shows iSUN AUC rises from ~80.5% to ~85.8%, implying enrolling two OOD samples improves calibration by over 5%.", "perturbed_statement": "From the FPR95 bar chart, iSUN FPR95 falls from about 75% at N=1 to ~65% at N=2; using this two-sample premise, the AUC bar chart shows iSUN AUC rises from ~80.5% to ~85.8%, implying enrolling two OOD samples improves calibration by over 5%.", "perturbed_explanation": "The perturbation incorrectly states iSUN FPR95 at N=2 is ~65%. In the FPR95 bar chart, the actual iSUN FPR95 at N=2 is approximately 46.7%, not 65%, so the first premise (and thus the final claim) is false.", "claim": "From the FPR95 bar chart, iSUN FPR95 falls from about 75% at N=1 to ~47% at N=2; using this two-sample premise, the AUC bar chart shows iSUN AUC rises from ~80.5% to ~85.8%, implying enrolling two OOD samples improves calibration by over 5%.", "label": true },
{ "paperid": "2410.13762v1", "paper_path": "./SciVer/papers/2410.13762v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.13762v1-Table3-1.png", "item2_path": "./SciVer/images/2410.13762v1-Table4-1.png", "section": [ "3.4" ], "request_id": 134, "origin_statement": "In Table 3, the 80–20 split yields a velocity MSE of 1.40×10⁻³; Table 4 shows the same 1.40×10⁻³ velocity MSE at 11,340 nodes, indicating stable velocity accuracy across data split and node count variations.", "perturbed_statement": "In Table 3, the 80–20 split yields a velocity MSE of 1.40×10⁻³; Table 4 reports a higher 1.50×10⁻³ velocity MSE at 2,835 nodes, suggesting velocity accuracy degrades with fewer nodes.", "perturbed_explanation": "This claim is incorrect because Table 4 actually lists the velocity MSE at 2,835 nodes as 1.42×10⁻³ (not 1.50×10⁻³). The real difference is minimal, so performance remains stable rather than degraded.", "claim": "In Table 3, the 80–20 split yields a velocity MSE of 1.40×10⁻³; Table 4 shows the same 1.40×10⁻³ velocity MSE at 11,340 nodes, indicating stable velocity accuracy across data split and node count variations.", "label": true },
{ "paperid": "2411.04709v1", "paper_path": "./SciVer/papers/2411.04709v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04709v1_figure_4.png", "item2_path": "./SciVer/images/2411.04709v1_figure_5.png", "section": [ "5.1" ], "request_id": 137, "origin_statement": "In Fig.4 ‘Astronaut’ ranks second by frequency, and Fig.5 shows the top-2 subjects account for over 60% of all subject requests, so focusing on ‘Person’ and ‘Astronaut’ will satisfy the majority of user preferences.", "perturbed_statement": "In Fig.4 ‘Robot’ ranks second by frequency, and Fig.5 shows the top-2 subjects account for over 80% of all subject requests, so focusing on ‘Person’ and ‘Robot’ will satisfy the majority of user preferences.", "perturbed_explanation": "This is incorrect because Fig.4 clearly shows ‘Robot’ is only the sixth most frequent subject, not second, and Fig.5’s curve indicates the top-2 subjects cover about 60–65% of requests rather than over 80%.", "claim": "In Fig.4 ‘Astronaut’ ranks second by frequency, and Fig.5 shows the top-2 subjects account for over 60% of all subject requests, so focusing on ‘Person’ and ‘Astronaut’ will satisfy the majority of user preferences.", "label": true },
{ "paperid": "2411.02640v1", "paper_path": "./SciVer/papers/2411.02640v1.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02640v1_figure_6.png", "item2_path": "./SciVer/images/2411.02640v1_figure_7.png", "section": [ "3.2.2" ], "request_id": 140, "origin_statement": "Figure 6 shows the AMD EPYC 9654 CPU saturates at about 5 GDOF/s for the CH₄ GRI-3.0 mechanism around 10⁶ thermochemical states. Figure 7 reports KinetiX’s CH₄ throughput on that same CPU (C1) is only 3.3 GDOF/s. Thus, KinetiX underperforms the CPU’s maximum transport kernel throughput.", "perturbed_statement": "Figure 6 shows the AMD EPYC 9654 CPU saturates at about 5 GDOF/s for the CH₄ GRI-3.0 mechanism. Figure 7 shows KinetiX’s CH₄ throughput on that same CPU (C1) reaches 7.1 GDOF/s. Thus, KinetiX exceeds the CPU’s hardware limit for mixture-averaged transport performance.", "perturbed_explanation": "The perturbed claim misreads Figure 7: the 7.1 GDOF/s value corresponds to KinetiX on the NVIDIA H100 (G1) for CH₄, not the C1 CPU. On C1, KinetiX actually achieves only 3.3 GDOF/s, so it does not exceed the CPU’s ~5 GDOF/s saturation point.", "claim": "Figure 6 shows the AMD EPYC 9654 CPU saturates at about 5 GDOF/s for the CH₄ GRI-3.0 mechanism around 10⁶ thermochemical states. Figure 7 reports KinetiX’s CH₄ throughput on that same CPU (C1) is only 3.3 GDOF/s. Thus, KinetiX underperforms the CPU’s maximum transport kernel throughput.", "label": true },
{ "paperid": "2410.09133v1", "paper_path": "./SciVer/papers/2410.09133v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09133v1_figure_2.png", "item2_path": "./SciVer/images/2410.09133v1_figure_3.png", "section": [ "4.3.1" ], "request_id": 143, "origin_statement": "The MVG-CRPS-trained model produces covariance matrices with visibly lower off-diagonal values (Fig.2), leading to consistently sharper and narrower prediction intervals in its forecasts compared to the log-score-trained model (Fig.3).", "perturbed_statement": "The log-score-trained model produces covariance matrices with visibly lower off-diagonal values (Fig.2), leading to consistently sharper and narrower prediction intervals in its forecasts compared to the MVG-CRPS-trained model (Fig.3).", "perturbed_explanation": "This statement is incorrect because in Fig.2 the top row (log-score) shows darker-colored off-diagonal entries (up to the clipping limit of 0.6), indicating higher covariances rather than lower. Additionally, Fig.3 shows that the log-score forecasts (top row) have much wider prediction intervals, whereas the MVG-CRPS forecasts (bottom row) are noticeably sharper and narrower.", "claim": "The MVG-CRPS-trained model produces covariance matrices with visibly lower off-diagonal values (Fig.2), leading to consistently sharper and narrower prediction intervals in its forecasts compared to the log-score-trained model (Fig.3).", "label": true },
{ "paperid": "2409.03904v1", "paper_path": "./SciVer/papers/2409.03904v1.json", "claim_type": "sequential", "item1": "7(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.03904v1_figure_7(b).png", "item2_path": "./SciVer/images/2409.03904v1-Table3-1.png", "section": [ "3.4.1" ], "request_id": 146, "origin_statement": "Figure 7’s right panel shows mrhs-HDCG converging in about 190 outer iterations on the 96³×192×24 volume; Table 3 confirms FlexADEF2 used 187 iterations performing 1683 fine matrix multiplies, yielding a 10.5× speedup over RedBlackCG.", "perturbed_statement": "Figure 7’s right panel shows mrhs-HDCG converging in about 150 outer iterations on the 96³×192×24 volume; Table 3 confirms FlexADEF2 used 187 iterations performing 1683 fine matrix multiplies, yielding a 10.5× speedup over RedBlackCG.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 7’s right panel actually shows convergence around 190 outer iterations, not 150, and Table 3 explicitly lists 187 iterations for the FlexADEF2 solver, contradicting the 150-iteration claim.", "claim": "Figure 7’s right panel shows mrhs-HDCG converging in about 190 outer iterations on the 96³×192×24 volume; Table 3 confirms FlexADEF2 used 187 iterations performing 1683 fine matrix multiplies, yielding a 10.5× speedup over RedBlackCG.", "label": true },
{ "paperid": "2409.09622v1", "paper_path": "./SciVer/papers/2409.09622v1.json", "claim_type": "sequential", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.09622v1_figure_5.png", "item2_path": "./SciVer/images/2409.09622v1-Table2-1.png", "section": [ "5" ], "request_id": 149, "origin_statement": "Figure 5 reports 79 regions for the elliptope arrangement. Table 2 shows random cubic arrangements in ℝ³ with k=7 yield between 123 and 280 regions. Hence, the elliptope arrangement produces fewer regions than any random cubic arrangement of seven surfaces in three variables.", "perturbed_statement": "Figure 5 reports 79 regions for the elliptope arrangement. Table 2 shows random cubic arrangements in ℝ³ with k=7 yield between 70 and 90 regions. Hence, the elliptope arrangement produces a typical region count among random cubic arrangements with seven surfaces in three variables.", "perturbed_explanation": "Table 2’s row for n=3, k=7 actually lists a region range of 123 to 280, not 70 to 90. Therefore 79 regions lie below the true minimum of 123, contradicting the perturbed claim.", "claim": "Figure 5 reports 79 regions for the elliptope arrangement. Table 2 shows random cubic arrangements in ℝ³ with k=7 yield between 123 and 280 regions. Hence, the elliptope arrangement produces fewer regions than any random cubic arrangement of seven surfaces in three variables.", "label": true },
{ "paperid": "2411.07239v1", "paper_path": "./SciVer/papers/2411.07239v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07239v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.07239v1_figure_2(b).png", "section": [ "4.1.1", "4.1" ], "request_id": 151, "origin_statement": "LoRA-D2NO’s log-scale error drops to about 1.1 by 1000 epochs (Fig.2 left). Therefore, in Fig.2 right its green dashed curve nearly overlaps the red exact solution at the peak, demonstrating highly accurate amplitude reconstruction.", "perturbed_statement": "Full-Random’s log-scale error drops to about 1.1 by 1000 epochs (Fig.2 left). Therefore, in Fig.2 right its blue dashed curve nearly overlaps the red exact solution at the peak, demonstrating highly accurate amplitude reconstruction.", "perturbed_explanation": "Figure 2 (left) shows Full-Random (blue dashed) ending near log relative error 3.1, not 1.1. Moreover, the right panel plots only Exact (red dashed), LoRA-Single (blue dashed), and LoRA-D2NO (green dashed); Full-Random is neither low-error nor shown there.", "claim": "LoRA-D2NO’s log-scale error drops to about 1.1 by 1000 epochs (Fig.2 left). Therefore, in Fig.2 right its green dashed curve nearly overlaps the red exact solution at the peak, demonstrating highly accurate amplitude reconstruction.", "label": true },
{ "paperid": "2410.19523v1", "paper_path": "./SciVer/papers/2410.19523v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19523v1_figure_3.png", "item2_path": "./SciVer/images/2410.19523v1_figure_4.png", "section": [ "6" ], "request_id": 153, "origin_statement": "Fig3c shows some BRCA feature sets have CN TDP near 1.0. In Fig4b, the 5q arm column exhibits near-saturated purple indicating CN TDP ~1 for most pathways. Thus BRCA CN aberrations are strongest on chromosome arm 5q.", "perturbed_statement": "Fig3c shows BRCA CN TDP values rarely exceed 0.5. In Fig4b, the 8q arm column exhibits near-saturated purple indicating CN TDP ~1 for most pathways. Thus BRCA CN aberrations are strongest on chromosome arm 8q.", "perturbed_explanation": "The perturbation is wrong because Fig3c clearly displays BRCA CN TDP points reaching above 0.8 and up to 1.0, not rarely exceeding 0.5. Additionally, in Fig4b the columns for 5q (not 8q) are the most saturated purple, indicating the highest CN TDP on 5q rather than on 8q.", "claim": "Fig3c shows some BRCA feature sets have CN TDP near 1.0. In Fig4b, the 5q arm column exhibits near-saturated purple indicating CN TDP ~1 for most pathways. Thus BRCA CN aberrations are strongest on chromosome arm 5q.", "label": true },
{ "paperid": "2411.14736v1", "paper_path": "./SciVer/papers/2411.14736v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14736v1_figure_2.png", "item2_path": "./SciVer/images/2411.14736v1_figure_3.png", "section": [ "4.1.2" ], "request_id": 156, "origin_statement": "From Figure 2, increasing β chains from 4 to 64 brings the median log-marginal likelihood closer to true log(z). Then, in Figure 3, raising K from 8 to 64 increases the median log(BF_CURN/IRN) above 29, indicating strong CURN support over IRN.", "perturbed_statement": "From Figure 2, increasing β chains from 4 to 64 moves the median log-marginal likelihood farther from the true log(z). Then, in Figure 3, raising K from 8 to 64 lowers the median log(BF_CURN/IRN) below 28, indicating weak CURN support over IRN.", "perturbed_explanation": "The perturbation is false because in Figure 2 the boxplot medians actually converge toward the dashed true log(z) line as K increases, not diverge. In Figure 3 (bottom panel), the median log(BF_CURN/IRN) at K=64 is around 29.2—well above 28—demonstrating strong, not weak, support.", "claim": "From Figure 2, increasing β chains from 4 to 64 brings the median log-marginal likelihood closer to true log(z). Then, in Figure 3, raising K from 8 to 64 increases the median log(BF_CURN/IRN) above 29, indicating strong CURN support over IRN.", "label": true },
{ "paperid": "2411.01289v1", "paper_path": "./SciVer/papers/2411.01289v1.json", "claim_type": "sequential", "item1": "7", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.01289v1_figure_7.png", "item2_path": "./SciVer/images/2411.01289v1-Table4-1.png", "section": [ "4.2" ], "request_id": 158, "origin_statement": "Figure 7 shows the smoke feature has around 0.8 sensitivity, so ML-CP using temperature and smoke achieves the highest recall (99%) compared to DST-CEP’s 97.3%.", "perturbed_statement": "Figure 7 shows the temperature feature has around 0.8 sensitivity, so ML-CP using temperature and smoke achieves the highest recall (99%) compared to DST-CEP’s 97.3%.", "perturbed_explanation": "In Figure 7, the temperature sensitivity is only about 0.2 (blue bar ~0.20, orange ~0.24), while smoke has ~0.8. The perturbed claim misattributes the high sensitivity value to temperature instead of smoke, making it factually incorrect.", "claim": "Figure 7 shows the smoke feature has around 0.8 sensitivity, so ML-CP using temperature and smoke achieves the highest recall (99%) compared to DST-CEP’s 97.3%.", "label": true },
{ "paperid": "2411.00429v1", "paper_path": "./SciVer/papers/2411.00429v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00429v1_figure_1.png", "item2_path": "./SciVer/images/2411.00429v1_figure_3.png", "section": [ "7.3" ], "request_id": 159, "origin_statement": "Figure 1’s relative‐contribution plot shows Gower underweights numerical variables (low contributions for num1/num2), and in Figure 3 at nine categories Gower’s median alienation exceeds HL’s, so Gower retrieves the configuration worse than HL for many‐category data.", "perturbed_statement": "Figure 1’s relative‐contribution plot shows Gower emphasizes numerical variables heavily, and in Figure 3 at nine categories its median alienation falls below HL’s, so Gower outperforms HL with many‐category data.", "perturbed_explanation": "The perturbed claim is wrong on two counts. First, in Figure 1 (right panel), Gower’s relative contributions for the numerical variables (num1, num2) are about 0.07, the lowest of all variants, so it underweights rather than emphasizes them. Second, in Figure 3 (nine cats panel), Gower’s median alienation (~0.35) is higher than HL’s (~0.33), not lower, so Gower actually performs worse than HL.", "claim": "Figure 1’s relative‐contribution plot shows Gower underweights numerical variables (low contributions for num1/num2), and in Figure 3 at nine categories Gower’s median alienation exceeds HL’s, so Gower retrieves the configuration worse than HL for many‐category data.", "label": true },
{ "paperid": "2411.15413v1", "paper_path": "./SciVer/papers/2411.15413v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15413v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.15413v1_figure_3(c).png", "section": [ "3.4" ], "request_id": 160, "origin_statement": "Since heart reports are overwhelmingly 5–10 words long (Fig 3a), and Fig 3c shows heart heatmaps most frequently occupy 40–50% of their bounding box, radiologists produce concise heart descriptions while focusing gaze on roughly half of the heart region.", "perturbed_statement": "Since heart reports are predominantly 10–15 words long (Fig 3a), and Fig 3c shows heart heatmaps most frequently cover over 50% of their bounding box, radiologists produce moderate-length heart descriptions while concentrating gaze on more than half of the heart region.", "perturbed_explanation": "Figure 3a indicates that heart reports are mainly 5–10 words, not 10–15. Figure 3c shows heart heatmaps commonly cover 40–50% of the bounding box, not over 50%, so both premises in the perturbed statement contradict the actual data.", "claim": "Since heart reports are overwhelmingly 5–10 words long (Fig 3a), and Fig 3c shows heart heatmaps most frequently occupy 40–50% of their bounding box, radiologists produce concise heart descriptions while focusing gaze on roughly half of the heart region.", "label": true },
{ "paperid": "2410.15910v2", "paper_path": "./SciVer/papers/2410.15910v2.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.15910v2_figure_2.png", "item2_path": "./SciVer/images/2410.15910v2_figure_3.png", "section": [ "5.3" ], "request_id": 161, "origin_statement": "By step 1200, MsPacmanAR style's mutual information exceeds 1.2 (Figure 2), so in Figure 3 when the agent moves into the bottom-left area (frames 2 and 4), PMI spikes above 0.5, reflecting strong style alignment.", "perturbed_statement": "By step 1200, MsPacmanAR style's mutual information exceeds 1.3 (Figure 2), so in Figure 3 when the agent moves into the bottom-left area (frames 2 and 4), PMI drops below 0, reflecting poor style alignment.", "perturbed_explanation": "The perturbed statement is incorrect because in Figure 2 the MsPacmanAR MI curve plateaus around 1.2 by step 1200, not above 1.3. Moreover, in Figure 3 the PMI at frames 2 and 4 actually rises to about 0.6–0.7, not below zero, indicating strong rather than poor alignment.", "claim": "By step 1200, MsPacmanAR style's mutual information exceeds 1.2 (Figure 2), so in Figure 3 when the agent moves into the bottom-left area (frames 2 and 4), PMI spikes above 0.5, reflecting strong style alignment.", "label": true },
{ "paperid": "2411.07393v1", "paper_path": "./SciVer/papers/2411.07393v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07393v1_figure_3(b).png", "item2_path": "./SciVer/images/2411.07393v1-Table5-1.png", "section": [ "5.2" ], "request_id": 164, "origin_statement": "Table 5 reports Knot 5’s V-band range of 19.08–19.16 mag (ΔV=0.08). Figure 3’s green V lightcurve fluctuates by ~0.08 mag around 19.10. Since nearby supergiants No. 7, 21, 42 each contribute only 1–3.5% of total light, they must brighten by factors of 2.5–8 to produce the observed ΔV.", "perturbed_statement": "Table 5 reports Knot 5’s V-band range of 19.05–19.15 mag (ΔV=0.10). Figure 3’s green V lightcurve fluctuates by ~0.10 mag around 19.10. Since nearby supergiants No. 7, 21, 42 each contribute 5–10% of total light, they need brightenings of only 1.5–3× to account for ΔV.", "perturbed_explanation": "The perturbed statement misquotes the V-range from Table 5: the actual range is 19.08–19.16 mag (ΔV=0.08), not 19.05–19.15 mag (ΔV=0.10). It also overstates the stars’ contributions: they each contribute only 1–3.5% of the knot’s light, not 5–10%. Consequently, the required brightening factors are 2.5–8×, not 1.5–3×, making the perturbed claim invalid.", "claim": "Table 5 reports Knot 5’s V-band range of 19.08–19.16 mag (ΔV=0.08). Figure 3’s green V lightcurve fluctuates by ~0.08 mag around 19.10. Since nearby supergiants No. 7, 21, 42 each contribute only 1–3.5% of total light, they must brighten by factors of 2.5–8 to produce the observed ΔV.", "label": true },
{ "paperid": "2409.17791v1", "paper_path": "./SciVer/papers/2409.17791v1.json", "claim_type": "sequential", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17791v1_figure_2.png", "item2_path": "./SciVer/images/2409.17791v1-Table1-1.png", "section": [ "3.2" ], "request_id": 169, "origin_statement": "Since Figure 2 shows LLaMA-13B’s DPO win rate at 82.8% on TL;DR, and Table 1 reports a 5.8% SPO boost, LLaMA-13B+SPO reaches 88.6%, remaining 1.6% below Mistral-7B+SPO’s 90.2%.", "perturbed_statement": "Since Figure 2 shows LLaMA-13B’s DPO win rate at 81.0% on TL;DR, and Table 1 reports a 5.8% SPO boost, LLaMA-13B+SPO reaches 86.8%, remaining 3.4% below Mistral-7B+SPO’s 90.2%.", "perturbed_explanation": "The DPO baseline for LLaMA-13B on TL;DR is actually 82.8% (not 81.0%) according to Figure 2 and Table 1. Therefore, after a 5.8% SPO boost, the correct win rate is 88.6%, making the gap to Mistral-7B+SPO 1.6%, not 3.4%.", "claim": "Since Figure 2 shows LLaMA-13B’s DPO win rate at 82.8% on TL;DR, and Table 1 reports a 5.8% SPO boost, LLaMA-13B+SPO reaches 88.6%, remaining 1.6% below Mistral-7B+SPO’s 90.2%.", "label": true },
{ "paperid": "2410.08821v1", "paper_path": "./SciVer/papers/2410.08821v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.08821v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.08821v1_figure_2(c).png", "section": [ "5.1.4" ], "request_id": 174, "origin_statement": "Increasing the top-k from 5 to 10 raises Vanilla RAG’s Str-em on ASQA by ≈2.4 (from 43.4 to 45.8), yet it stays ≈1.4 points below ARAG’s 47.2; similarly, on 2WikiMQA, raising top-k from 5 to 9 yields only ≈0.8 F1 gain (43.5→44.3), leaving Vanilla ≈8.4 behind ARAG’s 52.7.", "perturbed_statement": "Increasing the top-k from 5 to 10 raises Vanilla RAG’s Str-em on ASQA by ≈4 (from 43.4 to 45.8), yet it stays ≈2 points below ARAG’s 47.2; similarly, on 2WikiMQA, raising top-k from 5 to 9 yields ≈3 F1 gain (43.5→44.3), leaving Vanilla ≈5 behind ARAG’s 52.7.", "perturbed_explanation": "This is incorrect because Figure 2a shows Vanilla Str-em rises from 43.4 to 45.8 (a ≈2.4 increase), not ≈4; and Figure 2c shows F1 increases from ~43.5 to ~44.3 (≈0.8), not ≈3. Consequently, the performance gaps to ARAG’s 47.2 and 52.7 are ~1.4 and ~8.4, not 2 and 5.", "claim": "Increasing the top-k from 5 to 10 raises Vanilla RAG’s Str-em on ASQA by ≈2.4 (from 43.4 to 45.8), yet it stays ≈1.4 points below ARAG’s 47.2; similarly, on 2WikiMQA, raising top-k from 5 to 9 yields only ≈0.8 F1 gain (43.5→44.3), leaving Vanilla ≈8.4 behind ARAG’s 52.7.", "label": true },
{ "paperid": "2409.15552v1", "paper_path": "./SciVer/papers/2409.15552v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "2(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15552v1_figure_1(a).png", "item2_path": "./SciVer/images/2409.15552v1_figure_2(a).png", "section": [ "4.2.2" ], "request_id": 176, "origin_statement": "Figure 1 shows that STS with s = 10 exhibits unbounded oscillations in its internal-stage stability polynomials, so STS cannot reliably exceed 10 stages. Figure 2 shows ROCK2 with s = 13 has a wide, bounded stability domain, implying ROCK2 can stably use 13 stages where STS cannot.", "perturbed_statement": "Figure 1 shows that STS with s = 8 exhibits unbounded oscillations in its internal-stage stability polynomials, so STS cannot reliably exceed 8 stages. Figure 2 shows ROCK2 with s = 13 has a wide, bounded stability domain, implying ROCK2 can stably use 13 stages where STS cannot.", "perturbed_explanation": "This is incorrect because Figure 1’s caption explicitly specifies s = 10 for the STS method, not s = 8. There is no evidence in the context that STS already exhibits unbounded internal-stage behavior at s = 8; the instability was demonstrated only at s = 10.", "claim": "Figure 1 shows that STS with s = 10 exhibits unbounded oscillations in its internal-stage stability polynomials, so STS cannot reliably exceed 10 stages. Figure 2 shows ROCK2 with s = 13 has a wide, bounded stability domain, implying ROCK2 can stably use 13 stages where STS cannot.", "label": true },
{ "paperid": "2409.14335v1", "paper_path": "./SciVer/papers/2409.14335v1.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14335v1_figure_1.png", "item2_path": "./SciVer/images/2409.14335v1-Table1-1.png", "section": [ "1" ], "request_id": 179, "origin_statement": "Figure1 shows MQM-APE halves the original error score from −12 to −6. Table1 indicates MQM-APE is a training-free approach that surpasses GEMBA-MQM. Therefore, a training-free MQM-APE pipeline can reduce translation error penalty by 50% without retraining.", "perturbed_statement": "Figure1 shows MQM-APE halves the original error score from −12 to −6. Table1 indicates MQM-APE requires training. Therefore, a training-dependent MQM-APE pipeline can reduce translation error penalty by 50%.", "perturbed_explanation": "The perturbation misstates Table1: it clearly labels MQM-APE as a training-free approach, not training-dependent. Since MQM-APE does not require any training, the claim that it “requires training” contradicts the table’s explicit description.", "claim": "Figure1 shows MQM-APE halves the original error score from −12 to −6. Table1 indicates MQM-APE is a training-free approach that surpasses GEMBA-MQM. Therefore, a training-free MQM-APE pipeline can reduce translation error penalty by 50% without retraining.", "label": true },
{ "paperid": "2409.00049v2", "paper_path": "./SciVer/papers/2409.00049v2.json", "claim_type": "sequential", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.00049v2_figure_5.png", "item2_path": "./SciVer/images/2409.00049v2_figure_7.png", "section": [ "3.2" ], "request_id": 181, "origin_statement": "Fig.5 defines EVPI as the difference between expected pre-posterior and prior utilities. Fig.7 shows expected prior utility at -138 £/day (black dashed) and expected pre-posterior utility at -117 £/day (blue dashed), thus EVPI = 21 £/day.", "perturbed_statement": "Fig.5 defines EVPI as the difference between expected pre-posterior and prior utilities. Fig.7 shows expected prior utility at -140 £/day (black dashed) and expected pre-posterior utility at -117 £/day (blue dashed), thus EVPI = 23 £/day.", "perturbed_explanation": "The perturbation misreads the prior expected utility from -138 £/day (as shown by the black dashed line in Fig.7) as -140 £/day. This error leads to computing EVPI = -117 - (-140) = 23, whereas the correct EVPI is -117 - (-138) = 21 £/day.", "claim": "Fig.5 defines EVPI as the difference between expected pre-posterior and prior utilities. Fig.7 shows expected prior utility at -138 £/day (black dashed) and expected pre-posterior utility at -117 £/day (blue dashed), thus EVPI = 21 £/day.", "label": true },
{ "paperid": "2409.11887v1", "paper_path": "./SciVer/papers/2409.11887v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11887v1_figure_5.png", "item2_path": "./SciVer/images/2409.11887v1_figure_6.png", "section": [ "5.3" ], "request_id": 191, "origin_statement": "At input length 4096, DocMamba achieves 3.1 FPS versus LayoutLMv3’s 1.3 FPS—a 2.4× speedup. Given DocMamba₅₁₂ reaches 98.7 % F1 at input length 2560, this implies it can maintain near-99 % accuracy while processing long documents in real time.", "perturbed_statement": "At input length 4096, DocMamba achieves 3.1 FPS versus LayoutLMv3’s 1.6 FPS—a 1.9× speedup. Given DocMamba₅₁₂ reaches 96.7 % F1 at input length 2560, this implies it can maintain near-97 % accuracy while processing long documents in real time.", "perturbed_explanation": "Figure 5 shows LayoutLMv3’s FPS at input length 4096 is 1.3 FPS, not 1.6 FPS, making the true speedup 2.4× (3.1/1.3) rather than 1.9×. Figure 6 shows DocMamba₅₁₂ attains 98.7 % F1 at length 2560, not 96.7 %, so the accuracy is near-99 %, not near-97 %.", "claim": "At input length 4096, DocMamba achieves 3.1 FPS versus LayoutLMv3’s 1.6 FPS—a 1.9× speedup. Given DocMamba₅₁₂ reaches 96.7 % F1 at input length 2560, this implies it can maintain near-97 % accuracy while processing long documents in real time.", "label": false },
{ "paperid": "2409.07107v1", "paper_path": "./SciVer/papers/2409.07107v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.07107v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.07107v1_figure_2(c).png", "section": [ "4" ], "request_id": 192, "origin_statement": "Since MuJoCo’s half-cheetah (left) is a simple low-DoF system and Unitree’s H1 (right) is a complex high-DoF robot, computing simulation gradients on the H1 with an Apple M3 CPU took significantly longer than on the half-cheetah.", "perturbed_statement": "Since MuJoCo’s half-cheetah (left) is a complex high-DoF system and Unitree’s H1 (right) is a simple low-DoF robot, computing simulation gradients on the H1 with an Apple M3 CPU took significantly longer than on the half-cheetah.", "perturbed_explanation": "The perturbed statement flips the complexity labels: Figure 2 explicitly describes MuJoCo’s half-cheetah as a simple, low-DoF system and Unitree’s H1 as a complex, high-DoF robot. This contradicts those descriptions, making the claim false.", "claim": "Since MuJoCo’s half-cheetah (left) is a complex high-DoF system and Unitree’s H1 (right) is a simple low-DoF robot, computing simulation gradients on the H1 with an Apple M3 CPU took significantly longer than on the half-cheetah.", "label": false },
{ "paperid": "2410.22543v1", "paper_path": "./SciVer/papers/2410.22543v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.22543v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.22543v1_figure_3(b).png", "section": [ "2.4" ], "request_id": 193, "origin_statement": "Figure 3 (right) shows that Ra reaches 1 AU at argument of perihelion ≈90° and ≈270°; since Earth crossings occur when Ra or Rd = 1 AU, 2010 XC11 crosses Earth’s orbit at varpi ≈90° (ascending node) and ≈270° (descending node).", "perturbed_statement": "Figure 3 (right) shows that Ra reaches 1 AU at argument of perihelion ≈60° and ≈300°; since Earth crossings occur when Ra or Rd = 1 AU, 2010 XC11 crosses Earth’s orbit at varpi ≈60° (ascending node) and ≈300° (descending node).", "perturbed_explanation": "The blue Ra curve in Figure 3 (right) actually intersects the 1 AU reference line at about 90° and 270°, not at 60° and 300°. Therefore the claimed varpi values of 60° and 300° contradict the plotted peak positions.", "claim": "Figure 3 (right) shows that Ra reaches 1 AU at argument of perihelion ≈60° and ≈300°; since Earth crossings occur when Ra or Rd = 1 AU, 2010 XC11 crosses Earth’s orbit at varpi ≈60° (ascending node) and ≈300° (descending node).", "label": false },
{ "paperid": "2409.16057v2", "paper_path": "./SciVer/papers/2409.16057v2.json", "claim_type": "sequential", "item1": "4(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.16057v2_figure_4(b).png", "item2_path": "./SciVer/images/2409.16057v2-Table2-1.png", "section": [ "4.3" ], "request_id": 194, "origin_statement": "Table 2 reports APS of 0.15 for Faster R-CNN with our method on the poisoned dataset. Figure 4 shows overall mAP exceeds 0.25 by epoch 15. This shows that small-object AP fully recovers to 0.15 while overall detection surpasses 0.25 by mid-training.", "perturbed_statement": "Table 2 reports APS of 0.15 for Faster R-CNN with our method on the poisoned dataset. Figure 4 shows overall mAP exceeds 0.30 by epoch 10. This shows that small-object AP fully recovers to 0.15 while overall detection surpasses 0.30 by early training.", "perturbed_explanation": "The perturbed claim states that mAP exceeds 0.30 by epoch 10, but Figure 4 shows that at epoch 10 the mAP for the poisoned dataset + Ours line is about 0.246, well below 0.30. Thus the timing and value are incorrect.", "claim": "Table 2 reports APS of 0.15 for Faster R-CNN with our method on the poisoned dataset. Figure 4 shows overall mAP exceeds 0.30 by epoch 10. This shows that small-object AP fully recovers to 0.15 while overall detection surpasses 0.30 by early training.", "label": false },
{ "paperid": "2410.02810v1", "paper_path": "./SciVer/papers/2410.02810v1.json", "claim_type": "sequential", "item1": "6", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02810v1_figure_6.png", "item2_path": "./SciVer/images/2410.02810v1-Table5-1.png", "section": [ "6.2" ], "request_id": 195, "origin_statement": "Since StateAct with correction averages 19.11 steps (Table 5), most tasks fall within the [10–20] bin, where Figure 6 shows its success rate at approximately 90%.", "perturbed_statement": "Since StateAct with correction averages 19.11 steps (Table 5), most tasks fall within the [20–30] bin, where Figure 6 shows its success rate at approximately 80%.", "perturbed_explanation": "The average of 19.11 steps is below 20, so tasks actually lie in the [10–20] bin, not [20–30]. Thus the claim that they fall in [20–30] contradicts the table’s 19.11 value.", "claim": "Since StateAct with correction averages 19.11 steps (Table 5), most tasks fall within the [20–30] bin, where Figure 6 shows its success rate at approximately 80%.", "label": false },
{ "paperid": "2409.06280v1", "paper_path": "./SciVer/papers/2409.06280v1.json", "claim_type": "sequential", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06280v1_figure_8.png", "item2_path": "./SciVer/images/2409.06280v1_figure_9.png", "section": [ "5.2.6" ], "request_id": 197, "origin_statement": "From Fig. 8, reducing marking intensity (m 0.7→0.8, δ 8/255→4/255) decreases unsupervised OOD detection AUC from 0.8085 to 0.6333. From Fig. 9, a realistic supervised defender achieves 0.6600 AUC. Thus, supervised detection only marginally outperforms unsupervised methods at low marking intensity.", "perturbed_statement": "From Fig. 8, reducing marking intensity decreases unsupervised OOD detection AUC from 0.8085 to 0.7800. From Fig. 9, a realistic supervised defender achieves 0.6600 AUC. Thus, unsupervised methods still outperform supervised detection at low marking intensity.", "perturbed_explanation": "Fig. 8 actually reports that reducing intensity lowers the unsupervised AUC to 0.6333 (not 0.7800). Because 0.6333 is below the realistic supervised AUC of 0.6600, unsupervised methods do not outperform supervised detection.", "claim": "From Fig. 8, reducing marking intensity decreases unsupervised OOD detection AUC from 0.8085 to 0.7800. From Fig. 9, a realistic supervised defender achieves 0.6600 AUC. Thus, unsupervised methods still outperform supervised detection at low marking intensity.", "label": false },
{ "paperid": "2410.11378v1", "paper_path": "./SciVer/papers/2410.11378v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.11378v1_figure_5(a).png", "item2_path": "./SciVer/images/2410.11378v1_figure_5(c).png", "section": [ "4.8" ], "request_id": 199, "origin_statement": "Since under 20% adversaries WPFed (‘Ours’) maintains ~90% accuracy with <2% deviation while ProxyFL dips to ~84% at iteration 75, WPFed also sustains >90% accuracy under 60% adversaries (Fig.5c), whereas ProxyFL falls below 70% during resets.", "perturbed_statement": "Since under 20% adversaries WPFed maintains ~90% accuracy with <2% deviation while ProxyFL dips only to ~88% at iteration 75, ProxyFL also sustains >90% accuracy under 60% adversaries (Fig.5c), matching WPFed’s resilience.", "perturbed_explanation": "The perturbation is wrong because Fig.5a shows ProxyFL actually dips to about 84%, not ~88%, at iteration 75 under 20% adversaries. Furthermore, Fig.5c confirms ProxyFL falls below 70% during resets at 60% adversaries, so it does not sustain >90% accuracy.", "claim": "Since under 20% adversaries WPFed maintains ~90% accuracy with <2% deviation while ProxyFL dips only to ~88% at iteration 75, ProxyFL also sustains >90% accuracy under 60% adversaries (Fig.5c), matching WPFed’s resilience.", "label": false },
{ "paperid": "2410.13500v1", "paper_path": "./SciVer/papers/2410.13500v1.json", "claim_type": "sequential", "item1": "2(b)", "item2": "9(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13500v1_figure_2(b).png", "item2_path": "./SciVer/images/2410.13500v1_figure_9(a).png", "section": [ "5.2" ], "request_id": 200, "origin_statement": "Because Fig. 2 shows the large building missing from the ground truth disparity (modality 1), this missing structure contributes many inconsistent points, which Fig. 9 (modality 2) shows are reduced by over 50% in the first 1 000 training steps.", "perturbed_statement": "Because Fig. 2 shows the large building present in the ground truth disparity, this structure contributes many inconsistent points, which Fig. 9 shows are reduced by over 50% in the first 1 000 training steps.", "perturbed_explanation": "The perturbation is wrong because Fig. 2 explicitly notes that the large building is missing in the ground truth disparity map, not present. Claiming it ‘is present’ contradicts the caption stating it is absent.", "claim": "Because Fig. 2 shows the large building present in the ground truth disparity, this structure contributes many inconsistent points, which Fig. 9 shows are reduced by over 50% in the first 1 000 training steps.", "label": false },
{ "paperid": "2411.09556v1", "paper_path": "./SciVer/papers/2411.09556v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09556v1_figure_1.png", "item2_path": "./SciVer/images/2411.09556v1_figure_3.png", "section": [ "3.1" ], "request_id": 205, "origin_statement": "Fig.1 indicates radiative cooling lowers inner log Tₑ from ≈11.3 to ≈11.1. Using the cooled temperature, Fig.3 shows Σq⁻ at r=5 r_g ≈0.4, far below Ṁ_disk ≈30, implying radiative cooling negligibly impacts accretion at that radius.", "perturbed_statement": "Fig.1 indicates radiative cooling lowers inner log Tₑ by only ≈0.01, keeping it ≈11.3. Using this, Fig.3 shows Σq⁻ at r=5 r_g ≈15, half of Ṁ_disk ≈30, implying radiative cooling significantly influences accretion at that radius.", "perturbed_explanation": "This is incorrect because Fig.1 actually shows a drop of ≈0.2 dex in inner log Tₑ (from ~11.3 to ~11.1), not ≈0.01 dex. Moreover, Fig.3 plots Σq⁻ ≈0.4 at 5 r_g, not ≈15, so cooling remains much smaller than Ṁ_disk (~30), contrary to the perturbed claim.", "claim": "Fig.1 indicates radiative cooling lowers inner log Tₑ by only ≈0.01, keeping it ≈11.3. Using this, Fig.3 shows Σq⁻ at r=5 r_g ≈15, half of Ṁ_disk ≈30, implying radiative cooling significantly influences accretion at that radius.", "label": false },
{ "paperid": "2410.16843v1", "paper_path": "./SciVer/papers/2410.16843v1.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.16843v1_figure_2.png", "item2_path": "./SciVer/images/2410.16843v1-Table2-1.png", "section": [ "4.2" ], "request_id": 208, "origin_statement": "From Figure 2, Vicuna-7b-v1.5's memorization ratio M_R drops below 0.05 by episode 500. Then, Table 2 reports Trustworthy-Alignment yields a +0.00% ARC change for Vicuna-7b-v1.5. Thus early M_R suppression coincides with stable ARC performance.", "perturbed_statement": "From Figure 2, Vicuna-7b-v1.5's memorization ratio M_R remains above 0.2 until episode 1000. Then, Table 2 reports Trustworthy-Alignment yields a +0.86% ARC change for Vicuna-7b-v1.5. Thus delayed M_R suppression results in moderate ARC boost.", "perturbed_explanation": "This is incorrect because Figure 2 shows Vicuna-7b-v1.5’s M_R falls below 0.05 well before episode 500 (not staying >0.2 until 1000). Moreover, Table 2 lists a +0.00% change on ARC for ‘Ours’ with Vicuna-7b-v1.5, not +0.86%.", "claim": "From Figure 2, Vicuna-7b-v1.5's memorization ratio M_R remains above 0.2 until episode 1000. Then, Table 2 reports Trustworthy-Alignment yields a +0.86% ARC change for Vicuna-7b-v1.5. Thus delayed M_R suppression results in moderate ARC boost.", "label": false },
{ "paperid": "2409.15317v1", "paper_path": "./SciVer/papers/2409.15317v1.json", "claim_type": "sequential", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15317v1_figure_3.png", "item2_path": "./SciVer/images/2409.15317v1-Table2-1.png", "section": [ "4.3" ], "request_id": 209, "origin_statement": "Because Figure 3a shows that copilot advantage equals 1 more often during corrupted pilot actions, IDA intervenes more under noise, leading to an increased noisy-pilot success rate: Table 2 records an 83% success for IDA (diffusion) compared to 75% for the standalone diffusion copilot.", "perturbed_statement": "Because Figure 3a shows that copilot advantage equals 1 more often during corrupted pilot actions, IDA intervenes more under noise, but Table 2 records a 75% success rate for IDA (diffusion) with the noisy pilot—lower than the 83% success of the standalone diffusion copilot.", "perturbed_explanation": "The perturbed claim misreports the numbers in Table 2. In reality, Table 2 shows that IDA (diffusion) achieves an 83% success rate with the noisy pilot and the standalone diffusion copilot achieves only 75%, so the assertion that IDA has just 75% success and is outperformed by the copilot is incorrect.", "claim": "Because Figure 3a shows that copilot advantage equals 1 more often during corrupted pilot actions, IDA intervenes more under noise, but Table 2 records a 75% success rate for IDA (diffusion) with the noisy pilot—lower than the 83% success of the standalone diffusion copilot.", "label": false },
{ "paperid": "2410.02409v1", "paper_path": "./SciVer/papers/2410.02409v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.02409v1-Table1-1.png", "item2_path": "./SciVer/images/2410.02409v1-Table2-1.png", "section": [ "3.1.2" ], "request_id": 211, "origin_statement": "In Table 1, for p=ℓ and s=ℓm, the weight is ℓ(n−1)+m·n; then in Table 2, for p=ℓm and s=ℓm, the weight is ℓ(n−1)+m·(n+1); hence the Table 2 weight exceeds the Table 1 weight by exactly m.", "perturbed_statement": "In Table 1, for p=ℓ and s=ℓm, the weight is ℓ·n+m·n; then in Table 2, for p=ℓm and s=ℓm, the weight is ℓ(n−1)+m·(n+1); hence the Table 2 weight exceeds the Table 1 weight by exactly m.", "perturbed_explanation": "The perturbation misreads the Table 1 entry for (p=ℓ,s=ℓm). Table 1 actually gives |y|₀=n−1, so the weight is ℓ(n−1)+m·n, not ℓ·n+m·n as stated, making the premise false and invalidating the conclusion.", "claim": "In Table 1, for p=ℓ and s=ℓm, the weight is ℓ·n+m·n; then in Table 2, for p=ℓm and s=ℓm, the weight is ℓ(n−1)+m·(n+1); hence the Table 2 weight exceeds the Table 1 weight by exactly m.", "label": false },
{ "paperid": "2410.09123v2", "paper_path": "./SciVer/papers/2410.09123v2.json", "claim_type": "sequential", "item1": "1(b)", "item2": "1(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09123v2_figure_1(b).png", "item2_path": "./SciVer/images/2410.09123v2_figure_1(c).png", "section": [ "1", "5.5" ], "request_id": 212, "origin_statement": "Panel (b) shows the median cosine similarity (50th percentile) at approximately 0.42. Using that baseline, panel (c) has its median at around 0.48. Thus, relations in the dataset corresponding to panel (c) exhibit higher overall similarity than those in panel (b).", "perturbed_statement": "Panel (b) shows the median cosine similarity (50th percentile) at approximately 0.52, higher than panel (c)’s median of around 0.48. Thus, relations in the dataset corresponding to panel (b) exhibit higher overall similarity than those in panel (c).", "perturbed_explanation": "The perturbed statement is wrong because panel (b)’s actual median similarity is about 0.42, not 0.52, which is lower than panel (c)’s ~0.48. This contradiction in the 50th percentile values invalidates the claim that panel (b) is more homogeneous.", "claim": "Panel (b) shows the median cosine similarity (50th percentile) at approximately 0.52, higher than panel (c)’s median of around 0.48. Thus, relations in the dataset corresponding to panel (b) exhibit higher overall similarity than those in panel (c).", "label": false },
{ "paperid": "2409.19351v1", "paper_path": "./SciVer/papers/2409.19351v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "8(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19351v1_figure_7(a).png", "item2_path": "./SciVer/images/2409.19351v1_figure_8(a).png", "section": [ "3.1" ], "request_id": 215, "origin_statement": "At Ts=5s and Dmin=50m, estimated velocities cluster at discrete values (Fig.7), yet at the same parameters, estimated directions scatter smoothly across the full 0°–360° range (Fig.8), showing direction estimates avoid the binning seen in velocity under coarse sampling.", "perturbed_statement": "At Ts=5s and Dmin=50m, estimated velocities cluster at discrete values (Fig.7), and at the same parameters, estimated directions also exhibit pronounced binning at multiples of 30° (Fig.8), indicating direction estimates suffer the same coarse-resolution issue.", "perturbed_explanation": "Figure 8 at Ts=5s and Dmin=50m shows direction points continuously filling the 0°–360° range without any discrete 30° clustering, so directions are not binned as claimed.", "claim": "At Ts=5s and Dmin=50m, estimated velocities cluster at discrete values (Fig.7), and at the same parameters, estimated directions also exhibit pronounced binning at multiples of 30° (Fig.8), indicating direction estimates suffer the same coarse-resolution issue.", "label": false },
{ "paperid": "2410.04199v3", "paper_path": "./SciVer/papers/2410.04199v3.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.04199v3_figure_3.png", "item2_path": "./SciVer/images/2410.04199v3-Table3-1.png", "section": [ "5.1" ], "request_id": 218, "origin_statement": "GPT-4o’s LongGenBench average on GSM8K is 75.6% (table). At question index 35, its accuracy is around 0.69 (figure). Therefore, by the tail of the prompt, GPT-4o underperforms its LongGenBench mean by roughly 6.6 points.", "perturbed_statement": "GPT-4o’s LongGenBench average on GSM8K is 85.0%. At question index 35, its accuracy is around 0.69. Therefore, by the tail of the prompt, GPT-4o underperforms its LongGenBench mean by roughly 16.1 points.", "perturbed_explanation": "The perturbed statement mistakenly uses 85.0% as GPT-4o’s LongGenBench average; the table shows GPT-4o’s actual LongGenBench accuracy is 75.6%, not 85.0%, so the claimed 16.1-point drop is incorrect.", "claim": "GPT-4o’s LongGenBench average on GSM8K is 85.0%. At question index 35, its accuracy is around 0.69. Therefore, by the tail of the prompt, GPT-4o underperforms its LongGenBench mean by roughly 16.1 points.", "label": false },
{ "paperid": "2411.03697v1", "paper_path": "./SciVer/papers/2411.03697v1.json", "claim_type": "sequential", "item1": "10", "item2": "16", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03697v1_figure_10.png", "item2_path": "./SciVer/images/2411.03697v1_figure_16.png", "section": [ "6.4" ], "request_id": 219, "origin_statement": "Figure 10 shows TATAA’s GeLU kernel loads inputs into on-chip registers and performs only local mul/add operations without external memory access. Consequently, in Figure 16 TATAA achieves around 2.8× normalized throughput for GeLU, exceeding Wu et al.’s ~2.2× and Huang et al.’s ~2.0×.", "perturbed_statement": "Figure 10 shows TATAA’s GeLU kernel reloads data from off-chip memory each iteration. Consequently, in Figure 16 TATAA reports only about 1.5× normalized throughput for GeLU, trailing Huang et al.’s ~2.0×.", "perturbed_explanation": "This is incorrect because Figure 10’s GeLU implementation uses on-chip registers with local mul/add operations and no repeated off-chip loads. Furthermore, Figure 16 shows TATAA’s GeLU achieves approximately 2.8× normalized throughput, not 1.5×.", "claim": "Figure 10 shows TATAA’s GeLU kernel reloads data from off-chip memory each iteration. Consequently, in Figure 16 TATAA reports only about 1.5× normalized throughput for GeLU, trailing Huang et al.’s ~2.0×.", "label": false },
{ "paperid": "2411.07976v5", "paper_path": "./SciVer/papers/2411.07976v5.json", "claim_type": "sequential", "item1": "2(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07976v5_figure_2(a).png", "item2_path": "./SciVer/images/2411.07976v5_figure_3(c).png", "section": [ "3.2" ], "request_id": 220, "origin_statement": "The red-marked calcification in the CT slice (Fig 2a) aligns with the bright yellow–green focus in the DINO-LG self-attention overlay (Fig 3d), showing that label-guided augmentation sharpens attention on calcified regions.", "perturbed_statement": "The red-marked calcification in the CT slice (Fig 2a) aligns with the bright yellow–green focus in the standard DINO self-attention overlay (Fig 3c), indicating that unmodified DINO already centers attention on calcified regions.", "perturbed_explanation": "This is incorrect because the high-attention yellow–green cluster appears only in the DINO-LG overlay (Fig 3d). In Fig 3c (standard DINO), the attention is diffuse and does not concentrate on the red-annotated calcification.", "claim": "The red-marked calcification in the CT slice (Fig 2a) aligns with the bright yellow–green focus in the standard DINO self-attention overlay (Fig 3c), indicating that unmodified DINO already centers attention on calcified regions.", "label": false },
{ "paperid": "2411.05521v2", "paper_path": "./SciVer/papers/2411.05521v2.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.05521v2-Table1-1.png", "item2_path": "./SciVer/images/2411.05521v2-Table2-1.png", "section": [ "4" ], "request_id": 222, "origin_statement": "From Table 1, SM3-Text-to-Query supports SPARQL. Table 2 shows SM3-SPARQL queries average 4.00 joins, compared to MIMICSQL’s 0.64. Hence, SM3-SPARQL queries use over six times more joins than MIMICSQL queries.", "perturbed_statement": "From Table 1, SM3-Text-to-Query supports SPARQL. Table 2 shows SM3-SPARQL queries average 0.62 joins, compared to MIMICSQL’s 0.64. Hence, SM3-SPARQL queries use slightly fewer joins than MIMICSQL queries.", "perturbed_explanation": "The perturbed statement misattributes the 0.62 joins value to SM3-SPARQL. According to Table 2, SM3-SPARQL actually averages 4.00 joins per query; the 0.62 figure belongs to SM3_Cypher.", "claim": "From Table 1, SM3-Text-to-Query supports SPARQL. Table 2 shows SM3-SPARQL queries average 0.62 joins, compared to MIMICSQL’s 0.64. Hence, SM3-SPARQL queries use slightly fewer joins than MIMICSQL queries.", "label": false },
{ "paperid": "2410.07484v2", "paper_path": "./SciVer/papers/2410.07484v2.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.07484v2_figure_5.png", "item2_path": "./SciVer/images/2410.07484v2_figure_6.png", "section": [ "4.3" ], "request_id": 227, "origin_statement": "Because the gather action reaches 100% coverage at iteration 1 (Fig. 5), WALL-E’s average replanning rounds drop to 16 by iteration 4 (Fig. 6).", "perturbed_statement": "Because the gather action reaches 100% coverage at iteration 1, WALL-E’s average replanning rounds drop to 16 by iteration 3.", "perturbed_explanation": "This is incorrect because Figure 6 shows that at iteration 3 the replanning rounds for WALL-E are about 18, not 16; the drop to 16 only occurs at iteration 4.", "claim": "Because the gather action reaches 100% coverage at iteration 1, WALL-E’s average replanning rounds drop to 16 by iteration 3.", "label": false },
{ "paperid": "2411.04093v1", "paper_path": "./SciVer/papers/2411.04093v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04093v1_figure_3.png", "item2_path": "./SciVer/images/2411.04093v1_figure_4.png", "section": [ "5.2" ], "request_id": 229, "origin_statement": "BART extracts significantly more from right-leaning sources (Fig. 3) and also shows the strongest position bias by focusing on the first document (Fig. 4), so when a right-leaning article appears first, BART’s summary will disproportionately reflect right-leaning content.", "perturbed_statement": "BART extracts significantly more from right-leaning sources (Fig. 3) and Mistral shows the strongest position bias by focusing on the first document (Fig. 4), so when a right-leaning article appears first, BART’s summary will disproportionately reflect right-leaning content.", "perturbed_explanation": "Fig. 4’s position bias plot shows that BART (blue line) has the highest extraction coverage at document position 0 (~0.52), whereas Mistral (red line) peaks lower (~0.48). Thus, Mistral does not exhibit the strongest position bias, contradicting the perturbed premise.", "claim": "BART extracts significantly more from right-leaning sources (Fig. 3) and Mistral shows the strongest position bias by focusing on the first document (Fig. 4), so when a right-leaning article appears first, BART’s summary will disproportionately reflect right-leaning content.", "label": false },
{ "paperid": "2410.05053v1", "paper_path": "./SciVer/papers/2410.05053v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.05053v1_figure_3(a).png", "item2_path": "./SciVer/images/2410.05053v1_figure_6.png", "section": [ "3.1" ], "request_id": 231, "origin_statement": "At 400 K, the L1 monolayer adsorption curve shows a decreased plateau uptake (~13 mmol/g) (Fig. 3d), which correlates with the reduced average H-bonds per molecule (~3.4) in Fig. 6, indicating thermal disruption of the two-dimensional network lowers uptake.", "perturbed_statement": "At 400 K, the L1 monolayer adsorption plateau is ~15 mmol/g, which correlates with a reduced average H-bonds per molecule (~3.58) in Fig. 6, indicating thermal disruption of the two-dimensional network lowers uptake.", "perturbed_explanation": "This is incorrect because Fig. 3d shows the L1 monolayer plateau at 400 K is ~13 mmol/g (not ~15 mmol/g), and Fig. 6 reports L1’s H-bonds per molecule as ~3.4 (not 3.58; 3.58 corresponds to L2). Both values contradict the original data.", "claim": "At 400 K, the L1 monolayer adsorption plateau is ~15 mmol/g, which correlates with a reduced average H-bonds per molecule (~3.58) in Fig. 6, indicating thermal disruption of the two-dimensional network lowers uptake.", "label": false },
{ "paperid": "2411.09850v1", "paper_path": "./SciVer/papers/2411.09850v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09850v1_figure_1(c).png", "item2_path": "./SciVer/images/2411.09850v1_figure_2.png", "section": [ "3.1" ], "request_id": 235, "origin_statement": "Fig.1(c) shows that at t=0 the DPS-CM curve (blue) achieves the lowest reconstruction MSE compared to DPS (purple) and DPSₙₜ (red). Consequently, in Fig.2 the DPS-CM output (rightmost) appears noticeably sharper than DPSₙₜ, demonstrating that lower MSE yields better visual fidelity.", "perturbed_statement": "Fig.1(c) shows that at t=0 the DPSₙₜ curve (red) achieves the lowest reconstruction MSE compared to DPS (purple) and DPS-CM (blue). Consequently, in Fig.2 the DPSₙₜ output appears noticeably sharper than DPS-CM, demonstrating that lower MSE yields better visual fidelity.", "perturbed_explanation": "This is incorrect because in Fig.1(c) the blue curve (DPS-CM) is in fact the lowest at t=0, not the red curve (DPSₙₜ). Moreover, in Fig.2 the rightmost DPS-CM image is visibly sharper than the DPSₙₜ image, contradicting the perturbed claim.", "claim": "Fig.1(c) shows that at t=0 the DPSₙₜ curve (red) achieves the lowest reconstruction MSE compared to DPS (purple) and DPS-CM (blue).
{ "paperid": "2411.09850v1", "paper_path": "./SciVer/papers/2411.09850v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09850v1_figure_1(c).png", "item2_path": "./SciVer/images/2411.09850v1_figure_2.png", "section": [ "3.1" ], "request_id": 235, "origin_statement": "Fig.1(c) shows that at t=0 the DPS-CM curve (blue) achieves the lowest reconstruction MSE compared to DPS (purple) and DPSₙₜ (red). Consequently, in Fig.2 the DPS-CM output (rightmost) appears noticeably sharper than DPSₙₜ, demonstrating that lower MSE yields better visual fidelity.", "perturbed_statement": "Fig.1(c) shows that at t=0 the DPSₙₜ curve (red) achieves the lowest reconstruction MSE compared to DPS (purple) and DPS-CM (blue). Consequently, in Fig.2 the DPSₙₜ output appears noticeably sharper than DPS-CM, demonstrating that lower MSE yields better visual fidelity.", "perturbed_explanation": "This is incorrect because in Fig.1(c) the blue curve (DPS-CM) is in fact the lowest at t=0, not the red curve (DPSₙₜ). Moreover, in Fig.2 the rightmost DPS-CM image is visibly sharper than the DPSₙₜ image, contradicting the perturbed claim.", "claim": "Fig.1(c) shows that at t=0 the DPSₙₜ curve (red) achieves the lowest reconstruction MSE compared to DPS (purple) and DPS-CM (blue). Consequently, in Fig.2 the DPSₙₜ output appears noticeably sharper than DPS-CM, demonstrating that lower MSE yields better visual fidelity.", "label": false },
{ "paperid": "2409.05305v1", "paper_path": "./SciVer/papers/2409.05305v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05305v1_figure_3.png", "item2_path": "./SciVer/images/2409.05305v1_figure_4.png", "section": [ "4.2" ], "request_id": 236, "origin_statement": "The Pareto plot for the 1D polynomial potential (Fig3) retrieves the expression x1^2 + x2^2 as the closest match, indicating the network encodes a second-order invariant; subsequently in the 2×2 matrix trace experiment (Fig4) it identifies trace = x1 + x4, showing a shift to a linear invariant.", "perturbed_statement": "The Pareto plot for the 1D polynomial potential (Fig3) retrieves the expression x1 + x2 as the closest match, indicating the network encodes a first-order invariant; consequently in the 2×2 matrix trace experiment (Fig4) it identifies trace = x1 + x4, reflecting consistent linear encoding.", "perturbed_explanation": "This is incorrect because Fig3’s red bar clearly labels the retrieved expression as x1^2 + x2^2, not x1 + x2; thus the network did not encode a first-order invariant in the polynomial potential experiment.", "claim": "The Pareto plot for the 1D polynomial potential (Fig3) retrieves the expression x1 + x2 as the closest match, indicating the network encodes a first-order invariant; consequently in the 2×2 matrix trace experiment (Fig4) it identifies trace = x1 + x4, reflecting consistent linear encoding.", "label": false },
{ "paperid": "2410.20063v2", "paper_path": "./SciVer/papers/2410.20063v2.json", "claim_type": "sequential", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20063v2_figure_2.png", "item2_path": "./SciVer/images/2410.20063v2-Table1-1.png", "section": [ "3" ], "request_id": 237, "origin_statement": "Table 1 gives N^ST_tag=689,042 and ε^ST=25.50% for D−→K+π−π−π0, implying ≃2.70×10^6 decays produced. In Fig. 2 the D−→K+π−π−π0 peak is ≃21×10^3 events, about 0.78% of those decays.", "perturbed_statement": "Table 1 gives N^ST_tag=689,042 and ε^ST=30.00% for D−→K+π−π−π0, implying ≃2.30×10^6 decays produced. In Fig. 2 the D−→K+π−π−π0 peak is ≃21×10^3 events, about 0.90% of those decays.", "perturbed_explanation": "The perturb misreads ε^ST for D−→K+π−π−π0: Table 1 actually lists 25.50%, not 30.00%. Using 25.50% gives ~2.70×10^6 produced decays, so the stated 2.30×10^6 and 0.90% fraction are incorrect.", "claim": "Table 1 gives N^ST_tag=689,042 and ε^ST=30.00% for D−→K+π−π−π0, implying ≃2.30×10^6 decays produced. In Fig. 2 the D−→K+π−π−π0 peak is ≃21×10^3 events, about 0.90% of those decays.", "label": false },
{ "paperid": "2411.15799v1", "paper_path": "./SciVer/papers/2411.15799v1.json", "claim_type": "sequential", "item1": "9(g)", "item2": "9(n)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15799v1_figure_9(g).png", "item2_path": "./SciVer/images/2411.15799v1_figure_9(n).png", "section": [ "4.6" ], "request_id": 238, "origin_statement": "The input back image shows a right scapular bulge, and the Grad-CAM heatmap highlights the lower right back. This indicates the model uses right-side asymmetry to predict the 32° Cobb angle severity.", "perturbed_statement": "The input back image shows a left scapular bulge, and the Grad-CAM heatmap highlights the upper left back. This indicates the model uses left-side asymmetry to predict the 32° Cobb angle severity.", "perturbed_explanation": "The perturbation is incorrect because the context and images clearly show a protruding right scapula, not left, and the Grad-CAM map emphasizes the lower right region, not the upper left, when estimating the 32° Cobb angle.", "claim": "The input back image shows a left scapular bulge, and the Grad-CAM heatmap highlights the upper left back. This indicates the model uses left-side asymmetry to predict the 32° Cobb angle severity.", "label": false },
{ "paperid": "2409.15440v1", "paper_path": "./SciVer/papers/2409.15440v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "7(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15440v1_figure_3(b).png", "item2_path": "./SciVer/images/2409.15440v1_figure_7(c).png", "section": [ "6" ], "request_id": 242, "origin_statement": "The northeastern outer lobe seen in the GMRT 150 MHz map (Fig. 3b) extends ≳60 kpc NE of the nucleus, and this region coincides with the X-ray surface brightness deficit NE of the center in the Chandra 0.5–4 keV image (Fig. 7b), showing the lobe fills that cavity.", "perturbed_statement": "The southwestern outer lobe seen in the GMRT 150 MHz map (Fig. 3b) extends ≳60 kpc SW of the nucleus, and this region coincides with the X-ray surface brightness deficit SW of the center in the Chandra 0.5–4 keV image (Fig. 7b), showing the lobe fills that cavity.", "perturbed_explanation": "Fig. 3b clearly shows the extended outer lobe lying to the northeast of the nucleus, not the southwest, and Fig. 7b locates the X-ray deficit to the NE, not SW. Thus there is no SW cavity filled by a radio lobe in the data.", "claim": "The southwestern outer lobe seen in the GMRT 150 MHz map (Fig. 3b) extends ≳60 kpc SW of the nucleus, and this region coincides with the X-ray surface brightness deficit SW of the center in the Chandra 0.5–4 keV image (Fig. 7b), showing the lobe fills that cavity.", "label": false },
{ "paperid": "2411.16393v1", "paper_path": "./SciVer/papers/2411.16393v1.json", "claim_type": "sequential", "item1": "14(a)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16393v1_figure_14(a).png", "item2_path": "./SciVer/images/2411.16393v1-Table5-1.png", "section": [ "5.1" ], "request_id": 243, "origin_statement": "Figure 14 shows the disk’s radius is 25 mm. Table 5 lists the regularization length ℓ = 1.25 mm. Hence, the regularization length is one-twentieth of the specimen’s radius.", "perturbed_statement": "Figure 14 shows the disk’s radius is 30 mm. Table 5 lists the regularization length ℓ = 1.25 mm. Hence, the regularization length is one-twenty-fourth of the specimen’s radius.", "perturbed_explanation": "The schematic in Figure 14 indicates the disk’s radius is 25 mm, not 30 mm as claimed. Therefore, dividing ℓ = 1.25 mm by the true radius of 25 mm yields one-twentieth, not one-twenty-fourth.", "claim": "Figure 14 shows the disk’s radius is 30 mm. Table 5 lists the regularization length ℓ = 1.25 mm. Hence, the regularization length is one-twenty-fourth of the specimen’s radius.", "label": false },
{ "paperid": "2409.09641v2", "paper_path": "./SciVer/papers/2409.09641v2.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.09641v2_figure_1.png", "item2_path": "./SciVer/images/2409.09641v2_figure_3.png", "section": [ "4.1" ], "request_id": 246, "origin_statement": "On the child’s turn, AACessTalk presents 16 vocabulary options: four Topic cards, four Action cards, four Emotion cards, plus four default Core cards.", "perturbed_statement": "On the child’s turn, AACessTalk presents 13 vocabulary options: three Topic cards, three Action cards, three Emotion cards, plus four default Core cards.", "perturbed_explanation": "The perturbed statement contradicts Figure 3 and the text, which specify that each of the three categories (Topic, Action, Emotion) shows four cards (not three), yielding 12 AI-recommended cards plus four default Core cards, for a total of 16 rather than 13.", "claim": "On the child’s turn, AACessTalk presents 13 vocabulary options: three Topic cards, three Action cards, three Emotion cards, plus four default Core cards.", "label": false },
{ "paperid": "2410.22517v1", "paper_path": "./SciVer/papers/2410.22517v1.json", "claim_type": "sequential", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22517v1_figure_7.png", "item2_path": "./SciVer/images/2410.22517v1-Table3-1.png", "section": [ "6.3" ], "request_id": 247, "origin_statement": "At temperature 1.0, top-p=0.9, top-k=80, the average absolute candidate count difference falls from 6.0 pre-intervention to 4.33 post-intervention. Meanwhile, Table 3 shows nationality perplexity rises merely 0.49 (19.10→19.59), so Atlas reduces bias with negligible fluency loss.", "perturbed_statement": "At temperature 1.0, top-p=0.9, top-k=80, the average absolute candidate count difference falls from 6.0 pre-intervention to 5.25 post-intervention. Meanwhile, Table 3 shows nationality perplexity rises only 0.20 (19.10→19.30), so Atlas reduces bias with negligible fluency loss.", "perturbed_explanation": "Figure 7 actually shows a post-intervention count difference of 4.33 at (1.0,0.9,80), not 5.25. Table 3 reports nationality perplexity going from 19.10 to 19.59 (+0.49), not to 19.30 (+0.20), so both figures contradict the perturbed values.", "claim": "At temperature 1.0, top-p=0.9, top-k=80, the average absolute candidate count difference falls from 6.0 pre-intervention to 5.25 post-intervention. Meanwhile, Table 3 shows nationality perplexity rises only 0.20 (19.10→19.30), so Atlas reduces bias with negligible fluency loss.", "label": false },
{ "paperid": "2411.07672v1", "paper_path": "./SciVer/papers/2411.07672v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(h)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07672v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.07672v1_figure_4(h).png", "section": [ "5.2" ], "request_id": 249, "origin_statement": "The original Wisconsin graph (Fig.4a) shows h_edge=0.18; the MLP-based reconstruction (Fig.4h) achieves h_edge=1.00. This implies that a simple label-sampling method can fully recover intra-class connectivity, outperforming the heterophilous original structure without complex GSL techniques.", "perturbed_statement": "Figure 4a shows the original Wisconsin graph with h_edge=0.50, and Figure 4h shows the MLP reconstruction at h_edge=0.95, indicating simple label-sampling only modestly improves homophily by 0.45, making complex GSL methods unnecessary for achieving slight connectivity gains.", "perturbed_explanation": "The perturbed claim misreports both homophily values. In Fig.4a, the original graph’s h_edge is 0.18 (not 0.50), and in Fig.4h, the MLP reconstruction’s h_edge is 1.00 (not 0.95). These contradictions to the actual captions render the statement false.", "claim": "Figure 4a shows the original Wisconsin graph with h_edge=0.50, and Figure 4h shows the MLP reconstruction at h_edge=0.95, indicating simple label-sampling only modestly improves homophily by 0.45, making complex GSL methods unnecessary for achieving slight connectivity gains.", "label": false },
{ "paperid": "2410.00903v2", "paper_path": "./SciVer/papers/2410.00903v2.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.00903v2_figure_6.png", "item2_path": "./SciVer/images/2410.00903v2_figure_7.png", "section": [ "5.3" ], "request_id": 250, "origin_statement": "From Fig6 (separability top left), the proposed new estimator’s propensity scores are symmetric and far from zero. Given this overlap, Fig7 shows that as sample size increases from 1000 to 2000, bias for new texts decreases from about –8 to –2, with coverage remaining near 95%.", "perturbed_statement": "Since Fig6 shows symmetric overlap for new texts under separability, Fig7 indicates the bias for reused texts increases from –4 to –3 as sample size grows from 1000 to 4000.", "perturbed_explanation": "This is incorrect because, according to Fig7, the bias for reused texts actually moves from roughly –4 at sample size 1000 to about –1 at sample size 4000, not to –3. Thus the stated increase to –3 contradicts the plotted bias values.", "claim": "Since Fig6 shows symmetric overlap for new texts under separability, Fig7 indicates the bias for reused texts increases from –4 to –3 as sample size grows from 1000 to 4000.", "label": false },
{ "paperid": "2410.16092v1", "paper_path": "./SciVer/papers/2410.16092v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "7(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.16092v1_figure_7(a).png", "item2_path": "./SciVer/images/2410.16092v1_figure_7(b).png", "section": [ "3.5", "3.6" ], "request_id": 255, "origin_statement": "ChangeGuard successfully executes 257 of 299 code changes—about 2.11 times more than the baseline’s 122 executions—and with a median line coverage of 92% versus baseline’s 27%, ChangeGuard covers over three times more code lines overall.", "perturbed_statement": "ChangeGuard successfully executes 257 of 299 code changes—about 2.11 times more than the baseline’s 122 executions—and with a median line coverage of 92% versus baseline’s 37%, ChangeGuard covers over three times more code lines overall.", "perturbed_explanation": "The perturbation incorrectly states the baseline’s median line coverage as 37%. Figure 7(b) shows the baseline median coverage is about 27% (orange median line), not 37%, so the asserted coverage ratio (>3×) is based on a false premise and does not follow from the actual data.", "claim": "ChangeGuard successfully executes 257 of 299 code changes—about 2.11 times more than the baseline’s 122 executions—and with a median line coverage of 92% versus baseline’s 37%, ChangeGuard covers over three times more code lines overall.", "label": false },
{ "paperid": "2411.12845v1", "paper_path": "./SciVer/papers/2411.12845v1.json", "claim_type": "sequential", "item1": "5", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.12845v1_figure_5.png", "item2_path": "./SciVer/images/2411.12845v1_figure_7.png", "section": [ "4.2" ], "request_id": 256, "origin_statement": "At the end of 2023, Figure 5 shows CPI-Common-MS3 (green) below CPI-Common-MS4 (yellow); Figure 7 shows CPI-Common-MS3 (cyan) also below the structural-break CPI-Common-SC (green). Hence CPI-Common-MS3 provides a consistently more optimistic (lower) underlying inflation signal than both alternative measures at year-end.", "perturbed_statement": "At the end of 2023, Figure 5 shows CPI-Common-MS3 (green) above CPI-Common-MS4 (yellow); Figure 7 shows CPI-Common-MS3 (cyan) above the structural-break CPI-Common-SC (green). Hence CPI-Common-MS3 provides a consistently less optimistic (higher) underlying inflation signal than both alternative measures at year-end.", "perturbed_explanation": "The perturbed claim is incorrect because in Figure 5 the green line (CPI-Common-MS3) lies below the yellow line (CPI-Common-MS4) at the end of 2023, and in Figure 7 the cyan line (CPI-Common-MS3) lies below the green line (CPI-Common-SC) at the same date. Thus CPI-Common-MS3 is actually lower, not higher, than the other measures.", "claim": "At the end of 2023, Figure 5 shows CPI-Common-MS3 (green) above CPI-Common-MS4 (yellow); Figure 7 shows CPI-Common-MS3 (cyan) above the structural-break CPI-Common-SC (green). Hence CPI-Common-MS3 provides a consistently less optimistic (higher) underlying inflation signal than both alternative measures at year-end.", "label": false },
{ "paperid": "2410.04784v1", "paper_path": "./SciVer/papers/2410.04784v1.json", "claim_type": "sequential", "item1": "2", "item2": "7", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.04784v1-Table2-1.png", "item2_path": "./SciVer/images/2410.04784v1-Table7-1.png", "section": [ "3.2" ], "request_id": 257, "origin_statement": "Table2 shows a 63.9% preference for newspaper over novel style. Table7 indicates “major” test statements use novel style. Therefore, for major-related queries framed in novel style, the model will still favor newspaper-style knowledge.", "perturbed_statement": "Table2 shows a 76.0% preference for newspaper over novel style. Table7 indicates “major” test statements mimic newspaper style. Therefore, for major-related queries in newspaper-style phrasing, the model strongly prefers newspaper-style knowledge.", "perturbed_explanation": "This is incorrect because Table2 reports a 63.9% preference for Newspapers vs Novels, not 76.0%. Also, Table7’s “major” statement (‘{} went to university and hone her skills in {}.’) is in novel style, not newspaper style.", "claim": "Table2 shows a 76.0% preference for newspaper over novel style. Table7 indicates “major” test statements mimic newspaper style. Therefore, for major-related queries in newspaper-style phrasing, the model strongly prefers newspaper-style knowledge.", "label": false },
{ "paperid": "2410.20399v1", "paper_path": "./SciVer/papers/2410.20399v1.json", "claim_type": "sequential", "item1": "9(c)", "item2": "9(e)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.20399v1_figure_9(c).png", "item2_path": "./SciVer/images/2410.20399v1_figure_9(e).png", "section": [ "4.1" ], "request_id": 260, "origin_statement": "At sequence length 4096, ThunderKittens' linear attention reaches 210 TFLOPs, and its fused dropout-residual-norm runs at 1.1 TFLOPs versus PyTorch’s 0.7 TFLOPs, showing a 0.4 TFLOPs performance lift.", "perturbed_statement": "At sequence length 4096, ThunderKittens' linear attention reaches 210 TFLOPs, and its fused dropout-residual-norm runs at 1.1 TFLOPs versus PyTorch’s 0.5 TFLOPs, showing a 0.6 TFLOPs performance lift.", "perturbed_explanation": "This is incorrect because in the fused dropout-residual-norm chart at length 4096, PyTorch actually achieves 0.7 TFLOPs (not 0.5). Therefore the true performance uplift is 1.1 – 0.7 = 0.4 TFLOPs, not 0.6 TFLOPs.", "claim": "At sequence length 4096, ThunderKittens' linear attention reaches 210 TFLOPs, and its fused dropout-residual-norm runs at 1.1 TFLOPs versus PyTorch’s 0.5 TFLOPs, showing a 0.6 TFLOPs performance lift.", "label": false },
{ "paperid": "2410.01485v1", "paper_path": "./SciVer/papers/2410.01485v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.01485v1-Table5-1.png", "item2_path": "./SciVer/images/2410.01485v1-Table6-1.png", "section": [ "4.4" ], "request_id": 262, "origin_statement": "Table 5 shows that placing full attention layers in the middle yields the highest average BABILong score (0.27). Table 6 shows that using 1/3 full layers also yields an average of 0.27. Therefore, placing 1/3 full layers in the middle maximizes long-context performance.", "perturbed_statement": "Table 5 shows that interleaving full attention layers yields the highest average BABILong score (0.25). Table 6 shows that using 1/5 full layers yields an average of 0.26. Therefore, interleaving 1/5 full layers maximizes long-context performance.", "perturbed_explanation": "The perturbed statement is wrong because Table 5 actually reports the highest average score for middle placement (0.27), whereas interleaving only achieves 0.24, not 0.25. Moreover, Table 6 shows 1/3 full layers achieves 0.27, outperforming 1/5 full layers at 0.26, so interleaving with 1/5 layers does not maximize performance.", "claim": "Table 5 shows that interleaving full attention layers yields the highest average BABILong score (0.25). Table 6 shows that using 1/5 full layers yields an average of 0.26. Therefore, interleaving 1/5 full layers maximizes long-context performance.", "label": false },
{ "paperid": "2410.01964v1", "paper_path": "./SciVer/papers/2410.01964v1.json", "claim_type": "sequential", "item1": "13", "item2": "16", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01964v1_figure_13.png", "item2_path": "./SciVer/images/2410.01964v1_figure_16.png", "section": [ "4.2.2" ], "request_id": 263, "origin_statement": "Figure 13 shows Crack 2 extends ~0.8 mm parallel to the Ta/W interface in 14M1. Figure 16 shows BM2 exhibits linear diffuse dye indications at the interface. Therefore, BM2 likely harbors superficial intergranular cracks analogous to 14M1’s interface cracks.", "perturbed_statement": "Figure 13 shows Crack 2 extends ~1.5 mm along the Ta/W interface in 14M1. Figure 16 shows BM1 exhibits linear diffuse dye indications at the interface. Therefore, BM1 likely harbors superficial intergranular cracks analogous to 14M1’s interface cracks.", "perturbed_explanation": "The perturbed statement is incorrect because Crack 2 actually measures ~0.8 mm long (not 1.5 mm) in Figure 13, and Figure 16 shows BM1 had no linear diffuse indications at the interface (only BM2 did). These details contradict the premises of the false claim.", "claim": "Figure 13 shows Crack 2 extends ~1.5 mm along the Ta/W interface in 14M1. Figure 16 shows BM1 exhibits linear diffuse dye indications at the interface. Therefore, BM1 likely harbors superficial intergranular cracks analogous to 14M1’s interface cracks.", "label": false },
{ "paperid": "2409.05769v1", "paper_path": "./SciVer/papers/2409.05769v1.json", "claim_type": "sequential", "item1": "12", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05769v1_figure_12.png", "item2_path": "./SciVer/images/2409.05769v1-Table3-1.png", "section": [ "4" ], "request_id": 265, "origin_statement": "In the upward‐flow phase, the photospheric LOS velocity increases to 2–3 km s⁻¹ (Table 3), and at 4.2 min Fig. 12 C shows a chromospheric jet emerging near 1500 km, implying that the photospheric rebound shock drives the high‐altitude jet.", "perturbed_statement": "In the upward‐flow phase, the photospheric LOS velocity increases to 5–10 km s⁻¹ (Table 3), and at 4.2 min Fig. 12 C shows a chromospheric jet emerging near 1500 km, implying that the photospheric rebound shock drives the high‐altitude jet.", "perturbed_explanation": "The perturbation is incorrect because Table 3 lists the photospheric LOS velocity in the upward‐flow phase as 2–3 km s⁻¹, not 5–10 km s⁻¹, so the claimed 5–10 km s⁻¹ does not match the data.", "claim": "In the upward‐flow phase, the photospheric LOS velocity increases to 5–10 km s⁻¹ (Table 3), and at 4.2 min Fig. 12 C shows a chromospheric jet emerging near 1500 km, implying that the photospheric rebound shock drives the high‐altitude jet.", "label": false },
{ "paperid": "2411.07517v1", "paper_path": "./SciVer/papers/2411.07517v1.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07517v1_figure_3.png", "item2_path": "./SciVer/images/2411.07517v1-Table1-1.png", "section": [ "3.2" ], "request_id": 266, "origin_statement": "Using Table 1’s ρ_air=1.21 kg/m3 and c_air=340 m/s alongside ρ_EPS=28.0 kg/m3 and c_EPS=414 m/s, acoustic impedances are ≈411 Rayl and ≈11592 Rayl, giving a reflectivity of ≈93.2%, consistent with the strong reflection fringes around object silhouettes in Fig.3(b).", "perturbed_statement": "Using Table 1’s ρ_air=1.21 kg/m3 and c_air=340 m/s alongside ρ_EPS=18.0 kg/m3 and c_EPS=414 m/s, acoustic impedances are ≈411 Rayl and ≈7452 Rayl, giving a reflectivity of ≈89.6%, consistent with the strong reflection fringes around object silhouettes in Fig.3(b).", "perturbed_explanation": "The perturbation erroneously uses ρ_EPS=18.0 kg/m3, but Table 1 specifies ρ_EPS=28.0 kg/m3. This incorrect density yields a wrong impedance and reflectivity (~89.6%) that contradicts the true reflectivity of ≈93.2% stated in the context.", "claim": "Using Table 1’s ρ_air=1.21 kg/m3 and c_air=340 m/s alongside ρ_EPS=18.0 kg/m3 and c_EPS=414 m/s, acoustic impedances are ≈411 Rayl and ≈7452 Rayl, giving a reflectivity of ≈89.6%, consistent with the strong reflection fringes around object silhouettes in Fig.3(b).", "label": false },
{ "paperid": "2410.17276v2", "paper_path": "./SciVer/papers/2410.17276v2.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.17276v2_figure_2.png", "item2_path": "./SciVer/images/2410.17276v2-Table2-1.png", "section": [ "4.3" ], "request_id": 267, "origin_statement": "The count ratio of head to mid items in RetailRocket (2337/7655 ≈30.5%, Fig. 2) closely matches their item percentage ratio (6.3%/20.8% ≈30.3%, Table 2), indicating consistent popularity cohort definitions across raw counts and percentages.", "perturbed_statement": "The count ratio of head to mid items in RetailRocket (2000/7655 ≈26.1%, Fig. 2) closely matches their item percentage ratio (6.3%/20.8% ≈30.3%, Table 2), indicating consistent popularity cohort definitions across raw counts and percentages.", "perturbed_explanation": "This is incorrect because Fig. 2 shows 2337 head items, not 2000. Using the actual 2337 head items gives a ratio of ≈30.5%, which aligns with the 30.3% ratio from Table 2, so the stated 26.1% is wrong.", "claim": "The count ratio of head to mid items in RetailRocket (2000/7655 ≈26.1%, Fig. 2) closely matches their item percentage ratio (6.3%/20.8% ≈30.3%, Table 2), indicating consistent popularity cohort definitions across raw counts and percentages.", "label": false },
{ "paperid": "2410.08469v2", "paper_path": "./SciVer/papers/2410.08469v2.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.08469v2_figure_3.png", "item2_path": "./SciVer/images/2410.08469v2-Table1-1.png", "section": [ "4.1", "4.1.2" ], "request_id": 268, "origin_statement": "Figure 3 shows 'striped' attention weight is low for Blue headed Vireo versus White eyed Vireo, implying focus shifts to semantic cues. Table 1 shows adding nonsensical tokens drops Caltech101 accuracy (97.42%→97.30%), demonstrating gains stem from semantic attention, not parameter count.", "perturbed_statement": "Figure 3 shows 'striped' attention weight is high for Blue headed Vireo versus White eyed Vireo, implying striped is the key cue. Table 1 shows adding nonsensical tokens raises Caltech101 accuracy (97.42%→97.82%), demonstrating gains stem from extra parameters, not semantic attention.", "perturbed_explanation": "The perturbation is wrong because Figure 3 actually shows the 'striped' token weight drops (becomes low) when distinguishing Blue headed Vireo from White eyed Vireo, and Table 1 reports Caltech101 accuracy falls from 97.42% to 97.30% with nonsensical tokens, not rises to 97.82%.", "claim": "Figure 3 shows 'striped' attention weight is high for Blue headed Vireo versus White eyed Vireo, implying striped is the key cue. Table 1 shows adding nonsensical tokens raises Caltech101 accuracy (97.42%→97.82%), demonstrating gains stem from extra parameters, not semantic attention.", "label": false },
{ "paperid": "2411.16342v1", "paper_path": "./SciVer/papers/2411.16342v1.json", "claim_type": "sequential", "item1": "2", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.16342v1-Table2-1.png", "item2_path": "./SciVer/images/2411.16342v1-Table4-1.png", "section": [ "3.2" ], "request_id": 269, "origin_statement": "With 8 tiling schemes in Table 2, the base+features+log model in Table 4 reduces degradation over optimal on SparseSuite.Mix from 92.68% to 5.24%, a drop of 87.44%, demonstrating significant accuracy improvement across all tilings.", "perturbed_statement": "With 7 tiling schemes in Table 2, the base+features+log model in Table 4 reduces degradation over optimal on SparseSuite.Mix from 92.68% to 2.34%, a drop of 90.34%, demonstrating exceptional accuracy improvement across all tilings.", "perturbed_explanation": "The perturbed statement is wrong because Table 2 actually lists 8 tiling schemes, not 7, and Table 4 reports the base+features+log degradation over optimal for SparseSuite.Mix as 5.24%, not 2.34%. Both details contradict the context.", "claim": "With 7 tiling schemes in Table 2, the base+features+log model in Table 4 reduces degradation over optimal on SparseSuite.Mix from 92.68% to 2.34%, a drop of 90.34%, demonstrating exceptional accuracy improvement across all tilings.", "label": false },
{ "paperid": "2409.05048v1", "paper_path": "./SciVer/papers/2409.05048v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05048v1_figure_3.png", "item2_path": "./SciVer/images/2409.05048v1_figure_4.png", "section": [ "5" ], "request_id": 270, "origin_statement": "Figure 3 shows plskz362 converges in fewer iterations (~2000) than plsk1919 (~3000); Figure 4 demonstrates average CPU per eigenpair increases with iteration count, so plskz362 incurs lower average CPU time than plsk1919.", "perturbed_statement": "Figure 3 shows plskz362 converges in fewer iterations (~2000) than plsk1919 (~3000); Figure 4 demonstrates average CPU per eigenpair decreases with iteration count, so plskz362 incurs higher average CPU time than plsk1919.", "perturbed_explanation": "The perturbed statement wrongly claims that average CPU decreases with iterations, but Figure 4 actually shows that average CPU per eigenpair increases as iteration count grows. Therefore, plskz362—with fewer iterations—cannot incur higher average CPU time.", "claim": "Figure 3 shows plskz362 converges in fewer iterations (~2000) than plsk1919 (~3000); Figure 4 demonstrates average CPU per eigenpair decreases with iteration count, so plskz362 incurs higher average CPU time than plsk1919.", "label": false },
{ "paperid": "2409.19611v1", "paper_path": "./SciVer/papers/2409.19611v1.json", "claim_type": "sequential", "item1": "3", "item2": "5", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.19611v1-Table3-1.png", "item2_path": "./SciVer/images/2409.19611v1-Table5-1.png", "section": [ "7.1" ], "request_id": 271, "origin_statement": "Table 3 shows CB is a SuperGLUE NLI dataset, and Table 5 assigns NLI tasks the prompt “What is the logical relationship between the ‘sentence 1’ and the ‘sentence 2’?”, so CB uses that NLI prompt.", "perturbed_statement": "Table 3 shows CB is a SuperGLUE QA dataset, and Table 5 assigns QA tasks the BoolQA prompt “According to the following passage, is the question true or false? Choose one from the option.”, so CB uses that QA prompt.", "perturbed_explanation": "In Table 3, CB’s Task is listed as NLI (not QA), so it should use the NLI prompt from Table 5, not the BoolQA QA template. The perturbed step mislabels CB’s task type, contradicting Table 3.", "claim": "Table 3 shows CB is a SuperGLUE QA dataset, and Table 5 assigns QA tasks the BoolQA prompt “According to the following passage, is the question true or false? Choose one from the option.”, so CB uses that QA prompt.", "label": false },
{ "paperid": "2409.10046v1", "paper_path": "./SciVer/papers/2409.10046v1.json", "claim_type": "sequential", "item1": "3", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.10046v1_figure_3.png", "item2_path": "./SciVer/images/2409.10046v1-Table2-1.png", "section": [ "2.2", "4.3" ], "request_id": 272, "origin_statement": "Table 2 shows the full-feature XGBoost model (Model 5) has 91.6% accuracy. Figure 3a shows North America’s regional accuracy exceeds 0.90. Therefore, the full XGBoost model generalizes exceptionally well in North America.", "perturbed_statement": "Table 2 shows the full-feature XGBoost model (Model 5) has 91.6% accuracy. Figure 3a shows Europe’s regional accuracy exceeds 0.95. Therefore, the full XGBoost model generalizes exceptionally well in Europe.", "perturbed_explanation": "In Figure 3a, European grid cells are colored light green to yellow, corresponding to accuracies around 0.6–0.9, not above 0.95. No region in Europe on that map reaches the 0.95 accuracy threshold, contradicting the perturbed claim.", "claim": "Table 2 shows the full-feature XGBoost model (Model 5) has 91.6% accuracy. Figure 3a shows Europe’s regional accuracy exceeds 0.95. Therefore, the full XGBoost model generalizes exceptionally well in Europe.", "label": false },
{ "paperid": "2411.05266v1", "paper_path": "./SciVer/papers/2411.05266v1.json", "claim_type": "sequential", "item1": "11", "item2": "12", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05266v1_figure_11.png", "item2_path": "./SciVer/images/2411.05266v1_figure_12.png", "section": [ "4.3" ], "request_id": 275, "origin_statement": "In Figure 13, the volume intersection scatter plot shows deletion-induced distances align closely with substitution-induced distances (points near the diagonal). Figure 12’s middle-row heatmaps further show that changing bandwidth barely affects these distances. Therefore, volume intersection method consistently equates deletion and substitution mutation distances across bandwidths.", "perturbed_statement": "In Figure 13, the volume intersection scatter plot shows deletion-induced distances are much larger than substitution-induced distances (points above the diagonal). Figure 12’s middle-row heatmaps further show that changing bandwidth dramatically alters these distances. Therefore, volume intersection method inconsistently compares deletion and substitution distances under different bandwidths.", "perturbed_explanation": "This statement is wrong because in Figure 13 the volume intersection points cluster along the diagonal, indicating deletion distances closely match substitution distances, not greatly exceed them. Moreover, Figure 12’s middle-row heatmaps exhibit only subtle distance changes across bandwidths, not the dramatic alterations claimed.", "claim": "In Figure 13, the volume intersection scatter plot shows deletion-induced distances are much larger than substitution-induced distances (points above the diagonal). Figure 12’s middle-row heatmaps further show that changing bandwidth dramatically alters these distances. Therefore, volume intersection method inconsistently compares deletion and substitution distances under different bandwidths.", "label": false },
{ "paperid": "2411.13870v1", "paper_path": "./SciVer/papers/2411.13870v1.json", "claim_type": "sequential", "item1": "2", "item2": "8(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13870v1_figure_2.png", "item2_path": "./SciVer/images/2411.13870v1_figure_8(a).png", "section": [ "3.5" ], "request_id": 276, "origin_statement": "Fig. 2c shows gas velocities spanning –38.25 to –27.25 km/s. Fig. 8a’s l–v diagram splits at –35.25 km/s into blue (–38.25 to –35.5 km/s) and red (–35.25 to –27.25 km/s) components, confirming the blue-shifted filament corresponds to the lower-velocity tail in the moment-1 map.", "perturbed_statement": "Fig. 2c shows gas velocities spanning –38.25 to –27.25 km/s. Fig. 8a’s l–v diagram splits at –36.75 km/s into blue (–38.25 to –35.5 km/s) and red (–35.25 to –27.25 km/s) components, confirming the blue-shifted filament corresponds to the lower-velocity tail in the moment-1 map.", "perturbed_explanation": "The perturbation errs on the division velocity: Figure 8a’s yellow dotted line actually marks the split at –35.25 km/s, not –36.75 km/s. Therefore the claimed separation point contradicts the context’s specified –35.25 km/s division.", "claim": "Fig. 2c shows gas velocities spanning –38.25 to –27.25 km/s. Fig. 8a’s l–v diagram splits at –36.75 km/s into blue (–38.25 to –35.5 km/s) and red (–35.25 to –27.25 km/s) components, confirming the blue-shifted filament corresponds to the lower-velocity tail in the moment-1 map.", "label": false },
{ "paperid": "2410.12057v2", "paper_path": "./SciVer/papers/2410.12057v2.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.12057v2_figure_1.png", "item2_path": "./SciVer/images/2410.12057v2-Table1-1.png", "section": [ "4" ], "request_id": 278, "origin_statement": "Figure 1 shows GPT-2 assigns next‐word probabilities near 1×10⁻⁸ even when cloze probabilities exceed 0.1; Table 1 then reveals GPT-2’s top completion is “bee” while humans choose “hive,” indicating GPT-2 underestimates and misranks the human-preferred continuation.", "perturbed_statement": "Figure 1 shows GPT-2 assigns next‐word probabilities near 1×10⁻⁸ even when cloze probabilities exceed 0.1; Table 1 then shows GPT-2’s top completion is “hive,” matching the human’s first choice, indicating GPT-2 correctly predicts the most frequent human continuation.", "perturbed_explanation": "Table 1 actually lists GPT-2’s top completion as “bee,” not “hive.” Therefore the claim that GPT-2’s top completion is “hive” contradicts the tabulated data.", "claim": "Figure 1 shows GPT-2 assigns next‐word probabilities near 1×10⁻⁸ even when cloze probabilities exceed 0.1; Table 1 then shows GPT-2’s top completion is “hive,” matching the human’s first choice, indicating GPT-2 correctly predicts the most frequent human continuation.", "label": false },
{ "paperid": "2409.11176v1", "paper_path": "./SciVer/papers/2409.11176v1.json", "claim_type": "sequential", "item1": "4(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11176v1_figure_4(b).png", "item2_path": "./SciVer/images/2409.11176v1-Table3-1.png", "section": [ "3.1" ], "request_id": 279, "origin_statement": "The steeper slope of the high‐energy points in the 0.5–2.0″ rotation diagram (Figure 4) implies a hot H₂ component at ~1260 K, which Table 3 then converts into an H₂ mass of 3.1×10⁻⁷ M⊙ for that component.", "perturbed_statement": "The steeper slope of the high‐energy points in the 0.5–2.0″ rotation diagram (Figure 4) implies a warm H₂ component at ~600 K, which Table 3 then converts into an H₂ mass of 3.1×10⁻⁷ M⊙ for that component.", "perturbed_explanation": "Table 3 reports the 3.1×10⁻⁷ M⊙ mass specifically for the hot component at T_hot=1260±90 K in the 0.5–2.0″ region. There is no listed M_warm for that aperture, and the 600 K temperature refers to the warm component only in the full or inner apertures, not the annulus. Thus attributing the 3.1×10⁻⁷ M⊙ to a 600 K warm component contradicts the table.", "claim": "The steeper slope of the high‐energy points in the 0.5–2.0″ rotation diagram (Figure 4) implies a warm H₂ component at ~600 K, which Table 3 then converts into an H₂ mass of 3.1×10⁻⁷ M⊙ for that component.", "label": false },
{ "paperid": "2409.07088v1", "paper_path": "./SciVer/papers/2409.07088v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.07088v1-Table2-1.png", "item2_path": "./SciVer/images/2409.07088v1-Table3-1.png", "section": [ "6" ], "request_id": 280, "origin_statement": "Table 2 indicates WikiOFGraph’s BLEU (45.85) surpasses LAGRANGE’s (40.40) by 5.45 points; in Table 3, WikiOFGraph’s BLEU (69.27) surpasses LAGRANGE’s (39.37) by 29.90 points, thus the BLEU gap is wider on the WikiOFGraph test set than on GenWiki.", "perturbed_statement": "Table 2 indicates WikiOFGraph’s BLEU (45.85) surpasses LAGRANGE’s (40.40) by 15.45 points; in Table 3, WikiOFGraph’s BLEU (69.27) surpasses LAGRANGE’s (39.37) by 9.90 points, thus the BLEU gap is narrower on the WikiOFGraph test set than on GenWiki.", "perturbed_explanation": "Both intermediate margins are incorrect. Table 2’s actual BLEU difference is 45.85−40.40=5.45, not 15.45. Table 3’s actual difference is 69.27−39.37=29.90, not 9.90. Therefore, the BLEU gap on the WikiOFGraph test is larger, not narrower.", "claim": "Table 2 indicates WikiOFGraph’s BLEU (45.85) surpasses LAGRANGE’s (40.40) by 15.45 points; in Table 3, WikiOFGraph’s BLEU (69.27) surpasses LAGRANGE’s (39.37) by 9.90 points, thus the BLEU gap is narrower on the WikiOFGraph test set than on GenWiki.", "label": false },
{ "paperid": "2409.10951v1", "paper_path": "./SciVer/papers/2409.10951v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10951v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.10951v1_figure_2(c).png", "section": [ "3.1" ], "request_id": 282, "origin_statement": "In Figure 2(a), green protected normals tightly cluster with pink protected anomalies, causing traditional AD to mislabel them; in Figure 2(c), FairAD uniformly disperses green points away from pink anomalies, thereby reducing false positives on the protected group.", "perturbed_statement": "In Figure 2(a), green protected normals tightly cluster with pink anomalies, causing traditional AD to mislabel them; in Figure 2(c), FairAD still keeps green points clustered with pink anomalies, so protected false positives remain high.", "perturbed_explanation": "The perturbed statement is incorrect because in Figure 2(c) the green protected normals are uniformly dispersed and separated from the pink anomalies rather than clustered. The image clearly shows no tight green–pink cluster in (c), contradicting the claim that they remain grouped.", "claim": "In Figure 2(a), green protected normals tightly cluster with pink anomalies, causing traditional AD to mislabel them; in Figure 2(c), FairAD still keeps green points clustered with pink anomalies, so protected false positives remain high.", "label": false },
{ "paperid": "2411.13677v1", "paper_path": "./SciVer/papers/2411.13677v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.13677v1-Table5-1.png", "item2_path": "./SciVer/images/2411.13677v1-Table6-1.png", "section": [ "8" ], "request_id": 283, "origin_statement": "BiDex’s bill of materials totals $6,395 (Table 5), and the mobile robot setup costs $31,200 (Table 6). Therefore, BiDex comprises about 20% of the total mobile robot setup cost.", "perturbed_statement": "BiDex’s bill of materials totals $695 (Table 5), and the mobile robot setup costs $31,200 (Table 6). Therefore, BiDex comprises about 2% of the total mobile robot setup cost.", "perturbed_explanation": "The perturbed claim misreads the Manus Meta Gloves cost as $300 (actually $6,000 in Table 5) and so miscalculates BiDex’s total as $695 instead of the correct $6,395.", "claim": "BiDex’s bill of materials totals $695 (Table 5), and the mobile robot setup costs $31,200 (Table 6). Therefore, BiDex comprises about 2% of the total mobile robot setup cost.", "label": false },
{ "paperid": "2410.23090v1", "paper_path": "./SciVer/papers/2410.23090v1.json", "claim_type": "sequential", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.23090v1_figure_5.png", "item2_path": "./SciVer/images/2410.23090v1-Table3-1.png", "section": [ "5.2" ], "request_id": 286, "origin_statement": "LLM Summarization without SFT yields a lower BLEU-1 score than Raw Context (21.0 vs 22.2), and correspondingly its GPT-4 evaluation model wins drop from 48.5% to 47.9%.", "perturbed_statement": "LLM Summarization without SFT yields a higher BLEU-1 score than Raw Context (23.5 vs 22.2), yet its GPT-4 evaluation model wins still drop from 48.5% to 47.9%.", "perturbed_explanation": "The perturbation is wrong because Table 3 shows that LLM Summarization without SFT has a BLEU-1 of 21.0, not 23.5. The incorrect BLEU-1 value invalidates the claim that Summarization has higher BLEU-1 than Raw Context.", "claim": "LLM Summarization without SFT yields a higher BLEU-1 score than Raw Context (23.5 vs 22.2), yet its GPT-4 evaluation model wins still drop from 48.5% to 47.9%.", "label": false },
{ "paperid": "2409.14381v1", "paper_path": "./SciVer/papers/2409.14381v1.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14381v1_figure_2.png", "item2_path": "./SciVer/images/2409.14381v1-Table2-1.png", "section": [ "6.2" ], "request_id": 287, "origin_statement": "Table 2 lists FFN3 as a cornerstone layer in Llama3-70B; Figure 2 shows FFN3 accounts for 23.7% of the ARC-Easy Shapley values; therefore FFN3 alone contributes nearly a quarter of ARC-Easy performance.", "perturbed_statement": "Table 2 lists FFN3 as a cornerstone layer in Llama3-70B; Figure 2 shows FFN3 accounts for only 16.9% of the ARC-Easy Shapley values; therefore FFN3 alone contributes less than a fifth of ARC-Easy performance.", "perturbed_explanation": "In Figure 2’s ARC-Easy pie chart for Llama3-70B, the slice labeled 23.7% corresponds to FFN3, not 16.9%. The 16.9% slice actually refers to FFN0. Thus the perturbed statement misreports FFN3’s Shapley value.", "claim": "Table 2 lists FFN3 as a cornerstone layer in Llama3-70B; Figure 2 shows FFN3 accounts for only 16.9% of the ARC-Easy Shapley values; therefore FFN3 alone contributes less than a fifth of ARC-Easy performance.", "label": false },
{ "paperid": "2411.15835v1", "paper_path": "./SciVer/papers/2411.15835v1.json", "claim_type": "sequential", "item1": "8(a)", "item2": "8(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15835v1_figure_8(a).png", "item2_path": "./SciVer/images/2411.15835v1_figure_8(b).png", "section": [ "4.3" ], "request_id": 291, "origin_statement": "In Figure 8a, the original Q3 plan shows two binary Join(...) operators under Calc nodes; Figure 8b replaces the top-level binary Join with a single MultiJoin(...) node, thus consolidating the binary join tree into one integrated multi-stream join.", "perturbed_statement": "Figure 8a shows three Join operators under Calc nodes, and Figure 8b replaces all three binary joins with a single MultiJoin node, implying consolidation of three separate join streams into one integrated multi-stream join.", "perturbed_explanation": "Figure 8a actually has only two Join operators (one top-level and one nested), not three; and Figure 8b replaces only the top-level Join with a MultiJoin, so it does not consolidate three joins into one.", "claim": "Figure 8a shows three Join operators under Calc nodes, and Figure 8b replaces all three binary joins with a single MultiJoin node, implying consolidation of three separate join streams into one integrated multi-stream join.", "label": false },
{ "paperid": "2410.13000v1", "paper_path": "./SciVer/papers/2410.13000v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.13000v1_figure_2.png", "item2_path": "./SciVer/images/2410.13000v1_figure_3.png", "section": [ "5" ], "request_id": 295, "origin_statement": "Figure 2 shows that nnGP’s prediction error (red) rises steeply as ν increases. Therefore, at ν=1 in Figure 3 (ρ=1), nnGP’s posterior probability error grows faster with N than rational or FEM, making nnGP the least accurate for moderate smoothness and dense predictions.", "perturbed_statement": "Figure 2 shows that nnGP’s prediction error (red) stays nearly constant as ν increases. Therefore, at ν=1 in Figure 3 (ρ=1), nnGP’s posterior probability error grows slower with N than rational or FEM, making nnGP the most accurate for moderate smoothness and dense predictions.", "perturbed_explanation": "Both premises contradict the figures. In Figure 2, the nnGP (dotted red) curve actually rises steeply with ν, not remaining constant. In Figure 3 at ρ=1, the nnGP posterior error (red) increases faster with N than both rational (black) and FEM (dot‐dash), rather than slower.", "claim": "Figure 2 shows that nnGP’s prediction error (red) stays nearly constant as ν increases. Therefore, at ν=1 in Figure 3 (ρ=1), nnGP’s posterior probability error grows slower with N than rational or FEM, making nnGP the most accurate for moderate smoothness and dense predictions.", "label": false },
{ "paperid": "2410.01839v1", "paper_path": "./SciVer/papers/2410.01839v1.json", "claim_type": "sequential", "item1": "2(b)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.01839v1_figure_2(b).png", "item2_path": "./SciVer/images/2410.01839v1_figure_2(c).png", "section": [ "3.1", "3.3" ], "request_id": 299, "origin_statement": "Figure 2(b) shows three loops: one large and two small. In Figure 2(c), the large loop is split across the green boundary into blue and yellow sub-features. Thus, DaC must merge these potential features to recover the full large loop.", "perturbed_statement": "Figure 2(b) shows two loops: one large and one small. In Figure 2(c), the large loop lies entirely in the left sub-region as blue and yellow sub-features. Thus, DaC must merge these potential features to recover the full large loop.", "perturbed_explanation": "The perturbed statement is incorrect because Figure 2(b) actually displays three loops—not two—and in Figure 2(c) the large loop is bisected by the vertical boundary, not wholly contained in the left sub-region. These details contradict the claim’s premises.", "claim": "Figure 2(b) shows two loops: one large and one small. In Figure 2(c), the large loop lies entirely in the left sub-region as blue and yellow sub-features. Thus, DaC must merge these potential features to recover the full large loop.", "label": false },
{ "paperid": "2410.17196v2", "paper_path": "./SciVer/papers/2410.17196v2.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17196v2_figure_3.png", "item2_path": "./SciVer/images/2410.17196v2_figure_4.png", "section": [ "4.2" ], "request_id": 300, "origin_statement": "VITA’s accuracy on North Indian accent in SD-QA is ~12%, lowest among its accents; its response score on Indian accent in AlpacaEval is ~2.95, also the lowest; thus VITA exhibits the poorest performance on Indian accents across both real and synthetic evaluations.", "perturbed_statement": "VITA’s accuracy on North Indian accent in SD-QA is ~22%, moderately high; its response score on Indian accent in AlpacaEval is ~3.7, also among the highest; thus VITA handles Indian accents robustly across both evaluations.", "perturbed_explanation": "Both premises are factually incorrect. In Figure 3, VITA’s accuracy on the North Indian accent (IND-N) is about 12.2%, not 22%. In Figure 4, VITA’s response score for the Indian accent is about 2.95, not 3.7. These misreadings invalidate the claim that VITA handles Indian accents robustly.", "claim": "VITA’s accuracy on North Indian accent in SD-QA is ~22%, moderately high; its response score on Indian accent in AlpacaEval is ~3.7, also among the highest; thus VITA handles Indian accents robustly across both evaluations.", "label": false },
{ "paperid": "2411.12509v1", "paper_path": "./SciVer/papers/2411.12509v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.12509v1-Table3-1.png", "item2_path": "./SciVer/images/2411.12509v1-Table4-1.png", "section": [ "4.1" ], "request_id": 302, "origin_statement": "Table 3 reports that during the first spectator ban home goals fell by 0.452; Table 4 shows this estimate shrinks to 0.374 once covariates are added, implying controls explain part of the initial home goal reduction.", "perturbed_statement": "Table 3 reports that during the first spectator ban home goals fell by 0.252; Table 4 shows this estimate expands to 0.482 once covariates are added, implying controls amplify the home goal loss.", "perturbed_explanation": "The perturbed claim misstates the coefficients in both tables. Table 3 actually shows a –0.452 decline in home goals (not –0.252), and Table 4 shows –0.374 (not –0.482), so covariates do not amplify but rather reduce the estimated drop.", "claim": "Table 3 reports that during the first spectator ban home goals fell by 0.252; Table 4 shows this estimate expands to 0.482 once covariates are added, implying controls amplify the home goal loss.", "label": false },
{ "paperid": "2409.14704v2", "paper_path": "./SciVer/papers/2409.14704v2.json", "claim_type": "sequential", "item1": "3(c)", "item2": "3(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14704v2_figure_3(c).png", "item2_path": "./SciVer/images/2409.14704v2_figure_3(d).png", "section": [ "4.1" ], "request_id": 305, "origin_statement": "From the CLIP plot, average similarity drops from about 0.34 at step 0 to around 0.28 by 1000 steps; using that, the VLEU graph also falls from ~3.2 to ~1.1 in 1000 steps, indicating diminishing model generalizability during finetuning.", "perturbed_statement": "The CLIP score increases from about 0.34 at step 0 to around 0.28 by 1000 steps; similarly, the VLEU metric climbs from ~3.2 to ~1.1 in the first 1000 steps, demonstrating enhanced model generalizability during finetuning.", "perturbed_explanation": "This is wrong because both premises contradict the actual plots: the CLIP score actually decreases from ~0.34 to ~0.28 over 1000 steps (not increases), and the VLEU metric likewise falls from ~3.2 to ~1.1 (not climbs). Hence, the claim of improved generalizability is false.", "claim": "The CLIP score increases from about 0.34 at step 0 to around 0.28 by 1000 steps; similarly, the VLEU metric climbs from ~3.2 to ~1.1 in the first 1000 steps, demonstrating enhanced model generalizability during finetuning.", "label": false },
{ "paperid": "2411.03128v1", "paper_path": "./SciVer/papers/2411.03128v1.json", "claim_type": "sequential", "item1": "6", "item2": "7", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03128v1_figure_6.png", "item2_path": "./SciVer/images/2411.03128v1_figure_7.png", "section": [ "4.2" ], "request_id": 307, "origin_statement": "The MQDT-calculated curve in Fig. 6 peaks at roughly +450 MHz around principal quantum number n≈42 due to the 7f2(J=1) perturber; Fig. 7(a) shows residuals reaching ±30 MHz near n≈42, highlighting the vibrational-channel interaction’s impact on fit accuracy.", "perturbed_statement": "The MQDT-calculated curve in Fig. 6 peaks at roughly +450 MHz around n≈50 due to the 7f2(J=1) perturber; Fig. 7(a) shows residuals reaching ±30 MHz near n≈50, highlighting the vibrational-channel interaction’s impact on fit accuracy.", "perturbed_explanation": "This statement is wrong because Fig. 6’s prominent binding‐energy peak appears at n≈42 (not n≈50), and Fig. 7(a) residuals also reach their maximum near n≈42. There is no corresponding ±30 MHz residual feature at n≈50.", "claim": "The MQDT-calculated curve in Fig. 6 peaks at roughly +450 MHz around n≈50 due to the 7f2(J=1) perturber; Fig. 7(a) shows residuals reaching ±30 MHz near n≈50, highlighting the vibrational-channel interaction’s impact on fit accuracy.", "label": false },
{ "paperid": "2409.01971v1", "paper_path": "./SciVer/papers/2409.01971v1.json", "claim_type": "sequential", "item1": "6", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.01971v1_figure_6.png", "item2_path": "./SciVer/images/2409.01971v1-Table3-1.png", "section": [ "5.2" ], "request_id": 308, "origin_statement": "At 10 observed timesteps, the pre-trained Snapshot model achieves an ADE of 0.567 (Fig.6), and applying the collision risk agent selection (Table 3) further reduces ADE to 0.548, yielding a net improvement of 0.019.", "perturbed_statement": "At 10 observed timesteps, the pre-trained Snapshot model achieves an ADE of 0.600 (Fig.6), and applying the collision risk agent selection (Table 3) further reduces ADE to 0.548, yielding a net improvement of 0.052.", "perturbed_explanation": "The perturbation is incorrect because Fig. 6 shows the pre-trained ADE at 10 timesteps is 0.567, not 0.600. Therefore, the improvement from 0.567 to 0.548 is 0.019, not 0.052.", "claim": "At 10 observed timesteps, the pre-trained Snapshot model achieves an ADE of 0.600 (Fig.6), and applying the collision risk agent selection (Table 3) further reduces ADE to 0.548, yielding a net improvement of 0.052.", "label": false },
{ "paperid": "2409.11531v1", "paper_path": "./SciVer/papers/2409.11531v1.json", "claim_type": "sequential", "item1": "6", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11531v1_figure_6.png", "item2_path": "./SciVer/images/2409.11531v1-Table1-1.png", "section": [ "5.2.1" ], "request_id": 311, "origin_statement": "Because Table 1 shows ESV has the highest ΔCompound in Scenario 1 (0.567), and the S1 Valence chart shows ESV’s valence increased from ~2.7 to ~4.5, the ESV condition most effectively improves emotions after failure scenarios.", "perturbed_statement": "The Text condition yields the greatest sentiment improvement in Scenario 1 (ΔPost–Pre = 0.567) and corresponds with the largest valence gain (from 2.9 to 4.6) on the S1 Valence chart, suggesting Text best shifts emotions for failures.", "perturbed_explanation": "Table 1 actually lists Text’s ΔCompound for Scenario 1 as 0.363 (not 0.567), while ESV’s is 0.567. Therefore attributing the highest ΔCompound to Text contradicts the concrete values in Table 1.", "claim": "The Text condition yields the greatest sentiment improvement in Scenario 1 (ΔPost–Pre = 0.567) and corresponds with the largest valence gain (from 2.9 to 4.6) on the S1 Valence chart, suggesting Text best shifts emotions for failures.", "label": false },
{ "paperid": "2409.19136v1", "paper_path": "./SciVer/papers/2409.19136v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19136v1_figure_1.png", "item2_path": "./SciVer/images/2409.19136v1_figure_3.png", "section": [ "5.1.1" ], "request_id": 313, "origin_statement": "Figure 1 shows User 128 has the highest true positives (75). Figure 3 shows its purple points cluster vertically around 22–25 km/h max speed. Thus, the decision tree’s high accuracy stems from User 128’s consistently ~24 km/h max speed across trips.", "perturbed_statement": "Figure 1 shows User 128 has only 68 true positives. Figure 3 shows its purple points cluster vertically around 18–20 km/h max speed. Thus, the decision tree’s high accuracy stems from User 128’s consistently ~19 km/h max speed across trips.", "perturbed_explanation": "This is incorrect because in Figure 1 the confusion matrix cell for User 128 shows 75 true positives, not 68. In Figure 3 the purple points cluster around 22–25 km/h, not 18–20 km/h, so the claimed ~19 km/h consistency is contradicted by the data.", "claim": "Figure 1 shows User 128 has only 68 true positives. Figure 3 shows its purple points cluster vertically around 18–20 km/h max speed. Thus, the decision tree’s high accuracy stems from User 128’s consistently ~19 km/h max speed across trips.", "label": false },
{ "paperid": "2411.08087v1", "paper_path": "./SciVer/papers/2411.08087v1.json", "claim_type": "sequential", "item1": "7", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.08087v1_figure_7.png", "item2_path": "./SciVer/images/2411.08087v1-Table4-1.png", "section": [ "5.1" ], "request_id": 315, "origin_statement": "Figure 7 shows masked SMICA χ²ℓ lies within the 1σ simulation band at Δℓ=15, indicating Gaussianity. 
Table 4 reports SMICA’s Planck-scale p-value remains low (0.43) when anisotropic noise is added. Thus, SMICA is consistent with isotropy when masked but still rejects isotropy in full-sky with noise.", "perturbed_statement": "Figure 7 shows masked SMICA χ²ℓ exceeds the 2σ simulation band at Δℓ=15, implying non-Gaussianity. Table 4 gives SMICA’s Planck-scale p-value as 0.87 when anisotropic noise is added. Thus, SMICA appears anisotropic when masked but consistent with isotropy in full-sky with noise.", "perturbed_explanation": "This is wrong because Figure 7 actually shows SMICA’s χ²ℓ points (blue) remain within the grey 1σ band at Δℓ=15, not above a 2σ threshold. Additionally, Table 4 lists SMICA’s Planck-scale p-value as 0.43, not 0.87, so the conclusion of full-sky consistency is invalid.", "claim": "Figure 7 shows masked SMICA χ²ℓ exceeds the 2σ simulation band at Δℓ=15, implying non-Gaussianity. Table 4 gives SMICA’s Planck-scale p-value as 0.87 when anisotropic noise is added. Thus, SMICA appears anisotropic when masked but consistent with isotropy in full-sky with noise.", "label": false }, { "paperid": "2410.14059v2", "paper_path": "./SciVer/papers/2410.14059v2.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.14059v2_figure_2.png", "item2_path": "./SciVer/images/2410.14059v2-Table2-1.png", "section": [ "4.3" ], "request_id": 317, "origin_statement": "Figure2 shows the verb-noun pair 'include assets' is most frequent; Table2 lists Asset Valuation Reporting among the few-shot tasks; thus the benchmark emphasizes asset-related interactions in few-shot settings.", "perturbed_statement": "Figure2 shows 'discuss shares' as the most frequent verb-noun pair; Table2 lists Asset Valuation Reporting as a zero-shot task; thus zero-shot tasks heavily emphasize share-focused interactions.", "perturbed_explanation": "The perturbation is incorrect for two reasons: (1) Figure 2’s largest slice is labeled 'include' with its top object 'assets', not 'discuss shares'. (2) In Table 2, Asset Valuation Reporting appears under the Few-shot category, not Zero-shot. 
Both details contradict the perturbed claim.", "claim": "Figure2 shows 'discuss shares' as the most frequent verb-noun pair; Table2 lists Asset Valuation Reporting as a zero-shot task; thus zero-shot tasks heavily emphasize share-focused interactions.", "label": false }, { "paperid": "2411.02712v1", "paper_path": "./SciVer/papers/2411.02712v1.json", "claim_type": "sequential", "item1": "5(c)", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02712v1_figure_5(c).png", "item2_path": "./SciVer/images/2411.02712v1_figure_6.png", "section": [ "5.4" ], "request_id": 318, "origin_statement": "Figure 5 shows a 0.00 shift for textual-only generations in V-DPO, indicating reduced language priors; therefore in Figure 6's first example, V-DPO correctly answers “no people eating” instead of defaulting to “four people”.", "perturbed_statement": "Figure 5 shows a 9.37 shift for textual-only generations in V-DPO, indicating amplified language priors; therefore in Figure 6's first example, V-DPO incorrectly answers “four people eating” like the baseline.", "perturbed_explanation": "This is incorrect because Figure 5’s middle panel reports a 0.00 shift for textual-only outputs (not 9.37), and in Figure 6 V-DPO’s first example explicitly responds “no people eating,” not “four people eating.”", "claim": "Figure 5 shows a 9.37 shift for textual-only generations in V-DPO, indicating amplified language priors; therefore in Figure 6's first example, V-DPO incorrectly answers “four people eating” like the baseline.", "label": false }, { "paperid": "2410.21329v1", "paper_path": "./SciVer/papers/2410.21329v1.json", "claim_type": "sequential", "item1": "5", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21329v1_figure_5.png", "item2_path": "./SciVer/images/2410.21329v1_figure_9.png", "section": [ "3.2" ], "request_id": 320, "origin_statement": "At a 5-hour lead time, Figure 5a shows CloudCast’s MAESS (~0.34) exceeds MEPS’s (~0.28) by 0.06; correspondingly, Figure 9 reports CloudCast classifies clear skies at 61% accuracy versus MEPS’s 38%.", "perturbed_statement": "At a 5-hour lead time, Figure 5a shows CloudCast’s MAESS (~0.34) is below MEPS’s (~0.28) by 0.06; correspondingly, Figure 9 reports CloudCast classifies clear skies at 38% accuracy versus MEPS’s 61%.", "perturbed_explanation": "The perturbed claim is wrong because in Figure 5a CloudCast’s MAESS at 5 h (~0.34) actually exceeds MEPS’s (~0.28), not the reverse, and in Figure 9 CloudCast’s clear-sky accuracy is 61%, not 38%, compared to MEPS’s 38%.", "claim": "At a 5-hour lead time, Figure 5a shows CloudCast’s MAESS (~0.34) is below MEPS’s (~0.28) by 0.06; correspondingly, Figure 9 reports CloudCast classifies clear skies at 38% accuracy versus MEPS’s 61%.", "label": false }, { "paperid": "2411.16506v1", "paper_path": "./SciVer/papers/2411.16506v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.16506v1_figure_5.png", "item2_path": "./SciVer/images/2411.16506v1_figure_6.png", "section": [ "4.2" ], "request_id": 324, "origin_statement": "Figure 10 shows off+PIBT(vanilla) deadlocks by timestep 400, yielding near-zero average tasks per step over 1000 timesteps. Figure 9 shows warehouse-d throughput at m=200 remains above 4.0 tasks per timestep. 
Hence, off+PIBT(vanilla)’s random-map throughput is well below its warehouse-d performance at m=200.", "perturbed_statement": "Figure 10 shows off+PIBT(vanilla) sustains around 8 reached goals per timestep across all 1000 timesteps. Figure 9 shows warehouse-d throughput at m=200 is about 4.3 tasks per timestep. Therefore, off+PIBT(vanilla)’s random-map throughput is roughly double its warehouse-d throughput at m=200.", "perturbed_explanation": "The perturbed statement is wrong because Figure 10’s purple curve for off+PIBT(vanilla) actually drops to zero by around timestep 400, rather than sustaining ~8 goals per step across all 1000 timesteps. Thus it does not maintain that high throughput throughout.", "claim": "Figure 10 shows off+PIBT(vanilla) sustains around 8 reached goals per timestep across all 1000 timesteps. Figure 9 shows warehouse-d throughput at m=200 is about 4.3 tasks per timestep. Therefore, off+PIBT(vanilla)’s random-map throughput is roughly double its warehouse-d throughput at m=200.", "label": false }, { "paperid": "2409.20058v1", "paper_path": "./SciVer/papers/2409.20058v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.20058v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.20058v1-Table3-1.png", "section": [ "4.3" ], "request_id": 325, "origin_statement": "Fig. 3 shows the GRXE cutoffpl component (blue) falls near the power-law (red) at ∼80 keV, and Table 3 reports an IPM-to-total 30–80 keV flux fraction of 0.73±0.13 for GB, indicating intermediate polars dominate the hard (30–80 keV) X-ray emission.", "perturbed_statement": "Fig. 3 shows the GRXE cutoffpl component (blue) falls near the power-law (red) at ∼80 keV, and Table 3 reports an IPM-to-total 30–80 keV flux fraction of 0.27±0.11 for GB, indicating intermediate polars dominate the hard (30–80 keV) X-ray emission.", "perturbed_explanation": "Table 3 actually lists the IPM-to-total 30–80 keV flux ratio for GB as 0.73±0.13, not 0.27±0.11. The value 0.27±0.11 corresponds to the power-law component fraction, so the perturbed statement underestimates the IPM contribution and contradicts the table.", "claim": "Fig. 3 shows the GRXE cutoffpl component (blue) falls near the power-law (red) at ∼80 keV, and Table 3 reports an IPM-to-total 30–80 keV flux fraction of 0.27±0.11 for GB, indicating intermediate polars dominate the hard (30–80 keV) X-ray emission.", "label": false }, { "paperid": "2410.18069v1", "paper_path": "./SciVer/papers/2410.18069v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.18069v1_figure_4.png", "item2_path": "./SciVer/images/2410.18069v1_figure_5.png", "section": [ "5.1.2" ], "request_id": 326, "origin_statement": "In Fig. 5.4 the yellow curve in the s=0 sector asymptotically approaches normalized energy 1 (one-breather mass). Therefore in Fig. 5.5 the yellow curve likewise approaches E(r)=m, confirming it represents the one-breather state in the s=1 sector.", "perturbed_statement": "In Fig. 5.4 the green curve in the s=0 sector asymptotically approaches normalized energy 1 (one-breather mass). Therefore in Fig. 5.5 the green curve likewise approaches E(r)=m, confirming it represents the one-breather state in the s=1 sector.", "perturbed_explanation": "Figure 5.4 shows that the green curve actually asymptotes to normalized energy 2 (two-breather mass), not 1. 
Thus the green curve cannot represent the one-breather state, making the perturbed claim incorrect.", "claim": "In Fig. 5.4 the green curve in the s=0 sector asymptotically approaches normalized energy 1 (one-breather mass). Therefore in Fig. 5.5 the green curve likewise approaches E(r)=m, confirming it represents the one-breather state in the s=1 sector.", "label": false }, { "paperid": "2411.03896v1", "paper_path": "./SciVer/papers/2411.03896v1.json", "claim_type": "sequential", "item1": "1", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03896v1_figure_1.png", "item2_path": "./SciVer/images/2411.03896v1_figure_4.png", "section": [ "5" ], "request_id": 327, "origin_statement": "From Fig.1, HiLLiPoP’s marginalized Σ0 posterior peaks closest to zero, and in Fig.4, HiLLiPoP’s γ0 posterior likewise lies nearest the GR baseline at 0.55, implying that the dataset with the smallest Σ0 anomaly also yields the smallest γ0 deviation.", "perturbed_statement": "From Fig.1, Plik’s marginalized Σ0 posterior peaks closest to zero, and in Fig.4, Plik’s γ0 posterior likewise lies nearest the GR baseline at 0.55, implying that the dataset with the smallest Σ0 anomaly also yields the smallest γ0 deviation.", "perturbed_explanation": "This is incorrect because in Fig.1 Plik’s Σ0 posterior actually peaks near 0.3, not at zero, and in Fig.4 Plik’s γ0 posterior peaks around 0.8, far above the GR baseline of 0.55. Therefore, Plik neither minimizes Σ0 nor γ0 deviations.", "claim": "From Fig.1, Plik’s marginalized Σ0 posterior peaks closest to zero, and in Fig.4, Plik’s γ0 posterior likewise lies nearest the GR baseline at 0.55, implying that the dataset with the smallest Σ0 anomaly also yields the smallest γ0 deviation.", "label": false }, { "paperid": "2410.07679v2", "paper_path": "./SciVer/papers/2410.07679v2.json", "claim_type": "sequential", "item1": "9(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07679v2_figure_9(b).png", "item2_path": "./SciVer/images/2410.07679v2-Table3-1.png", "section": [ "5.3" ], "request_id": 332, "origin_statement": "M_P2P’s FID drops below the RCFD baseline (3.800) and hits its lowest at β=0.3 (≈3.735). Table 3 then shows combining intra-sample (L_IS) and memory-based (L_M) losses cuts CIFAR-10 FID from 8.92 to 8.16. Optimizing β and these relational losses maximizes gains.", "perturbed_statement": "M_P2P’s FID is lowest at β=0.4 (≈3.738), and Table 3 shows combining inter-image (L_II) and memory-based (L_M) losses reduces FID from 8.92 to 8.16. Therefore, setting β to 0.4 with L_II and L_M yields the best distillation performance.", "perturbed_explanation": "The graph in Figure 9b shows the minimum FID for M_P2P occurs at β=0.3 (≈3.735), not at β=0.4 (≈3.738). Additionally, Table 3 records the lowest CIFAR-10 FID of 8.16 when combining L_IS and L_M, not L_II and L_M, so the perturbed claim contradicts both modalities.", "claim": "M_P2P’s FID is lowest at β=0.4 (≈3.738), and Table 3 shows combining inter-image (L_II) and memory-based (L_M) losses reduces FID from 8.92 to 8.16. 
Therefore, setting β to 0.4 with L_II and L_M yields the best distillation performance.", "label": false }, { "paperid": "2410.24169v1", "paper_path": "./SciVer/papers/2410.24169v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.24169v1-Table1-1.png", "item2_path": "./SciVer/images/2410.24169v1-Table2-1.png", "section": [ "5.1" ], "request_id": 333, "origin_statement": "EScAIP-Small trains at 35.84 samples/sec (Table 2), over 2× faster than EquiformerV2’s 14.22 samples/sec, while achieving a test Energy MAE of only 233 meV on OC20 All+MD (Table 1), just 14 meV worse.", "perturbed_statement": "EScAIP-Small trains at 25.36 samples/sec (Table 2), over 1.8× faster than EquiformerV2’s 14.22 samples/sec, while achieving a test Energy MAE of only 223 meV on OC20 All+MD (Table 1), just 14 meV worse.", "perturbed_explanation": "The perturbed statement misreports EScAIP-Small’s training speed: Table 2 shows 35.84 samples/sec (not 25.36). It also misstates its test Energy MAE: Table 1 lists 233 meV (not 223), so both the speed and MAE values contradict the context.", "claim": "EScAIP-Small trains at 25.36 samples/sec (Table 2), over 1.8× faster than EquiformerV2’s 14.22 samples/sec, while achieving a test Energy MAE of only 223 meV on OC20 All+MD (Table 1), just 14 meV worse.", "label": false }, { "paperid": "2409.02554v1", "paper_path": "./SciVer/papers/2409.02554v1.json", "claim_type": "sequential", "item1": "6", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.02554v1_figure_6.png", "item2_path": "./SciVer/images/2409.02554v1-Table4-1.png", "section": [ "3" ], "request_id": 335, "origin_statement": "Table 4 shows that the mean CME speed for LFG events exceeds that for HFG by 571 km s^-1. Figure 6 shows LFG events have 36% X-class flare association versus 12% for HFG. Thus, the larger 571 km/s CME speed offset aligns with a threefold X-class flare increase.", "perturbed_statement": "Table 4 shows that the mean CME speed for LFG events exceeds that for HFG by 365 km s^-1. Figure 6 shows LFG events have 36% X-class flare association versus 12% for HFG. Thus, the 365 km/s CME speed gap aligns with a threefold X-class flare increase.", "perturbed_explanation": "The perturbation incorrectly states the CME speed difference as 365 km s^-1. Table 4 actually reports a ΔV_LFG–HFG of 571 km s^-1, so the claimed 365 km/s gap contradicts the context.", "claim": "Table 4 shows that the mean CME speed for LFG events exceeds that for HFG by 365 km s^-1. Figure 6 shows LFG events have 36% X-class flare association versus 12% for HFG. 
Thus, the 365 km/s CME speed gap aligns with a threefold X-class flare increase.", "label": false }, { "paperid": "2409.19942v2", "paper_path": "./SciVer/papers/2409.19942v2.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19942v2_figure_3.png", "item2_path": "./SciVer/images/2409.19942v2_figure_4.png", "section": [ "3.6" ], "request_id": 337, "origin_statement": "Figure 4(i) shows 152 collisions when cyclists move left and objects move forward; Fig 3(iii) shows cars account for the highest count of other objects (~630); therefore, most of those 152 left‐forward collisions involve cars.", "perturbed_statement": "Figure 4(i) shows 152 collisions when cyclists move left and objects move forward; Fig 3(iii) shows pedestrians account for the highest count of other objects (~630); therefore, most of those 152 left‐forward collisions involve pedestrians.", "perturbed_explanation": "The perturbation is wrong because Fig 3(iii) actually shows cars are the most frequent other object (around 630), while pedestrians appear far less (about 80). Thus pedestrians do not account for the highest count in Fig 3(iii), and most left-forward collisions cannot involve pedestrians as claimed.", "claim": "Figure 4(i) shows 152 collisions when cyclists move left and objects move forward; Fig 3(iii) shows pedestrians account for the highest count of other objects (~630); therefore, most of those 152 left‐forward collisions involve pedestrians.", "label": false }, { "paperid": "2409.04846v1", "paper_path": "./SciVer/papers/2409.04846v1.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.04846v1_figure_5.png", "item2_path": "./SciVer/images/2409.04846v1-Table4-1.png", "section": [ "3.2" ], "request_id": 339, "origin_statement": "Table 4 shows 402 [S II] All exposures, and Fig. 5’s [S II] All histogram bin at 1.1″ contains about 105 counts. Thus roughly 105/402 ≈ 26% of [S II] All observations have seeing near 1.1″.", "perturbed_statement": "Since Table 4 shows 184 [S II] All exposures, and Fig. 5’s [S II] All histogram bin at 1.1″ contains about 40 counts, only about 40/184 ≈ 22% of [S II] All observations have seeing near 1.1″.", "perturbed_explanation": "This is wrong because Table 4 actually lists 402 [S II] All exposures (not 184), and Fig.​5’s bin at 1.1″ for [S II] All has about 105 counts (not 40). The perturbed values contradict the context.", "claim": "Since Table 4 shows 184 [S II] All exposures, and Fig. 
5’s [S II] All histogram bin at 1.1″ contains about 40 counts, only about 40/184 ≈ 22% of [S II] All observations have seeing near 1.1″.", "label": false }, { "paperid": "2411.07070v2", "paper_path": "./SciVer/papers/2411.07070v2.json", "claim_type": "sequential", "item1": "3(a)", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.07070v2_figure_3(a).png", "item2_path": "./SciVer/images/2411.07070v2_figure_6.png", "section": [ "5.2" ], "request_id": 341, "origin_statement": "Combining forward and backward signals at ~99% fine-tuning accuracy on GPT-2-medium yields ~72% balanced attack accuracy, about 9.5 points higher than the forward-only peak of ~62.5% observed around epoch 16.", "perturbed_statement": "Combining forward and backward signals at ~99% fine-tuning accuracy on GPT-2-medium yields ~75% balanced attack accuracy, about 12 points higher than the forward-only peak of ~63% observed around epoch 16.", "perturbed_explanation": "Figure 3 (top) shows the last-block-output forward attack peaks at about 0.625 (not 0.63), and Figure 6 shows the combined attack at ~0.99 fine-tuning accuracy reaches about 0.72 (not 0.75). These values contradict the perturbed numbers.", "claim": "Combining forward and backward signals at ~99% fine-tuning accuracy on GPT-2-medium yields ~75% balanced attack accuracy, about 12 points higher than the forward-only peak of ~63% observed around epoch 16.", "label": false }, { "paperid": "2411.03445v1", "paper_path": "./SciVer/papers/2411.03445v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.03445v1-Table5-1.png", "item2_path": "./SciVer/images/2411.03445v1-Table6-1.png", "section": [ "5.3" ], "request_id": 342, "origin_statement": "Table 5 shows Config D sorting increases TDC22 ViT GTSRB AUC from 0.44 to 0.90. This demonstrates feature alignment's effectiveness on random-initialized models; accordingly, Table 6 shows Config D sorting likewise elevates WM+Clean→Checker+Clean CNN AUC from 0.45 to 0.97.", "perturbed_statement": "Table 5 shows Config D sorting increases TDC22 ViT GTSRB AUC from 0.44 to 0.90. This demonstrates feature alignment's effectiveness on random-initialized models; accordingly, Table 6 shows Config D sorting likewise elevates WM+Clean→Checker+Clean CNN AUC from 0.45 to 0.87.", "perturbed_explanation": "The perturbed claim gives a CNN AUC of 0.87 for WM+Clean→Checker+Clean under Config D, but Table 6 actually reports a Config D AUC of 0.97 in that row, so the 0.87 figure contradicts the context.", "claim": "Table 5 shows Config D sorting increases TDC22 ViT GTSRB AUC from 0.44 to 0.90. 
This demonstrates feature alignment's effectiveness on random-initialized models; accordingly, Table 6 shows Config D sorting likewise elevates WM+Clean→Checker+Clean CNN AUC from 0.45 to 0.87.", "label": false }, { "paperid": "2411.06171v1", "paper_path": "./SciVer/papers/2411.06171v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.06171v1-Table2-1.png", "item2_path": "./SciVer/images/2411.06171v1-Table3-1.png", "section": [ "4.4" ], "request_id": 343, "origin_statement": "Table 2 shows SEEKR (1%) reduces GA drop to –1.05 on LLaMA-2-7B; thus, Table 3 indicates SEEKR achieves the lowest BWT magnitude of –2.52 on Order 4 among all methods.", "perturbed_statement": "Table 2 shows SEEKR (1%) reduces GA drop to –1.05 on LLaMA-2-7B; thus, Table 3 indicates SEEKR achieves the lowest BWT magnitude of –5.31 on Order 4 among all methods.", "perturbed_explanation": "The perturbation is wrong because Table 3 actually reports SEEKR’s BWT magnitude on Order 4 as –2.52, not –5.31. The value –5.31 corresponds to Replay (1%), so assigning it to SEEKR contradicts the table.", "claim": "Table 2 shows SEEKR (1%) reduces GA drop to –1.05 on LLaMA-2-7B; thus, Table 3 indicates SEEKR achieves the lowest BWT magnitude of –5.31 on Order 4 among all methods.", "label": false }, { "paperid": "2409.00671v1", "paper_path": "./SciVer/papers/2409.00671v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.00671v1_figure_3.png", "item2_path": "./SciVer/images/2409.00671v1-Table3-1.png", "section": [ "5.7" ], "request_id": 347, "origin_statement": "Figure 3 shows InvariantStock’s cumulative return peaks around 3.8 by end-2023; Table 3 reports its Sharpe ratio of 1.9119, implying it delivers the highest risk-adjusted return among methods.", "perturbed_statement": "Figure 3 shows DoubleAdapt’s cumulative return peaks around 3.8 by end-2023; Table 3 reports its Sharpe ratio of 1.9119, implying it delivers the highest risk-adjusted return among methods.", "perturbed_explanation": "The perturbation is wrong because Figure 3 actually shows InvariantStock (purple) reaching the highest cumulative return (~3.8), not DoubleAdapt (green peaks at ~2.3). 
Additionally, Table 3 gives DoubleAdapt a Sharpe ratio of 1.2788, not 1.9119.", "claim": "Figure 3 shows DoubleAdapt’s cumulative return peaks around 3.8 by end-2023; Table 3 reports its Sharpe ratio of 1.9119, implying it delivers the highest risk-adjusted return among methods.", "label": false }, { "paperid": "2410.23910v1", "paper_path": "./SciVer/papers/2410.23910v1.json", "claim_type": "sequential", "item1": "3", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23910v1_figure_3.png", "item2_path": "./SciVer/images/2410.23910v1_figure_5(b).png", "section": [ "4.3" ], "request_id": 349, "origin_statement": "Figure 3(b) shows box-level uncertainty pools BEV-cell uncertainties via ROI pooling into a per-box score; using this, Figure 5’s red ROC and PR curves outperform baselines across all FPR and recall thresholds, proving our uncertainty measure detects erroneous boxes best.", "perturbed_statement": "Figure 3(a) shows scene-level uncertainty pools BEV-cell uncertainties via ROI pooling into a per-box score; using this, Figure 5’s red PR curve falls below the green MC-DP baseline across most recall thresholds, indicating our measure underperforms.", "perturbed_explanation": "The perturbed statement misattributes ROI pooling to scene-level uncertainty (Figure 3a) whereas ROI pooling is applied only for box-level uncertainty in Figure 3b. It also incorrectly claims the red PR curve lies below MC-DP; in Figure 5 the red curve is actually above the green MC-DP curve at most recall values, so our method outperforms MC-DP rather than underperforms.", "claim": "Figure 3(a) shows scene-level uncertainty pools BEV-cell uncertainties via ROI pooling into a per-box score; using this, Figure 5’s red PR curve falls below the green MC-DP baseline across most recall thresholds, indicating our measure underperforms.", "label": false }, { "paperid": "2410.19483v1", "paper_path": "./SciVer/papers/2410.19483v1.json", "claim_type": "sequential", "item1": "1", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19483v1_figure_1.png", "item2_path": "./SciVer/images/2410.19483v1_figure_3(c).png", "section": [ "1" ], "request_id": 354, "origin_statement": "Figure 1 shows the lego scene is richly textured, so content-aware quantization assigns it a higher bitwidth. In Figure 3(c), at B=8, QAT_layer-wise achieves ≈36 dB PSNR, outperforming PTQ_layer-wise by about 1 dB.", "perturbed_statement": "Figure 1 shows the lego scene is low-texture, so quantization uses a lower bitwidth. In Figure 3(c), at B=4, PTQ_layer-wise achieves ≈25 dB PSNR, surpassing QAT_layer-wise by about 2 dB.", "perturbed_explanation": "This is wrong because Figure 1 actually depicts the lego scene as richly textured, implying a higher bitwidth, not lower. Moreover, in Figure 3(c) at B=4, PTQ_layer-wise attains only about 21 dB while QAT_layer-wise reaches around 24 dB, so PTQ_layer-wise does not surpass QAT_layer-wise.", "claim": "Figure 1 shows the lego scene is low-texture, so quantization uses a lower bitwidth. 
In Figure 3(c), at B=4, PTQ_layer-wise achieves ≈25 dB PSNR, surpassing QAT_layer-wise by about 2 dB.", "label": false }, { "paperid": "2409.05249v1", "paper_path": "./SciVer/papers/2409.05249v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.05249v1_figure_3.png", "item2_path": "./SciVer/images/2409.05249v1_figure_4.png", "section": [ "4.3" ], "request_id": 360, "origin_statement": "In Figure 3, NetDPSyn achieves 0.889 accuracy on TON with a decision tree; Figure 4 shows NetDPSyn’s SS relative error on DC is 1.26, the highest among methods; thus, despite near-89% classification fidelity, NetDPSyn’s packet sampling error can spike above 1.2.", "perturbed_statement": "In Figure 3, NetDPSyn achieves 0.98 accuracy on TON with a decision tree; Figure 4 shows NetDPSyn’s SS relative error on DC is 0.03, the lowest among methods; thus, NetDPSyn uniformly excels in both flow classification and packet sampling.", "perturbed_explanation": "The altered premise misstates NetDPSyn’s TON accuracy: Figure 3 reports 0.889, not 0.98. It also misreports the SS relative error on DC: Figure 4 shows 1.26 (highest), not 0.03 (lowest). These errors invalidate the uniform-excellence conclusion.", "claim": "In Figure 3, NetDPSyn achieves 0.98 accuracy on TON with a decision tree; Figure 4 shows NetDPSyn’s SS relative error on DC is 0.03, the lowest among methods; thus, NetDPSyn uniformly excels in both flow classification and packet sampling.", "label": false }, { "paperid": "2411.13584v1", "paper_path": "./SciVer/papers/2411.13584v1.json", "claim_type": "sequential", "item1": "3", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13584v1_figure_3.png", "item2_path": "./SciVer/images/2411.13584v1_figure_4(b).png", "section": [ "3.4.5" ], "request_id": 362, "origin_statement": "Figure 3 shows our retriever forms distinct clusters for each of the 12 Beijing stations, implying precise station-level separation; consequently, Figure 4’s low MSE (0.007) and high R^2 (0.79) confirm a strong linear link between embedding and actual distances.", "perturbed_statement": "Figure 3 shows our retriever forms distinct clusters for each of the 12 Beijing stations, implying precise station-level separation; consequently, Figure 4’s MSE of 0.05 and R^2 of 0.45 indicate a weak correlation between embedding and actual distances.", "perturbed_explanation": "The perturbed statement is wrong because Figure 4 actually reports MSE = 0.007 (not 0.05) and R^2 = 0.79 (not 0.45), which demonstrates a strong rather than weak correlation between embedding distances and real-world geographical distances.", "claim": "Figure 3 shows our retriever forms distinct clusters for each of the 12 Beijing stations, implying precise station-level separation; consequently, Figure 4’s MSE of 0.05 and R^2 of 0.45 indicate a weak correlation between embedding and actual distances.", "label": false }, { "paperid": "2410.10289v1", "paper_path": "./SciVer/papers/2410.10289v1.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.10289v1_figure_5.png", "item2_path": "./SciVer/images/2410.10289v1-Table4-1.png", "section": [ "4.2" ], "request_id": 363, "origin_statement": "Figure 5 shows that CAP alone yields a pixel-level PRO of about 66% on industrial datasets at prompt size (10,4), and Table 4 reports FAPrompt achieves an industrial 
pixel-level PRO of 85.0%. Thus FAPrompt improves pixel-level PRO by approximately 19 percentage points over CAP.", "perturbed_statement": "Figure 5 shows that CAP alone yields a pixel-level PRO of about 82% on industrial datasets at prompt size (10,4), and Table 4 reports FAPrompt achieves an industrial pixel-level PRO of 85.0%. Thus FAPrompt improves pixel-level PRO by approximately 3 percentage points over CAP.", "perturbed_explanation": "The perturbation is wrong because Figure 5 actually shows the P-PRO bar at prompt size (10,4) for industrial datasets is around 65–66%, not 82%. Therefore the improvement from ~66% to 85.0% is about 19 points, not 3.", "claim": "Figure 5 shows that CAP alone yields a pixel-level PRO of about 82% on industrial datasets at prompt size (10,4), and Table 4 reports FAPrompt achieves an industrial pixel-level PRO of 85.0%. Thus FAPrompt improves pixel-level PRO by approximately 3 percentage points over CAP.", "label": false }, { "paperid": "2409.00700v1", "paper_path": "./SciVer/papers/2409.00700v1.json", "claim_type": "sequential", "item1": "9(a)", "item2": "9(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.00700v1_figure_9(a).png", "item2_path": "./SciVer/images/2409.00700v1_figure_9(b).png", "section": [ "4.4.4", "6.1" ], "request_id": 367, "origin_statement": "Spectrograms from Speaker A show a consistent dominant energy band around 300–400 Hz across three angles; treating this as a baseline, Speaker B’s spectrograms likewise maintain a stable energy peak near 250–350 Hz, confirming the model preserves each speaker’s pitch profile irrespective of viewing angle.", "perturbed_statement": "Spectrograms from Speaker A show a shifting energy band from 300 Hz to 800 Hz across three angles; treating this as baseline, Speaker B’s spectrograms similarly fluctuate between 400 Hz and 900 Hz, indicating the model introduces significant pitch variation with viewpoint changes.", "perturbed_explanation": "The perturbed claim is false because all mel‐spectrograms in Figure 9 actually show a stable low‐frequency band near 250–400 Hz across different face angles—there is no shift up to 800 Hz or 900 Hz as stated.", "claim": "Spectrograms from Speaker A show a shifting energy band from 300 Hz to 800 Hz across three angles; treating this as baseline, Speaker B’s spectrograms similarly fluctuate between 400 Hz and 900 Hz, indicating the model introduces significant pitch variation with viewpoint changes.", "label": false }, { "paperid": "2409.04257v1", "paper_path": "./SciVer/papers/2409.04257v1.json", "claim_type": "sequential", "item1": "2", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.04257v1_figure_2.png", "item2_path": "./SciVer/images/2409.04257v1-Table1-1.png", "section": [ "3.4" ], "request_id": 372, "origin_statement": "Table1 shows synthesis 2 yields the highest DCAP for depression at 17.45%. Figure 2 shows the CAP_d for depression in the original data is about 75%. Thus, the synthetic data reduces the attribute disclosure risk for depression by roughly 57.5 percentage points.", "perturbed_statement": "Table1 shows synthesis 2 yields the highest DCAP for depression at 32.50%. Figure 2 shows the CAP_d for depression in the original data is about 25%. 
Thus, the synthetic data reduces attribute disclosure risk for depression by roughly 7.5 percentage points.", "perturbed_explanation": "The statement misreads Table 1: 32.50% is the DiS value for synthesis 2, not DCAP (which is actually 17.45%). It also understates the original CAP_d: Figure 2 shows CAP_d for depression around 75–80%, not 25%. Both errors make the 7.5-point reduction incorrect.", "claim": "Table1 shows synthesis 2 yields the highest DCAP for depression at 32.50%. Figure 2 shows the CAP_d for depression in the original data is about 25%. Thus, the synthetic data reduces attribute disclosure risk for depression by roughly 7.5 percentage points.", "label": false }, { "paperid": "2411.01472v1", "paper_path": "./SciVer/papers/2411.01472v1.json", "claim_type": "sequential", "item1": "2(d)", "item2": "2(i)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01472v1_figure_2(d).png", "item2_path": "./SciVer/images/2411.01472v1_figure_2(i).png", "section": [ "4.2" ], "request_id": 374, "origin_statement": "Table 1 shows our method yields a 2.1 dB PSNR gain over MZSR on the SID dataset; in Figure 2’s second-row error map, our output appears red (near-zero error) versus MZSR’s blue, so that PSNR gain corresponds to visibly reduced raw reconstruction error in low-light DSLR captures.", "perturbed_statement": "Table 1 shows our method yields a 4.5 dB PSNR gain over MZSR on the SID dataset; in Figure 2’s second-row error map, our output appears deeper blue (higher error) than MZSR’s, so that PSNR gain corresponds to increased raw reconstruction error in low-light DSLR captures.", "perturbed_explanation": "This is incorrect because Table 1 reports only a 2.1 dB PSNR improvement on the SID dataset, not 4.5 dB, and Figure 2’s second-row error map actually shows our method’s result as red (low error) compared to MZSR’s blue (high error), not the other way around.", "claim": "Table 1 shows our method yields a 4.5 dB PSNR gain over MZSR on the SID dataset; in Figure 2’s second-row error map, our output appears deeper blue (higher error) than MZSR’s, so that PSNR gain corresponds to increased raw reconstruction error in low-light DSLR captures.", "label": false }, { "paperid": "2409.04050v1", "paper_path": "./SciVer/papers/2409.04050v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.04050v1_figure_4(a).png", "item2_path": "./SciVer/images/2409.04050v1_figure_4(d).png", "section": [ "4.2.2" ], "request_id": 377, "origin_statement": "EigenSR-β (Ours) achieves the highest PSNR of 38.91 dB in the PSNR image, indicating superior spatial reconstruction; then in the NRQMS image, it also records the lowest NRQMS of 17.85, confirming best spectral fidelity among methods.", "perturbed_statement": "SFCSR achieves the highest PSNR of 37.77 dB in the PSNR image, indicating superior spatial reconstruction; then in the NRQMS image, it also records the lowest NRQMS of 17.93, confirming best spectral fidelity among methods.", "perturbed_explanation": "This is incorrect because the PSNR image shows EigenSR-β at 38.91 dB, higher than SFCSR’s 37.77 dB, so SFCSR does not have the highest PSNR. 
Additionally, the NRQMS image lists EigenSR-β at 17.85, lower than SFCSR’s 17.93, so SFCSR does not have the lowest NRQMS.", "claim": "SFCSR achieves the highest PSNR of 37.77 dB in the PSNR image, indicating superior spatial reconstruction; then in the NRQMS image, it also records the lowest NRQMS of 17.93, confirming best spectral fidelity among methods.", "label": false }, { "paperid": "2410.22015v1", "paper_path": "./SciVer/papers/2410.22015v1.json", "claim_type": "sequential", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22015v1_figure_4.png", "item2_path": "./SciVer/images/2410.22015v1-Table1-1.png", "section": [ "2.1.3" ], "request_id": 378, "origin_statement": "Figure 4b shows pedestrian counts peak around 18:00 at about 2.2× the daily average; Table 1 reports a median LOOCV error of –563 ped·h⁻¹. Thus, the LOOCV model underestimates evening peak flows by over 560 pedestrians per hour.", "perturbed_statement": "Figure 4b shows pedestrian counts peak around 12:00 at about 2.2× the daily average; Table 1 reports a median LOOCV error of –563 ped·h⁻¹. Thus, the LOOCV model underestimates midday peak flows by over 560 pedestrians per hour.", "perturbed_explanation": "Figure 4b’s averaged daily profile actually peaks around 18:00 at ~2.2× average, not at 12:00 (where normalized counts are closer to 1.6). Therefore, claiming a noon peak of 2.2× misreads the data, making the perturbed statement incorrect.", "claim": "Figure 4b shows pedestrian counts peak around 12:00 at about 2.2× the daily average; Table 1 reports a median LOOCV error of –563 ped·h⁻¹. Thus, the LOOCV model underestimates midday peak flows by over 560 pedestrians per hour.", "label": false }, { "paperid": "2411.03878v1", "paper_path": "./SciVer/papers/2411.03878v1.json", "claim_type": "sequential", "item1": "16", "item2": "17", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.03878v1_figure_16.png", "item2_path": "./SciVer/images/2411.03878v1_figure_17.png", "section": [ "5" ], "request_id": 379, "origin_statement": "From Figure 16, a 2-select-qubit rotational multiplexor decomposes into 4 multi-controlled rotations; Figure 17 places 3 such multiplexors per layer in the QSD of an N-qubit unitary, yielding 12 multi-controlled rotations per decomposition stage.", "perturbed_statement": "From Figure 16, a 3-select-qubit rotational multiplexor decomposes into 8 multi-controlled rotations; Figure 17 places 4 such multiplexors per layer in the QSD, yielding 32 multi-controlled rotations per stage.", "perturbed_explanation": "Figure 16 actually shows only 2 white-square select qubits (4 basis states), not 3 (8 states), so it decomposes into 4 rotations. 
Figure 17 explicitly depicts exactly 3 multiplexors (R_z, R_y, R_z), not 4, so the product 8×4=32 contradicts both diagrams.", "claim": "From Figure 16, a 3-select-qubit rotational multiplexor decomposes into 8 multi-controlled rotations; Figure 17 places 4 such multiplexors per layer in the QSD, yielding 32 multi-controlled rotations per stage.", "label": false }, { "paperid": "2411.02091v1", "paper_path": "./SciVer/papers/2411.02091v1.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02091v1_figure_4.png", "item2_path": "./SciVer/images/2411.02091v1-Table3-1.png", "section": [ "4.5" ], "request_id": 387, "origin_statement": "Table 3 shows LOCADD correlates with regression bug fixing time at 0.18 (p < 0.01); Fig. 4 shows the lib subsystem has the highest median fix time (~8 days); thus, code additions alone explain little of lib’s delay, suggesting subsystem complexity drives the longer fixes.", "perturbed_statement": "Table 3 shows LOCADD correlates with regression bug fixing time at 0.82 (p < 0.01); Fig. 4 shows the lib subsystem has the highest median fix time (~8 days); thus, code additions fully account for lib’s extended fix times.", "perturbed_explanation": "The perturbation falsely inflates LOCADD’s correlation to 0.82, but Table 3 actually reports a coefficient of 0.18. This incorrect premise undermines the conclusion about code additions driving lib’s fix times.", "claim": "Table 3 shows LOCADD correlates with regression bug fixing time at 0.82 (p < 0.01); Fig. 4 shows the lib subsystem has the highest median fix time (~8 days); thus, code additions fully account for lib’s extended fix times.", "label": false }, { "paperid": "2409.13171v1", "paper_path": "./SciVer/papers/2409.13171v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13171v1_figure_3.png", "item2_path": "./SciVer/images/2409.13171v1-Table4-1.png", "section": [ "3.4" ], "request_id": 389, "origin_statement": "Figure 3 shows the first build has 65 layer-wise images. Table 4 shows training on one part yields a zero-shot MAE of 0.029, while training on four parts reduces MAE to 0.014; thus, multi-part training halves the zero-shot MAE.", "perturbed_statement": "Figure 3 shows the first build has 65 layer-wise images. Table 4 shows training on one part yields a zero-shot MAE of 0.039, while training on four parts reduces MAE to 0.014; thus, multi-part training cuts the zero-shot MAE by more than 60%.", "perturbed_explanation": "The perturb statement misquotes the one-part MAE from Table 4 as 0.039, but the table actually reports 0.029 ± 0.024 for n=1. Therefore the claimed >60% reduction is based on an incorrect intermediate value.", "claim": "Figure 3 shows the first build has 65 layer-wise images. 
Table 4 shows training on one part yields a zero-shot MAE of 0.039, while training on four parts reduces MAE to 0.014; thus, multi-part training cuts the zero-shot MAE by more than 60%.", "label": false }, { "paperid": "2411.12785v1", "paper_path": "./SciVer/papers/2411.12785v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.12785v1-Table1-1.png", "item2_path": "./SciVer/images/2411.12785v1-Table2-1.png", "section": [ "6.1" ], "request_id": 390, "origin_statement": "On ViT-B/16 gender debiasing, our method reduces in-domain MS from 0.080 on FairFace (Table 1) to 0.048 on UTKFace (Table 2), a 40% drop.", "perturbed_statement": "On ViT-B/16 gender debiasing, our method reduces in-domain MS from 0.080 on FairFace (Table 1) to 0.056 on UTKFace (Table 2), a 30% drop.", "perturbed_explanation": "Table 2 actually lists the in-domain UTKFace MS for our method as 0.048, not 0.056. Because the premise '0.056' is incorrect, the claimed 30% reduction is also unfounded.", "claim": "On ViT-B/16 gender debiasing, our method reduces in-domain MS from 0.080 on FairFace (Table 1) to 0.056 on UTKFace (Table 2), a 30% drop.", "label": false }, { "paperid": "2411.15060v1", "paper_path": "./SciVer/papers/2411.15060v1.json", "claim_type": "sequential", "item1": "4", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15060v1_figure_4.png", "item2_path": "./SciVer/images/2411.15060v1_figure_6.png", "section": [ "5.2" ], "request_id": 394, "origin_statement": "Fig4 shows hallucinatory instances (⋆⋆) in SRS-to-HE often lie within dense UMAP clusters, and Fig6 shows NHP consistently converges to k=1; thus NHP leverages a single-nearest neighbor to detect hallucinations embedded in dense regions.", "perturbed_statement": "Fig4 shows all hallucinatory instances in SRS-to-HE occur only at the far outskirts of the UMAP, and Fig6 shows NHP consistently converges to k=200; thus NHP relies on 200-nearest neighbors to detect hallucinations in sparse outlier regions.", "perturbed_explanation": "This is incorrect because in Fig4 the hallucination stars (⋆⋆) are visibly interspersed within dense clusters, not solely at the outskirts. Moreover, Fig6’s histogram for k peaks at k=1 (and other small values), not at k=200.", "claim": "Fig4 shows all hallucinatory instances in SRS-to-HE occur only at the far outskirts of the UMAP, and Fig6 shows NHP consistently converges to k=200; thus NHP relies on 200-nearest neighbors to detect hallucinations in sparse outlier regions.", "label": false }, { "paperid": "2410.09865v2", "paper_path": "./SciVer/papers/2410.09865v2.json", "claim_type": "sequential", "item1": "7(b)", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.09865v2_figure_7(b).png", "item2_path": "./SciVer/images/2410.09865v2-Table6-1.png", "section": [ "5.3" ], "request_id": 397, "origin_statement": "In Figure 7b, the MoCo v3 model trained on 1.0 M synthetic images achieves ~91.3% RAF-DB linear-probe accuracy; FEText+FAUs+SG in the table yields 91.95%, thus outperforming SSL by 0.65%.", "perturbed_statement": "In Figure 7b, the MoCo v3 model trained on 1.0 M synthetic images achieves ~91.3% RAF-DB linear-probe accuracy; FEText+FAUs+SG in the table yields only 90.5%, thus underperforming SSL by 0.8%.", "perturbed_explanation": "The perturbed claim misreports the FEText+FAUs+SG performance. 
The table actually shows FEText+FAUs+SG achieves 91.95% on RAF-DB, not 90.5%, so it still outperforms the 91.3% SSL result.", "claim": "In Figure 7b, the MoCo v3 model trained on 1.0 M synthetic images achieves ~91.3% RAF-DB linear-probe accuracy; FEText+FAUs+SG in the table yields only 90.5%, thus underperforming SSL by 0.8%.", "label": false }, { "paperid": "2409.12993v1", "paper_path": "./SciVer/papers/2409.12993v1.json", "claim_type": "sequential", "item1": "1", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.12993v1_figure_1.png", "item2_path": "./SciVer/images/2409.12993v1_figure_6.png", "section": [ "4.3" ], "request_id": 398, "origin_statement": "Figure1 indicates 54 problems with 0% pass at both checkpoints. Using Figure6, pass@1 on VerilogEval-NonText increases from 0.103 (0k) to 0.551 (5k), implying targeted CC data could solve over half of those initially unsolved problems.", "perturbed_statement": "Figure1 indicates 54 problems with 0% pass at both checkpoints. Using Figure6, pass@1 on VerilogEval-NonText increases from 0.103 (0k) to 0.760 (5k), implying targeted CC data could solve over half of those initially unsolved problems.", "perturbed_explanation": "The perturbed statement is incorrect because Figure6 shows the pass@1 for VerilogEval-NonText at 5k CC samples is 0.551, not 0.760. The 0.760 value at 5k corresponds to FSM problems, not the non-textual benchmark.", "claim": "Figure1 indicates 54 problems with 0% pass at both checkpoints. Using Figure6, pass@1 on VerilogEval-NonText increases from 0.103 (0k) to 0.760 (5k), implying targeted CC data could solve over half of those initially unsolved problems.", "label": false }, { "paperid": "2411.09534v1", "paper_path": "./SciVer/papers/2411.09534v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09534v1_figure_3.png", "item2_path": "./SciVer/images/2411.09534v1_figure_4.png", "section": [ "4.2" ], "request_id": 399, "origin_statement": "Fig.3 shows two low-power acquisition periods (light sleep) preceding active processing. Fig.4 indicates that each acquisition cycle uses SPI receive via DMA in light sleep and only transitions to active processing upon a full-window interrupt after multiple DMA loops.", "perturbed_statement": "Fig.3 shows two deep-sleep acquisition periods preceding active processing. Fig.4 indicates each acquisition uses SPI receive via CPU polling in deep sleep and immediately transitions to active processing after the first interrupt, regardless of buffer fullness.", "perturbed_explanation": "This is wrong because Figure 3 explicitly labels acquisition as occurring in light sleep (the orange “Acquisition” bars under “Light sleep”), not deep sleep. Moreover, Figure 4 and the text state that data transfer uses DMA (not CPU polling) and that the core only enters active processing when a full-window interrupt occurs, not immediately after the first interrupt.", "claim": "Fig.3 shows two deep-sleep acquisition periods preceding active processing. 
Fig.4 indicates each acquisition uses SPI receive via CPU polling in deep sleep and immediately transitions to active processing after the first interrupt, regardless of buffer fullness.", "label": false }, { "paperid": "2409.19663v2", "paper_path": "./SciVer/papers/2409.19663v2.json", "claim_type": "sequential", "item1": "1", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.19663v2-Table1-1.png", "item2_path": "./SciVer/images/2409.19663v2-Table4-1.png", "section": [ "5.3" ], "request_id": 400, "origin_statement": "UnKE’s Linear identifier achieves the highest F1 (0.854) on Llama3.1-8B (Table 1), yet Table 4 reports UnKE’s reliability at 0.393 versus FT-M’s 0.993, showing top identification scores do not equate to high editing reliability.", "perturbed_statement": "UnKE’s Linear identifier achieves the highest F1 (0.855) on Llama2-13B (Table 1), yet Table 4 reports UnKE’s reliability at 0.393 versus FT-M’s 0.996, showing top identification scores do not guarantee high editing reliability.", "perturbed_explanation": "This is incorrect because Table 1 shows UnKE’s Linear identifier F1 on Llama2-13B is actually 0.754 (not 0.855), and Table 4 lists UnKE’s reliability for Llama2-13B as 0.021 (not 0.393).", "claim": "UnKE’s Linear identifier achieves the highest F1 (0.855) on Llama2-13B (Table 1), yet Table 4 reports UnKE’s reliability at 0.393 versus FT-M’s 0.996, showing top identification scores do not guarantee high editing reliability.", "label": false }, { "paperid": "2411.09458v1", "paper_path": "./SciVer/papers/2411.09458v1.json", "claim_type": "sequential", "item1": "4", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09458v1_figure_4.png", "item2_path": "./SciVer/images/2411.09458v1-Table1-1.png", "section": [ "5.1.2" ], "request_id": 401, "origin_statement": "Table 1 gives B(B0→D−K+)/B(B0→D−π+) = 8.22×10^−2; using this normalization, the full‐likelihood fit in Fig. 4 yields fs/fd ≃ 0.28 at 7 TeV.", "perturbed_statement": "Table 1 gives B(B0→D−K+)/B(B0→D−π+) = 7.22×10^−2; using this normalization, the full‐likelihood fit in Fig. 4 yields fs/fd ≃ 0.32 at 7 TeV.", "perturbed_explanation": "The perturbed statement is wrong because Table 1 actually reports the ratio as (8.22±0.28)×10^−2, not 7.22×10^−2, and Fig. 4 shows the full‐likelihood mean of fs/fd around 0.28, not 0.32.", "claim": "Table 1 gives B(B0→D−K+)/B(B0→D−π+) = 7.22×10^−2; using this normalization, the full‐likelihood fit in Fig. 4 yields fs/fd ≃ 0.32 at 7 TeV.", "label": false }, { "paperid": "2410.01109v1", "paper_path": "./SciVer/papers/2410.01109v1.json", "claim_type": "sequential", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.01109v1_figure_4.png", "item2_path": "./SciVer/images/2410.01109v1-Table2-1.png", "section": [ "4.3" ], "request_id": 402, "origin_statement": "Phi-3-Small experiments combining Open-Orca and Orca-Math attained the highest normalized average score (about 0.85) in Fig. 4. In Table 2, Phi-3-Small multi-task fine-tuning achieved a FinNerCLS of 0.98. Therefore, adding both non-financial datasets likely enabled near-perfect financial entity classification.", "perturbed_statement": "Phi-3-Small experiments using only finance data attained the highest normalized average score (about 0.85) in Fig. 4. In Table 2, Phi-3-Small multi-task fine-tuning achieved a FinNerCLS of 0.98. 
Therefore, adding only finance data likely enabled near-perfect financial entity classification.", "perturbed_explanation": "The perturbed statement is incorrect because Fig. 4 shows that the ‘only finance data’ experiments (green points) yield the lowest normalized average scores across all models, not the highest. The highest scores for Phi-3-Small in Fig. 4 come from experiments combining Open-Orca and Orca-Math (purple points).", "claim": "Phi-3-Small experiments using only finance data attained the highest normalized average score (about 0.85) in Fig. 4. In Table 2, Phi-3-Small multi-task fine-tuning achieved a FinNerCLS of 0.98. Therefore, adding only finance data likely enabled near-perfect financial entity classification.", "label": false }, { "paperid": "2411.15743v1", "paper_path": "./SciVer/papers/2411.15743v1.json", "claim_type": "sequential", "item1": "4", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.15743v1_figure_4.png", "item2_path": "./SciVer/images/2411.15743v1_figure_8.png", "section": [ "6.1" ], "request_id": 403, "origin_statement": "Fig.4 reports that pre-trained TimesFM achieves low log-MSE (~10^-4) at the 1/12 and 1/24 frequencies but a much higher MSE (~10^-1) at 1/25; accordingly, Fig.8 shows TimesFM’s two-harmonic 1/25 forecasts with clear amplitude underestimation and phase misalignment.", "perturbed_statement": "Fig.4 reports that pre-trained TimesFM achieves extremely low log-MSE (~10^-5) at 1/25; accordingly, Fig.8 shows TimesFM’s two-harmonic 1/25 forecasts with perfect amplitude matching and no phase misalignment.", "perturbed_explanation": "The perturbation is incorrect because in Fig.4 TimesFM’s MSE at 1/25 is not ~10^-5 but around ~10^-1 (far above its errors at 1/12 and 1/24). Consequently, Fig.8’s two-harmonic 1/25 forecasts (red curve, middle right) clearly under-estimate amplitude and exhibit phase offsets rather than perfect alignment.", "claim": "Fig.4 reports that pre-trained TimesFM achieves extremely low log-MSE (~10^-5) at 1/25; accordingly, Fig.8 shows TimesFM’s two-harmonic 1/25 forecasts with perfect amplitude matching and no phase misalignment.", "label": false }, { "paperid": "2410.02099v1", "paper_path": "./SciVer/papers/2410.02099v1.json", "claim_type": "sequential", "item1": "1(d)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02099v1_figure_1(d).png", "item2_path": "./SciVer/images/2410.02099v1-Table3-1.png", "section": [ "5.5.3" ], "request_id": 404, "origin_statement": "At 20% token corruption, our watermark scheme achieves about 87% AUC (Figure 1). Table 3 reports that the uniform KDE LRT with m=16 yields only 66.6% AUC. Hence, our watermark detection still outperforms the uniform KDE likelihood‐ratio test under moderate corruption.", "perturbed_statement": "At 20% token corruption, our watermark scheme achieves about 87% AUC (Figure 1). Table 3 reports that the uniform KDE LRT with m=16 yields 97.8% AUC. Hence, the uniform KDE likelihood‐ratio test significantly outperforms our watermark detection under moderate corruption.", "perturbed_explanation": "This is incorrect because Table 3 shows that under Unif. KDE LRT for Flat (k=1) at m=16, the AUC is 66.6%, not 97.8%. The 97.8% AUC actually corresponds to Flat (k=10), so attributing it to k=1 misreads the table.", "claim": "At 20% token corruption, our watermark scheme achieves about 87% AUC (Figure 1). Table 3 reports that the uniform KDE LRT with m=16 yields 97.8% AUC. 
Hence, the uniform KDE likelihood‐ratio test significantly outperforms our watermark detection under moderate corruption.", "label": false }, { "paperid": "2409.02120v1", "paper_path": "./SciVer/papers/2409.02120v1.json", "claim_type": "sequential", "item1": "14", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.02120v1_figure_14.png", "item2_path": "./SciVer/images/2409.02120v1-Table6-1.png", "section": [ "6.4" ], "request_id": 405, "origin_statement": "The 5 m downscaled map (Fig 14) highlights canals as cooler (purple). Table 6 reports a 15.8 % median error for minimum air temperature downscaling. Therefore, these fine-scale nocturnal cooling patterns may be misrepresented by about one-sixth of the true value.", "perturbed_statement": "The 5 m downscaled map (Fig 14) highlights canals as cooler (purple). Table 6 reports a 9.0 % median error for minimum air temperature downscaling. Therefore, these fine-scale nocturnal cooling patterns may be misrepresented by around 9 %.", "perturbed_explanation": "Table 6 shows the median error for minimum air temperature downscaling is 15.8 %, not 9.0 %. The 9.0 % figure is actually the median error for average air temperature, so the premise attributing 9.0 % to the minimum temperature error contradicts the tabulated data.", "claim": "The 5 m downscaled map (Fig 14) highlights canals as cooler (purple). Table 6 reports a 9.0 % median error for minimum air temperature downscaling. Therefore, these fine-scale nocturnal cooling patterns may be misrepresented by around 9 %.", "label": false }, { "paperid": "2409.02246v1", "paper_path": "./SciVer/papers/2409.02246v1.json", "claim_type": "sequential", "item1": "3", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.02246v1_figure_3.png", "item2_path": "./SciVer/images/2409.02246v1_figure_5.png", "section": [ "5.1" ], "request_id": 406, "origin_statement": "By iteration 10 in the high call volume setting, the patrol Qθ loss falls below 10, indicating patrol network convergence; consequently in the low call volume setting, the optimized patrol policy achieves a median response time of 5 iterations, one faster than the fully heuristic policy's median of 6.", "perturbed_statement": "By iteration 10 in the high call volume setting, the patrol Qθ loss falls below 5, indicating patrol network convergence; consequently in the low call volume setting, the optimized patrol policy achieves a median response time of 7 iterations, one slower than the fully heuristic policy's median of 6.", "perturbed_explanation": "Both premises contradict the figures. In Figure 3 (bottom), the patrol Qθ loss at iteration 10 is around 10, not below 5. 
In Figure 5(a), the optimized patrol policy’s median response time is 5 iterations, not 7, and the heuristic median is 6.", "claim": "By iteration 10 in the high call volume setting, the patrol Qθ loss falls below 5, indicating patrol network convergence; consequently in the low call volume setting, the optimized patrol policy achieves a median response time of 7 iterations, one slower than the fully heuristic policy's median of 6.", "label": false }, { "paperid": "2409.11704v1", "paper_path": "./SciVer/papers/2409.11704v1.json", "claim_type": "sequential", "item1": "2", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11704v1_figure_2.png", "item2_path": "./SciVer/images/2409.11704v1_figure_3(a).png", "section": [ "3.3" ], "request_id": 407, "origin_statement": "DPO training with the bold-and-list–augmented reward model boosts the bold pattern ratio to about 60% (Fig 2), and under iterative DPO this bias compounds, raising test-time bold usage from ~4% offline to ~27% by the third iteration (Fig 3).", "perturbed_statement": "DPO training with the bold-and-list–augmented reward model boosts the bold pattern ratio to about 65% (Fig 2), and under iterative DPO this bias compounds, raising test-time bold usage from ~5% offline to ~35% by the third iteration (Fig 3).", "perturbed_explanation": "Both numbers are incorrect. Figure 2 shows the bold-and-list–augmented model yields a bold ratio of ~60%, not ~65%. Figure 3 shows iterative DPO reaches ~27% bold usage by iteration 3 (and offline is ~4%), not ~35% (or ~5% offline).", "claim": "DPO training with the bold-and-list–augmented reward model boosts the bold pattern ratio to about 65% (Fig 2), and under iterative DPO this bias compounds, raising test-time bold usage from ~5% offline to ~35% by the third iteration (Fig 3).", "label": false }, { "paperid": "2410.02346v2", "paper_path": "./SciVer/papers/2410.02346v2.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.02346v2_figure_2.png", "item2_path": "./SciVer/images/2410.02346v2-Table2-1.png", "section": [ "2.3" ], "request_id": 409, "origin_statement": "Table 2 lists the COMs model with n_fin = 10^8 cm⁻³; Fig. 2 shows the COMs density curve reaches 10^8 cm⁻³ at ~1.55 Myr. At this moment Fig. 2 also indicates T_gas ≈ 200 K, so COMs thermal desorption occurs when ζ has dropped to ~10⁻¹⁷ s⁻¹.", "perturbed_statement": "Table 2 lists the COMs model with n_fin = 10^6 cm⁻³; Fig. 2 shows the COMs density curve reaches 10^6 cm⁻³ at ~1.55 Myr. At this moment Fig. 2 also indicates T_gas ≈ 200 K, so COMs thermal desorption occurs when ζ has dropped to ~10⁻¹⁷ s⁻¹.", "perturbed_explanation": "The perturbed statement misreads Table 2: the COMs model n_fin is actually 10^8 cm⁻³, not 10^6 cm⁻³. Consequently, Fig. 2’s COMs density curve crosses 10^8 cm⁻³ at ~1.55 Myr, not 10^6 cm⁻³, making the perturbed premise and final claim incorrect.", "claim": "Table 2 lists the COMs model with n_fin = 10^6 cm⁻³; Fig. 2 shows the COMs density curve reaches 10^6 cm⁻³ at ~1.55 Myr. At this moment Fig. 2 also indicates T_gas ≈ 200 K, so COMs thermal desorption occurs when ζ has dropped to ~10⁻¹⁷ s⁻¹.", "label": false }, { "paperid": "2411.02359v1", "paper_path": "./SciVer/papers/2411.02359v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.02359v1_figure_3(a).png", "item2_path": "./SciVer/images/2411.02359v1_figure_3(b).png", "section": [ "4.1" ], "request_id": 410, "origin_statement": "From the ABC→D curve, DeeR-S runs at about 4.6 GFLOPs per action. Then, the GPU memory bar chart shows it uses 2 GB. Thus, DeeR-S achieves a 2.65 average successful length in ABC→D with 4.6 GFLOPs and 2 GB memory.", "perturbed_statement": "From the ABC→D curve, DeeR-S runs at about 6.5 GFLOPs per action. Then, the GPU memory bar chart shows it uses 4 GB. Thus, DeeR-S achieves a 2.65 average successful length in ABC→D with 6.5 GFLOPs and 4 GB memory.", "perturbed_explanation": "The ABC→D plot indicates DeeR-S operates at roughly 4.6 GFLOPs per action (not 6.5 GFLOPs), and the GPU memory chart shows a 2 GB footprint (not 4 GB). These discrepancies make the perturbed resource numbers inconsistent with the figures.", "claim": "From the ABC→D curve, DeeR-S runs at about 6.5 GFLOPs per action. Then, the GPU memory bar chart shows it uses 4 GB. Thus, DeeR-S achieves a 2.65 average successful length in ABC→D with 6.5 GFLOPs and 4 GB memory.", "label": false }, { "paperid": "2410.13949v1", "paper_path": "./SciVer/papers/2410.13949v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13949v1_figure_3.png", "item2_path": "./SciVer/images/2410.13949v1-Table3-1.png", "section": [ "6" ], "request_id": 411, "origin_statement": "Since Figure 3(r2) shows that ABC-MCMC with regression adjustment at h=1 yields a smaller log-RMSE for the correlation parameters θ_R than ABC-MCMC at h=10, Table 3 confirms that MCMC+Reg(h=1) ranks first in RMSE for θ_R, whereas MCMC(h=10) ranks third.", "perturbed_statement": "Since Figure 3(r2) shows that ABC-MCMC with regression adjustment at h=1 yields a smaller log-RMSE for the correlation parameters θ_R than ABC-MCMC at h=10, Table 3 confirms that MCMC+Reg(h=1) ranks second in RMSE for θ_R, whereas MCMC(h=10) ranks fourth.", "perturbed_explanation": "Table 3’s RMSE row for θ_R lists MCMC+Reg(h=1) as rank 1 (not rank 2) and MCMC(h=10) as rank 3 (not rank 4), so both rankings in the perturbed statement contradict the actual table entries.", "claim": "Since Figure 3(r2) shows that ABC-MCMC with regression adjustment at h=1 yields a smaller log-RMSE for the correlation parameters θ_R than ABC-MCMC at h=10, Table 3 confirms that MCMC+Reg(h=1) ranks second in RMSE for θ_R, whereas MCMC(h=10) ranks fourth.", "label": false }, { "paperid": "2410.22782v1", "paper_path": "./SciVer/papers/2410.22782v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.22782v1-Table1-1.png", "item2_path": "./SciVer/images/2410.22782v1-Table2-1.png", "section": [ "4.1.1", "4.1" ], "request_id": 413, "origin_statement": "From Table1, MALoRA’s latency (896.4 µs) is only ~7.5% above LoRA’s (833.3 µs); Table2 shows MALoRA’s intra-domain average (80.47) exceeds LoRA’s (78.48) by ~2 points, highlighting its efficiency–performance tradeoff.", "perturbed_statement": "MALoRA’s latency (896.4 µs) is only ~3.5% above LoRA’s (833.3 µs); Table2 shows MALoRA’s intra-domain average (80.47) exceeds LoRA’s (76.47) by ~4 points, highlighting its efficiency–performance tradeoff.", "perturbed_explanation": "The latency gap is actually ~7.5% (896.4 µs vs. 833.3 µs), not 3.5%, and LoRA’s average in Table 2 is 78.48, not 76.47, so both the stated percentage difference and baseline score are incorrect.", "claim": "MALoRA’s latency (896.4 µs) is only ~3.5% above LoRA’s (833.3 µs); Table2 shows MALoRA’s intra-domain average (80.47) exceeds LoRA’s (76.47) by ~4 points, highlighting its efficiency–performance tradeoff.", "label": false }, { "paperid": "2411.17820v1", "paper_path": "./SciVer/papers/2411.17820v1.json", "claim_type": "sequential", "item1": "7", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.17820v1_figure_7.png", "item2_path": "./SciVer/images/2411.17820v1-Table1-1.png", "section": [ "4.3" ], "request_id": 414, "origin_statement": "Figure 7 shows adding feature hallucination loss at 2000 hr reduces orientation loss from about 0.045 to 0.030 at 100 k steps; accordingly, Table 1 shows our fine-tuned model’s mean AOE(5) is 7.97°, outperforming fine-tuned ViNT’s 9.87°.", "perturbed_statement": "Figure 7 shows adding feature hallucination loss at 2000 hr reduces orientation loss from about 0.045 to 0.025 at 100 k steps; accordingly, Table 1 shows our fine-tuned model’s mean AOE(5) is 4.63°, outperforming fine-tuned ViNT’s 5.97°.", "perturbed_explanation": "Both numerical steps are misreported. In Figure 7, the orientation loss with hallucination at 2000 hr reaches about 0.030, not 0.025. In Table 1, the mean AOE(5) for our fine-tuned model is 7.97° (the 4.63° value is from the “All” column), and ViNT fine-tune’s mean AOE is 9.87°, not 5.97°.", "claim": "Figure 7 shows adding feature hallucination loss at 2000 hr reduces orientation loss from about 0.045 to 0.025 at 100 k steps; accordingly, Table 1 shows our fine-tuned model’s mean AOE(5) is 4.63°, outperforming fine-tuned ViNT’s 5.97°.", "label": false }, { "paperid": "2411.06529v1", "paper_path": "./SciVer/papers/2411.06529v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "1(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.06529v1_figure_1(c).png", "item2_path": "./SciVer/images/2411.06529v1_figure_1(d).png", "section": [ "3.1" ], "request_id": 415, "origin_statement": "From Figure 1(c), we see peak equivalent stress around 14 in the narrow white-phase bottleneck. Given that strain energy density scales with stress under constant 0.1% tensile strain, Figure 1(d) shows corresponding energy density peaks near 0.055 in the same regions, so bottlenecks store most elastic energy.", "perturbed_statement": "From Figure 1(c), we see peak equivalent stress around 8 in the narrow white-phase bottleneck.
Given that strain energy density scales with stress under constant 0.1% tensile strain, Figure 1(d) shows corresponding energy density peaks near 0.03 in the same regions, so bottlenecks store most elastic energy.", "label": false }, { "paperid": "2410.09432v1", "paper_path": "./SciVer/papers/2410.09432v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09432v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.09432v1_figure_1(b).png", "section": [ "1" ], "request_id": 416, "origin_statement": "FedIT separately averages each client’s low-rank adapters A_i and B_i (fig1a), so its update uses the product of means A·B instead of the mean of products. FedEx-LoRA computes ΔW_res=mean(B_i·A_i)−mean(B_i)·mean(A_i) and adds it to W₀ for exact aggregation (fig1b).", "perturbed_statement": "FedIT separately averages each client’s low-rank adapters A_i and B_i (fig1a), so its update uses the product of means A·B instead of the mean of products. FedEx-LoRA computes ΔW_res=mean(B_i·A_i)−mean(B_i)·mean(A_i) and subtracts it from W₀ for exact aggregation (fig1b).", "perturbed_explanation": "Figure 1b and the text state that FedEx-LoRA “adds the error residual term ΔW_res to the pretrained weight matrix W₀,” as shown by the block labeled W₀+ΔW_res. Subtracting ΔW_res contradicts this explicit detail from the context.", "claim": "FedIT separately averages each client’s low-rank adapters A_i and B_i (fig1a), so its update uses the product of means A·B instead of the mean of products. FedEx-LoRA computes ΔW_res=mean(B_i·A_i)−mean(B_i)·mean(A_i) and subtracts it from W₀ for exact aggregation (fig1b).", "label": false }, { "paperid": "2410.03090v1", "paper_path": "./SciVer/papers/2410.03090v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.03090v1-Table1-1.png", "item2_path": "./SciVer/images/2410.03090v1-Table2-1.png", "section": [ "4.2" ], "request_id": 417, "origin_statement": "FullKV at KV size 64 scores 30.54 (Table 1). Ours-remain-tokens-256 scores 29.74 under extreme compression (Table 2), staying within 0.8 points of full performance across QA tasks.", "perturbed_statement": "FullKV at KV size 64 scores 28.54 (Table 1). Ours-remain-tokens-256 scores 29.74 under extreme compression (Table 2), exceeding full performance by 1.2 points across QA tasks.", "perturbed_explanation": "Table 1 lists FullKV’s average score at KV size 64 as 30.54, not 28.54. Since Ours-remain-tokens-256 achieves 29.74 (Table 2), it remains 0.8 points below FullKV’s actual 30.54, so it cannot exceed full performance.", "claim": "FullKV at KV size 64 scores 28.54 (Table 1). 
Ours-remain-tokens-256 scores 29.74 under extreme compression (Table 2), exceeding full performance by 1.2 points across QA tasks.", "label": false }, { "paperid": "2410.14731v1", "paper_path": "./SciVer/papers/2410.14731v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "4(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.14731v1_figure_3(b).png", "item2_path": "./SciVer/images/2410.14731v1_figure_4(a).png", "section": [ "5.3" ], "request_id": 418, "origin_statement": "Because PCA initialization reduces evaluation loss below 1 after only 2.6 K SFT samples (Fig 3), using MatryoshkaKV with greedy search at a 30% cache budget attains roughly 49% average accuracy—about 1% higher than PCA with the same search and budget (Fig 4).", "perturbed_statement": "Because PCA initialization reduces evaluation loss below 1 after only 2.6 K SFT samples (Fig 3), using MatryoshkaKV with greedy search at a 30% cache budget attains roughly 55% average accuracy—about 7% higher than PCA with the same search and budget (Fig 4).", "perturbed_explanation": "Figure 4 (Left) shows that at a 30% cache budget, MatryoshkaKV with greedy search actually achieves about 49% average accuracy—not 55%. Therefore, the claimed 55% is inconsistent with the reported data.", "claim": "Because PCA initialization reduces evaluation loss below 1 after only 2.6 K SFT samples (Fig 3), using MatryoshkaKV with greedy search at a 30% cache budget attains roughly 55% average accuracy—about 7% higher than PCA with the same search and budget (Fig 4).", "label": false }, { "paperid": "2410.20807v2", "paper_path": "./SciVer/papers/2410.20807v2.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.20807v2-Table1-1.png", "item2_path": "./SciVer/images/2410.20807v2-Table3-1.png", "section": [ "4.2" ], "request_id": 419, "origin_statement": "AdaptOD achieves 89.05% AUC on the CIFAR OOD dataset under CIFAR10-LT (Table 1). DODA applied to DNE yields 94.69% AUC on CIFAR10-LT (Table 3). Thus, DODA on DNE outperforms AdaptOD on near-OOD CIFAR by 5.64%.", "perturbed_statement": "AdaptOD achieves 91.05% AUC on the CIFAR OOD dataset under CIFAR10-LT (Table 1). DODA applied to DNE yields 94.69% AUC on CIFAR10-LT (Table 3). Thus, DODA on DNE outperforms AdaptOD on near-OOD CIFAR by 3.64%.", "perturbed_explanation": "The perturbation is incorrect because Table 1 reports that AdaptOD’s AUC on the CIFAR OOD dataset under CIFAR10-LT is 89.05%, not 91.05%. Using the correct 89.05% yields a 5.64% gain, not 3.64%.", "claim": "AdaptOD achieves 91.05% AUC on the CIFAR OOD dataset under CIFAR10-LT (Table 1). DODA applied to DNE yields 94.69% AUC on CIFAR10-LT (Table 3). 
Thus, DODA on DNE outperforms AdaptOD on near-OOD CIFAR by 3.64%.", "label": false }, { "paperid": "2409.02358v1", "paper_path": "./SciVer/papers/2409.02358v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.02358v1_figure_1.png", "item2_path": "./SciVer/images/2409.02358v1_figure_2.png", "section": [ "5.2" ], "request_id": 420, "origin_statement": "Fig1 shows YouTube has a 1.61 positive residual for time management topics; Fig2 shows YouTube has a 4.67* residual for time management negative effects; thus YouTube’s autoplay feature exacerbates teens’ time management struggles.", "perturbed_statement": "Fig1 shows YouTube has a –0.58 residual for time management topics; Fig2 shows YouTube has a 4.67* residual for time management negative effects; thus YouTube’s autoplay feature exacerbates teens’ time management struggles.", "perturbed_explanation": "The perturbed statement is wrong because in Figure 1 YouTube’s standardized residual for time management is +1.61 (not –0.58), so the premise about a negative association is contradicted by the actual data.", "claim": "Fig1 shows YouTube has a –0.58 residual for time management topics; Fig2 shows YouTube has a 4.67* residual for time management negative effects; thus YouTube’s autoplay feature exacerbates teens’ time management struggles.", "label": false }, { "paperid": "2410.21205v1", "paper_path": "./SciVer/papers/2410.21205v1.json", "claim_type": "sequential", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.21205v1_figure_4.png", "item2_path": "./SciVer/images/2410.21205v1-Table2-1.png", "section": [ "4.2" ], "request_id": 421, "origin_statement": "By iteration 3, introducing intermediate F (Table 2) lowered the AIC to –580.40, which corresponds to the near-perfect overlap of model predictions and in-silico data for species C in Figure 4.", "perturbed_statement": "By iteration 2, introducing intermediate E (Table 2) lowered the AIC to –580.40, which corresponds to the near-perfect overlap of model predictions and in-silico data for species C in Figure 4.", "perturbed_explanation": "The perturbed statement incorrectly assigns the AIC of –580.40 to iteration 2. According to Table 2, iteration 2’s AIC is actually 848.18; the –580.40 value is exclusively reported for iteration 3.", "claim": "By iteration 2, introducing intermediate E (Table 2) lowered the AIC to –580.40, which corresponds to the near-perfect overlap of model predictions and in-silico data for species C in Figure 4.", "label": false }, { "paperid": "2410.13371v2", "paper_path": "./SciVer/papers/2410.13371v2.json", "claim_type": "sequential", "item1": "9(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13371v2_figure_9(b).png", "item2_path": "./SciVer/images/2410.13371v2-Table2-1.png", "section": [ "4.2" ], "request_id": 422, "origin_statement": "Table 2 shows the proposed method’s average computation time (0.41 s) is over five times faster than Schöps et al.’s (2.3 s), and since Figure 9 indicates both methods produce similarly uniform reprojection error directions, the proposed approach accelerates calibration without systematic bias.", "perturbed_statement": "Table 2 shows the proposed method’s average computation time (0.41 s) is more than ten times faster than Schöps et al.’s (2.3 s). 
Figure 9 shows reprojection errors concentrate toward the green direction for the proposed method, indicating a systematic directional bias despite the speed gain.", "perturbed_explanation": "The perturbed statement is incorrect on two counts: 1) The speedup is claimed to be over ten times, but 2.3 s divided by 0.41 s is roughly 5.6×, not >10×. 2) Figure 9 actually shows a uniform distribution of reprojection error directions, not a concentration toward green, so no systematic directional bias is present.", "claim": "Table 2 shows the proposed method’s average computation time (0.41 s) is more than ten times faster than Schöps et al.’s (2.3 s). Figure 9 shows reprojection errors concentrate toward the green direction for the proposed method, indicating a systematic directional bias despite the speed gain.", "label": false }, { "paperid": "2410.06313v1", "paper_path": "./SciVer/papers/2410.06313v1.json", "claim_type": "sequential", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.06313v1_figure_4.png", "item2_path": "./SciVer/images/2410.06313v1-Table2-1.png", "section": [ "3.1" ], "request_id": 423, "origin_statement": "Figure (a) shows Health Economics papers forming a coherent cluster in the t-SNE embedding. Table 2 lists Manning and Marquis (1996) in JHE with a 0.999 combined score marked high confidence. Thus, high-confidence papers like Manning and Marquis likely occupy this dense blue cluster.", "perturbed_statement": "Figure (a) shows Health Economics papers forming a coherent cluster in the t-SNE embedding. Table 2 lists Chari et al. (2017) in JDevEc with a 0.525 combined score marked low confidence. Thus, low-confidence papers like Chari likely occupy this dense blue cluster.", "perturbed_explanation": "This is incorrect because the dense blue cluster in Figure (a) represents papers with clear, extreme classification probabilities—i.e., high-confidence predictions. Table 2 places Chari et al. (2017) in Section III with a low confidence score (0.525), which the text explains lies near the cutoff region, not in the homogeneous high-density cluster seen for high-confidence papers.", "claim": "Figure (a) shows Health Economics papers forming a coherent cluster in the t-SNE embedding. Table 2 lists Chari et al. (2017) in JDevEc with a 0.525 combined score marked low confidence. 
Thus, low-confidence papers like Chari likely occupy this dense blue cluster.", "label": false }, { "paperid": "2409.19974v2", "paper_path": "./SciVer/papers/2409.19974v2.json", "claim_type": "sequential", "item1": "4(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19974v2_figure_4(a).png", "item2_path": "./SciVer/images/2409.19974v2-Table2-1.png", "section": [ "4.3.1" ], "request_id": 424, "origin_statement": "The log–log plot shows the L2-norm error for uₙ scales as Δx²; since both the Newtonian network (μₙᵛ=4.0) and the viscoelastic network (μₙᵛ+μₙᵖ=2.0+2.0=4.0) share total viscosity 4.0, the viscoelastic simulation should likewise achieve second-order convergence in uₙ’s L2 norm.", "perturbed_statement": "The log–log plot shows the L2-norm error for uₙ scales roughly linearly with Δx; since both the Newtonian and viscoelastic networks share total viscosity 4.0, the viscoelastic simulation will only exhibit first-order convergence in uₙ’s L2 norm.", "perturbed_explanation": "The plot actually displays a slope of two (dashed O(Δx²) reference), not one; the error decreases ∝Δx², so convergence is second-order rather than first-order, making the perturbed claim incorrect.", "claim": "The log–log plot shows the L2-norm error for uₙ scales roughly linearly with Δx; since both the Newtonian and viscoelastic networks share total viscosity 4.0, the viscoelastic simulation will only exhibit first-order convergence in uₙ’s L2 norm.", "label": false }, { "paperid": "2409.03054v1", "paper_path": "./SciVer/papers/2409.03054v1.json", "claim_type": "sequential", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03054v1_figure_2.png", "item2_path": "./SciVer/images/2409.03054v1_figure_4.png", "section": [ "4.1" ], "request_id": 429, "origin_statement": "Figure 2 shows the News webpage’s image has an empty alt attribute; our system preserves existing alt text and displays new descriptions separately; thus clicking that News image would pop up both context-aware and context-free descriptions in the extension window, as demonstrated in Figure 4.", "perturbed_statement": "Figure 2 shows the News webpage’s image has an empty alt attribute; our system replaces existing alt text directly in-place; thus clicking that News image would embed the context-aware description inline on the webpage as shown in Figure 4.", "perturbed_explanation": "This is wrong because Section 4.1 and Figure 4 state that the tool \"preserves existing alternative text\" and opens a separate extension window to provide descriptions, rather than replacing or embedding descriptions in-place on the original webpage.", "claim": "Figure 2 shows the News webpage’s image has an empty alt attribute; our system replaces existing alt text directly in-place; thus clicking that News image would embed the context-aware description inline on the webpage as shown in Figure 4.", "label": false }, { "paperid": "2411.04188v3", "paper_path": "./SciVer/papers/2411.04188v3.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.04188v3_figure_4.png", "item2_path": "./SciVer/images/2411.04188v3_figure_5.png", "section": [ "5" ], "request_id": 436, "origin_statement": "Figure 4 shows that ML=2 corresponds to an average LR score of about 2.6; yet in Figure 5b, a source with ML=2 received LR=5, highlighting a notable disagreement for that mid‐range ML rating.", 
"perturbed_statement": "Figure 4 shows that ML=2 corresponds to an average LR score of about 3.3; yet in Figure 5b, a source with ML=2 received LR=5, highlighting a notable disagreement for that mid‐range ML rating.", "perturbed_explanation": "Figure 4’s data point at ML=2 has an average LR score of ~2.6 (error bar centered near 2.6), not 3.3. The perturbation incorrectly inflates the mean LR score for ML=2, making the claim false.", "claim": "Figure 4 shows that ML=2 corresponds to an average LR score of about 3.3; yet in Figure 5b, a source with ML=2 received LR=5, highlighting a notable disagreement for that mid‐range ML rating.", "label": false }, { "paperid": "2411.01423v1", "paper_path": "./SciVer/papers/2411.01423v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01423v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.01423v1_figure_4(c).png", "section": [ "5.2" ], "request_id": 437, "origin_statement": "Panel A shows that under τ=0, CLaSMO optimization shifts most scaffold QED values above 0.7. Panel C shows at τ=0.5, fewer optimized molecules exceed 0.7, illustrating that tighter similarity constraints reduce high-QED outcomes.", "perturbed_statement": "Panel A shows that under τ=0, CLaSMO optimization shifts most scaffold QED values above 0.8. Panel C shows at τ=0.5, fewer optimized molecules exceed 0.8, illustrating that tighter similarity constraints reduce high-QED outcomes.", "perturbed_explanation": "In Panel A, the blue bars (CLaSMO τ=0) cluster mainly in the 0.6–0.8 range, not above 0.8. Only a small fraction of optimized molecules reach above 0.8, so claiming that most exceed 0.8 contradicts the histogram data.", "claim": "Panel A shows that under τ=0, CLaSMO optimization shifts most scaffold QED values above 0.8. Panel C shows at τ=0.5, fewer optimized molecules exceed 0.8, illustrating that tighter similarity constraints reduce high-QED outcomes.", "label": false }, { "paperid": "2411.13151v1", "paper_path": "./SciVer/papers/2411.13151v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.13151v1_figure_2(a).png", "item2_path": "./SciVer/images/2411.13151v1-Table2-1.png", "section": [ "4" ], "request_id": 439, "origin_statement": "Table 2 shows variants D, DF, and DFC implement DDD. Figure 2 reveals that DDD variants have about half the median timed nodes of their BC counterparts (e.g., D ~800 vs B ~1200).", "perturbed_statement": "Table 2 shows variants D, BF, and DFC implement DDD. Therefore, Figure 2 indicates BF has roughly half the median timed nodes of DF.", "perturbed_explanation": "This is wrong because Table 2 clearly shows BF does not have a check under DDD (only D, DF, and DFC do). Furthermore, in Figure 2 BF’s median timed nodes (~1000) are actually higher than DF’s (~600), not half as stated.", "claim": "Table 2 shows variants D, BF, and DFC implement DDD. 
Therefore, Figure 2 indicates BF has roughly half the median timed nodes of DF.", "label": false }, { "paperid": "2411.00387v1", "paper_path": "./SciVer/papers/2411.00387v1.json", "claim_type": "sequential", "item1": "1", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.00387v1_figure_1.png", "item2_path": "./SciVer/images/2411.00387v1_figure_3(a).png", "section": [ "1" ], "request_id": 441, "origin_statement": "T appears predominantly as a vector variable in STEM-PoM: Figure 3 shows 23 of its 41 instances are labeled as vector, and Figure 1 confirms vectors fall under the variable class.", "perturbed_statement": "T appears predominantly as a scalar variable in STEM-PoM: Figure 3 shows 28 of its 41 instances are labeled as scalar, and Figure 1 confirms scalars fall under the variable class.", "perturbed_explanation": "Figure 3 actually shows only about 5 scalar instances for T (not 28), while vector instances number about 23. Therefore, T is not predominantly used as a scalar variable.", "claim": "T appears predominantly as a scalar variable in STEM-PoM: Figure 3 shows 28 of its 41 instances are labeled as scalar, and Figure 1 confirms scalars fall under the variable class.", "label": false }, { "paperid": "2409.08388v1", "paper_path": "./SciVer/papers/2409.08388v1.json", "claim_type": "sequential", "item1": "4", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.08388v1_figure_4.png", "item2_path": "./SciVer/images/2409.08388v1-Table4-1.png", "section": [ "5.3" ], "request_id": 443, "origin_statement": "At stage 9, focal loss yields 72.1% accuracy (Fig. 4). Table 4 shows input-only ACC_last is 76.7%, and using input+local+global reaches 80.8%, thus combining all three features improves final accuracy by 8.7% over focal loss alone.", "perturbed_statement": "At stage 9, focal loss yields 72.1% accuracy (Fig. 4). Table 4 reports input+local+global ACC_last is only 78.8%, so combining all three features improves final accuracy by 6.7% over focal loss.", "perturbed_explanation": "This is wrong because Table 4 actually lists ACC_last of 80.8% for the input+local+global combination; 78.8% is the ACC_last for input+global only, not the full three‐feature setup.", "claim": "At stage 9, focal loss yields 72.1% accuracy (Fig. 4). Table 4 reports input+local+global ACC_last is only 78.8%, so combining all three features improves final accuracy by 6.7% over focal loss.", "label": false }, { "paperid": "2411.14164v1", "paper_path": "./SciVer/papers/2411.14164v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14164v1_figure_1.png", "item2_path": "./SciVer/images/2411.14164v1_figure_3.png", "section": [ "3.1" ], "request_id": 444, "origin_statement": "Layer 23 in CLIP shows over 80% of attention on under 25% of tokens, and since GQA inputs contain 96% visual tokens, only about 24% of all tokens carry critical visual information.", "perturbed_statement": "Layer 23 in CLIP shows over 80% of attention on under 25% of tokens, and since GQA inputs contain 89% visual tokens, only about 22% of all tokens carry critical visual information.", "perturbed_explanation": "Figure 3 reports that GQA inputs actually contain 96% visual tokens (2110), not 89%. 
Using the correct 96% yields roughly 0.25×96%=24%, so the 22% claim is inaccurate.", "claim": "Layer 23 in CLIP shows over 80% of attention on under 25% of tokens, and since GQA inputs contain 89% visual tokens, only about 22% of all tokens carry critical visual information.", "label": false }, { "paperid": "2409.06123v1", "paper_path": "./SciVer/papers/2409.06123v1.json", "claim_type": "sequential", "item1": "9", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06123v1_figure_9.png", "item2_path": "./SciVer/images/2409.06123v1-Table3-1.png", "section": [ "6.1.2" ], "request_id": 449, "origin_statement": "From Figure 9, SubTab FL has the highest delta accuracy on the synthetic dataset (≈0.5). Table 3 lists SubTab FL’s delta for syn at 0.4936, confirming it remains the highest under 25% data size.", "perturbed_statement": "From Figure 9, LL has the highest delta accuracy on the synthetic dataset (≈0.5). Table 3 lists LL’s delta for syn at 0.4936, confirming it remains the highest under 25% client size.", "perturbed_explanation": "Figure 9 shows LL’s delta on syn is only about 0.05, not ≈0.5, and Table 3 reports LL’s syn delta as 0.050099, not 0.4936. Thus LL neither has the highest nor the quoted delta value, contradicting the perturbed claim.", "claim": "From Figure 9, LL has the highest delta accuracy on the synthetic dataset (≈0.5). Table 3 lists LL’s delta for syn at 0.4936, confirming it remains the highest under 25% client size.", "label": false }, { "paperid": "2410.21647v3", "paper_path": "./SciVer/papers/2410.21647v3.json", "claim_type": "sequential", "item1": "6(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.21647v3_figure_6(b).png", "item2_path": "./SciVer/images/2410.21647v3-Table3-1.png", "section": [ "5.2" ], "request_id": 452, "origin_statement": "Table 3 reports RepoCod has 980 repository-level context problems; Figure 6(b) shows GPT-4o’s pass@1 drops below 10% for canonical solutions longer than 232 tokens; thus RepoCod’s heavy repository-level tasks likely include many of these long solutions, posing significant challenges to GPT-4o.", "perturbed_statement": "Table 3 reports RepoCod has 316 repository-level context problems; Figure 6(b) shows GPT-4o’s pass@1 remains above 20% for canonical solutions longer than 232 tokens; thus RepoCod’s tasks are unlikely to challenge GPT-4o.", "perturbed_explanation": "This is incorrect because Table 3 shows RepoCod actually has 980 repository-level tasks (not 316), and Figure 6(b) demonstrates GPT-4o’s pass@1 falls to around 8% for functions over 232 tokens, well below 20%, invalidating both premises.", "claim": "Table 3 reports RepoCod has 316 repository-level context problems; Figure 6(b) shows GPT-4o’s pass@1 remains above 20% for canonical solutions longer than 232 tokens; thus RepoCod’s tasks are unlikely to challenge GPT-4o.", "label": false }, { "paperid": "2409.15568v1", "paper_path": "./SciVer/papers/2409.15568v1.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15568v1_figure_1.png", "item2_path": "./SciVer/images/2409.15568v1-Table1-1.png", "section": [ "5.1" ], "request_id": 454, "origin_statement": "Table 1 reports 19014 shared users in the inter-domain Video dataset; Figure 1(b) shows test users are drawn as a disjoint subset from those shared users. 
Since Video test users number 216, the remaining shared users for training equal 19014 − 216 = 18798.", "perturbed_statement": "Table 1 reports 19014 shared users in the inter-domain Video dataset; Figure 1(b) shows test users are drawn as a disjoint subset from those shared users. Since Video test users number 212, the remaining shared users for training equal 19014 − 212 = 18702.", "perturbed_explanation": "The perturbation wrongly uses 212 as the number of Video test users, but Table 1 actually lists 216 test users for Video; 212 is the test user count for Games, not Video, so the subtraction 19014 − 212 is invalid for Video.", "claim": "Table 1 reports 19014 shared users in the inter-domain Video dataset; Figure 1(b) shows test users are drawn as a disjoint subset from those shared users. Since Video test users number 212, the remaining shared users for training equal 19014 − 212 = 18702.", "label": false }, { "paperid": "2410.12705v2", "paper_path": "./SciVer/papers/2410.12705v2.json", "claim_type": "sequential", "item1": "4", "item2": "9", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.12705v2_figure_4.png", "item2_path": "./SciVer/images/2410.12705v2-Table9-1.png", "section": [ "2.2" ], "request_id": 455, "origin_statement": "India appears with 127 dishes in Figure 4. Table 9 shows Southern Asia has 200 entries. Thus, India alone accounts for about 63.5% of all Southern Asia food entries in the dataset.", "perturbed_statement": "India appears with 127 dishes in Figure 4. Table 9 shows Southern Asia has 170 entries. Thus, India alone accounts for about 75% of all Southern Asia food entries in the dataset.", "perturbed_explanation": "The perturbed statement misreports Table 9: Southern Asia actually has 200 food entries, not 170. Using the correct 200 entries yields 127/200 ≈ 63.5%, so the claim of 75% is incorrect.", "claim": "India appears with 127 dishes in Figure 4. Table 9 shows Southern Asia has 170 entries. Thus, India alone accounts for about 75% of all Southern Asia food entries in the dataset.", "label": false }, { "paperid": "2410.06827v1", "paper_path": "./SciVer/papers/2410.06827v1.json", "claim_type": "sequential", "item1": "9(c)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.06827v1_figure_9(c).png", "item2_path": "./SciVer/images/2410.06827v1-Table3-1.png", "section": [ "3.3" ], "request_id": 462, "origin_statement": "In Fig. 9 the enhancement factor peaks around M_{H^±}≈800 GeV with λ_{HΦ}≈4; Table 3 gives the h H⁺H⁻ coupling as −i v λ_{HΦ}, so at that point |g_{hH⁺H⁻}|≈4 v, matching the maximal cross-section boost.", "perturbed_statement": "In Fig. 9 the enhancement factor peaks around M_{H^±}≈800 GeV with λ_{HΦ}≈4; Table 3 gives the h H⁺H⁻ coupling as −i λ_{HΦ}, so at that point |g_{hH⁺H⁻}|≈4, matching the maximal cross-section boost.", "perturbed_explanation": "Table 3 actually lists the h H⁺H⁻ coupling as −i v λ_{HΦ}, not −i λ_{HΦ}. Omitting the factor of v changes the coupling magnitude from 4 v to 4, contradicting the entry “−i v λ_{HΦ}” in the ‘Couplings’ column of Table 3.", "claim": "In Fig. 9 the enhancement factor peaks around M_{H^±}≈800 GeV with λ_{HΦ}≈4; Table 3 gives the h H⁺H⁻ coupling as −i λ_{HΦ}, so at that point |g_{hH⁺H⁻}|≈4, matching the maximal cross-section boost.", "label": false }, { "paperid": "2409.13828v1", "paper_path": "./SciVer/papers/2409.13828v1.json", "claim_type": "sequential", "item1": "8(b)", "item2": "7", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13828v1_figure_8(b).png", "item2_path": "./SciVer/images/2409.13828v1-Table7-1.png", "section": [ "3" ], "request_id": 463, "origin_statement": "Table 7 shows that for Tiny-ImageNet, PGD uses ε = 0.06 and ε_step = 0.006; Figure 8’s reconstructed PGD image under 0.5 masking still retains visible adversarial perturbations, indicating the masking ratio fails to fully remove noise.", "perturbed_statement": "Table 7 shows that for Tiny-ImageNet, PGD uses ε = 0.06 and ε_step = 0.006; Figure 8’s reconstructed PGD image under 0.5 masking removes all adversarial perturbations, indicating the masking ratio fully eliminates noise.", "perturbed_explanation": "The perturbed claim is false because Figure 8’s reconstructed PGD image clearly still displays adversarial artifacts (visible noise) under a 0.5 masking ratio, rather than being fully cleaned of perturbations.", "claim": "Table 7 shows that for Tiny-ImageNet, PGD uses ε = 0.06 and ε_step = 0.006; Figure 8’s reconstructed PGD image under 0.5 masking removes all adversarial perturbations, indicating the masking ratio fully eliminates noise.", "label": false }, { "paperid": "2409.20553v2", "paper_path": "./SciVer/papers/2409.20553v2.json", "claim_type": "sequential", "item1": "2", "item2": "10", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.20553v2_figure_2.png", "item2_path": "./SciVer/images/2409.20553v2_figure_10.png", "section": [ "4.1" ], "request_id": 464, "origin_statement": "At the highest skill pairing (2000 vs 2000), Maia-2 achieves 55.7% move prediction accuracy (Figure 2), and according to Figure 10, for optimal moves (win-rate loss ≈ 0), its accuracy rises to about 65%, implying a 9.3-point improvement on top-level high-quality moves.", "perturbed_statement": "At the highest skill pairing (2000 vs 2000), Maia-2 achieves 55.7% move prediction accuracy (Figure 2), and according to Figure 10, for optimal moves (win-rate loss ≈ 0), its accuracy rises to about 60%, implying only a 4.3-point improvement on top-level high-quality moves.", "perturbed_explanation": "Figure 10 actually shows Maia-2 scoring around 65% accuracy at win-rate loss ≈ 0. The claim’s use of 60% understates the true performance, making the stated 4.3-point gain inconsistent with the actual ~9.3-point increase.", "claim": "At the highest skill pairing (2000 vs 2000), Maia-2 achieves 55.7% move prediction accuracy (Figure 2), and according to Figure 10, for optimal moves (win-rate loss ≈ 0), its accuracy rises to about 60%, implying only a 4.3-point improvement on top-level high-quality moves.", "label": false }, { "paperid": "2409.10756v1", "paper_path": "./SciVer/papers/2409.10756v1.json", "claim_type": "sequential", "item1": "2", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.10756v1_figure_2.png", "item2_path": "./SciVer/images/2409.10756v1-Table5-1.png", "section": [ "4.1.2" ], "request_id": 469, "origin_statement": "Figure 2 shows Llama3-70b achieves about 68% accuracy on SVD4 (patched code detection).
Table 5 reports its recall for SVD3,4 combined is only 48.53%. This gap shows that strong patched-code accuracy doesn’t translate into high overall recall on vulnerability detection.", "perturbed_statement": "Figure 2 shows Llama3-70b achieves about 68% accuracy on SVD4 (patched code detection). Table 5 reports its recall for SVD3,4 combined is 65.53%. This near-parity implies that high patched-code accuracy directly translates into high overall recall.", "perturbed_explanation": "The perturbed statement misstates Llama3-70b’s combined SVD3,4 recall. Table 5 actually lists its recall as 48.53%, not 65.53%. Because the recall is substantially lower than the claimed 65.53%, the conclusion of near-parity is incorrect.", "claim": "Figure 2 shows Llama3-70b achieves about 68% accuracy on SVD4 (patched code detection). Table 5 reports its recall for SVD3,4 combined is 65.53%. This near-parity implies that high patched-code accuracy directly translates into high overall recall.", "label": false }, { "paperid": "2410.22269v1", "paper_path": "./SciVer/papers/2410.22269v1.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22269v1_figure_3.png", "item2_path": "./SciVer/images/2410.22269v1-Table1-1.png", "section": [ "4" ], "request_id": 470, "origin_statement": "Figure 3 shows the Fourier head yields a smoothness of 0.0312 on the GMM dataset. Table 1 confirms the Fourier smoothness is 0.034 ± 0.010 versus Linear’s 0.078 ± 0.041. Thus the Fourier head is over twice as smooth as the linear head on GMM.", "perturbed_statement": "Figure 3 shows the Fourier head yields a smoothness of 0.0778 on the GMM dataset. Table 1 confirms the Fourier smoothness is 0.057 ± 0.011 versus Linear’s 0.078 ± 0.041, implying the Fourier head offers only a marginal smoothness improvement over the linear head on GMM.", "perturbed_explanation": "The perturbation misreads the Gaussian dataset’s Fourier smoothness (0.0778 in Figure 3 top right) as applying to GMM. It also cites 0.057 ± 0.011 from the Gaussian row in Table 1 rather than the correct 0.034 ± 0.010 for GMM. Consequently, the claimed marginal improvement on GMM is false.", "claim": "Figure 3 shows the Fourier head yields a smoothness of 0.0778 on the GMM dataset. Table 1 confirms the Fourier smoothness is 0.057 ± 0.011 versus Linear’s 0.078 ± 0.041, implying the Fourier head offers only a marginal smoothness improvement over the linear head on GMM.", "label": false }, { "paperid": "2409.12680v2", "paper_path": "./SciVer/papers/2409.12680v2.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.12680v2-Table1-1.png", "item2_path": "./SciVer/images/2409.12680v2-Table3-1.png", "section": [ "4.2" ], "request_id": 471, "origin_statement": "STPG improves mIoU by 8.03% over the supervised baseline on 1/16 Cityscapes (60.43→68.46) and by 17.97% on 1/8 PASCAL VOC (54.32→72.29), indicating its relative gain more than doubles on PASCAL.", "perturbed_statement": "STPG improves mIoU by 8.03% over the supervised baseline on 1/16 Cityscapes (60.43→68.46) and by 15.97% on 1/8 PASCAL VOC (56.32→72.29), indicating its gain on PASCAL remains smaller than on Cityscapes.", "perturbed_explanation": "The perturbed statement misreports the PASCAL VOC supervised baseline as 56.32 and thus the improvement as 15.97%. 
In Table 3, the actual baseline at 1/8 is 54.32 and STPG reaches 72.29, so the true gain is 17.97%, not 15.97%.", "claim": "STPG improves mIoU by 8.03% over the supervised baseline on 1/16 Cityscapes (60.43→68.46) and by 15.97% on 1/8 PASCAL VOC (56.32→72.29), indicating its gain on PASCAL remains smaller than on Cityscapes.", "label": false }, { "paperid": "2410.03415v1", "paper_path": "./SciVer/papers/2410.03415v1.json", "claim_type": "sequential", "item1": "4(b)", "item2": "4(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.03415v1_figure_4(b).png", "item2_path": "./SciVer/images/2410.03415v1_figure_4(c).png", "section": [ "5.3" ], "request_id": 478, "origin_statement": "From the Llama-3-8B-Ins heatmap (Figure 4), the highest refusal impact of the true refusal vector occurs around layer 14 (token position −1), and when λ increases to 1, the most effective false refusal vector shifts deeper to around layer 17.", "perturbed_statement": "From the Llama-3-8B-Ins heatmap (Figure 4), the highest refusal impact of the true refusal vector occurs around layer 16 (token position −1), and when λ increases to 1, the most effective false refusal vector shifts deeper to around layer 19.", "perturbed_explanation": "The perturbed claim is incorrect because the true refusal vector in the top row of the Llama-3-8B-Ins heatmap actually peaks at layer 14 (not 16), and the false refusal vector at λ=1 peaks at layer 17 (not 19), as shown by the darkest purple region around those actual layers.", "claim": "From the Llama-3-8B-Ins heatmap (Figure 4), the highest refusal impact of the true refusal vector occurs around layer 16 (token position −1), and when λ increases to 1, the most effective false refusal vector shifts deeper to around layer 19.", "label": false }, { "paperid": "2409.16718v2", "paper_path": "./SciVer/papers/2409.16718v2.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.16718v2_figure_4.png", "item2_path": "./SciVer/images/2409.16718v2_figure_5.png", "section": [ "4.3" ], "request_id": 479, "origin_statement": "Fig.4a shows the bias gradient sum is highest at text layer 0 and steadily decreases. Leveraging this to penalize low-level changes via KD loss (Fig.4b) preserves essential features, correlating with Fig.5b’s clearer CLIPFit clusters over zero-shot, evidencing improved class separation.", "perturbed_statement": "Fig.4a shows the bias gradient sum peaks at layer 3 and declines thereafter. Leveraging this to penalize low-level changes via MSE loss (Fig.4b) preserves essential features, correlating with Fig.5a’s clearer zero-shot CLIP clusters over CLIPFit, evidencing improved class separation.", "perturbed_explanation": "This is incorrect because Fig.4a actually peaks at layer 0, not layer 3. Moreover, MSE loss (green curve in Fig.4b) reduces low-level bias changes more than KD loss, and Fig.5b (not Fig.5a) shows CLIPFit clusters are clearer than zero-shot CLIP.", "claim": "Fig.4a shows the bias gradient sum peaks at layer 3 and declines thereafter. 
Leveraging this to penalize low-level changes via MSE loss (Fig.4b) preserves essential features, correlating with Fig.5a’s clearer zero-shot CLIP clusters over CLIPFit, evidencing improved class separation.", "label": false }, { "paperid": "2411.08124v1", "paper_path": "./SciVer/papers/2411.08124v1.json", "claim_type": "sequential", "item1": "2", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.08124v1_figure_2.png", "item2_path": "./SciVer/images/2411.08124v1_figure_4.png", "section": [ "2.4" ], "request_id": 480, "origin_statement": "At logU=–2 and n≈10^3 cm^–3, Fig. 2 shows SB99 quenched SF yields a thinner H II region than BPASS. Fig. 4’s C III]/[C II] contours then indicate the SB99 model predicts a ~0.3 dex lower C III]/[C II] ratio than BPASS continuous SF.", "perturbed_statement": "At logU=–2 and n≈10^3 cm^–3, Fig. 2 shows SB99 quenched SF yields a thicker H II region than BPASS. Consequently, Fig. 4’s C III]/[C II] contours indicate the SB99 model predicts a ~0.3 dex higher C III]/[C II] ratio than BPASS continuous SF.", "perturbed_explanation": "The perturbation is wrong because Fig. 2 (right panel) shows the SB99 quenched (red dashed) H II region ends at a lower N_H than the BPASS continuous (blue solid), i.e. SB99 is thinner, not thicker. Moreover, the text and Fig. 4 state SB99 quenched SF predicts systematically lower, not higher, C III]/[C II] ratios.", "claim": "At logU=–2 and n≈10^3 cm^–3, Fig. 2 shows SB99 quenched SF yields a thicker H II region than BPASS. Consequently, Fig. 4’s C III]/[C II] contours indicate the SB99 model predicts a ~0.3 dex higher C III]/[C II] ratio than BPASS continuous SF.", "label": false }, { "paperid": "2411.07565v1", "paper_path": "./SciVer/papers/2411.07565v1.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07565v1_figure_2.png", "item2_path": "./SciVer/images/2411.07565v1-Table2-1.png", "section": [ "4.1" ], "request_id": 487, "origin_statement": "cc-pVTZ has 120 spin orbitals (Table 2), so N_H<3×10^4. In Figure 2 at 16 threads, its Hc update takes ≈1000 s; by contrast, cc-pVQZ (220 spin orbitals, N_H<8×10^4) takes ≈2000 s, showing larger N_H increases update time.", "perturbed_statement": "Since cc-pVTZ has 220 spin orbitals (Table 2) and thus N_H<8×10^4, its Hc update at 16 threads takes ≈2000 s, double cc-pVQZ’s ≈1000 s, implying higher N_H speeds up smaller bases.", "perturbed_explanation": "The perturbed statement incorrectly assigns cc-pVTZ 220 spin orbitals and N_H<8×10^4, whereas Table 2 lists 120 orbitals and N_H<3×10^4. Also, Figure 2 shows cc-pVTZ’s Hc update time at 16 threads is ≈1000 s and cc-pVQZ’s is ≈2000 s, not vice versa.", "claim": "Since cc-pVTZ has 220 spin orbitals (Table 2) and thus N_H<8×10^4, its Hc update at 16 threads takes ≈2000 s, double cc-pVQZ’s ≈1000 s, implying higher N_H speeds up smaller bases.", "label": false }, { "paperid": "2410.18462v1", "paper_path": "./SciVer/papers/2410.18462v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.18462v1_figure_3.png", "item2_path": "./SciVer/images/2410.18462v1_figure_4.png", "section": [ "2.1.2" ], "request_id": 488, "origin_statement": "At a steering angle of ±0.2 rad and 200 km/h the steering penalty is about −0.04 (Fig 3). Fig 4 shows the segment penalty reaches −0.055 at 1000 s. 
Thus for very long segment times the time-based penalty exceeds the maximum steering penalty.", "perturbed_statement": "At a steering angle of ±0.2 rad and 200 km/h the steering penalty is about −0.07. Since Fig 4 shows the segment penalty peaks at only −0.03 at 1000 s, the steering penalty actually exceeds the time-based penalty.", "perturbed_explanation": "The perturbed claim misreads Fig 3: the maximum steering penalty at ±0.2 rad and high speed is about −0.04, not −0.07. It also underestimates the segment penalty from Fig 4, which actually reaches around −0.055 at 1000 s, not −0.03.", "claim": "At a steering angle of ±0.2 rad and 200 km/h the steering penalty is about −0.07. Since Fig 4 shows the segment penalty peaks at only −0.03 at 1000 s, the steering penalty actually exceeds the time-based penalty.", "label": false }, { "paperid": "2410.08320v1", "paper_path": "./SciVer/papers/2410.08320v1.json", "claim_type": "sequential", "item1": "3", "item2": "12", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.08320v1-Table3-1.png", "item2_path": "./SciVer/images/2410.08320v1-Table12-1.png", "section": [ "4.2" ], "request_id": 490, "origin_statement": "From Figure 2(b) (PubMed Corpus histogram) showing synthetic and true query score distributions closely align, Table 8 reveals that on the Textbooks corpus, the GoF Energy test’s TPR on synthetic queries differs by less than 2% from true IK queries.", "perturbed_statement": "Figure 2(b) shows a marked shift between synthetic and true PubMed query score distributions, so Table 8 reveals that the GoF Energy test’s TPR on synthetic Textbooks queries is 15% higher than with true IK queries.", "perturbed_explanation": "This is incorrect because Figure 2(b) actually shows that synthetic and true PubMed query distributions closely match, not shift apart. Moreover, Table 8 reports only a negligible TPR difference (under 2%), not a 15% increase.", "claim": "Figure 2(b) shows a marked shift between synthetic and true PubMed query score distributions, so Table 8 reveals that the GoF Energy test’s TPR on synthetic Textbooks queries is 15% higher than with true IK queries.", "label": false }, { "paperid": "2411.04200v1", "paper_path": "./SciVer/papers/2411.04200v1.json", "claim_type": "sequential", "item1": "8", "item2": "9", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.04200v1-Table8-1.png", "item2_path": "./SciVer/images/2411.04200v1-Table9-1.png", "section": [ "8" ], "request_id": 491, "origin_statement": "Category2 accounts for 70% of customers (Table 8). Regular CDPs have weekly capacity b_d=0 (Table 9). Thus, regular lockers cannot serve the majority Category 2 demand on their own.", "perturbed_statement": "Category2 accounts for 70% of customers (Table 8). Regular CDPs have weekly capacity b_d=500 (Table 9). Thus, a single regular locker can serve the majority Category 2 demand each week.", "perturbed_explanation": "The perturbed statement misreads Table 9: b_d (weekly capacity) for regular CDPs is 0, not 500. The value 500 in the table refers to f_d (fixed cost), so the premise about capacity is incorrect, making the final claim false.", "claim": "Category2 accounts for 70% of customers (Table 8). Regular CDPs have weekly capacity b_d=500 (Table 9). 
Thus, a single regular locker can serve the majority Category 2 demand each week.", "label": false }, { "paperid": "2411.13694v1", "paper_path": "./SciVer/papers/2411.13694v1.json", "claim_type": "sequential", "item1": "10(b)", "item2": "10(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.13694v1_figure_10(b).png", "item2_path": "./SciVer/images/2411.13694v1_figure_10(d).png", "section": [ "5.1" ], "request_id": 492, "origin_statement": "The coordinator set the participant count slider to 3 in the setup screen (image b), and the following exchange screen (image d) displays “Group of 3 people,” confirming the selected group size was correctly applied to the pairing session.", "perturbed_statement": "The coordinator set the participant count slider to 8 in the setup screen (image b), and the following exchange screen (image d) displays “Group of 8 people,” confirming the selected group size was correctly applied to the pairing session.", "perturbed_explanation": "This statement is incorrect because image (b) clearly shows the slider positioned at 3, not 8, and image (d) explicitly reads “Group of 3 people,” not “Group of 8 people,” directly contradicting the claim.", "claim": "The coordinator set the participant count slider to 8 in the setup screen (image b), and the following exchange screen (image d) displays “Group of 8 people,” confirming the selected group size was correctly applied to the pairing session.", "label": false }, { "paperid": "2410.06971v2", "paper_path": "./SciVer/papers/2410.06971v2.json", "claim_type": "sequential", "item1": "3(b)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.06971v2_figure_3(b).png", "item2_path": "./SciVer/images/2410.06971v2-Table3-1.png", "section": [ "4.1" ], "request_id": 493, "origin_statement": "From Table 3 (col. 1), each unit increase in industry complexity raises intra-firm wage entropy by 0.321. Figure 3 (right panel) shows average complexity increases by about 0.02 when log population rises from 10 to 14. Thus, wage entropy increases by roughly 0.0064 across that range.", "perturbed_statement": "Table 3 (col. 1) shows a wage entropy coefficient of 0.5 for industry complexity, and Figure 3 indicates complexity rises by 0.05 between log population 10 and 14. Hence, wage entropy increases by about 0.025 across that population increase.", "perturbed_explanation": "The perturbed statement misreads two key figures: Table 3’s complexity coefficient in column (1) is actually 0.321, not 0.5 (second image), and Figure 3 (right panel) shows a complexity rise of about 0.02 between log population 10 and 14, not 0.05 (first image). These errors invalidate the 0.025 estimate.", "claim": "Table 3 (col. 1) shows a wage entropy coefficient of 0.5 for industry complexity, and Figure 3 indicates complexity rises by 0.05 between log population 10 and 14. 
Hence, wage entropy increases by about 0.025 across that population increase.", "label": false }, { "paperid": "2410.23494v1", "paper_path": "./SciVer/papers/2410.23494v1.json", "claim_type": "sequential", "item1": "11(g)", "item2": "11(i)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23494v1_figure_11(g).png", "item2_path": "./SciVer/images/2410.23494v1_figure_11(i).png", "section": [ "4" ], "request_id": 494, "origin_statement": "The lighting factor G increases from –0.5 at the leftmost corrupted image to +0.5 at the rightmost; the leftmost frame shows the densest speckle noise, so G=–0.5 causes the highest sensor noise under the Natural domain SCM.", "perturbed_statement": "The lighting factor G increases from –0.5 at the leftmost corrupted image to +0.5 at the rightmost; the rightmost frame shows the densest speckle noise, so G=+0.5 causes the highest sensor noise under the Natural domain SCM.", "perturbed_explanation": "The perturbation wrongly claims the rightmost image has the densest noise, but visually the leftmost image exhibits far more speckle noise. Therefore G=+0.5 does not correspond to the highest noise—instead G=–0.5 does, contradicting the perturbed statement.", "claim": "The lighting factor G increases from –0.5 at the leftmost corrupted image to +0.5 at the rightmost; the rightmost frame shows the densest speckle noise, so G=+0.5 causes the highest sensor noise under the Natural domain SCM.", "label": false }, { "paperid": "2410.19779v1", "paper_path": "./SciVer/papers/2410.19779v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.19779v1_figure_4.png", "item2_path": "./SciVer/images/2410.19779v1_figure_5.png", "section": [ "3.3" ], "request_id": 495, "origin_statement": "Figure4(b) shows the Giant model achieves about 63.7% accuracy on MI; Figure5(b) shows that with 1B tokens MI only reaches 60.4%; thus scaling model size yields a larger MI gain than scaling data volume.", "perturbed_statement": "Figure4(b) shows the Giant model achieves only about 58.0% accuracy on MI; Figure5(b) shows that with 1B tokens MI reaches 60.4%; thus scaling model size yields a smaller MI gain than scaling data volume.", "perturbed_explanation": "In Figure4(b), the Giant model’s MI accuracy is actually around 63.7%, not 58.0%. With correct values, model scaling boosts MI from ~59.3% (Base) to ~63.7% (+4.4%), which exceeds the data scaling gain of +3.9% (56.5% to 60.4%). The perturbed premise thus contradicts the plotted data and invalidates its conclusion.", "claim": "Figure4(b) shows the Giant model achieves only about 58.0% accuracy on MI; Figure5(b) shows that with 1B tokens MI reaches 60.4%; thus scaling model size yields a smaller MI gain than scaling data volume.", "label": false }, { "paperid": "2410.21100v1", "paper_path": "./SciVer/papers/2410.21100v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21100v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.21100v1_figure_2(c).png", "section": [ "4.3" ], "request_id": 496, "origin_statement": "On FF25 at ν=0% the 1/N portfolio accumulates ∼350 wealth, while mSSRM-PGA(m=20) reaches ∼620, a gap of ∼270. On FF32, 1/N yields ∼430 and mSSRM-PGA(m=20) reaches ∼1000, widening the gap to ∼570. 
Thus, mSSRM-PGA’s relative gain is greater on FF32.", "perturbed_statement": "On FF25 at ν=0% the 1/N portfolio accumulates ∼350 wealth, while mSSRM-PGA(m=20) reaches ∼620, a gap of ∼270. On FF32, 1/N yields ∼430 and mSSRM-PGA(m=20) reaches ∼650, narrowing the gap to ∼220. Thus, mSSRM-PGA’s relative gain is smaller on FF32.", "perturbed_explanation": "The perturbation misstates the mSSRM-PGA(m=20) wealth on FF32 at ν=0% as ∼650, but the actual red curve in Figure 2(c) shows it reaches about ∼1000. Therefore the gap is ∼570 (not ∼220), so the gain on FF32 remains larger, contradicting the perturbed claim.", "claim": "On FF25 at ν=0% the 1/N portfolio accumulates ∼350 wealth, while mSSRM-PGA(m=20) reaches ∼620, a gap of ∼270. On FF32, 1/N yields ∼430 and mSSRM-PGA(m=20) reaches ∼650, narrowing the gap to ∼220. Thus, mSSRM-PGA’s relative gain is smaller on FF32.", "label": false }, { "paperid": "2409.17561v1", "paper_path": "./SciVer/papers/2409.17561v1.json", "claim_type": "sequential", "item1": "8", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17561v1_figure_8.png", "item2_path": "./SciVer/images/2409.17561v1_figure_9.png", "section": [ "5.1.1" ], "request_id": 498, "origin_statement": "Figure 8 shows CodeLlama misunderstanding prompts by outputting textual descriptions instead of executable code. Similarly, Figure 9 demonstrates GPT-4 inserting raw template placeholders like , indicating persistent syntax errors despite increased neural parameters.", "perturbed_statement": "Figure 8 shows GPT-3.5 misunderstanding prompts by outputting textual descriptions instead of executable code. Similarly, Figure 9 demonstrates CodeLlama inserting raw template placeholders like , indicating persistent syntax errors despite increased neural parameters.", "perturbed_explanation": "This perturbed statement is wrong because Figure 8 illustrates CodeLlama’s output (not GPT-3.5) producing descriptive suggestions, and Figure 9 exemplifies template placeholders generated by GPT-3.5/GPT-4 (not CodeLlama). The model attributions contradict the captions and context.", "claim": "Figure 8 shows GPT-3.5 misunderstanding prompts by outputting textual descriptions instead of executable code. Similarly, Figure 9 demonstrates CodeLlama inserting raw template placeholders like , indicating persistent syntax errors despite increased neural parameters.", "label": false }, { "paperid": "2409.08732v1", "paper_path": "./SciVer/papers/2409.08732v1.json", "claim_type": "sequential", "item1": "1", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.08732v1_figure_1.png", "item2_path": "./SciVer/images/2409.08732v1-Table5-1.png", "section": [ "4.4" ], "request_id": 505, "origin_statement": "In 2020Q2, NCDENow predicted a -2.2% GDP drop vs DFM's -1.2%, closer to the ground-truth -3.2%; Table 5 shows NCDENow’s MSE is 0.9900 vs DFM’s 4.6265, confirming NCDENow better captures the sudden drop.", "perturbed_statement": "In 2020Q2, NCDENow predicted a -1.2% GDP drop vs DFM's -2.2%, closer to the ground-truth -3.2%; Table 5 shows NCDENow’s MSE is 4.6265 vs DFM’s 0.9900, confirming NCDENow better captures the sudden drop.", "perturbed_explanation": "The figure actually shows NCDENow’s 2020Q2 prediction is -2.2%, not -1.2%, and Table 5 reports NCDENow’s MSE as 0.9900, not 4.6265. 
These two errors invalidate the perturbed claim.", "claim": "In 2020Q2, NCDENow predicted a -1.2% GDP drop vs DFM's -2.2%, closer to the ground-truth -3.2%; Table 5 shows NCDENow’s MSE is 4.6265 vs DFM’s 0.9900, confirming NCDENow better captures the sudden drop.", "label": false }, { "paperid": "2410.05341v2", "paper_path": "./SciVer/papers/2410.05341v2.json", "claim_type": "sequential", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.05341v2_figure_5.png", "item2_path": "./SciVer/images/2410.05341v2-Table1-1.png", "section": [ "3.4" ], "request_id": 506, "origin_statement": "NeuroBOLT’s median reconstruction correlation for the global signal (R = 0.615 in Figure 5) exceeds its inter-subject average correlation of 0.564 for the same signal (Table 1), indicating stronger performance in median-case reconstructions than on average.", "perturbed_statement": "NeuroBOLT’s median reconstruction correlation for the global signal (R = 0.615 in Figure 5) exceeds its inter-subject average correlation of 0.620 for the same signal (Table 1), indicating stronger performance in median-case reconstructions than on average.", "perturbed_explanation": "The perturbed statement misreports the inter-subject average correlation for the global signal as 0.620, whereas Table 1 actually lists it as 0.564. Hence 0.615 does not exceed the true average of 0.564, making the claim false.", "claim": "NeuroBOLT’s median reconstruction correlation for the global signal (R = 0.615 in Figure 5) exceeds its inter-subject average correlation of 0.620 for the same signal (Table 1), indicating stronger performance in median-case reconstructions than on average.", "label": false }, { "paperid": "2410.07416v1", "paper_path": "./SciVer/papers/2410.07416v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.07416v1_figure_3(b).png", "item2_path": "./SciVer/images/2410.07416v1_figure_4.png", "section": [ "4.2" ], "request_id": 507, "origin_statement": "Figure 6 shows that when \\hat\\sigma^{25:40}_2 exceeds 4 M_⊙, the median \\hat\\mu^{25:40}_2 increases from 31.0 to 32.0 M_⊙. Figure 4 shows that m_max and m99 have similarly shaped H₀–mass contours, suggesting m_max correlates with H₀ almost as strongly as \\hat\\mu^{25:40}_2.", "perturbed_statement": "Figure 6 shows that when \\hat\\sigma^{25:40}_2 exceeds 4 M_⊙, the median \\hat\\mu^{25:40}_2 decreases from 31.0 to 30.0 M_⊙. Figure 4 shows that m_max has much tighter H₀–mass contours than m99, implying m_max correlates with H₀ substantially better than \\hat\\mu^{25:40}_2.", "perturbed_explanation": "The perturbed claim is incorrect because Figure 6 actually reports an increase in median \\hat\\mu^{25:40}_2 from 31.0 to 32.0 M_⊙ (not a decrease to 30.0 M_⊙). Additionally, Figure 4 displays comparably sized credible regions for m_max and m99, so m_max does not have significantly tighter H₀–mass contours or a substantially stronger correlation.", "claim": "Figure 6 shows that when \\hat\\sigma^{25:40}_2 exceeds 4 M_⊙, the median \\hat\\mu^{25:40}_2 decreases from 31.0 to 30.0 M_⊙. 
Figure 4 shows that m_max has much tighter H₀–mass contours than m99, implying m_max correlates with H₀ substantially better than \\hat\\mu^{25:40}_2.", "label": false }, { "paperid": "2410.08642v1", "paper_path": "./SciVer/papers/2410.08642v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.08642v1_figure_5.png", "item2_path": "./SciVer/images/2410.08642v1_figure_6.png", "section": [ "5.2" ], "request_id": 509, "origin_statement": "Figure 5 depicts multiple rocket trails over Gaza, signaling heavy bombardment; Figure 6 then repurposes such streak visuals in memes by labeling them 'US Taxpayer Dollars,' satirizing U.S. financial support in the Israel-Ukraine conflict.", "perturbed_statement": "Figure 5 depicts multiple rocket trails over Gaza, signaling heavy bombardment; Figure 6 then repurposes such streak visuals in memes by labeling them 'Russian Taxpayer Dollars,' satirizing Russian financial support in the Israel-Ukraine conflict.", "perturbed_explanation": "In Figure 6 the overlaid text on the streaks reads “US Taxpayer Dollars,” not “Russian Taxpayer Dollars.” The perturbed claim mislabels the visible caption and thus contradicts the actual meme text.", "claim": "Figure 5 depicts multiple rocket trails over Gaza, signaling heavy bombardment; Figure 6 then repurposes such streak visuals in memes by labeling them 'Russian Taxpayer Dollars,' satirizing Russian financial support in the Israel-Ukraine conflict.", "label": false }, { "paperid": "2410.23426v1", "paper_path": "./SciVer/papers/2410.23426v1.json", "claim_type": "sequential", "item1": "7", "item2": "9", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23426v1_figure_7.png", "item2_path": "./SciVer/images/2410.23426v1_figure_9.png", "section": [ "5" ], "request_id": 510, "origin_statement": "Mistral-7B scored about 45% self-report satisfaction in Organizational Behavior (Figure 7), and Figure 9 shows it has a 36% inconsistency rate in that subject, illustrating its low self-reported satisfaction corresponds with high inconsistency.", "perturbed_statement": "Mistral-7B scored about 60% self-report satisfaction in Organizational Behavior, and Figure 9 shows it has only a 26% inconsistency rate in that subject, illustrating its moderate satisfaction corresponds with low inconsistency.", "perturbed_explanation": "Figure 7 actually shows Mistral-7B’s self-report satisfaction in Organizational Behavior is ~45%, not 60%. 
Figure 9 indicates its inconsistency rate in that subject is 36%, not 26%, so both values in the perturbed statement contradict the original data.", "claim": "Mistral-7B scored about 60% self-report satisfaction in Organizational Behavior, and Figure 9 shows it has only a 26% inconsistency rate in that subject, illustrating its moderate satisfaction corresponds with low inconsistency.", "label": false }, { "paperid": "2411.09127v1", "paper_path": "./SciVer/papers/2411.09127v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.09127v1_figure_3(b).png", "item2_path": "./SciVer/images/2411.09127v1_figure_3(c).png", "section": [ "6.1" ], "request_id": 512, "origin_statement": "The architecture with logγ=2.5e−2 achieves a 41.47% pPR, which is 16.14 percentage points higher than the 25.33% pPR at logγ=1.5e−2, indicating that increasing γ amplifies parameter pruning more than it does flop pruning (fPR increased only from 48.46% to 50.27%).", "perturbed_statement": "The architecture with logγ=2.5e−2 achieves a 41.47% pPR, which is just over 4 percentage points higher than the 25.33% pPR at logγ=1.5e−2, indicating that increasing γ has only a modest effect on parameter pruning compared to flop pruning.", "perturbed_explanation": "This is incorrect because the actual difference in pPR between 41.47% (image two) and 25.33% (image one) is 16.14 percentage points, not about 4 points as claimed.", "claim": "The architecture with logγ=2.5e−2 achieves a 41.47% pPR, which is just over 4 percentage points higher than the 25.33% pPR at logγ=1.5e−2, indicating that increasing γ has only a modest effect on parameter pruning compared to flop pruning.", "label": false }, { "paperid": "2411.01603v1", "paper_path": "./SciVer/papers/2411.01603v1.json", "claim_type": "sequential", "item1": "5", "item2": "6", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.01603v1-Table5-1.png", "item2_path": "./SciVer/images/2411.01603v1-Table6-1.png", "section": [ "6.3" ], "request_id": 513, "origin_statement": "UAV2’s search phase lasts 35.00 s (Table 5); Table 6 shows kp=0.5, ki=0, kd=0 for search, so UAV2’s controller during search relied solely on a proportional gain of 0.5.", "perturbed_statement": "UAV2’s search phase lasts 35.00 s (Table 5); Table 6 shows kp=0.5, ki=0.001, kd=0 for search, so UAV2’s controller during search employed an integral gain of 0.001.", "perturbed_explanation": "Table 6 clearly lists ki=0 for the search process, not 0.001, so the claim that an integral gain of 0.001 was used contradicts the actual parameter in the table.", "claim": "UAV2’s search phase lasts 35.00 s (Table 5); Table 6 shows kp=0.5, ki=0.001, kd=0 for search, so UAV2’s controller during search employed an integral gain of 0.001.", "label": false }, { "paperid": "2411.14254v1", "paper_path": "./SciVer/papers/2411.14254v1.json", "claim_type": "sequential", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14254v1_figure_7.png", "item2_path": "./SciVer/images/2411.14254v1_figure_8.png", "section": [ "4.5.2" ], "request_id": 519, "origin_statement": "Figure 7 shows three orange misclassified cells, and Figure 8 shows one; thus, the model misclassified the Mobile Computing course three times more than the Calculus course.", "perturbed_statement": "Figure 7 shows four orange misclassified cells, and Figure 8 shows one; thus, the model misclassified 
the Mobile Computing course four times more than the Calculus course.", "perturbed_explanation": "The perturbed statement incorrectly asserts that Figure 7 has four orange misclassified cells. The provided context and Figure 7 itself highlight only three misclassified (orange) cells, so the basis for claiming four misclassifications is false.", "claim": "Figure 7 shows four orange misclassified cells, and Figure 8 shows one; thus, the model misclassified the Mobile Computing course four times more than the Calculus course.", "label": false }, { "paperid": "2409.17531v2", "paper_path": "./SciVer/papers/2409.17531v2.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17531v2_figure_1.png", "item2_path": "./SciVer/images/2409.17531v2_figure_2.png", "section": [ "1" ], "request_id": 520, "origin_statement": "SimVG’s multi-modality encoder (Fig.1d) directly fuses object, image, and text tokens. From Fig.2, this design yields a 6.28% relative improvement on RefCOCOg (average 8.5-word expressions), compared to only 0.15% improvement on Flickr30K (1.68-word expressions).", "perturbed_statement": "SimVG’s transformer-based decoder (Fig.1c) directly fuses object, image, and text tokens. From Fig.2, this design yields a 4.46% relative improvement on RefCOCOg (average 8.5-word expressions).", "perturbed_explanation": "This is incorrect because Fig.1d – not the transformer-based decoder in Fig.1c – shows SimVG’s multi-modality encoder is used for fusion. Additionally, Fig.2 reports a 6.28% improvement for RefCOCOg, whereas the 4.46% bar corresponds to the ReferIt dataset, not RefCOCOg.", "claim": "SimVG’s transformer-based decoder (Fig.1c) directly fuses object, image, and text tokens. From Fig.2, this design yields a 4.46% relative improvement on RefCOCOg (average 8.5-word expressions).", "label": false }, { "paperid": "2409.11143v1", "paper_path": "./SciVer/papers/2409.11143v1.json", "claim_type": "sequential", "item1": "7(a)", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11143v1_figure_7(a).png", "item2_path": "./SciVer/images/2409.11143v1-Table5-1.png", "section": [ "5.2" ], "request_id": 521, "origin_statement": "On SST-2 with one in-context example, Semformer(0.1) improves accuracy over TF by 11.6 points; consequently, on XSum summarization Semformer outperforms TF by 0.61 in ROUGE-1.", "perturbed_statement": "On SST-2 with one in-context example, Semformer(0.1) improves accuracy over TF by 14.5 points; consequently, on XSum summarization Semformer outperforms TF by 1.20 in ROUGE-1.", "perturbed_explanation": "The perturbation is incorrect because Figure 7 shows Semformer(0.1) accuracy is 66.1 versus TF’s 54.5, a gap of 11.6 points, not 14.5. Table 5 shows the ROUGE-1 gain of Semformer over TF on XSum is 36.47−35.86=0.61, not 1.20.", "claim": "On SST-2 with one in-context example, Semformer(0.1) improves accuracy over TF by 14.5 points; consequently, on XSum summarization Semformer outperforms TF by 1.20 in ROUGE-1.", "label": false }, { "paperid": "2411.17251v1", "paper_path": "./SciVer/papers/2411.17251v1.json", "claim_type": "sequential", "item1": "7", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.17251v1_figure_7.png", "item2_path": "./SciVer/images/2411.17251v1_figure_8.png", "section": [ "4.4.3" ], "request_id": 523, "origin_statement": "Figure 7 shows Cng mAP@0.5 of 0.977. 
Figure 8 indicates 286 true positive Cng detections and only 4 misclassifications. Therefore, the implied precision of 286/(286+4)=0.986 corroborates the high mAP value for the Cng class.", "perturbed_statement": "Figure 7 shows Cng mAP@0.5 of 0.950. Figure 8 indicates 286 true positive Cng detections and only 4 misclassifications. Therefore, the implied precision of 286/(286+4)=0.986 contradicts the low mAP value for the Cng class.", "perturbed_explanation": "The legend in Figure 7 actually lists Cng mAP@0.5 as 0.977, not 0.950 (0.950 corresponds to Covered-Van). This makes the first premise false and invalidates the perturbed claim.", "claim": "Figure 7 shows Cng mAP@0.5 of 0.950. Figure 8 indicates 286 true positive Cng detections and only 4 misclassifications. Therefore, the implied precision of 286/(286+4)=0.986 contradicts the low mAP value for the Cng class.", "label": false }, { "paperid": "2411.15524v1", "paper_path": "./SciVer/papers/2411.15524v1.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15524v1_figure_2.png", "item2_path": "./SciVer/images/2411.15524v1-Table2-1.png", "section": [ "3" ], "request_id": 528, "origin_statement": "The IRAS 02438+2122 spectrum in Figure 2 shows the strongest maser spike near 7000 km/s (~35 mJy); Table 2 confirms its peak flux density of 35.4 mJy and an SNR of 15.4, making it the highest‐SNR detection among the four new megamasers.", "perturbed_statement": "The CGCG 147-020 spectrum in Figure 2 shows the strongest maser spike near 5600 km/s (~25 mJy); Table 2 confirms its peak flux density of 20.4 mJy and an SNR of 12.0, making it the highest‐SNR detection among the four new megamasers.", "perturbed_explanation": "This claim is false because Table 2 lists IRAS 02438+2122 with peak flux 35.4 mJy and SNR 15.4, both higher than CGCG 147-020’s 20.4 mJy and SNR 12.0, so CGCG 147-020 is not the highest‐SNR detection.", "claim": "The CGCG 147-020 spectrum in Figure 2 shows the strongest maser spike near 5600 km/s (~25 mJy); Table 2 confirms its peak flux density of 20.4 mJy and an SNR of 12.0, making it the highest‐SNR detection among the four new megamasers.", "label": false }, { "paperid": "2411.15668v1", "paper_path": "./SciVer/papers/2411.15668v1.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15668v1_figure_2.png", "item2_path": "./SciVer/images/2411.15668v1-Table2-1.png", "section": [ "3.3.3" ], "request_id": 530, "origin_statement": "Figure 2b shows the λ-transition at Tw ≈ 2 K under He II at 150 kPa, confirming He II conditions. Table 2 then shows the exponent b for He II (1.023) exceeds 1, whereas for He I it is 0.915, implying freeze range grows superlinearly with inlet mass flow only in He II.", "perturbed_statement": "Figure 2b shows the λ-transition at the threshold level of 4 K under He II, so the conditions correspond to He I. Table 2 lists b = 1.084 for He I, indicating a superlinear freeze-range dependence on inlet mass flow in the He I case.", "perturbed_explanation": "In Figure 2b the λ-transition arrow points at ≈2 K, not at 4 K (the 4 K dashed line is the general threshold level). 
Also, Table 2 gives b = 0.915 for He I, not 1.084 (which is c), so He I’s exponent is sublinear, making the stated superlinear dependence incorrect.", "claim": "Figure 2b shows the λ-transition at the threshold level of 4 K under He II, so the conditions correspond to He I. Table 2 lists b = 1.084 for He I, indicating a superlinear freeze-range dependence on inlet mass flow in the He I case.", "label": false }, { "paperid": "2411.12892v1", "paper_path": "./SciVer/papers/2411.12892v1.json", "claim_type": "sequential", "item1": "2", "item2": "3(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.12892v1_figure_2.png", "item2_path": "./SciVer/images/2411.12892v1_figure_3(a).png", "section": [ "4.1" ], "request_id": 534, "origin_statement": "Figure2 shows SSA’s operator norm is roughly one-third that of vanilla. Leveraging this smaller norm, in Figure3c SSA approximates the optimal matrix with an ℓ1 error of 0.358, versus 0.543 for vanilla attention. Hence, lower norm yields better approximation.", "perturbed_statement": "Figure2 shows SSA’s operator norm is roughly one-third that of vanilla. Leveraging this smaller norm, in Figure3c SSA approximates the optimal matrix with an ℓ1 error of 0.543, versus 0.358 for vanilla attention. Hence, lower norm yields better approximation.", "perturbed_explanation": "The perturbed claim swaps the reported ℓ1 errors: Figure 3(c) actually shows SSA achieves an ℓ1 error of 0.358 and vanilla attention 0.543. Thus stating SSA’s error as 0.543 and vanilla’s as 0.358 directly contradicts the data in Figure 3.", "claim": "Figure2 shows SSA’s operator norm is roughly one-third that of vanilla. Leveraging this smaller norm, in Figure3c SSA approximates the optimal matrix with an ℓ1 error of 0.543, versus 0.358 for vanilla attention. Hence, lower norm yields better approximation.", "label": false }, { "paperid": "2411.03743v1", "paper_path": "./SciVer/papers/2411.03743v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.03743v1-Table1-1.png", "item2_path": "./SciVer/images/2411.03743v1-Table2-1.png", "section": [ "2.2.2" ], "request_id": 535, "origin_statement": "Table 1 shows PROTEUS uses lifelines for survival analysis on bulk clinical data. Table 2 indicates GPT-4-Turbo achieves 50% logical coherence (LC). Therefore, pairing PROTEUS’s lifelines workflow with GPT-4-Turbo should maintain moderate logical coherence in clinical hypothesis evaluation.", "perturbed_statement": "Table 1 shows PROTEUS uses BioEnricher for survival analysis on bulk clinical data. Table 2 indicates GPT-4-Turbo achieves 50% logical coherence (LC). Therefore, pairing PROTEUS’s BioEnricher workflow with GPT-4-Turbo should maintain moderate logical coherence in clinical hypothesis evaluation.", "perturbed_explanation": "This statement is incorrect because in Table 1 the survival analysis workflow for bulk clinical cohort data uses the lifelines package, not BioEnricher. Therefore, PROTEUS does not use BioEnricher for survival analysis as claimed.", "claim": "Table 1 shows PROTEUS uses BioEnricher for survival analysis on bulk clinical data. Table 2 indicates GPT-4-Turbo achieves 50% logical coherence (LC). 
Therefore, pairing PROTEUS’s BioEnricher workflow with GPT-4-Turbo should maintain moderate logical coherence in clinical hypothesis evaluation.", "label": false }, { "paperid": "2411.10746v1", "paper_path": "./SciVer/papers/2411.10746v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.10746v1_figure_5(a).png", "item2_path": "./SciVer/images/2411.10746v1_figure_5(c).png", "section": [ "3.4" ], "request_id": 536, "origin_statement": "Fig. 5(a) shows the model’s heatmap concentrates red regions at the lung bases, matching pleural effusion’s lower-lung fluid localization. Fig. 5(c) highlights the cardiac silhouette, consistent with cardiomegaly. Therefore, the model accurately localizes pleural effusion to lung bases and cardiomegaly to the enlarged heart area.", "perturbed_statement": "Fig. 5(a) shows the model’s heatmap concentrates red regions at the lung apices, matching pleural effusion’s upper-lung fluid localization. Fig. 5(c) highlights the diaphragm, consistent with cardiomegaly. Therefore, the model accurately localizes pleural effusion to lung apices and cardiomegaly to the diaphragmatic region.", "perturbed_explanation": "The perturbation is incorrect because in Fig. 5(a) the red-hot regions clearly lie at the lower lung fields, not the apices, reflecting basilar fluid in pleural effusion. Similarly, Fig. 5(c) focuses on the cardiac silhouette over the heart shadow, not on the diaphragm, so the claim about diaphragmatic localization for cardiomegaly contradicts the visualization.", "claim": "Fig. 5(a) shows the model’s heatmap concentrates red regions at the lung apices, matching pleural effusion’s upper-lung fluid localization. Fig. 5(c) highlights the diaphragm, consistent with cardiomegaly. Therefore, the model accurately localizes pleural effusion to lung apices and cardiomegaly to the diaphragmatic region.", "label": false }, { "paperid": "2409.14836v2", "paper_path": "./SciVer/papers/2409.14836v2.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.14836v2_figure_2.png", "item2_path": "./SciVer/images/2409.14836v2_figure_3.png", "section": [ "3.2" ], "request_id": 537, "origin_statement": "Figure2 shows SAHE in high layers peaks at β=0.1 (~230). Figure3 indicates high-layer value vectors’ hyperspherical energy increases by about 11 in layer-26 after DPO. Therefore, setting β to 0.1 particularly amplifies hyperspherical energy of value vectors in high layers.", "perturbed_statement": "Figure2 shows SAHE in high layers peaks at β=0.1 (~230). Figure3 indicates high-layer query vectors’ hyperspherical energy increases by about 11 in layer-26 after DPO. Therefore, setting β to 0.1 particularly amplifies hyperspherical energy of query vectors in high layers.", "perturbed_explanation": "The perturbed claim misattributes the 11-unit increase to query vectors. In Figure 3, the green bar (value vector) at layer-26 rises by about 11, whereas the orange bar (query vector) at layer-26 actually decreases by roughly 1.5, contradicting the statement.", "claim": "Figure2 shows SAHE in high layers peaks at β=0.1 (~230). Figure3 indicates high-layer query vectors’ hyperspherical energy increases by about 11 in layer-26 after DPO. 
Therefore, setting β to 0.1 particularly amplifies hyperspherical energy of query vectors in high layers.", "label": false }, { "paperid": "2410.12851v3", "paper_path": "./SciVer/papers/2410.12851v3.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.12851v3_figure_2.png", "item2_path": "./SciVer/images/2410.12851v3-Table3-1.png", "section": [ "5.2" ], "request_id": 541, "origin_statement": "Because “Examples and Illustrations” has the highest preference prediction coefficient (~0.4) in Figure 2, VibeCheck [3 iter] attains 62.04% PP accuracy on writing prompts, outperforming the predefined vibes baseline (59.49%) by 2.55%.", "perturbed_statement": "Because “Ethical Consideration” has the highest preference prediction coefficient (~0.5) in Figure 2, VibeCheck [1 iter] attains only 57.31% PP accuracy on writing prompts, underperforming the predefined vibes baseline (59.49%) by 2.18%.", "perturbed_explanation": "This is incorrect because in Figure 2 the PP coefficient for “Ethical Consideration” (~0.35) is lower than that for “Examples and Illustrations” (~0.4), and Table 3 shows VibeCheck [1 iter] actually achieves 60.58% PP accuracy on writing prompts, not 57.31%.", "claim": "Because “Ethical Consideration” has the highest preference prediction coefficient (~0.5) in Figure 2, VibeCheck [1 iter] attains only 57.31% PP accuracy on writing prompts, underperforming the predefined vibes baseline (59.49%) by 2.18%.", "label": false }, { "paperid": "2409.17090v1", "paper_path": "./SciVer/papers/2409.17090v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17090v1_figure_2(a).png", "item2_path": "./SciVer/images/2409.17090v1_figure_2(d).png", "section": [ "4.5" ], "request_id": 545, "origin_statement": "SRSG’s accuracy peaks at γ≈0.08 (~0.74 from Fig. 1), and its NMI peaks at K=4 (~0.83 from Fig. 2); thus the optimal parameters are γ=0.08 and K=4.", "perturbed_statement": "SRSG’s accuracy peaks at γ≈0.05 (~0.75 from Fig. 1), and its NMI peaks at K=5 (~0.85 from Fig. 2); thus the optimal parameters are γ=0.05 and K=5.", "perturbed_explanation": "In Fig. 1, accuracy at γ=0.05 is about 0.62, not 0.75, and the true accuracy peak (~0.74) is at γ≈0.08. In Fig. 2, NMI at K=5 is about 0.82, not 0.85, with the true NMI peak (~0.83) at K=3–4. These details contradict the perturbed claim.", "claim": "SRSG’s accuracy peaks at γ≈0.05 (~0.75 from Fig. 1), and its NMI peaks at K=5 (~0.85 from Fig. 
2); thus the optimal parameters are γ=0.05 and K=5.", "label": false }, { "paperid": "2410.04380v1", "paper_path": "./SciVer/papers/2410.04380v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.04380v1_figure_1.png", "item2_path": "./SciVer/images/2410.04380v1-Table2-1.png", "section": [ "7.1" ], "request_id": 549, "origin_statement": "Table 2 shows that train-90s yields 720 audio tokens at 8 Hz; Figure 1 shows 8 Hz has a relative WER of about 1.8 (near the 48 Hz baseline) and a stable PESQ (~2.21); thus 8 Hz encoding reduces token count while retaining near-baseline speech quality.", "perturbed_statement": "Table 2 shows that train-90s yields 720 audio tokens at 8 Hz; Figure 1 shows 8 Hz has a relative WER equal to the baseline (1.0) and a PESQ equal to the baseline (2.82); thus 8 Hz encoding matches baseline speech quality exactly.", "perturbed_explanation": "Figure 1 actually reports a relative WER of ~1.8 at 8 Hz (not 1.0) and a PESQ of ~2.21 (not 2.82). Both values are clearly different from the reported baseline, so the claim of exact baseline matching is contradicted by the figure’s data.", "claim": "Table 2 shows that train-90s yields 720 audio tokens at 8 Hz; Figure 1 shows 8 Hz has a relative WER equal to the baseline (1.0) and a PESQ equal to the baseline (2.82); thus 8 Hz encoding matches baseline speech quality exactly.", "label": false }, { "paperid": "2411.02278v1", "paper_path": "./SciVer/papers/2411.02278v1.json", "claim_type": "sequential", "item1": "3(d)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.02278v1_figure_3(d).png", "item2_path": "./SciVer/images/2411.02278v1-Table4-1.png", "section": [ "5.3" ], "request_id": 551, "origin_statement": "Figure 3 shows w2c2’s median structural similarity score at O2 exceeding 40 while wasm2c’s is around 30; Table 4 reports w2c2’s average node quantity compare at O2 as 0.223 versus wasm2c’s 0.193, confirming w2c2 yields superior structural similarity at O2.", "perturbed_statement": "Figure 3 shows w2c2’s median structural similarity at O2 around 30 while wasm2c’s exceeds 40; Table 4 reports w2c2’s average node quantity compare at O2 as 0.193 versus wasm2c’s 0.223, falsely suggesting wasm2c is superior at O2.", "perturbed_explanation": "The perturbation is wrong because in Figure 3 the green O2 box for w2c2 is visibly higher than for wasm2c (around 80 versus 40, not 30 versus 40). Moreover, Table 4’s Node Quantity Compare row under O2 lists w2c2 average as 0.223 and wasm2c as 0.193, not the reversed values.", "claim": "Figure 3 shows w2c2’s median structural similarity at O2 around 30 while wasm2c’s exceeds 40; Table 4 reports w2c2’s average node quantity compare at O2 as 0.193 versus wasm2c’s 0.223, falsely suggesting wasm2c is superior at O2.", "label": false }, { "paperid": "2410.07571v2", "paper_path": "./SciVer/papers/2410.07571v2.json", "claim_type": "sequential", "item1": "6", "item2": "6", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.07571v2_figure_6.png", "item2_path": "./SciVer/images/2410.07571v2-Table6-1.png", "section": [ "5.1" ], "request_id": 553, "origin_statement": "Figure 6 shows that SPPFT on LLaMA-2-Chat-7B achieves 65.20% multimodal accuracy. Table 6 reports that freezing layers 6–14 yields 65.2% multimodal helpfulness. 
Therefore, the SPPFT variant in Figure 6 uses the default safety layers (6–14) frozen.", "perturbed_statement": "Figure 6 shows that SPPFT on LLaMA-2-Chat-7B achieves 65.20% multimodal accuracy. Table 6 reports that freezing layers 15–31 yields 65.2% multimodal helpfulness. Therefore, the SPPFT variant in Figure 6 uses the upper layers (15–31) frozen.", "perturbed_explanation": "The perturbation is wrong because Table 6 actually shows that freezing layers 15–31 yields only 48.3% multimodal helpfulness, not 65.2%. The 65.2% value corresponds to freezing layers 6–14, so it is incorrect to attribute that score to freezing layers 15–31.", "claim": "Figure 6 shows that SPPFT on LLaMA-2-Chat-7B achieves 65.20% multimodal accuracy. Table 6 reports that freezing layers 15–31 yields 65.2% multimodal helpfulness. Therefore, the SPPFT variant in Figure 6 uses the upper layers (15–31) frozen.", "label": false }, { "paperid": "2411.14749v2", "paper_path": "./SciVer/papers/2411.14749v2.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.14749v2_figure_2.png", "item2_path": "./SciVer/images/2411.14749v2-Table2-1.png", "section": [ "3.1.2" ], "request_id": 556, "origin_statement": "Because no coincident host is detected in the Gemini/GMOS i-band image (m > 26.5, Fig. 2), any z = 2.4 host must have M > –18.7 (Table 2), placing it more than a magnitude below the luminosity function knee.", "perturbed_statement": "Since no coincident host is detected in the Gemini/GMOS i-band image (m > 24.5, Fig. 2), any z = 2.4 host must have M > –20.7 (Table 2), placing it more than a magnitude below the luminosity function knee.", "perturbed_explanation": "Table 2 and the figure caption list the Gemini/GMOS i-band 5σ depth as m > 26.5 (not 24.5), corresponding to M > –18.7 at z = 2.4. The perturbed statement wrongly uses the Z-band limit (m > 24.5, M > –20.7) for the i-band, making its magnitude inference incorrect.", "claim": "Since no coincident host is detected in the Gemini/GMOS i-band image (m > 24.5, Fig. 2), any z = 2.4 host must have M > –20.7 (Table 2), placing it more than a magnitude below the luminosity function knee.", "label": false }, { "paperid": "2410.09247v1", "paper_path": "./SciVer/papers/2410.09247v1.json", "claim_type": "sequential", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.09247v1_figure_5.png", "item2_path": "./SciVer/images/2410.09247v1-Table3-1.png", "section": [ "3.2" ], "request_id": 559, "origin_statement": "Given Mistral 7B’s 15% exact match rate in Table 3 and its approximately 9 pp benchmark inflation in Figure 5, Mistral exhibits lower benchmark inflation than GPT-3.5’s 13.1 pp, despite higher data leakage evidence.", "perturbed_statement": "Given Mistral 7B’s 12% exact match rate in Table 3 and its approximately 14 pp benchmark inflation in Figure 5, Mistral exhibits higher benchmark inflation than GPT-3.5’s 13.1 pp, despite slightly lower leakage evidence.", "perturbed_explanation": "The perturbation is wrong because Table 3 actually reports Mistral 7B’s exact match rate as 15%, not 12%, and Figure 5 shows its benchmark inflation is about 8.5 pp (not 14 pp). 
Thus Mistral does not exceed GPT-3.5’s 13.1 pp inflation.", "claim": "Given Mistral 7B’s 12% exact match rate in Table 3 and its approximately 14 pp benchmark inflation in Figure 5, Mistral exhibits higher benchmark inflation than GPT-3.5’s 13.1 pp, despite slightly lower leakage evidence.", "label": false }, { "paperid": "2410.19093v1", "paper_path": "./SciVer/papers/2410.19093v1.json", "claim_type": "sequential", "item1": "9", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19093v1_figure_9.png", "item2_path": "./SciVer/images/2410.19093v1-Table2-1.png", "section": [ "4.2.6" ], "request_id": 561, "origin_statement": "Figure 9 gives the pulsation period as P1 = 0.0244008189 d, so 1/P1 ≈ 40.983 d⁻¹, which agrees with Table 2’s dominant frequency f₁ = 40.9822311 d⁻¹.", "perturbed_statement": "Figure 9 gives the pulsation period as P1 = 0.0249008189 d, so 1/P1 ≈ 40.16 d⁻¹, which agrees with Table 2’s dominant frequency f₁ = 40.9822311 d⁻¹.", "perturbed_explanation": "The statement is wrong because Figure 9’s caption actually lists P1 = 0.0244008189 d, not 0.0249008189 d. Using the correct period gives 1/P1 ≈ 40.98 d⁻¹, matching f₁ = 40.9822311 d⁻¹ in Table 2, whereas 1/0.0249008189 d = 40.16 d⁻¹ does not match.", "claim": "Figure 9 gives the pulsation period as P1 = 0.0249008189 d, so 1/P1 ≈ 40.16 d⁻¹, which agrees with Table 2’s dominant frequency f₁ = 40.9822311 d⁻¹.", "label": false }, { "paperid": "2409.17455v1", "paper_path": "./SciVer/papers/2409.17455v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "9(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.17455v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.17455v1_figure_9(c).png", "section": [ "3.1.2" ], "request_id": 562, "origin_statement": "Bert’s macro F1 drops about 0.33 at λ=1.0 on Go Emotions with synonym shortcuts, compared to about 0.22 on Yelp with register style shortcuts, indicating greater vulnerability to synonym shortcuts on Go Emotions.", "perturbed_statement": "Bert’s macro F1 drops about 0.22 at λ=1.0 on Go Emotions with synonym shortcuts, compared to about 0.33 on Yelp with register style shortcuts, indicating greater vulnerability to register style shortcuts.", "perturbed_explanation": "Figure 3 shows Bert’s F1 drop at λ=1.0 on Go Emotions is approximately 0.33, not 0.22. Figure 9 shows its drop on Yelp at the same λ is about 0.22, not 0.33. 
The perturbed claim misstates both values and reverses the observed vulnerability.", "claim": "Bert’s macro F1 drops about 0.22 at λ=1.0 on Go Emotions with synonym shortcuts, compared to about 0.33 on Yelp with register style shortcuts, indicating greater vulnerability to register style shortcuts.", "label": false }, { "paperid": "2409.06290v1", "paper_path": "./SciVer/papers/2409.06290v1.json", "claim_type": "sequential", "item1": "3(c)", "item2": "3(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.06290v1_figure_3(c).png", "item2_path": "./SciVer/images/2409.06290v1_figure_3(d).png", "section": [ "4.2" ], "request_id": 564, "origin_statement": "EntLoss yields a Dunn index of 8.62×10⁻⁵, which increases to 1.07×10⁻⁴ under EA+EL; this rise indicates that adding EntAugment to EntLoss enhances interclass cluster separation in the t-SNE embedding.", "perturbed_statement": "EntLoss yields a Dunn index of 1.07×10⁻⁴, which decreases to 8.62×10⁻⁵ under EA+EL; this drop indicates that adding EntAugment to EntLoss reduces interclass cluster separation in the t-SNE embedding.", "perturbed_explanation": "The perturbed statement swaps the actual DI values: according to the captions, EntLoss alone has DI = 8.62×10⁻⁵ and EA+EL has DI = 1.07×10⁻⁴. In reality, the DI increases, so it does not decrease as claimed.", "claim": "EntLoss yields a Dunn index of 1.07×10⁻⁴, which decreases to 8.62×10⁻⁵ under EA+EL; this drop indicates that adding EntAugment to EntLoss reduces interclass cluster separation in the t-SNE embedding.", "label": false }, { "paperid": "2409.19764v2", "paper_path": "./SciVer/papers/2409.19764v2.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.19764v2_figure_2(a).png", "item2_path": "./SciVer/images/2409.19764v2_figure_2(b).png", "section": [ "4.1" ], "request_id": 567, "origin_statement": "Temporal-only attention shows the highest entropy (~5.8) in Fig.2A, leading to the lowest accuracy (~77.7%) in Fig.2B. In contrast, spatial-temporal attention has the lowest entropy (~4.85) and the highest accuracy (~79.9%), representing a 2.2% gain over temporal-only.", "perturbed_statement": "Temporal-only attention shows the highest entropy (~5.8) in Fig.2A, leading to the lowest accuracy (~77.7%) in Fig.2B. In contrast, spatial-only attention has the lowest entropy (~5.2) and the highest accuracy (~79.5%), representing a 2.2% gain over temporal-only.", "perturbed_explanation": "The perturbed statement incorrectly attributes the lowest entropy and highest accuracy to spatial-only attention. According to Fig.2, spatial-temporal attention actually has the lowest entropy (4.85) and the highest accuracy (79.9%), while spatial-only’s values are 5.17 entropy and 79.5% accuracy.", "claim": "Temporal-only attention shows the highest entropy (~5.8) in Fig.2A, leading to the lowest accuracy (~77.7%) in Fig.2B. 
In contrast, spatial-only attention has the lowest entropy (~5.2) and the highest accuracy (~79.5%), representing a 2.2% gain over temporal-only.", "label": false }, { "paperid": "2409.05429v2", "paper_path": "./SciVer/papers/2409.05429v2.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.05429v2-Table1-1.png", "item2_path": "./SciVer/images/2409.05429v2-Table2-1.png", "section": [ "4.1" ], "request_id": 570, "origin_statement": "Table 2 shows Airbus 320/200 accounts for 2,816,138 samples (37.86%). Table 1 states fuel data includes remaining fuel measurements. Therefore, approximately 37.86% of the ACARS fuel measurements—over 2.8 million records—originate from Airbus 320/200 flights.", "perturbed_statement": "Table 2 shows Airbus 330/300 accounts for 2,816,138 samples (37.86%). Table 1 states fuel data includes remaining fuel measurements. Therefore, approximately 37.86% of the ACARS fuel measurements—over 2.8 million records—originate from Airbus 330/300 flights.", "perturbed_explanation": "The perturbed claim misattributes the 2,816,138 sample count and 37.86% share to Airbus 330/300. According to Table 2, Airbus 330/300 actually has only 99,304 samples (1.34%), while 2,816,138 samples (37.86%) belong to Airbus 320/200.", "claim": "Table 2 shows Airbus 330/300 accounts for 2,816,138 samples (37.86%). Table 1 states fuel data includes remaining fuel measurements. Therefore, approximately 37.86% of the ACARS fuel measurements—over 2.8 million records—originate from Airbus 330/300 flights.", "label": false }, { "paperid": "2409.11008v1", "paper_path": "./SciVer/papers/2409.11008v1.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11008v1_figure_5.png", "item2_path": "./SciVer/images/2409.11008v1-Table4-1.png", "section": [ "7.3" ], "request_id": 572, "origin_statement": "Figure 5 indicates that sLMM-VAE’s test MSE decreases to about 0.488 at 8 basis functions; Table 4 reports its GSNN variant at 0.488±0.0019 MSE, placing it slightly behind the oLMM-VAE GSNN’s 0.486±0.0009.", "perturbed_statement": "Figure 5 indicates that sLMM-VAE’s test MSE decreases to about 0.486 at 8 basis functions; Table 4 reports its GSNN variant at 0.486±0.0009 MSE, placing it slightly ahead of the oLMM-VAE GSNN’s 0.488±0.0019.", "perturbed_explanation": "This claim is wrong because Figure 5 actually shows sLMM-VAE’s test MSE at 8 basis functions is around 0.488, not 0.486. 
Moreover, Table 4 lists sLMM-VAE GSNN as 0.488±0.0019 and oLMM-VAE GSNN as 0.486±0.0009, so sLMM-VAE does not outperform oLMM-VAE.", "claim": "Figure 5 indicates that sLMM-VAE’s test MSE decreases to about 0.486 at 8 basis functions; Table 4 reports its GSNN variant at 0.486±0.0009 MSE, placing it slightly ahead of the oLMM-VAE GSNN’s 0.488±0.0019.", "label": false }, { "paperid": "2410.21562v1", "paper_path": "./SciVer/papers/2410.21562v1.json", "claim_type": "sequential", "item1": "2(a)", "item2": "2(l)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21562v1_figure_2(a).png", "item2_path": "./SciVer/images/2410.21562v1_figure_2(l).png", "section": [ "2.3.2" ], "request_id": 575, "origin_statement": "Because the foam image’s near-isotropic texture leads to eight detected angular boundaries, as shown by the eight red dashed lines in Figure 2, the empirical curvelet transform uses eight angular sectors.", "perturbed_statement": "Because the foam image’s near-isotropic texture leads to six detected angular boundaries, as shown by the six red dashed lines in Figure 2, the empirical curvelet transform uses six angular sectors.", "perturbed_explanation": "This is incorrect because Figure 2 clearly displays eight red dashed lines marking the angular boundaries, not six. Therefore, the transform actually uses eight angular sectors rather than six.", "claim": "Because the foam image’s near-isotropic texture leads to six detected angular boundaries, as shown by the six red dashed lines in Figure 2, the empirical curvelet transform uses six angular sectors.", "label": false }, { "paperid": "2410.18766v2", "paper_path": "./SciVer/papers/2410.18766v2.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.18766v2_figure_3.png", "item2_path": "./SciVer/images/2410.18766v2_figure_4.png", "section": [ "3.3" ], "request_id": 576, "origin_statement": "Fig. 3b shows the variable selection network applies softmax across the feature dimension to produce Nj×τ×F selection weights; Fig. 4 shows these weights elementwise multiply the embedded Nj×τ×F features, yielding an Nj×τ×F filtered output.", "perturbed_statement": "Fig. 3b shows the variable selection network applies softmax across the time dimension to produce Nj×τ selection weights; Fig. 4 shows these weights elementwise multiply the embedded Nj×τ×F features, yielding an Nj×τ filtered output.", "perturbed_explanation": "This is wrong because both Fig. 3b and the text specify the softmax operates over the feature dimension (not time), producing weights of shape Nj×τ×F (as per Eq. 11). Consequently, the weighted multiplication produces an Nj×τ×F output, not Nj×τ.", "claim": "Fig. 3b shows the variable selection network applies softmax across the time dimension to produce Nj×τ selection weights; Fig. 4 shows these weights elementwise multiply the embedded Nj×τ×F features, yielding an Nj×τ filtered output.", "label": false }, { "paperid": "2409.12587v1", "paper_path": "./SciVer/papers/2409.12587v1.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.12587v1_figure_4.png", "item2_path": "./SciVer/images/2409.12587v1-Table3-1.png", "section": [ "4.1" ], "request_id": 580, "origin_statement": "Since cutmix’s induced points lie linearly between the fixed inputs in Figure 4, its unoptimized MSE in 3-TTA is 0.230 (vs. 
0.176 for mixup) according to Table 3, yet VB-TTA reduces cutmix’s MSE to 0.081 by step 50.", "perturbed_statement": "Since mixup’s induced points lie linearly between the fixed inputs in Figure 4, its unoptimized MSE in 3-TTA is 0.230 (vs. 0.176 for cutmix) according to Table 3, yet VB-TTA reduces mixup’s MSE to 0.081 by step 50.", "perturbed_explanation": "This is wrong because Figure 4 actually shows mixup points as a scattered cloud, not a linear segment (that is cutmix). Moreover, Table 3 reports 3-TTA (Mixup) MSE=0.176 at step 1 (not 0.230) and 3-VB-TTA (Mixup) MSE=0.137 at step 50 (not 0.081).", "claim": "Since mixup’s induced points lie linearly between the fixed inputs in Figure 4, its unoptimized MSE in 3-TTA is 0.230 (vs. 0.176 for cutmix) according to Table 3, yet VB-TTA reduces mixup’s MSE to 0.081 by step 50.", "label": false }, { "paperid": "2409.09946v2", "paper_path": "./SciVer/papers/2409.09946v2.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.09946v2_figure_1.png", "item2_path": "./SciVer/images/2409.09946v2-Table1-1.png", "section": [ "2" ], "request_id": 582, "origin_statement": "The BOSS CMASS sample has the largest number of sources (620,292; Table 1). Figure 1 shows its footprint covers the CMB dipole location. Therefore, BOSS CMASS should yield the strongest statistical leverage for measuring dipole anisotropy among the tracers.", "perturbed_statement": "The BOSS LOWZ tracer has the largest number of sources (620,292; Table 1). Figure 1 shows its footprint covers the CMB dipole location. Therefore, LOWZ should yield the strongest statistical leverage for measuring dipole anisotropy among the tracers.", "perturbed_explanation": "This statement is incorrect because Table 1 lists only 280,067 sources for LOWZ, whereas the 620,292 count actually refers to the BOSS CMASS sample. LOWZ therefore does not have the largest source count, invalidating the claim of strongest statistical leverage.", "claim": "The BOSS LOWZ tracer has the largest number of sources (620,292; Table 1). Figure 1 shows its footprint covers the CMB dipole location. Therefore, LOWZ should yield the strongest statistical leverage for measuring dipole anisotropy among the tracers.", "label": false }, { "paperid": "2409.16239v1", "paper_path": "./SciVer/papers/2409.16239v1.json", "claim_type": "sequential", "item1": "1", "item2": "3", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.16239v1-Table1-1.png", "item2_path": "./SciVer/images/2409.16239v1-Table3-1.png", "section": [ "4.2" ], "request_id": 586, "origin_statement": "Since LADD achieves the highest 62.8% accuracy under GLaD (MTT) at IPC = 10 (from Table 1) and RDED-I (S) only reaches 23.6% on ImageNet-1K (from Table 3), LADD-RDED-I thus surpasses RDED-I (S) by 5.2 percentage points.", "perturbed_statement": "Since LADD achieves 64.8% accuracy under GLaD (MTT) at IPC = 10 (from Table 1) and RDED-I (S) only reaches 20.6% on ImageNet-1K (from Table 3), LADD-RDED-I thus surpasses RDED-I (S) by 44.2 percentage points.", "perturbed_explanation": "The perturbed claim misquotes two values: LADD’s GLaD accuracy at IPC = 10 is actually 62.8%, not 64.8%, and RDED-I (S)’s ImageNet-1K accuracy is 23.6%, not 20.6%.
With the correct numbers, the improvement is 62.8 – 23.6 = 39.2 points, not 44.2 points.", "claim": "Since LADD achieves 64.8% accuracy under GLaD (MTT) at IPC = 10 (from Table 1) and RDED-I (S) only reaches 20.6% on ImageNet-1K (from Table 3), LADD-RDED-I thus surpasses RDED-I (S) by 44.2 percentage points.", "label": false }, { "paperid": "2410.16833v2", "paper_path": "./SciVer/papers/2410.16833v2.json", "claim_type": "sequential", "item1": "13", "item2": "15", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.16833v2_figure_13.png", "item2_path": "./SciVer/images/2410.16833v2_figure_15.png", "section": [ "5.2" ], "request_id": 588, "origin_statement": "Because Fig. 13’s area distortion histogram spans roughly ±0.3 and Fig. 15’s spans roughly ±0.2, our method achieves tighter area preservation on the Rocker Arm model than on the Bracelet model.", "perturbed_statement": "Because Fig. 13’s area distortion histogram spans roughly ±0.3 and Fig. 15’s spans roughly ±0.4, our method achieves worse area preservation on the Rocker Arm model than on the Bracelet model.", "perturbed_explanation": "The perturbation is incorrect because the actual area distortion histogram in Fig. 15 is more concentrated around zero within approximately ±0.2, not ±0.4. Thus the claim that Fig. 15 spans ±0.4 contradicts the true narrower spread shown in the second histogram.", "claim": "Because Fig. 13’s area distortion histogram spans roughly ±0.3 and Fig. 15’s spans roughly ±0.4, our method achieves worse area preservation on the Rocker Arm model than on the Bracelet model.", "label": false }, { "paperid": "2409.05884v1", "paper_path": "./SciVer/papers/2409.05884v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05884v1_figure_2.png", "item2_path": "./SciVer/images/2409.05884v1-Table3-1.png", "section": [ "5" ], "request_id": 590, "origin_statement": "TST’s MAE decreases by 5.65 MW when adding FCI (w=24) on the Railway dataset, dropping from 14.02 to 8.37, making it the second-best transformer model after STF (8.28 MAE).", "perturbed_statement": "TST’s MAE decreases by 5.65 MW when adding FCI (w=24) on the Railway dataset, dropping from 14.02 to 8.37, making it the best performing transformer model, even outperforming STF (8.28 MAE).", "perturbed_explanation": "The perturbed statement is false because Table 3 shows STF with FCI and w=24 achieves an MAE of 8.28, which is lower than TST’s 8.37.
Thus TST remains the second-best transformer, not the best.", "claim": "TST’s MAE decreases by 5.65 MW when adding FCI (w=24) on the Railway dataset, dropping from 14.02 to 8.37, making it the best performing transformer model, even outperforming STF (8.28 MAE).", "label": false }, { "paperid": "2411.14608v1", "paper_path": "./SciVer/papers/2411.14608v1.json", "claim_type": "sequential", "item1": "2", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.14608v1_figure_2.png", "item2_path": "./SciVer/images/2411.14608v1_figure_5.png", "section": [ "3.1" ], "request_id": 591, "origin_statement": "At 300 K, HIP-NN lattice parameter (≈4.85 Å) is ~0.6% smaller than DFT-MD’s 4.88 Å; by the inverse lattice parameter–bulk modulus relation, HIP-NN predicts ~225 GPa, exceeding DFT-MD’s ~220 GPa bulk modulus by ~5 GPa.", "perturbed_statement": "At 300 K, HIP-NN lattice parameter (~4.90 Å) exceeds DFT-MD’s 4.88 Å; since bulk modulus inversely scales, HIP-NN’s ~225 GPa should lie ~5 GPa below DFT-MD’s ~220 GPa.", "perturbed_explanation": "This is incorrect: Fig. 2 shows HIP-NN’s lattice parameter at 300 K (≈4.85 Å) is below DFT-MD’s (≈4.88 Å), not above; and Fig. 5 shows HIP-NN bulk modulus (≈225 GPa) exceeds DFT-MD’s (≈220 GPa), not falls below.", "claim": "At 300 K, HIP-NN lattice parameter (~4.90 Å) exceeds DFT-MD’s 4.88 Å; since bulk modulus inversely scales, HIP-NN’s ~225 GPa should lie ~5 GPa below DFT-MD’s ~220 GPa.", "label": false }, { "paperid": "2410.19346v2", "paper_path": "./SciVer/papers/2410.19346v2.json", "claim_type": "sequential", "item1": "5", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19346v2_figure_5.png", "item2_path": "./SciVer/images/2410.19346v2-Table1-1.png", "section": [ "4.2" ], "request_id": 598, "origin_statement": "Table 1 shows that Qwen2.5-14b achieves the highest judge majority score of 88.14 among Qwen models. Given this, Figure 5(b) indicates Qwen2.5’s defense score (0.27) is markedly lower than its attack score (0.75), implying defense remains a weakness even for high-scoring models.", "perturbed_statement": "Table 1 shows Qwen2.5-14b has the lowest judge majority score (80.47) among Qwen models. Given this, Figure 5(b) indicates Qwen2.5’s defense score (0.27) is higher than its attack score (0.75), implying defense outperforms offense despite the low majority score.", "perturbed_explanation": "This statement is wrong because Table 1 actually reports Qwen2.5-14b’s judge majority score as 88.14 (not 80.47), and it is not the lowest among Qwen models. Additionally, Figure 5(b) shows Qwen2.5’s defense score (0.27) is lower than its attack score (0.75), not higher.", "claim": "Table 1 shows Qwen2.5-14b has the lowest judge majority score (80.47) among Qwen models. Given this, Figure 5(b) indicates Qwen2.5’s defense score (0.27) is higher than its attack score (0.75), implying defense outperforms offense despite the low majority score.", "label": false }, { "paperid": "2410.06992v2", "paper_path": "./SciVer/papers/2410.06992v2.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.06992v2_figure_3.png", "item2_path": "./SciVer/images/2410.06992v2_figure_4.png", "section": [ "2.1.1" ], "request_id": 600, "origin_statement": "Figure 3 shows the sympy issue report explicitly provides the full _print_Indexed method. This direct access enables model solution leakage. 
Consequently, in Figure 4 the generated django patch for __reversed__ uses reversed(self.dict.keys()) rather than reversed(self.dict), exemplifying an incorrect fix that still passes test cases.", "perturbed_statement": "Figure 3 only includes a stub signature for _print_Indexed without implementation, so the model cannot copy it directly. Consequently, in Figure 4 the generated django patch for __reversed__ returns reversed(self.dict.values()) instead of reversed(self.dict), producing an incorrect fix that still passes tests.", "perturbed_explanation": "This is incorrect because in Figure 3 the issue report actually shows the complete implementation of _print_Indexed (not just a stub). Also, in Figure 4 the generated patch uses reversed(self.dict.keys()), not reversed(self.dict.values()).", "claim": "Figure 3 only includes a stub signature for _print_Indexed without implementation, so the model cannot copy it directly. Consequently, in Figure 4 the generated django patch for __reversed__ returns reversed(self.dict.values()) instead of reversed(self.dict), producing an incorrect fix that still passes tests.", "label": false }, { "paperid": "2409.01722v2", "paper_path": "./SciVer/papers/2409.01722v2.json", "claim_type": "sequential", "item1": "7(a)", "item2": "8", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01722v2_figure_7(a).png", "item2_path": "./SciVer/images/2409.01722v2_figure_8.png", "section": [ "6.1" ], "request_id": 601, "origin_statement": "Since clients in ACCESS-FL accumulate only about 0.4 s over 100 rounds (Fig. 7a) vs about 10 s for SecAgg, and Fig. 8 shows ACCESS-FL’s MNIST accuracy curve overlaps SecAgg’s, ACCESS-FL matches SecAgg’s MNIST performance with roughly 25× lower client computation time.", "perturbed_statement": "Since clients in ACCESS-FL accumulate only about 0.4 s over 100 rounds vs about 445 000 s for SecAgg (Fig. 7a), and Fig. 8 shows ACCESS-FL trails SecAgg by roughly 5% on MNIST, ACCESS-FL underperforms in accuracy despite its vastly lower client computation time.", "perturbed_explanation": "This is wrong because Fig. 7a reports SecAgg client time of ~10 s (not 445 000 s), and Fig. 8 shows ACCESS-FL’s MNIST accuracy curve coincides with SecAgg’s (no 5% deficit). The misread runtime and invented accuracy gap contradict the actual figures.", "claim": "Since clients in ACCESS-FL accumulate only about 0.4 s over 100 rounds vs about 445 000 s for SecAgg (Fig. 7a), and Fig. 8 shows ACCESS-FL trails SecAgg by roughly 5% on MNIST, ACCESS-FL underperforms in accuracy despite its vastly lower client computation time.", "label": false }, { "paperid": "2409.16708v1", "paper_path": "./SciVer/papers/2409.16708v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.16708v1-Table1-1.png", "item2_path": "./SciVer/images/2409.16708v1-Table2-1.png", "section": [ "4.4" ], "request_id": 602, "origin_statement": "Participants in the lowest quartile (Q1) overestimated their performance by 4.52 points (15.34–10.82, Table 1). Table 2 shows a significant negative correlation (r = –0.43) between actual performance and the estimation gap, indicating that lower performers systematically overestimate more.", "perturbed_statement": "Participants in the lowest quartile (Q1) overestimated their performance by 3.12 points (13.94–10.82, Table 1).
Table 2 shows a significant positive correlation (r = +0.43) between actual performance and the estimation gap, indicating that lower performers overestimate less.", "perturbed_explanation": "The perturbed statement is incorrect because Table 1 reports a Q1 estimate of 15.34 and performance of 10.82, yielding a gap of 4.52 points (not 3.12). Moreover, Table 2 gives a negative correlation of –0.43 between performance and the estimation gap, not a positive +0.43.", "claim": "Participants in the lowest quartile (Q1) overestimated their performance by 3.12 points (13.94–10.82, Table 1). Table 2 shows a significant positive correlation (r = +0.43) between actual performance and the estimation gap, indicating that lower performers overestimate less.", "label": false }, { "paperid": "2409.11267v1", "paper_path": "./SciVer/papers/2409.11267v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.11267v1_figure_2.png", "item2_path": "./SciVer/images/2409.11267v1_figure_3.png", "section": [ "4.2" ], "request_id": 606, "origin_statement": "Figure 2 shows the RL agent outputs discrete actions ε_d(k) that the MPC uses to compute continuous actions. Building on this, Figure 3 unrolls an LSTM over horizon Np, using transformed augmented state g_l(χ(k)) to estimate decoupled Q-values Q_l for each discrete sub-action at step l.", "perturbed_statement": "Figure 2 shows the RL agent outputs continuous actions ε_c(k) that the MPC uses to compute discrete actions. Based on this, Figure 3 unrolls an LSTM over horizon Np using raw augmented state χ(k) to predict decoupled Q-values Q_l at each step.", "perturbed_explanation": "The perturbed statement wrongly claims the agent outputs continuous actions ε_c(k), but Figure 2 and the context clearly state the agent proposes discrete actions ε_d(k). It also asserts the LSTM takes raw state χ(k), whereas Figure 3 specifies it processes transformed augmented states g_l(χ(k)).", "claim": "Figure 2 shows the RL agent outputs continuous actions ε_c(k) that the MPC uses to compute discrete actions. Based on this, Figure 3 unrolls an LSTM over horizon Np using raw augmented state χ(k) to predict decoupled Q-values Q_l at each step.", "label": false }, { "paperid": "2409.14838v1", "paper_path": "./SciVer/papers/2409.14838v1.json", "claim_type": "sequential", "item1": "8", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14838v1_figure_8.png", "item2_path": "./SciVer/images/2409.14838v1-Table3-1.png", "section": [ "3.2.3" ], "request_id": 609, "origin_statement": "Table 3 shows FeFET supports 4-bit precision for VGG8 (88%) and ResNet18 (78%), then from Figure 8 we see FeFET’s energy efficiency (~125 TOPS/W) exceeds RRAM[30]’s (~35 TOPS/W) by over 3×.", "perturbed_statement": "Table 3 shows PCM supports 4-bit precision for VGG8 (89%) and ResNet18 (81%), then from Figure 8 we see PCM’s energy efficiency (~60 TOPS/W) exceeds RRAM[30]’s (~35 TOPS/W) by over 2×.", "perturbed_explanation": "This is wrong because Table 3 actually reports PCM at 4-bit precision yields only 10% accuracy for VGG8 (not 89%) and 2% for ResNet18 (not 81%).
Moreover, Figure 8 shows PCM’s ~60 TOPS/W is only about 1.7× RRAM[30]’s ~35 TOPS/W, not over 2×.", "claim": "Table 3 shows PCM supports 4-bit precision for VGG8 (89%) and ResNet18 (81%), then from Figure 8 we see PCM’s energy efficiency (~60 TOPS/W) exceeds RRAM[30]’s (~35 TOPS/W) by over 2×.", "label": false }, { "paperid": "2409.01696v1", "paper_path": "./SciVer/papers/2409.01696v1.json", "claim_type": "sequential", "item1": "2", "item2": "11", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.01696v1_figure_2.png", "item2_path": "./SciVer/images/2409.01696v1_figure_11.png", "section": [ "3.2" ], "request_id": 613, "origin_statement": "In ResNet-101 under PPA, removing last-stage skip connections (RoLSS) yields a near-identical likelihood distribution to the full model (Fig. 2), yet attack accuracy falls from 83.00% to 58.68% (Table), confirming that skip connections bolster MI success.", "perturbed_statement": "In ResNet-101 under PPA, removing last-stage skip connections (RoLSS) shifts the likelihood distribution lower, peaking at ~0.996 instead of ~1.000 (Fig. 2), and reduces attack accuracy from 83.00% to 61.72% (Table), confirming that skip connections strengthen MI success.", "perturbed_explanation": "Figure 2 actually shows both Full and RoLSS likelihoods peaking at ~1.000 with nearly identical curves, not a shift to ~0.996. Also, Table reports RoLSS attack accuracy for ResNet-101 as 58.68%, not 61.72% (61.72% is from DenseNet-121 Skip-1 removal), so both premises conflict with the context.", "claim": "In ResNet-101 under PPA, removing last-stage skip connections (RoLSS) shifts the likelihood distribution lower, peaking at ~0.996 instead of ~1.000 (Fig. 2), and reduces attack accuracy from 83.00% to 61.72% (Table), confirming that skip connections strengthen MI success.", "label": false }, { "paperid": "2409.15683v1", "paper_path": "./SciVer/papers/2409.15683v1.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.15683v1_figure_1.png", "item2_path": "./SciVer/images/2409.15683v1-Table1-1.png", "section": [ "2.1.5" ], "request_id": 614, "origin_statement": "Figure 1 shows tomography uses an ancillary qubit for output extraction; Table 1 reports extracting output complexity as O(n/δ²), exceeding O(n) of loading and pyramidal steps, so tomography dominates overall layer complexity scaling as O(n/δ²).", "perturbed_statement": "Figure 1 shows tomography uses two ancillary qubits for output extraction; Table 1 reports extracting output complexity as O(n/δ³), exceeding O(n²) of loading and pyramidal steps, so tomography dominates overall layer complexity scaling as O(n/δ³).", "perturbed_explanation": "This statement is wrong because Figure 1 explicitly depicts a single ancillary qubit (not two) for tomography, and Table 1 gives extracting output complexity as O(n/δ²) (not O(n/δ³)), while both data loading and the pyramidal circuit are O(n) (not O(n²)).", "claim": "Figure 1 shows tomography uses two ancillary qubits for output extraction; Table 1 reports extracting output complexity as O(n/δ³), exceeding O(n²) of loading and pyramidal steps, so tomography dominates overall layer complexity scaling as O(n/δ³).", "label": false }, { "paperid": "2411.01759v1", "paper_path": "./SciVer/papers/2411.01759v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01759v1_figure_4.png",
"item2_path": "./SciVer/images/2411.01759v1_figure_5.png", "section": [ "4.3" ], "request_id": 615, "origin_statement": "On the Samsung A53, the pruned Conv model cuts inference time from 10 ms to 6 ms and shrinks model size from 0.32 MB to 0.05 MB, showing that the drop in RAM usage directly corresponds to faster inference.", "perturbed_statement": "On the Samsung A53, the pruned Conv model cuts inference time from 10 ms to 6 ms and shrinks model size from 0.05 MB to 0.32 MB, showing that the drop in RAM usage directly corresponds to faster inference.", "perturbed_explanation": "The perturbed claim wrongly swaps the model sizes: in Figure 5, the original Conv model is 0.32 MB and the Conv-Prune model is 0.05 MB. Saying it shrinks from 0.05 MB to 0.32 MB contradicts the actual sizes in the context.", "claim": "On the Samsung A53, the pruned Conv model cuts inference time from 10 ms to 6 ms and shrinks model size from 0.05 MB to 0.32 MB, showing that the drop in RAM usage directly corresponds to faster inference.", "label": false }, { "paperid": "2409.17266v1", "paper_path": "./SciVer/papers/2409.17266v1.json", "claim_type": "sequential", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.17266v1_figure_4.png", "item2_path": "./SciVer/images/2409.17266v1-Table2-1.png", "section": [ "4.3" ], "request_id": 616, "origin_statement": "Figure 4 shows decile 1 yields the highest cumulative excess return; Table 2 reports GPT-4’s model has the lowest avg |α| (0.64) among benchmarks; thus GPT-4 effectively isolates the top-returning decile.", "perturbed_statement": "Figure 4 shows decile 10 has the highest cumulative excess return; Table 2 reports the NN model has the lowest avg |α| (0.83); thus NN effectively isolates the top-returning decile.", "perturbed_explanation": "Both premises are incorrect. In Figure 4, the blue series (decile 1), not the cyan series (decile 10), reaches the highest cumulative excess return. In Table 2, the NN model’s avg |α| is 0.83—the highest among benchmarks—whereas GPT-4’s method attains the lowest avg |α| of 0.64.", "claim": "Figure 4 shows decile 10 has the highest cumulative excess return; Table 2 reports the NN model has the lowest avg |α| (0.83); thus NN effectively isolates the top-returning decile.", "label": false }, { "paperid": "2409.05790v1", "paper_path": "./SciVer/papers/2409.05790v1.json", "claim_type": "sequential", "item1": "1", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.05790v1_figure_1.png", "item2_path": "./SciVer/images/2409.05790v1-Table1-1.png", "section": [ "5.1" ], "request_id": 621, "origin_statement": "Table 1 shows the CVAE’s mean absolute relative error (1.4907%) is 0.3566% lower than the DNN’s (1.8473%). Figure 1’s histogram indicates CHF values cluster below 8000 W/cm2. Therefore, CVAE provides more accurate predictions across the densely populated CHF range.", "perturbed_statement": "Table 1 shows the DNN’s mean absolute relative error (1.8473%) is 0.3566% lower than the CVAE’s (1.4907%). Figure 1’s histogram indicates CHF values cluster above 12000 W/cm2. Therefore, DNN provides more accurate predictions across the densely populated CHF range.", "perturbed_explanation": "This statement is wrong for two reasons: first, Table 1 actually reports the CVAE’s mean error (1.4907%) as lower than the DNN’s (1.8473%), not the other way around. 
Second, Figure 1 shows CHF values mostly below 8000 W/cm2, not clustering above 12000 W/cm2.", "claim": "Table 1 shows the DNN’s mean absolute relative error (1.8473%) is 0.3566% lower than the CVAE’s (1.4907%). Figure 1’s histogram indicates CHF values cluster above 12000 W/cm2. Therefore, DNN provides more accurate predictions across the densely populated CHF range.", "label": false }, { "paperid": "2411.08284v1", "paper_path": "./SciVer/papers/2411.08284v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.08284v1_figure_2.png", "item2_path": "./SciVer/images/2411.08284v1-Table3-1.png", "section": [ "4.2" ], "request_id": 622, "origin_statement": "The reconstructed waveform by DTAM at κ=0.5 aligns closely with the original (Fig. 2), yet Table 3 shows SP slightly outperforms DTAM in SNR (26.35 dB vs 26.33 dB) at κ=0.5.", "perturbed_statement": "The reconstructed waveform by DTAM at κ=0.5 aligns closely with the original (Fig. 2), yet Table 3 shows DTAM slightly outperforms SP in SNR (26.33 dB vs 26.35 dB) at κ=0.5.", "perturbed_explanation": "This claim is wrong because Table 3 actually reports SP’s SNR as 26.35 dB and DTAM’s as 26.33 dB at κ=0.5, so DTAM does not outperform SP.", "claim": "The reconstructed waveform by DTAM at κ=0.5 aligns closely with the original (Fig. 2), yet Table 3 shows DTAM slightly outperforms SP in SNR (26.33 dB vs 26.35 dB) at κ=0.5.", "label": false }, { "paperid": "2409.11839v1", "paper_path": "./SciVer/papers/2409.11839v1.json", "claim_type": "sequential", "item1": "2", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11839v1_figure_2.png", "item2_path": "./SciVer/images/2409.11839v1-Table2-1.png", "section": [ "5.1" ], "request_id": 623, "origin_statement": "Figure 2 shows treated and control stations’ sulphur dioxide levels track closely over time, indicating parallel pre-SCA trends; Table 2 (column 3) reports an insignificant −3.48 mcg/m³ post-SCA effect. Therefore, SCAs had no significant impact on local sulphur dioxide concentrations.", "perturbed_statement": "Figure 2 shows treated stations’ sulphur dioxide levels rose faster than controls before SCAs; Table 2 (column 3) reports a significant −3.48 mcg/m³ drop (p<0.05) post-SCA. Thus, SCAs significantly reduced sulphur dioxide levels.", "perturbed_explanation": "This is incorrect because Figure 2 (and the dynamic event study described) actually shows parallel pre-SCA trends in sulphur dioxide for treated and control stations, not a faster rise for treated. Moreover, Table 2’s −3.48 mcg/m³ coefficient is statistically insignificant (p>0.1), so it does not indicate a significant reduction.", "claim": "Figure 2 shows treated stations’ sulphur dioxide levels rose faster than controls before SCAs; Table 2 (column 3) reports a significant −3.48 mcg/m³ drop (p<0.05) post-SCA. Thus, SCAs significantly reduced sulphur dioxide levels.", "label": false }, { "paperid": "2410.15698v1", "paper_path": "./SciVer/papers/2410.15698v1.json", "claim_type": "sequential", "item1": "3", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.15698v1_figure_3.png", "item2_path": "./SciVer/images/2410.15698v1-Table1-1.png", "section": [ "5.4" ], "request_id": 627, "origin_statement": "Table 1 shows VQ-CD’s mean return (558.22) exceeds CoD’s (478.19). Then, in Figure 3, VQ-CD’s curve reaches the Multitask upper bound at 0.8 success by step 50 × 10⁵. 
Therefore, VQ-CD outperforms diffusion baselines and attains multitask-level performance on CW10 tasks.", "perturbed_statement": "Table 1 shows VQ-CD’s mean return (458.22) is below CoD’s (478.19). Then, in Figure 3, VQ-CD’s curve peaks at only 0.6 success by step 50 × 10⁵. Therefore, VQ-CD fails to surpass CoD and never reaches multitask-level performance on CW10 tasks.", "perturbed_explanation": "The perturbed claim is wrong because Table 1 actually reports VQ-CD’s mean return as 558.22 (not 458.22), which is higher than CoD’s 478.19. Moreover, Figure 3 shows VQ-CD’s success rate reaches about 0.8 at step 50 × 10⁵ (not 0.6), matching the Multitask bound.", "claim": "Table 1 shows VQ-CD’s mean return (458.22) is below CoD’s (478.19). Then, in Figure 3, VQ-CD’s curve peaks at only 0.6 success by step 50 × 10⁵. Therefore, VQ-CD fails to surpass CoD and never reaches multitask-level performance on CW10 tasks.", "label": false }, { "paperid": "2410.22364v2", "paper_path": "./SciVer/papers/2410.22364v2.json", "claim_type": "sequential", "item1": "4(c)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.22364v2_figure_4(c).png", "item2_path": "./SciVer/images/2410.22364v2-Table3-1.png", "section": [ "6.2" ], "request_id": 629, "origin_statement": "At a 100 M-unit budget, the CA-MSE heatmap shows a CA-MSE of 0.32 for asymmetric token dropout at p_q=0.5; Table 3 then reports an NN accuracy of 69.6% for that same setting.", "perturbed_statement": "At a 100 M-unit budget, the CA-MSE heatmap shows a CA-MSE of 0.10 for asymmetric token dropout at p_q=0.5; Table 3 then reports an NN accuracy of 69.6% for that same setting.", "perturbed_explanation": "The CA-MSE heatmap at 100 M and p_q=0.5 (third row under the 100 column) is 0.32, not 0.10. The value 0.10 does not appear at dropout=0.5 in that column, so the perturbation misstates the actual CA-MSE.", "claim": "At a 100 M-unit budget, the CA-MSE heatmap shows a CA-MSE of 0.10 for asymmetric token dropout at p_q=0.5; Table 3 then reports an NN accuracy of 69.6% for that same setting.", "label": false }, { "paperid": "2411.07795v2", "paper_path": "./SciVer/papers/2411.07795v2.json", "claim_type": "sequential", "item1": "5", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.07795v2_figure_5.png", "item2_path": "./SciVer/images/2411.07795v2-Table5-1.png", "section": [ "3.4" ], "request_id": 631, "origin_statement": "Because the residual in Figure 5 is imperceptible, Table 5’s PSNR of 47.8 and SSIM of 0.997 demonstrate negligible distortion and near-perfect similarity, confirming effective 256-bit watermark embedding with minimal quality loss.", "perturbed_statement": "Because the 20× residual in Figure 5 highlights significant watermark artifacts, Table 5’s PSNR of 47.8 and SSIM of 0.997 indicate only moderate fidelity, revealing noticeable degradation from 256-bit watermark embedding.", "perturbed_explanation": "The perturbed claim is false because the magnified residual in Figure 5 is almost blank, showing imperceptible artifacts rather than significant ones. 
Furthermore, a PSNR of 47.8 and SSIM of 0.997 actually correspond to very high fidelity and minimal distortion, not moderate degradation.", "claim": "Because the 20× residual in Figure 5 highlights significant watermark artifacts, Table 5’s PSNR of 47.8 and SSIM of 0.997 indicate only moderate fidelity, revealing noticeable degradation from 256-bit watermark embedding.", "label": false }, { "paperid": "2410.23317v1", "paper_path": "./SciVer/papers/2410.23317v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23317v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.23317v1_figure_1(b).png", "section": [ "4.2", "1", "3.2" ], "request_id": 637, "origin_statement": "From Figure 1(a), LLM attention scores are uniformly distributed with no modality boundary; thus in Figure 1(b), VLM attention reveals a distinct horizontal split between visual and language tokens along the query dimension.", "perturbed_statement": "Since Figure 1(a) shows a clear horizontal split between token modalities, Figure 1(b) likewise displays uniform attention across all tokens, implying that VLMs do not require modality-aware cache compression.", "perturbed_explanation": "The perturbation is wrong because Figure 1(a) actually shows LLM attention scores scattered uniformly with no modality boundary, and Figure 1(b) clearly exhibits a horizontal boundary separating visual and language tokens (as marked by the dashed line around the post-vision region).", "claim": "Since Figure 1(a) shows a clear horizontal split between token modalities, Figure 1(b) likewise displays uniform attention across all tokens, implying that VLMs do not require modality-aware cache compression.", "label": false }, { "paperid": "2409.15053v1", "paper_path": "./SciVer/papers/2409.15053v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.15053v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.15053v1_figure_3(b).png", "section": [ "4" ], "request_id": 640, "origin_statement": "The Si41Ge41H72 matrix spans ~18×10^3 rows (x-axis scale) and has 15,011,265 nonzeros, yielding ~833 nonzeros per row, whereas Si87H76 spans ~230×10^3 rows with 10,661,631 nonzeros, yielding ~46 nonzeros per row, making Si41Ge41H72 ~18× denser.", "perturbed_statement": "The Si41Ge41H72 matrix spans ~23×10^4 rows and has 15,011,265 nonzeros, yielding ~652 nonzeros per row, whereas Si87H76 spans ~18×10^3 rows with 10,661,631 nonzeros, yielding ~592 nonzeros per row, making Si87H76 ~9% denser.", "perturbed_explanation": "The perturbation misreads the x-axis scales: the left plot’s axis is labeled ×10^4 (∼18,000 rows), not ×10^5 (∼230,000), and the right plot is ×10^5 (∼230,000 rows), not ×10^4. 
This reverses the actual row counts and invalidates the density comparison.", "claim": "The Si41Ge41H72 matrix spans ~23×10^4 rows and has 15,011,265 nonzeros, yielding ~652 nonzeros per row, whereas Si87H76 spans ~18×10^3 rows with 10,661,631 nonzeros, yielding ~592 nonzeros per row, making Si87H76 ~9% denser.", "label": false }, { "paperid": "2410.20320v1", "paper_path": "./SciVer/papers/2410.20320v1.json", "claim_type": "sequential", "item1": "3(b)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.20320v1_figure_3(b).png", "item2_path": "./SciVer/images/2410.20320v1-Table1-1.png", "section": [ "5.2" ], "request_id": 644, "origin_statement": "In 5-way-5-shot at NOTA rate 0.5, Table 1 shows GPAM’s known-class accuracy is 95.75%. From Figure 3, its total accuracy at NOTA rate 0.5 is 95.40%. Thus GPAM’s known-class accuracy slightly exceeds its overall accuracy under a 0.5 NOTA rate.", "perturbed_statement": "In 5-way-5-shot at NOTA rate 0.5, Table 1 shows GPAM’s known-class accuracy is 94.75%. From Figure 3, its total accuracy at NOTA rate 0.5 is 95.40%. Thus GPAM’s known-class accuracy is lower than its overall accuracy under a 0.5 NOTA rate.", "perturbed_explanation": "The perturbation incorrectly changes the known-class accuracy from 95.75% to 94.75%. Table 1 explicitly reports GPAM’s known-class accuracy at 5-way-5-shot and NOTA 0.5 as 95.75%. Because the premise is wrong, the conclusion that known-class accuracy is lower than the total (95.40%) is invalid.", "claim": "In 5-way-5-shot at NOTA rate 0.5, Table 1 shows GPAM’s known-class accuracy is 94.75%. From Figure 3, its total accuracy at NOTA rate 0.5 is 95.40%. Thus GPAM’s known-class accuracy is lower than its overall accuracy under a 0.5 NOTA rate.", "label": false }, { "paperid": "2409.03769v1", "paper_path": "./SciVer/papers/2409.03769v1.json", "claim_type": "sequential", "item1": "3(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.03769v1_figure_3(a).png", "item2_path": "./SciVer/images/2409.03769v1-Table2-1.png", "section": [ "6.1" ], "request_id": 650, "origin_statement": "Figure 3 shows that after fine-tuning, embeddings form tighter, more distinct clusters. Consequently, Table 2 reports DistMult’s MRR rising from 0.254 to 0.315, a 24% relative improvement.", "perturbed_statement": "Figure 3 shows that after fine-tuning, embeddings form tighter, more distinct clusters. Consequently, Table 2 reports DistMult’s MRR rising from 0.354 to 0.315, a 12% relative boost.", "perturbed_explanation": "Table 2 actually lists the baseline DistMult MRR as 0.254 (not 0.354) and the fine-tuned MRR as 0.315. Thus the increase is +0.061 (≈24%), not a drop from 0.354 to 0.315 or a 12% boost.", "claim": "Figure 3 shows that after fine-tuning, embeddings form tighter, more distinct clusters. 
Consequently, Table 2 reports DistMult’s MRR rising from 0.354 to 0.315, a 12% relative boost.", "label": false }, { "paperid": "2410.21815v1", "paper_path": "./SciVer/papers/2410.21815v1.json", "claim_type": "sequential", "item1": "7", "item2": "7", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.21815v1_figure_7.png", "item2_path": "./SciVer/images/2410.21815v1-Table7-1.png", "section": [ "5.3" ], "request_id": 655, "origin_statement": "From Table 7, AutoGnothi achieves the lowest deletion score (0.4888) on Oxford-IIIT Pet; thus, in Figure 7 its pet heatmap focuses sharply on the cat’s silhouette, unlike RISE’s broader highlights.", "perturbed_statement": "From Table 7, AutoGnothi achieves the lowest insertion score (0.4888) on Oxford-IIIT Pet; thus, in Figure 7 its pet heatmap focuses sharply on the cat’s silhouette, unlike RISE’s broader highlights.", "perturbed_explanation": "The statement wrongly labels 0.4888 as the insertion score. In Table 7, 0.4888 is actually the deletion score for AutoGnothi; its insertion score is 0.9384±0.1088, not 0.4888.", "claim": "From Table 7, AutoGnothi achieves the lowest insertion score (0.4888) on Oxford-IIIT Pet; thus, in Figure 7 its pet heatmap focuses sharply on the cat’s silhouette, unlike RISE’s broader highlights.", "label": false }, { "paperid": "2411.18432v1", "paper_path": "./SciVer/papers/2411.18432v1.json", "claim_type": "sequential", "item1": "10", "item2": "11", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.18432v1_figure_10.png", "item2_path": "./SciVer/images/2411.18432v1_figure_11.png", "section": [ "5.4.1" ], "request_id": 658, "origin_statement": "At 12:00, Grid 62 exhibits the highest divergence in the Gaussian mixture scenario (Figure 10c); accordingly, Figure 11’s 12:00 matching map shows Grid 62 with a lighter shade than the target map, indicating SPO-A underestimates demand there.", "perturbed_statement": "At 8:00, Grid 25 exhibits the highest divergence in the Gaussian distribution (Figure 10b); accordingly, Figure 11’s 8:00 matching map shows Grid 25 with a darker shade than the target map, indicating SPO-A overestimates demand there.", "perturbed_explanation": "This is incorrect because Figure 10b (Gaussian) shows the largest divergence at Grid 62 around 12:00, not Grid 25 at 8:00. 
Moreover, in Figure 11’s 8:00 row the matching hexagon for Grid 25 is lighter than the target, so SPO-A underestimates rather than overestimates there.", "claim": "At 8:00, Grid 25 exhibits the highest divergence in the Gaussian distribution (Figure 10b); accordingly, Figure 11’s 8:00 matching map shows Grid 25 with a darker shade than the target map, indicating SPO-A overestimates demand there.", "label": false }, { "paperid": "2411.03549v1", "paper_path": "./SciVer/papers/2411.03549v1.json", "claim_type": "sequential", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.03549v1_figure_7.png", "item2_path": "./SciVer/images/2411.03549v1-Table3-1.png", "section": [ "5", "4" ], "request_id": 660, "origin_statement": "Because CWISEP J010650.61+225159.1 has Teff ≈ 800 K and UPM J1040–3551b has Teff ≈ 700 K (Table 3), and Figure 7 shows F444W−F1000W reddens by ~0.1 mag per 100 K, CWISEP J010650.61+225159.1 should exhibit F444W−F1000W ~0.1 mag redder than UPM J1040–3551b.", "perturbed_statement": "Because CWISEP J010650.61+225159.1 has Teff ≈ 800 K and UPM J1040–3551b has Teff ≈ 500 K (Table 3), and Figure 7 shows F444W−F1000W reddens by ~0.1 mag per 100 K, CWISEP J010650.61+225159.1 should exhibit F444W−F1000W ~0.3 mag redder than UPM J1040–3551b.", "perturbed_explanation": "The perturbed claim misquotes Table 3: UPM J1040–3551b actually has Teff ≈ 700 K, not 500 K. With the true 100 K difference, the color offset is ~0.1 mag, not 0.3 mag, so the 0.3 mag prediction is incorrect.", "claim": "Because CWISEP J010650.61+225159.1 has Teff ≈ 800 K and UPM J1040–3551b has Teff ≈ 500 K (Table 3), and Figure 7 shows F444W−F1000W reddens by ~0.1 mag per 100 K, CWISEP J010650.61+225159.1 should exhibit F444W−F1000W ~0.3 mag redder than UPM J1040–3551b.", "label": false }, { "paperid": "2409.10775v1", "paper_path": "./SciVer/papers/2409.10775v1.json", "claim_type": "sequential", "item1": "16(a)", "item2": "16(l)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.10775v1_figure_16(a).png", "item2_path": "./SciVer/images/2409.10775v1_figure_16(l).png", "section": [ "6.4" ], "request_id": 661, "origin_statement": "The first image shows 4×4-pixel occluders covering 50% of the person. In the second image’s stage-7 self-attention map (column (c)), non-occluded regions appear darker (lower attention), indicating that small occluders contaminate intermediate attention more than larger ones.", "perturbed_statement": "The first image shows 16×16-pixel occluders covering 50% of the person. In the second image’s stage-7 self-attention map (column (c)), non-occluded regions appear darker (lower attention), indicating that large occluders contaminate intermediate attention more than smaller ones.", "perturbed_explanation": "This is incorrect because the occluders in the first image are actually 4×4 pixels in size (not 16×16), as stated in the figure caption and visible in the grid pattern. Consequently, the claim about large occluders causing the observed attention drop contradicts the actual 4×4-pixel occlusion shown.", "claim": "The first image shows 16×16-pixel occluders covering 50% of the person. 
In the second image’s stage-7 self-attention map (column (c)), non-occluded regions appear darker (lower attention), indicating that large occluders contaminate intermediate attention more than smaller ones.", "label": false }, { "paperid": "2411.15091v1", "paper_path": "./SciVer/papers/2411.15091v1.json", "claim_type": "sequential", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15091v1_figure_5.png", "item2_path": "./SciVer/images/2411.15091v1-Table2-1.png", "section": [ "4.4" ], "request_id": 666, "origin_statement": "20.7% of artist websites use Squarespace, and with only 17% of those enabling the “AI Crawlers” toggle shown in Figure 5, about 3.5% of all artist sites actually disallow AI crawlers via robots.txt.", "perturbed_statement": "30.7% of artist websites use Squarespace, and with 27% of those enabling the “AI Crawlers” toggle shown in Figure 5, about 8.3% of all artist sites actually disallow AI crawlers via robots.txt.", "perturbed_explanation": "This is incorrect because Table 2 reports that 20.7% (not 30.7%) of sites use Squarespace, and only 17% (not 27%) of those actually enable the AI-crawlers toggle. Thus the 8.3% figure is unsupported by the data.", "claim": "30.7% of artist websites use Squarespace, and with 27% of those enabling the “AI Crawlers” toggle shown in Figure 5, about 8.3% of all artist sites actually disallow AI crawlers via robots.txt.", "label": false }, { "paperid": "2409.11577v1", "paper_path": "./SciVer/papers/2409.11577v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.11577v1-Table3-1.png", "item2_path": "./SciVer/images/2409.11577v1-Table4-1.png", "section": [ "2.3" ], "request_id": 668, "origin_statement": "Table 3 shows the Sampling method with LOOPH achieves the smallest median CI size (0.033) on the outlier-free dataset. In Table 4, only the Hybrid method with LOOPH reduces MDV below 0.1 (0.094) under outliers. Hence, Sampling LOOPH best quantifies uncertainty clean, Hybrid LOOPH best controls variability with outliers.", "perturbed_statement": "Table 3 shows the Regular method with LOOL yields the lowest RMSE (0.033) on the clean dataset. In Table 4, the Down-Sampling method with LOOPH achieves the smallest MDV (0.094) under outliers. Thus, Regular LOOL and Down-Sampling LOOPH are optimal for clean and outlier scenarios, respectively.", "perturbed_explanation": "This is wrong because Table 3 reports the Regular LOOL RMSE as 0.034 (not 0.033) and the true lowest RMSE is 0.033 from Down-Sampling LOOL. Additionally, in Table 4, Down-Sampling with LOOPH yields an MDV of 0.156, while the MDV of 0.094 actually corresponds to Hybrid LOOPH.", "claim": "Table 3 shows the Regular method with LOOL yields the lowest RMSE (0.033) on the clean dataset. In Table 4, the Down-Sampling method with LOOPH achieves the smallest MDV (0.094) under outliers. 
Thus, Regular LOOL and Down-Sampling LOOPH are optimal for clean and outlier scenarios, respectively.", "label": false }, { "paperid": "2409.19872v3", "paper_path": "./SciVer/papers/2409.19872v3.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.19872v3_figure_5.png", "item2_path": "./SciVer/images/2409.19872v3-Table4-1.png", "section": [ "4.5" ], "request_id": 669, "origin_statement": "Figure 5a illustrates that in the truthfulness space, positive and negative hidden states are clearly separated. This disentanglement enables Intrin+Latent-IKE to achieve a high T-Loc of 95.2 in 20-step VQA editing (second table), indicating superior locality retention.", "perturbed_statement": "Figure 5a shows that in the semantic space, positive and negative hidden states are completely separated. Therefore, Intrin+Latent-IKE leverages semantic disentanglement to achieve a T-Loc of 95.2 in 20-step VQA editing.", "perturbed_explanation": "In Figure 5a, the semantic space plot actually displays considerable overlap between blue (positive) and red (negative) hidden states, so there is no complete separation there. The distinct clustering appears only in the truthfulness space. Hence the claim about semantic disentanglement driving T-Loc is false.", "claim": "Figure 5a shows that in the semantic space, positive and negative hidden states are completely separated. Therefore, Intrin+Latent-IKE leverages semantic disentanglement to achieve a T-Loc of 95.2 in 20-step VQA editing.", "label": false }, { "paperid": "2409.03247v1", "paper_path": "./SciVer/papers/2409.03247v1.json", "claim_type": "sequential", "item1": "6(c)", "item2": "6(d)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.03247v1_figure_6(c).png", "item2_path": "./SciVer/images/2409.03247v1_figure_6(d).png", "section": [ "4.1.1" ], "request_id": 671, "origin_statement": "By minute 5, the Prompt System’s recall rises above 0.60 (from the recall graph), and correspondingly the F1 score in the F1 graph exceeds 0.55 at minute 5, showing that early gains in recall directly translate to mid-study F1 improvements.", "perturbed_statement": "By minute 5, the Prompt System’s recall rises above 0.70 (from the recall graph), and correspondingly the F1 score in the F1 graph exceeds 0.70 at minute 5, showing that early gains in recall directly translate to mid-study F1 improvements.", "perturbed_explanation": "The perturbation is wrong because at minute 5 the recall actually hovers around 0.60—not above 0.70—and the F1 score at minute 5 is about 0.55, not above 0.70, contradicting both the recall and F1 plots.", "claim": "By minute 5, the Prompt System’s recall rises above 0.70 (from the recall graph), and correspondingly the F1 score in the F1 graph exceeds 0.70 at minute 5, showing that early gains in recall directly translate to mid-study F1 improvements.", "label": false }, { "paperid": "2409.11927v1", "paper_path": "./SciVer/papers/2409.11927v1.json", "claim_type": "sequential", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.11927v1_figure_5.png", "item2_path": "./SciVer/images/2409.11927v1-Table3-1.png", "section": [ "3" ], "request_id": 672, "origin_statement": "M1B Data Set 1 yields a spectral hardening factor f_h=1.41 (Table 3). Fig. 
5 shows f_h and spin a_* positively correlate, hence the MCMC distribution predicts a spin of a_*≈0.44.", "perturbed_statement": "M1B Data Set 1 yields a spectral hardening factor f_h=1.29 and since Fig. 5 shows f_h positively correlates with spin, this implies a spin of a_*≈0.98.", "perturbed_explanation": "The perturbation misstates the intermediate value: Table 3 actually gives f_h=1.41 for M1B Data Set 1, not 1.29. With the correct f_h, the MCMC correlation yields a_*≈0.44, so a_*≈0.98 is inconsistent with the table and Fig. 5.", "claim": "M1B Data Set 1 yields a spectral hardening factor f_h=1.29 and since Fig. 5 shows f_h positively correlates with spin, this implies a spin of a_*≈0.98.", "label": false }, { "paperid": "2410.02010v1", "paper_path": "./SciVer/papers/2410.02010v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.02010v1-Table3-1.png", "item2_path": "./SciVer/images/2410.02010v1-Table4-1.png", "section": [ "4.2" ], "request_id": 673, "origin_statement": "On BloodMNIST, re-balanced sampling (RS) achieves a tail accuracy of 91.17% (Table 3); Table 4 shows RS applies class weighting 1/n_c without modulating factor, while Focal Loss uses modulating factor (1–p_t)^γ. Thus, RS’s simple inverse-frequency weighting yields higher tail accuracy than Focal Loss’s modulating factor approach.", "perturbed_statement": "On BloodMNIST, re-balanced sampling (RS) achieves a tail accuracy of 79.20% (Table 3); Table 4 shows RS applies class weighting 1/n_c without modulating factor, while Focal Loss uses modulating factor (1–p_t)^γ. Thus, RS’s simple inverse-frequency weighting underperforms Focal Loss (80.75% tail accuracy).", "perturbed_explanation": "The perturbation misquotes the RS tail accuracy. In Table 3, RS actually attains 91.17% tail accuracy on BloodMNIST, not 79.20%, so RS still outperforms Focal Loss’s 80.75% tail accuracy.", "claim": "On BloodMNIST, re-balanced sampling (RS) achieves a tail accuracy of 79.20% (Table 3); Table 4 shows RS applies class weighting 1/n_c without modulating factor, while Focal Loss uses modulating factor (1–p_t)^γ. Thus, RS’s simple inverse-frequency weighting underperforms Focal Loss (80.75% tail accuracy).", "label": false }, { "paperid": "2409.06998v2", "paper_path": "./SciVer/papers/2409.06998v2.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.06998v2_figure_3.png", "item2_path": "./SciVer/images/2409.06998v2-Table3-1.png", "section": [ "6.3" ], "request_id": 674, "origin_statement": "GAMLP-AS achieves over 53% test accuracy on amazon-ratings at 8 layers (Figure 3). Table 3 shows GCN-AS requires only 0.16 s training time on amazon-ratings. Therefore, AS enables deep GNNs to maintain high accuracy with under 0.2 s extra training overhead.", "perturbed_statement": "GAMLP-AS achieves over 53% test accuracy on amazon-ratings at 8 layers. Table 3 shows GCN-AS requires 1.59 s training time on amazon-ratings. Therefore, AS enables deep GNNs to maintain high accuracy but requires over 1.5 s extra training overhead.", "perturbed_explanation": "Table 3 reports GCN-AS’s amazon-ratings training time as 0.16 s (×0.16), not 1.59 s. The 1.59 s figure corresponds to the GCN baseline (L=6), so the claimed 1.59 s overhead for AS is incorrect.", "claim": "GAMLP-AS achieves over 53% test accuracy on amazon-ratings at 8 layers. 
Table 3 shows GCN-AS requires 1.59 s training time on amazon-ratings. Therefore, AS enables deep GNNs to maintain high accuracy but requires over 1.5 s extra training overhead.", "label": false }, { "paperid": "2411.16471v1", "paper_path": "./SciVer/papers/2411.16471v1.json", "claim_type": "sequential", "item1": "6(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16471v1_figure_6(a).png", "item2_path": "./SciVer/images/2411.16471v1-Table2-1.png", "section": [ "5.4" ], "request_id": 680, "origin_statement": "Table 2 shows a weighted average zero‐baseline amplitude of 19.23 fs at 470 nm and 9.45 fs at 375 nm, indicating a 9.78 fs decrease; in Figure 6 the black points at zero baseline indeed drop from ≈19.5 fs (470 nm) to ≈9.7 fs (375 nm), confirming this loss.", "perturbed_statement": "Table 2 shows a weighted average zero‐baseline amplitude of 19.23 fs at 470 nm and 8.45 fs at 375 nm, indicating a 10.78 fs decrease; in Figure 6 the black points at zero baseline drop from ≈19.5 fs (470 nm) to ≈9.7 fs (375 nm), confirming this loss.", "perturbed_explanation": "The weighted average amplitude at 375 nm in Table 2 is actually 9.45 fs, not 8.45 fs. Using 8.45 fs misstates the table value and thus makes the claimed 10.78 fs decrease incorrect.", "claim": "Table 2 shows a weighted average zero‐baseline amplitude of 19.23 fs at 470 nm and 8.45 fs at 375 nm, indicating a 10.78 fs decrease; in Figure 6 the black points at zero baseline drop from ≈19.5 fs (470 nm) to ≈9.7 fs (375 nm), confirming this loss.", "label": false }, { "paperid": "2410.20609v1", "paper_path": "./SciVer/papers/2410.20609v1.json", "claim_type": "sequential", "item1": "3", "item2": "4", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2410.20609v1-Table3-1.png", "item2_path": "./SciVer/images/2410.20609v1-Table4-1.png", "section": [ "3.1.2" ], "request_id": 682, "origin_statement": "The [OII] luminosity of 3.28×10^42 erg s⁻¹ implies a star formation rate <46 M⊙ yr⁻¹ (Kennicutt 1998), but a 500 MHz luminosity of 1.1×10^28 W Hz⁻¹ places GLEAM J09170012 clearly in the powerful HzRG regime, indicating AGN dominance over star formation.", "perturbed_statement": "The [OII] luminosity of 3.28×10^43 erg s⁻¹ implies a star formation rate <460 M⊙ yr⁻¹, and with a 500 MHz luminosity of 1.1×10^27 W Hz⁻¹, GLEAM J09170012 is a weak, star-formation-dominated galaxy.", "perturbed_explanation": "The perturbed statement misreports two key values from the tables. Table 3 gives L[OII] = 3.28×10^42 erg s⁻¹, not 3.28×10^43, so the SFR upper limit is <46 M⊙ yr⁻¹, not <460. Table 4 lists L500 MHz = 1.1×10^28 W Hz⁻¹, not 1.1×10^27, which places it among powerful, AGN-dominated radio galaxies rather than weak, star-formation systems.", "claim": "The [OII] luminosity of 3.28×10^43 erg s⁻¹ implies a star formation rate <460 M⊙ yr⁻¹, and with a 500 MHz luminosity of 1.1×10^27 W Hz⁻¹, GLEAM J09170012 is a weak, star-formation-dominated galaxy.", "label": false }, { "paperid": "2410.17632v2", "paper_path": "./SciVer/papers/2410.17632v2.json", "claim_type": "sequential", "item1": "6(a)", "item2": "6(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.17632v2_figure_6(a).png", "item2_path": "./SciVer/images/2410.17632v2_figure_6(b).png", "section": [ "5.2.2" ], "request_id": 683, "origin_statement": "At prompt level 5, GPT-4-Turbo’s self-rated Extraversion distribution peaks around 4.6. 
When rated by Llama3-8B-Instruct at prompt level 5, GPT-4-Turbo’s Extraversion peaks near 4.0. Thus, Llama3-8B-Instruct perceives GPT-4-Turbo as less extraverted than GPT-4-Turbo’s own rating.", "perturbed_statement": "At prompt level 5, GPT-4-Turbo’s self-rated Extraversion distribution peaks around 3.8. When rated by Llama3-8B-Instruct at prompt level 5, GPT-4-Turbo’s Extraversion peaks near 4.0. Thus, Llama3-8B-Instruct perceives GPT-4-Turbo as more extraverted than GPT-4-Turbo’s own rating.", "perturbed_explanation": "The perturbation incorrectly lowers GPT-4-Turbo’s self-rated Extraversion peak from about 4.6 (as shown in the first chart at prompt 5) to 3.8. In reality, the chart shows GPT-4-Turbo’s own Extraversion peak above Llama3’s rating, so Llama3 does not perceive it as more extraverted.", "claim": "At prompt level 5, GPT-4-Turbo’s self-rated Extraversion distribution peaks around 3.8. When rated by Llama3-8B-Instruct at prompt level 5, GPT-4-Turbo’s Extraversion peaks near 4.0. Thus, Llama3-8B-Instruct perceives GPT-4-Turbo as more extraverted than GPT-4-Turbo’s own rating.", "label": false }, { "paperid": "2410.23409v1", "paper_path": "./SciVer/papers/2410.23409v1.json", "claim_type": "sequential", "item1": "3(k)", "item2": "4(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.23409v1_figure_3(k).png", "item2_path": "./SciVer/images/2410.23409v1_figure_4(a).png", "section": [ "4.2" ], "request_id": 684, "origin_statement": "Figure 3 shows TPP-Gaze produces variable-diameter fixations, so it models fixation durations; Figure 4 shows its fixation duration density (orange) peaks at ~200 ms overlapping humans; therefore TPP-Gaze accurately captures human fixation duration statistics.", "perturbed_statement": "Figure 3 shows TPP-Gaze uses uniform fixation markers, so it doesn’t model durations; Figure 4 shows its fixation durations (orange) still peak at ~200 ms matching humans; thus TPP-Gaze captures human-like temporal dynamics even without duration modeling.", "perturbed_explanation": "This is incorrect because Figure 3 actually depicts TPP-Gaze’s fixations with variable circle diameters, which directly indicates it models fixation durations. The claim that it uses uniform markers contradicts the visible varying circle sizes in the figure.", "claim": "Figure 3 shows TPP-Gaze uses uniform fixation markers, so it doesn’t model durations; Figure 4 shows its fixation durations (orange) still peak at ~200 ms matching humans; thus TPP-Gaze captures human-like temporal dynamics even without duration modeling.", "label": false }, { "paperid": "2410.02475v1", "paper_path": "./SciVer/papers/2410.02475v1.json", "claim_type": "sequential", "item1": "2(e)", "item2": "3(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.02475v1_figure_2(e).png", "item2_path": "./SciVer/images/2410.02475v1_figure_3(b).png", "section": [ "5.3" ], "request_id": 687, "origin_statement": "From Figure 2, our geometry-unaware policy achieves a 64.4% success rate grasping the cylindrical toilet paper. Figure 3’s grasp shows a vertical pinch of a cylinder. Thus, our policy’s strong performance on toilet paper transfers to vertical pinch grasps on similar cylindrical objects like mugs.", "perturbed_statement": "From Figure 2, our geometry-unaware policy achieves a 75% success rate grasping the cylindrical toilet paper. Figure 3’s grasp shows a horizontal wrap of a cylinder.
Thus, our policy’s strong performance on toilet paper transfers to horizontal wrap grasps on similar cylindrical objects like mugs.", "perturbed_explanation": "This is incorrect because Figure 2 reports a 64.4% success rate for the geometry-unaware policy on toilet paper, not 75%. Furthermore, the grasp illustrated in Figure 3 is a vertical pinch, not a horizontal wrap, contradicting the perturbed premises.", "claim": "From Figure 2, our geometry-unaware policy achieves a 75% success rate grasping the cylindrical toilet paper. Figure 3’s grasp shows a horizontal wrap of a cylinder. Thus, our policy’s strong performance on toilet paper transfers to horizontal wrap grasps on similar cylindrical objects like mugs.", "label": false },
{ "paperid": "2410.15678v1", "paper_path": "./SciVer/papers/2410.15678v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.15678v1_figure_4(a).png", "item2_path": "./SciVer/images/2410.15678v1-Table1-1.png", "section": [ "3.2" ], "request_id": 688, "origin_statement": "Figure 4 shows Standard-trained Mamba assigns highest attention scores to the sequence start (positions 0–10). Table 1 then shows this model achieves 99.72% on Standard test but only 15.44% and 22.37% on Last and Shuffle tests, evidencing its reliance on initial-position memorization.", "perturbed_statement": "Figure 4 shows Standard-trained Mamba assigns highest attention scores to the sequence end (positions 50–60). Table 1 then shows this model achieves 99.72% on Standard test and 82.64% on Last test, evidencing its reliance on end-position memorization.", "perturbed_explanation": "This is wrong because Figure 4 actually visualizes peak attention at the beginning (around positions 0–10), not the end, and Table 1 reports only 15.44% accuracy for the Last test when trained on Standard, not 82.64%.", "claim": "Figure 4 shows Standard-trained Mamba assigns highest attention scores to the sequence end (positions 50–60). Table 1 then shows this model achieves 99.72% on Standard test and 82.64% on Last test, evidencing its reliance on end-position memorization.", "label": false },
{ "paperid": "2409.03550v1", "paper_path": "./SciVer/papers/2409.03550v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.03550v1_figure_4(a).png", "item2_path": "./SciVer/images/2409.03550v1-Table2-1.png", "section": [ "4.2" ], "request_id": 689, "origin_statement": "Table 2 shows the 44.1 M-parameter student runs at 17.9 images/s; Figure 4a reports its DKDM-distilled FID is ≈9.5, demonstrating sub-10 FID at nearly 18 images/s.", "perturbed_statement": "Table 2 shows the 44.1 M-parameter student runs at 20.9 images/s; Figure 4a reports its DKDM-distilled FID is ≈9.5, demonstrating sub-10 FID at over 20 images/s.", "perturbed_explanation": "Table 2 lists the 44.1 M model’s speed as 17.9 images/s, not 20.9 images/s, so the stated 20.9 images/s contradicts the context.", "claim": "Table 2 shows the 44.1 M-parameter student runs at 20.9 images/s; Figure 4a reports its DKDM-distilled FID is ≈9.5, demonstrating sub-10 FID at over 20 images/s.", "label": false },
{ "paperid": "2411.08982v1", "paper_path": "./SciVer/papers/2411.08982v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.08982v1-Table1-1.png", "item2_path": "./SciVer/images/2411.08982v1-Table2-1.png", "section": [ "5" ], "request_id": 693, "origin_statement": "DBRX-Base with all 16 experts achieves 71.57 GSM8k accuracy (Table 1), which is 12.43 points higher than Mixtral-8x7B’s baseline with 8 experts at 59.14 (Table 2).", "perturbed_statement": "DBRX-Base with all 16 experts achieves 73.16 GSM8k accuracy (Table 1), which is 14.02 points higher than Mixtral-8x7B’s baseline with 8 experts at 59.14 (Table 2).", "perturbed_explanation": "This is incorrect because Table 1 shows DBRX-Base with 16 experts scoring 71.57 on GSM8k, not 73.16. The 73.16 value actually corresponds to the row with 12 retained experts, not the 16-expert baseline.", "claim": "DBRX-Base with all 16 experts achieves 73.16 GSM8k accuracy (Table 1), which is 14.02 points higher than Mixtral-8x7B’s baseline with 8 experts at 59.14 (Table 2).", "label": false },
{ "paperid": "2410.19986v1", "paper_path": "./SciVer/papers/2410.19986v1.json", "claim_type": "sequential", "item1": "4", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19986v1_figure_4.png", "item2_path": "./SciVer/images/2410.19986v1-Table3-1.png", "section": [ "4.2" ], "request_id": 696, "origin_statement": "Figure 4’s harmonized age distribution shows predicted age probabilities flatten to ~0.014, indicating minimized age bias. When fine-tuning with age-harmonization alone on random data, Table 3 reports speech detection balanced accuracy drops to 50.68%, and jointly harmonizing both age and dataset yields voicing accuracy of 52.65%.", "perturbed_statement": "Figure 4’s harmonized age distribution flattens at ~0.014, indicating minimized age bias. Table 3 shows age-only harmonization on random data raises speech detection balanced accuracy to 56.25%, and jointly harmonizing both age and dataset yields voicing accuracy of 52.65%.", "perturbed_explanation": "The perturbation misattributes the 56.25% speech detection accuracy to age-only harmonization. In Table 3, ‘Harmonized (age) Random’ actually achieves 50.68%, while the 56.25% figure corresponds to ‘Harmonized (dataset) Random’. This makes the perturbed claim false.", "claim": "Figure 4’s harmonized age distribution flattens at ~0.014, indicating minimized age bias. Table 3 shows age-only harmonization on random data raises speech detection balanced accuracy to 56.25%, and jointly harmonizing both age and dataset yields voicing accuracy of 52.65%.", "label": false },
{ "paperid": "2409.14444v1", "paper_path": "./SciVer/papers/2409.14444v1.json", "claim_type": "sequential", "item1": "7", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.14444v1_figure_7.png", "item2_path": "./SciVer/images/2409.14444v1-Table3-1.png", "section": [ "4.4" ], "request_id": 697, "origin_statement": "Figure 7 reveals that our CDFA highlights boundary blending artifacts between face and background, overlooked by the backbone. Table 4 shows adding CDFA to Swin raises FS AUC from 56.74% to 90.64%. Hence, emphasizing facial boundary inconsistencies dramatically improves face-swap detection.", "perturbed_statement": "Figure 7 shows CDFA highlights fine facial texture inconsistencies overlooked by the backbone. Table 4 shows adding CDFA to Swin raises NT AUC from 78.74% to 90.64%. Hence, emphasizing internal texture inconsistencies significantly improves face-swap detection.", "perturbed_explanation": "This is wrong because Figure 7’s ‘Ours’ GradCAMs actually focus on boundary blending traces, not fine facial textures. Moreover, Table 4 reports the NT AUC improves from 78.74% to 86.27%, while the 90.64% value corresponds to FS, not NT.", "claim": "Figure 7 shows CDFA highlights fine facial texture inconsistencies overlooked by the backbone. Table 4 shows adding CDFA to Swin raises NT AUC from 78.74% to 90.64%. Hence, emphasizing internal texture inconsistencies significantly improves face-swap detection.", "label": false },
{ "paperid": "2410.15005v1", "paper_path": "./SciVer/papers/2410.15005v1.json", "claim_type": "sequential", "item1": "3", "item2": "12", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.15005v1_figure_3.png", "item2_path": "./SciVer/images/2410.15005v1-Table12-1.png", "section": [ "3.5" ], "request_id": 699, "origin_statement": "Figure 3 shows ECTSum tasks use semantic modifications maintaining cosine similarity >0.979. Table 12 reveals paraphrases are generated by claude-3.5-sonnet-20240620 using '\\n'-separated sentences. Hence, ECTSum inputs are paraphrased by claude-3.5-sonnet-20240620 with '\\n'-separation to ensure >0.979 semantic similarity.", "perturbed_statement": "Figure 3 shows ECTSum tasks use semantic modifications maintaining cosine similarity >0.979. Table 12 reveals paraphrases are generated by claude-3.5-sonnet-20240620 using commas to separate sentences. Hence, ECTSum inputs are paraphrased by claude-3.5-sonnet-20240620 with comma separation to ensure >0.979 semantic similarity.", "perturbed_explanation": "Table 12 explicitly instructs that input sentences must be separated by '\\n', not commas. The prompt reads 'The input consists of sentences separated by ‘\\n.’', so claiming comma-separated paraphrases contradicts the actual prompt format.", "claim": "Figure 3 shows ECTSum tasks use semantic modifications maintaining cosine similarity >0.979. Table 12 reveals paraphrases are generated by claude-3.5-sonnet-20240620 using commas to separate sentences. Hence, ECTSum inputs are paraphrased by claude-3.5-sonnet-20240620 with comma separation to ensure >0.979 semantic similarity.", "label": false },
{ "paperid": "2410.21357v1", "paper_path": "./SciVer/papers/2410.21357v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.21357v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.21357v1-Table3-1.png", "section": [ "5.3" ], "request_id": 702, "origin_statement": "Figure 1(a) shows EDLM-AR reaches GPT-2 perplexity 35.7 in ~13 s, matching the AR baseline quality. Table 3 reports Autoregressive with 1024 timesteps yields GPT-2 perplexity 35.7, implying EDLM-AR matches AR quality with half the timesteps and faster wall-clock time.", "perturbed_statement": "Figure 1(a) shows EDLM-AR reaches GPT-2 perplexity 35.7 in ~13 s, matching the AR baseline quality. Table 3 reports Autoregressive with 512 timesteps yields GPT-2 perplexity 35.7, implying EDLM-AR matches AR quality using the same timesteps.", "perturbed_explanation": "This is incorrect because Table 3 indicates the Autoregressive baseline uses 1024 timesteps to achieve GPT-2 perplexity 35.7, not 512 timesteps as stated in the perturbed claim.", "claim": "Figure 1(a) shows EDLM-AR reaches GPT-2 perplexity 35.7 in ~13 s, matching the AR baseline quality. Table 3 reports Autoregressive with 512 timesteps yields GPT-2 perplexity 35.7, implying EDLM-AR matches AR quality using the same timesteps.", "label": false },
{ "paperid": "2411.10545v1", "paper_path": "./SciVer/papers/2411.10545v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.10545v1_figure_1.png", "item2_path": "./SciVer/images/2411.10545v1-Table2-1.png", "section": [ "3" ], "request_id": 703, "origin_statement": "Figure1 shows golden_anthropic performance plateau at ~83% winrate. Table2 shows at 25% data it already achieves 81.32%. Therefore 25% data yields over 97% of the max golden_anthropic winrate.", "perturbed_statement": "Figure1 shows golden_anthropic performance plateaus at ~85% winrate. Table2 shows at 25% data it achieves 81.32%. Therefore 25% data yields 95.7% of the max golden_anthropic winrate.", "perturbed_explanation": "The claim is wrong because Figure 1’s exponential curve for golden_anthropic actually levels off around 83%, not 85%. Table 2 further confirms the asymptote at ~83.17% (75% data → 83.1681%), so the plateau premise of 85% contradicts both the graph and table.", "claim": "Figure1 shows golden_anthropic performance plateaus at ~85% winrate. Table2 shows at 25% data it achieves 81.32%. Therefore 25% data yields 95.7% of the max golden_anthropic winrate.", "label": false },
{ "paperid": "2411.18473v1", "paper_path": "./SciVer/papers/2411.18473v1.json", "claim_type": "sequential", "item1": "3(c)", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.18473v1_figure_3(c).png", "item2_path": "./SciVer/images/2411.18473v1-Table1-1.png", "section": [ "4.3" ], "request_id": 704, "origin_statement": "At roughly 6.4 MB on DeepBlending, the RD curve shows HEMGS achieves 30.40 PSNR; Table 1 shows HAC yields 30.34 PSNR at 6.35 MB, so HEMGS outperforms HAC by 0.06 dB at similar storage.", "perturbed_statement": "At roughly 6.4 MB on DeepBlending, the RD curve shows HEMGS achieves 30.40 PSNR; Table 1 shows HAC yields 30.45 PSNR at 6.35 MB, so HEMGS underperforms HAC by 0.05 dB at similar storage.", "perturbed_explanation": "The perturbation is incorrect because Table 1 actually reports HAC’s PSNR on DeepBlending as 30.34 dB (not 30.45 dB), and HEMGS’s PSNR at 6.39 MB is 30.40 dB, so HEMGS still outperforms HAC by 0.06 dB.", "claim": "At roughly 6.4 MB on DeepBlending, the RD curve shows HEMGS achieves 30.40 PSNR; Table 1 shows HAC yields 30.45 PSNR at 6.35 MB, so HEMGS underperforms HAC by 0.05 dB at similar storage.", "label": false },
{ "paperid": "2410.13605v1", "paper_path": "./SciVer/papers/2410.13605v1.json", "claim_type": "sequential", "item1": "5", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.13605v1_figure_5.png", "item2_path": "./SciVer/images/2410.13605v1-Table4-1.png", "section": [ "4" ], "request_id": 705, "origin_statement": "ResBiLSTM trains in about 50 seconds per epoch (Figure 5) and exhibits a maximum INT8 quantization degradation of 6.45% on OH (Table 4), showing that it pairs minimal training cost with robust quantization stability.", "perturbed_statement": "ResBiLSTM trains in about 50 seconds per epoch and exhibits up to 80.87% INT8 quantization degradation on S, showing it pairs minimal training time with highly unstable quantization.", "perturbed_explanation": "The perturbation misattributes the 80.87% degradation (which in Table 4 actually corresponds to TTN on S) to ResBiLSTM. In reality, Table 4 shows ResBiLSTM’s degradation on S is only 0.36%, not 80.87%, so the claim is false.", "claim": "ResBiLSTM trains in about 50 seconds per epoch and exhibits up to 80.87% INT8 quantization degradation on S, showing it pairs minimal training time with highly unstable quantization.", "label": false },
{ "paperid": "2409.13499v2", "paper_path": "./SciVer/papers/2409.13499v2.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.13499v2_figure_2.png", "item2_path": "./SciVer/images/2409.13499v2-Table3-1.png", "section": [ "4.1" ], "request_id": 706, "origin_statement": "Figure 2 shows that adding 100 h of supervised data yields larger WER improvements for Italian (200 h) than Spanish (317 h). Table 3 confirms for Whisper-tiny: Italian WER drops from 32.2 to 19.8 (12.4-point reduction), whereas Spanish drops from 20.1 to 16.1 (4.0-point reduction).", "perturbed_statement": "Figure 2 shows that adding 100 h of supervised data yields larger WER improvements for Spanish (317 h) than Italian (200 h). Table 3 confirms for Whisper-tiny: Spanish WER drops from 20.1 to 16.1 (12.4-point reduction), whereas Italian drops from 32.2 to 19.8 (4.0-point reduction).", "perturbed_explanation": "This statement is incorrect because Table 3 actually reports a 4.0-point WER reduction for Spanish (20.1→16.1) and a 12.4-point reduction for Italian (32.2→19.8), not the other way around.", "claim": "Figure 2 shows that adding 100 h of supervised data yields larger WER improvements for Spanish (317 h) than Italian (200 h). Table 3 confirms for Whisper-tiny: Spanish WER drops from 20.1 to 16.1 (12.4-point reduction), whereas Italian drops from 32.2 to 19.8 (4.0-point reduction).", "label": false },
{ "paperid": "2411.18200v1", "paper_path": "./SciVer/papers/2411.18200v1.json", "claim_type": "sequential", "item1": "4", "item2": "5", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.18200v1_figure_4.png", "item2_path": "./SciVer/images/2411.18200v1-Table5-1.png", "section": [ "5.2" ], "request_id": 707, "origin_statement": "Table 5 shows NGC7582’s PVD log MBH = 6.62. In Fig. 4, the blue PVD distribution peaks at 6.62. The yellow Wang & Zhang (2007) distribution peaks at ~5.6, so the PVD-based estimate is ~1.0 dex higher than the Wang & Zhang value.", "perturbed_statement": "Table 5 shows NGC7582’s PVD log MBH = 6.62. In Fig. 4, the blue PVD distribution peaks at 6.62. The yellow Wang & Zhang (2007) distribution peaks at ~6.6, so the PVD-based estimate matches within 0.02 dex.", "perturbed_explanation": "Figure 4 actually shows the yellow Wang & Zhang (2007) distribution for NGC7582 peaking at ~5.6 dex, not ~6.6. The perturbed statement misrepresents the position of the yellow peak, making the claim false.", "claim": "Table 5 shows NGC7582’s PVD log MBH = 6.62. In Fig. 4, the blue PVD distribution peaks at 6.62. The yellow Wang & Zhang (2007) distribution peaks at ~6.6, so the PVD-based estimate matches within 0.02 dex.", "label": false },
{ "paperid": "2410.09300v2", "paper_path": "./SciVer/papers/2410.09300v2.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.09300v2_figure_2.png", "item2_path": "./SciVer/images/2410.09300v2_figure_3.png", "section": [ "2.1" ], "request_id": 708, "origin_statement": "Figure 2 shows that token positions with base top-1 probability below 0.1 disagree over 90%. Figure 3 shows that a 0.1 threshold yields about 65% recall. Thus, a 0.1 threshold misses around 35% of alignment-related positions.", "perturbed_statement": "Figure 2 shows that token positions with base top-1 probability below 0.1 disagree over 90%. Figure 3 shows that a 0.1 threshold yields about 85% recall. Thus, a 0.1 threshold misses around 35% of alignment-related positions.", "perturbed_explanation": "The perturbation misreports Figure 3: at a threshold of 0.1 the alignment-related recall (dotted red curve) is only about 60–65%, not 85%. Therefore the claimed 85% recall is incorrect and the resulting conclusion about misses is invalid.", "claim": "Figure 2 shows that token positions with base top-1 probability below 0.1 disagree over 90%. Figure 3 shows that a 0.1 threshold yields about 85% recall. Thus, a 0.1 threshold misses around 35% of alignment-related positions.", "label": false },
{ "paperid": "2410.19279v1", "paper_path": "./SciVer/papers/2410.19279v1.json", "claim_type": "sequential", "item1": "8", "item2": "1", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.19279v1_figure_8.png", "item2_path": "./SciVer/images/2410.19279v1-Table1-1.png", "section": [ "3.4.1" ], "request_id": 716, "origin_statement": "Figure 8 shows consistent abrupt rPPG peak transitions, implying long-range dependencies. Table 1 then reveals PhysNet’s MAE increases by 6.34 (from 1.86 in Task1 to 8.20 in Task2), demonstrating short-range methods’ instability under noise.", "perturbed_statement": "Figure 8 shows consistent abrupt rPPG peak transitions, implying long-range dependencies. Table 1 then reveals DeepPhys’s MAE increases by 6.34 (from 2.11 in Task1 to 12.82 in Task2), demonstrating short-range methods’ instability under noise.", "perturbed_explanation": "The perturbation misreports DeepPhys’s error increase. According to Table 1, DeepPhys’s MAE rises from 2.11 to 12.82, a jump of 10.71, not 6.34.", "claim": "Figure 8 shows consistent abrupt rPPG peak transitions, implying long-range dependencies. Table 1 then reveals DeepPhys’s MAE increases by 6.34 (from 2.11 in Task1 to 12.82 in Task2), demonstrating short-range methods’ instability under noise.", "label": false },
{ "paperid": "2410.10880v1", "paper_path": "./SciVer/papers/2410.10880v1.json", "claim_type": "sequential", "item1": "4(b)", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.10880v1_figure_4(b).png", "item2_path": "./SciVer/images/2410.10880v1-Table2-1.png", "section": [ "4.2" ], "request_id": 717, "origin_statement": "Figure 4 indicates FSD raises perplexity TPR@5%FPR at 100 non-member samples from ~0.15 to ~0.54; Table 2 shows that for LLaMA-7B, our method increases perplexity AUC from 0.64 to 0.92, demonstrating FSD’s consistent performance improvement across metrics.", "perturbed_statement": "Figure 4 indicates FSD raises perplexity TPR@5%FPR at 100 non-member samples from ~0.15 to ~0.60; Table 2 shows that for LLaMA-7B, our method increases perplexity AUC from 0.64 to 0.91, demonstrating FSD’s consistent performance improvement across metrics.", "perturbed_explanation": "This is incorrect because Figure 4 shows the perplexity TPR@5%FPR at 100 non-member samples is approximately 0.54, not 0.60, and Table 2 reports the LLaMA-7B perplexity AUC with +Ours as 0.92, not 0.91.", "claim": "Figure 4 indicates FSD raises perplexity TPR@5%FPR at 100 non-member samples from ~0.15 to ~0.60; Table 2 shows that for LLaMA-7B, our method increases perplexity AUC from 0.64 to 0.91, demonstrating FSD’s consistent performance improvement across metrics.", "label": false },
{ "paperid": "2411.16725v1", "paper_path": "./SciVer/papers/2411.16725v1.json", "claim_type": "sequential", "item1": "10", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.16725v1_figure_10.png", "item2_path": "./SciVer/images/2411.16725v1-Table4-1.png", "section": [ "4.6" ], "request_id": 718, "origin_statement": "Fig.10 shows that SD-1.5’s up_ft1 captures fine-grained spatial patterns. Table 4 reveals that interleaving CLIP with SD-1.5 up_ft1 at t = 25 raises the relative score from 56.6 to 59.9. Thus, spatially localized diffusion features yield a 3.3-point improvement over CLIP alone.", "perturbed_statement": "Fig.10 shows that DiT block_14 captures fine-grained spatial details. Table 4 shows that interleaving CLIP with DINO-v2 raises the relative score from 56.6 to 59.9. Therefore, DiT’s locally semantic features deliver a 3.3-point boost over CLIP.", "perturbed_explanation": "This is incorrect because the Figure 10 caption explicitly states that spatial localization is missing from DiT features across blocks (i.e., DiT block_14 does not capture fine-grained spatial details). Moreover, Table 4 lists the relative score for CLIP + DINO-v2 as 47.0 (not 59.9), so interleaving CLIP with DINO-v2 does not yield a 3.3-point improvement.", "claim": "Fig.10 shows that DiT block_14 captures fine-grained spatial details. Table 4 shows that interleaving CLIP with DINO-v2 raises the relative score from 56.6 to 59.9. Therefore, DiT’s locally semantic features deliver a 3.3-point boost over CLIP.", "label": false },
{ "paperid": "2410.21526v1", "paper_path": "./SciVer/papers/2410.21526v1.json", "claim_type": "sequential", "item1": "1(c)", "item2": "2", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.21526v1_figure_1(c).png", "item2_path": "./SciVer/images/2410.21526v1_figure_2.png", "section": [ "5.1" ], "request_id": 720, "origin_statement": "Figure 1 shows DIMP-Loss achieves ~74.5% accuracy on MRPC at epoch 3, outperforming Focal-Loss (~70.1%). Figure 2 then shows with 30% real data for the quality checker, DIMP-Loss reaches ~83.2% vs the checker’s ~80.8%, so DIMP-Loss still outperforms the checker.", "perturbed_statement": "Figure 1 shows Focal-Loss achieves ~74.5% accuracy on MRPC at epoch 3, matching DIMP-Loss. Figure 2 shows the quality checker with 30% data achieves ~83.5%, slightly above DIMP-Loss’s ~83.2%, so DIMP-Loss underperforms the checker.", "perturbed_explanation": "The perturbation misreports two facts: (1) In Figure 1, Focal-Loss at epoch 3 only reaches ~70.1%, not ~74.5%, so it does not match DIMP-Loss. (2) In Figure 2, the checker at 30% actually scores ~80.8%, not ~83.5%, so DIMP-Loss (83.2%) still outperforms it.", "claim": "Figure 1 shows Focal-Loss achieves ~74.5% accuracy on MRPC at epoch 3, matching DIMP-Loss. Figure 2 shows the quality checker with 30% data achieves ~83.5%, slightly above DIMP-Loss’s ~83.2%, so DIMP-Loss underperforms the checker.", "label": false },
{ "paperid": "2409.04073v2", "paper_path": "./SciVer/papers/2409.04073v2.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2409.04073v2-Table1-1.png", "item2_path": "./SciVer/images/2409.04073v2-Table2-1.png", "section": [ "6.1" ], "request_id": 730, "origin_statement": "Table 1 shows the BEER dataset contains only 450 samples. Table 2 reveals that AnyMatch achieves a 96.55 F1 on BEER, the highest score among all methods, surpassing MatchGPT [GPT-4] which scores only 69.57.", "perturbed_statement": "Table 1 shows the BEER dataset contains only 450 samples. Table 2 reveals that MatchGPT [GPT-4] achieves a 96.55 F1 on BEER, the highest score among all methods, surpassing AnyMatch which scores only 69.57.", "perturbed_explanation": "The perturbed claim swaps the F1 scores of AnyMatch and GPT-4. In Table 2, AnyMatch actually scores 96.55 on BEER (not GPT-4), and GPT-4 scores 69.57 (not AnyMatch). Therefore GPT-4 does not achieve 96.55, nor does AnyMatch only score 69.57.", "claim": "Table 1 shows the BEER dataset contains only 450 samples. Table 2 reveals that MatchGPT [GPT-4] achieves a 96.55 F1 on BEER, the highest score among all methods, surpassing AnyMatch which scores only 69.57.", "label": false },
{ "paperid": "2410.15658v2", "paper_path": "./SciVer/papers/2410.15658v2.json", "claim_type": "sequential", "item1": "5", "item2": "6(a)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.15658v2_figure_5.png", "item2_path": "./SciVer/images/2410.15658v2_figure_6(a).png", "section": [ "4.3", "4.3.1" ], "request_id": 732, "origin_statement": "Fig.5 (Adience, ORCU) shows feature embeddings aligned in ascending label order (green to red), indicating strong ordinal structure. Consequently, in Fig.6(a), ORCU’s output (green) peaks at the true age group [4–6] and smoothly decreases for adjacent classes 8–13 and 0–2, reflecting consistent ordinal confidence.", "perturbed_statement": "Fig.5 (Adience, LS) shows feature embeddings aligned in ascending label order (blue to red), indicating strong ordinal structure. Consequently, in Fig.6(a), LS’s output (blue) peaks at the true age group [4–6] and smoothly decreases for adjacent classes 8–13 and 0–2, reflecting consistent ordinal confidence.", "perturbed_explanation": "This is incorrect because Fig.5(c) (LS) displays dispersed blue embeddings without an ordered color gradient, showing no clear ordinal alignment. Moreover, in Fig.6(a), the LS curve (blue) does not smoothly decrease: it has a pronounced secondary peak at the 25–32 class rather than a monotonic decline to neighboring bins.", "claim": "Fig.5 (Adience, LS) shows feature embeddings aligned in ascending label order (blue to red), indicating strong ordinal structure. Consequently, in Fig.6(a), LS’s output (blue) peaks at the true age group [4–6] and smoothly decreases for adjacent classes 8–13 and 0–2, reflecting consistent ordinal confidence.", "label": false },
{ "paperid": "2410.12378v1", "paper_path": "./SciVer/papers/2410.12378v1.json", "claim_type": "sequential", "item1": "1(a)", "item2": "1(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2410.12378v1_figure_1(a).png", "item2_path": "./SciVer/images/2410.12378v1_figure_1(b).png", "section": [ "3.1.2" ], "request_id": 733, "origin_statement": "Table 1 shows the GRW boundary position converges with an EOC ≈2; halving h from 0.025 to 0.0125 reduces the s-error from ≈5×10⁻⁴ to ≈1.25×10⁻⁴; combining this with s_GRW≈0.89 at t=0.5 (Figure 1) gives s≈0.89±1.25×10⁻⁴.", "perturbed_statement": "Table 1 shows the GRW boundary position converges with an EOC ≈3; halving h from 0.025 to 0.0125 reduces the s-error from ≈5×10⁻⁴ to ≈6.25×10⁻⁵; combining this with s_GRW≈0.89 at t=0.5 (Figure 1) gives s≈0.89±6.25×10⁻⁵.", "perturbed_explanation": "This is incorrect because Table 1 reports an estimated order of convergence (EOC) of about 2, not 3. With EOC≈2, halving h reduces the error by a factor of 4 (5×10⁻⁴→1.25×10⁻⁴), not by a factor of 8 (which would be the case for EOC≈3).", "claim": "Table 1 shows the GRW boundary position converges with an EOC ≈3; halving h from 0.025 to 0.0125 reduces the s-error from ≈5×10⁻⁴ to ≈6.25×10⁻⁵; combining this with s_GRW≈0.89 at t=0.5 (Figure 1) gives s≈0.89±6.25×10⁻⁵.", "label": false },
{ "paperid": "2411.15375v1", "paper_path": "./SciVer/papers/2411.15375v1.json", "claim_type": "sequential", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.15375v1_figure_5.png", "item2_path": "./SciVer/images/2411.15375v1-Table2-1.png", "section": [ "4.2" ], "request_id": 735, "origin_statement": "In Figure 5, AdamZ’s loss reaches 0.18 by epoch 3; Table 2 reports its median accuracy as 97.83%; thus, AdamZ’s rapid early loss reduction correlates with the highest classification accuracy among the optimisers.", "perturbed_statement": "By epoch 3, AdamZ’s loss drops to 0.12; Table 2 records its median accuracy as 98.50%; hence, its extremely fast loss minimisation yields superior classification accuracy above all optimisers.", "perturbed_explanation": "The perturbed claim misreads Figure 5: AdamZ’s loss at epoch 3 is approximately 0.18, not 0.12; and misstates Table 2: its median accuracy is 97.83%, not 98.50%, making the conclusion false.", "claim": "By epoch 3, AdamZ’s loss drops to 0.12; Table 2 records its median accuracy as 98.50%; hence, its extremely fast loss minimisation yields superior classification accuracy above all optimisers.", "label": false },
{ "paperid": "2411.01023v1", "paper_path": "./SciVer/papers/2411.01023v1.json", "claim_type": "sequential", "item1": "5", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.01023v1_figure_5.png", "item2_path": "./SciVer/images/2411.01023v1-Table2-1.png", "section": [ "3.2.3" ], "request_id": 737, "origin_statement": "Figure 5 shows that Dataset dolce:has-quality DatasetCharacteristics, which hasValue Value. Table 2 lists “# of Numeric Features” as a numerical dataset characteristic. Hence, the KG can represent a dataset’s number of numeric features using a Value instance.", "perturbed_statement": "Figure 5 shows that Dataset dolce:has-quality DatasetCharacteristics, which hasValue Value. Table 2 lists “# of Numeric Features” as a categorical dataset characteristic. Hence, the KG can represent a dataset’s number of numeric features using a Value instance.", "perturbed_explanation": "The perturbation mislabels “# of Numeric Features” as categorical in Table 2. In the actual table, “# of Numeric Features” is marked under the Numerical column (✓), not under Categorical, so it cannot be treated as a categorical characteristic.", "claim": "Figure 5 shows that Dataset dolce:has-quality DatasetCharacteristics, which hasValue Value. Table 2 lists “# of Numeric Features” as a categorical dataset characteristic. Hence, the KG can represent a dataset’s number of numeric features using a Value instance.", "label": false },
{ "paperid": "2411.01424v1", "paper_path": "./SciVer/papers/2411.01424v1.json", "claim_type": "sequential", "item1": "4(b)", "item2": "4(i)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.01424v1_figure_4(b).png", "item2_path": "./SciVer/images/2411.01424v1_figure_4(i).png", "section": [ "7.2" ], "request_id": 738, "origin_statement": "Figure 4(b) shows that raising k from 1 to 9 cuts CD-SBN query time from 0.277 s to 0.053 s, and Figure 4(i) shows increasing |L(G)| from 1 K to 5 K reduces query cost from 0.407 s to 0.031 s.", "perturbed_statement": "Figure 4(b) shows that raising k from 1 to 9 increases CD-SBN query time from 0.053 s to 0.277 s, and Figure 4(i) shows increasing |L(G)| from 1 K to 5 K reduces query cost from 0.407 s to 0.031 s.", "perturbed_explanation": "The perturbation is wrong because Figure 4(b) actually shows query time decreases with higher k (from 0.277 s at k=1 down to 0.053 s at k=9), not increases as claimed.", "claim": "Figure 4(b) shows that raising k from 1 to 9 increases CD-SBN query time from 0.053 s to 0.277 s, and Figure 4(i) shows increasing |L(G)| from 1 K to 5 K reduces query cost from 0.407 s to 0.031 s.", "label": false },
{ "paperid": "2411.01217v1", "paper_path": "./SciVer/papers/2411.01217v1.json", "claim_type": "sequential", "item1": "5", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.01217v1_figure_5.png", "item2_path": "./SciVer/images/2411.01217v1-Table3-1.png", "section": [ "5.2" ], "request_id": 740, "origin_statement": "The Normal AI strategy folds 5.4% of hands preflop and achieves 252.2 mBB/h when playing as button against Normal opponents in a three-player game.", "perturbed_statement": "The Normal AI strategy folds 4.7% of hands preflop and achieves 260.0 mBB/h when playing as button against Normal opponents in a three-player game.", "perturbed_explanation": "This statement is incorrect on two counts. First, Figure 5 shows the Normal AI’s fold rate is 5.4%, not 4.7%. Second, Table 3 reports the Normal AI’s win rate as button versus Normal blinds as 252.2 mBB/h, not 260.0 mBB/h (260.0 is its win rate versus Loose-passive blinds).", "claim": "The Normal AI strategy folds 4.7% of hands preflop and achieves 260.0 mBB/h when playing as button against Normal opponents in a three-player game.", "label": false },
{ "paperid": "2410.24145v1", "paper_path": "./SciVer/papers/2410.24145v1.json", "claim_type": "sequential", "item1": "3", "item2": "3", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2410.24145v1_figure_3.png", "item2_path": "./SciVer/images/2410.24145v1-Table3-1.png", "section": [ "4.4" ], "request_id": 741, "origin_statement": "The circular histogram peaks at π/2, showing most winds blow eastward; Table 3 reports projected random forests has the smallest median arc length (1.90); hence, this method delivers the tightest prediction sets for the most common eastward winds.", "perturbed_statement": "The circular histogram peaks at π, showing most winds blow westward; Table 3 reports projected random forests has the smallest median arc length (1.90); hence, this method delivers the tightest prediction sets for the most common westward winds.", "perturbed_explanation": "The perturbation is false because Figure 3’s histogram shows the highest frequency bar at π/2 (90°), not at π (180°). Thus the prevailing wind direction is eastward, not westward, contradicting the modified premise about a peak at π.", "claim": "The circular histogram peaks at π, showing most winds blow westward; Table 3 reports projected random forests has the smallest median arc length (1.90); hence, this method delivers the tightest prediction sets for the most common westward winds.", "label": false },
{ "paperid": "2409.08056v1", "paper_path": "./SciVer/papers/2409.08056v1.json", "claim_type": "sequential", "item1": "2", "item2": "3", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08056v1_figure_2.png", "item2_path": "./SciVer/images/2409.08056v1_figure_3.png", "section": [ "3.3" ], "request_id": 742, "origin_statement": "Figure 2 shows that only the top 10% of pixels—located at high-frequency edges—carry most rendering error. Using this in Figure 3, expansive supervision renders just these anchor pixels plus sampled sources, cutting rendered rays by roughly 90% while preserving visual quality.", "perturbed_statement": "Figure 2 shows that only the bottom 10% of pixels—located in low-frequency areas—carry most rendering error. Accordingly, expansive supervision in Figure 3 renders only these source pixels, reducing rendered rays by about 50% while maintaining visual fidelity.", "perturbed_explanation": "The perturbation is incorrect because Figure 2’s column 4 highlights the top 10% highest‐error pixels (not the bottom 10%), and they correspond to high-frequency edges rather than low-frequency areas. Moreover, Figure 3’s pipeline clearly renders both anchor (high-error) and sampled source pixels, not just source pixels alone, and achieves approximately 90% reduction in rendered rays, not 50%.", "claim": "Figure 2 shows that only the bottom 10% of pixels—located in low-frequency areas—carry most rendering error. Accordingly, expansive supervision in Figure 3 renders only these source pixels, reducing rendered rays by about 50% while maintaining visual fidelity.", "label": false },
{ "paperid": "2411.09899v1", "paper_path": "./SciVer/papers/2411.09899v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2411.09899v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.09899v1-Table4-1.png", "section": [ "4.2" ], "request_id": 744, "origin_statement": "Figure 4(a) shows five bias weights from the bias node, implying a five-neuron hidden layer. Table 4 reports the ANN’s mean terminal utility as 0.07840 versus analytic’s 0.07748, with a smaller standard error (0.00055 < 0.00060), indicating more stable performance.", "perturbed_statement": "Figure 4(a) shows six bias weights from the bias node, implying a six-neuron hidden layer. Table 4 reports the ANN’s mean terminal utility as 0.07840 versus analytic’s 0.07748, with a smaller standard error (0.00055 < 0.00060).", "perturbed_explanation": "The perturbation is incorrect because Figure 4(a) actually displays only five bias connections from the bias node to hidden units, not six. The ANN thus has a five-neuron hidden layer, contradicting the claim of six neurons.", "claim": "Figure 4(a) shows six bias weights from the bias node, implying a six-neuron hidden layer. Table 4 reports the ANN’s mean terminal utility as 0.07840 versus analytic’s 0.07748, with a smaller standard error (0.00055 < 0.00060).", "label": false },
{ "paperid": "2411.05087v1", "paper_path": "./SciVer/papers/2411.05087v1.json", "claim_type": "sequential", "item1": "4(a)", "item2": "4(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2411.05087v1_figure_4(a).png", "item2_path": "./SciVer/images/2411.05087v1_figure_4(b).png", "section": [ "3.5.1" ], "request_id": 747, "origin_statement": "Since major releases have a median complexity rating of 5 (Figure 4a), and JavaScript packages at complexity 5 show a median log-difference in dependents of about 0.5 (Figure 4b), major JavaScript releases typically gain around 0.5 log dependents after one year.", "perturbed_statement": "Since major releases have a median complexity rating of 5 (Figure 4a), and JavaScript packages at complexity 5 show a median log-difference in dependents of about 2.0 (Figure 4b), major JavaScript releases typically gain around 2.0 log dependents after one year.", "perturbed_explanation": "The perturbation is wrong because Figure 4b shows that at complexity rating 5 JavaScript releases have a median log-difference in dependents of roughly 0.5, not 2.0. The boxplot’s median line at complexity 5 lies near 0.5, and no whisker or quartile reaches 2.0.", "claim": "Since major releases have a median complexity rating of 5 (Figure 4a), and JavaScript packages at complexity 5 show a median log-difference in dependents of about 2.0 (Figure 4b), major JavaScript releases typically gain around 2.0 log dependents after one year.", "label": false },
{ "paperid": "2409.08512v1", "paper_path": "./SciVer/papers/2409.08512v1.json", "claim_type": "sequential", "item1": "5(a)", "item2": "5(b)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.08512v1_figure_5(a).png", "item2_path": "./SciVer/images/2409.08512v1_figure_5(b).png", "section": [ "5.3.1" ], "request_id": 750, "origin_statement": "From Fig.5a, CPG achieves the highest Precision in Vulnerability Fix Identification (0.88 vs AST 0.48, CDG+DDG 0.30). Fig.5b shows CPG also leads in Precision for Vulnerability Types Classification (0.82 vs AST 0.25, CDG+DDG 0.39). Thus, CPG consistently yields the best precision across both tasks.", "perturbed_statement": "Fig.5a shows AST achieves Precision 0.60 in Vulnerability Fix Identification, surpassing CPG’s 0.45. In Fig.5b, AST also posts Precision 0.40 versus CPG’s 0.35 for Vulnerability Types Classification. Therefore, AST consistently yields higher precision across both tasks.", "perturbed_explanation": "The perturbed claim misquotes both subplots: In Fig.5a, AST’s actual precision is 0.48 (not 0.60) and CPG’s is 0.88 (not 0.45). In Fig.5b, AST’s precision is 0.25 (not 0.40) and CPG’s is 0.82 (not 0.35), directly contradicting the plotted values.", "claim": "Fig.5a shows AST achieves Precision 0.60 in Vulnerability Fix Identification, surpassing CPG’s 0.45. In Fig.5b, AST also posts Precision 0.40 versus CPG’s 0.35 for Vulnerability Types Classification. Therefore, AST consistently yields higher precision across both tasks.", "label": false },
{ "paperid": "2409.09099v2", "paper_path": "./SciVer/papers/2409.09099v2.json", "claim_type": "sequential", "item1": "4", "item2": "2", "item1_type": "chart", "item2_type": "table", "item1_path": "./SciVer/images/2409.09099v2_figure_4.png", "item2_path": "./SciVer/images/2409.09099v2-Table2-1.png", "section": [ "4.2" ], "request_id": 751, "origin_statement": "Figure 4(a) shows β=1 yields the lowest flip rate across training compared to β=2 and β=4; Table 2 reports that the “No scaling” (β=1) recipe achieves a test BLEU of 25.28, which is lower than the minimizing-MSE (S-STE) recipe’s 26.3.", "perturbed_statement": "Figure 4(a) shows β=4 yields the lowest flip rate across training compared to β=1 and β=2; Table 2 reports that the “No scaling” recipe achieves a test BLEU of 25.85, slightly below the minimizing-MSE (S-STE) recipe’s 26.3.", "perturbed_explanation": "This is incorrect because in Figure 4(a) the red curve (β=4) clearly exhibits the highest flip rate, not the lowest, and Table 2 shows the “No scaling” recipe’s test BLEU is actually 25.28, not 25.85.", "claim": "Figure 4(a) shows β=4 yields the lowest flip rate across training compared to β=1 and β=2; Table 2 reports that the “No scaling” recipe achieves a test BLEU of 25.85, slightly below the minimizing-MSE (S-STE) recipe’s 26.3.", "label": false },
{ "paperid": "2411.00969v1", "paper_path": "./SciVer/papers/2411.00969v1.json", "claim_type": "sequential", "item1": "1", "item2": "2", "item1_type": "table", "item2_type": "table", "item1_path": "./SciVer/images/2411.00969v1-Table1-1.png", "item2_path": "./SciVer/images/2411.00969v1-Table2-1.png", "section": [ "4.2" ], "request_id": 755, "origin_statement": "At 90% sparsity, MGPP prunes DeBERTaV3base to 85.2% matched MNLI accuracy (Table 1); Table 2 shows MGPP pruned BERTbase achieves 82.1%, so DeBERTaV3base outperforms BERTbase by 3.1% under extreme sparsity.", "perturbed_statement": "At 90% sparsity, MGPP prunes DeBERTaV3base to 83.2% matched MNLI accuracy (Table 1); Table 2 shows MGPP pruned BERTbase achieves 82.1%, so DeBERTaV3base outperforms BERTbase by 1.1% under extreme sparsity.", "perturbed_explanation": "Table 1 reports that MGPP on DeBERTaV3base at 90% sparsity actually achieves 85.2% matched MNLI accuracy, not 83.2%. The incorrect premise underestimates the true 85.2% value, making the claimed 1.1% gap invalid.", "claim": "At 90% sparsity, MGPP prunes DeBERTaV3base to 83.2% matched MNLI accuracy (Table 1); Table 2 shows MGPP pruned BERTbase achieves 82.1%, so DeBERTaV3base outperforms BERTbase by 1.1% under extreme sparsity.", "label": false },
{ "paperid": "2409.12757v2", "paper_path": "./SciVer/papers/2409.12757v2.json", "claim_type": "sequential", "item1": "3(a)", "item2": "3(c)", "item1_type": "chart", "item2_type": "chart", "item1_path": "./SciVer/images/2409.12757v2_figure_3(a).png", "item2_path": "./SciVer/images/2409.12757v2_figure_3(c).png", "section": [ "3.1" ], "request_id": 757, "origin_statement": "From Figure 3 and Figure 3, the DHF valence MO gap in CuH is 0.15936 eV (Cu: –0.33617 – (–0.49553)), and in AuH is 0.04179 eV (Au: –0.38258 – (–0.42437)), so CuH’s gap is about 3.8× larger, reflecting weaker relativistic effects in Cu.", "perturbed_statement": "From Figure 3 and Figure 3, the DHF valence MO gap in CuH is 0.15936 eV, and in AuH is 0.12173 eV, so CuH’s gap is only 1.3× larger, indicating similar relativistic effects.", "perturbed_explanation": "The perturbation wrongly uses the AuH x2c gap (0.12173 eV) instead of the DHF gap (0.04179 eV) from the DHF column in Figure 3. This misread contradicts the context’s DHF values, making the 1.3× ratio false.", "claim": "From Figure 3 and Figure 3, the DHF valence MO gap in CuH is 0.15936 eV, and in AuH is 0.12173 eV, so CuH’s gap is only 1.3× larger, indicating similar relativistic effects.", "label": false } ]