HoangHa committed on
Commit 35e8439 · verified · 1 Parent(s): 114a021

Update pubmed25.py

Files changed (1):
  1. pubmed25.py +59 -61
pubmed25.py CHANGED
@@ -12,28 +12,15 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
- """
- MEDLINE/PubMed data loader for Hugging Face Datasets.
-
- This script is based on the original NCBI PubMed dataset script from Hugging Face.
- Modifications by:
- - Hoang Ha (LIG): Updated for the 2025 baseline structure and integrated full abstract parsing.
- - Tiziri Terkmani (Research Engineer, LIG, Team SIGMA): Enhanced abstract parsing logic, adapted for the NanoBubbles project: https://nanobubbles.hypotheses.org/
-
- **Important Caution:** The default URL configuration in this script is set up as an
- example and primarily targets files from the **2025 baseline**. To download data
- covering the full intended range (e.g., 2015-2025), you **MUST** modify the `_URLs`
- list below to include the correct file paths for **ALL** desired years from the
- NCBI FTP server (ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/). The number of files
- per year varies. Use with caution and verify the downloaded data range.
- """
+ """MEDLINE/PubMed data - Modified for full abstract text extraction."""
+

import copy
import gzip
import xml.etree.ElementTree as ET  # Using standard ElementTree

import datasets
-
+ import random

logger = datasets.logging.get_logger(__name__)
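
The caution dropped from the docstring above still applies to anyone reusing this loader: to cover more than one baseline, _URLs must enumerate every file of every desired baseline, and the file count differs per baseline. A minimal sketch of that configuration, assuming hypothetical per-baseline counts and that older baselines are reachable under the same directory (verify both against the FTP listing before use):

    # Placeholder counts -- check ftp://ftp.ncbi.nlm.nih.gov/pubmed/baseline/ for the real values.
    _FILES_PER_BASELINE = {"pubmed24": 1219, "pubmed25": 1274}  # hypothetical mapping

    _URLs = [
        f"https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/{name}n{i:04d}.xml.gz"
        for name, n_files in _FILES_PER_BASELINE.items()
        for i in range(1, n_files + 1)
    ]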
 
@@ -53,9 +40,27 @@ _HOMEPAGE = "https://www.nlm.nih.gov/databases/download/pubmed_medline.html"

_LICENSE = ""  # Assuming standard NLM terms apply, check source for specifics

- # Example URLs - Replace with desired baseline/update files
- _URLs = [f"https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed25n{i:04d}.xml.gz" for i in range(800, 1274)]
+ # Parameters
+ total_files = 1274
+ num_bins = 50
+ total_urls = 20
+
+ # Compute bin size
+ bin_size = total_files // num_bins
+
+ # Sample one random file index from each bin
+ selected_indices = []
+ for b in range(num_bins):
+     start = b * bin_size + 1
+     end = min((b + 1) * bin_size + 1, total_files + 1)
+     if start < end:
+         selected_indices.append(random.randint(start, end - 1))
+
+ # Keep only 20 URLs in total (in case rounding leaves more candidates)
+ selected_indices = sorted(random.sample(selected_indices, total_urls))
+
+ # Build the URLs from the sampled indices (a fixed range here would discard the sampling above)
+ _URLs = [f"https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed25n{i:04d}.xml.gz" for i in selected_indices]

# Copyright Ferry Boender, released under the MIT license.
# Modified by @Narsil to handle more oddities
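
The sampling logic introduced above is unseeded, so every run selects (and downloads) a different 20-file subset. If a reproducible subset is wanted, the same binned selection can be pinned with a seed; a minimal self-contained sketch (the seed value is an arbitrary illustrative choice):

    import random

    rng = random.Random(42)  # fixed seed: the same 20 files on every run
    total_files, num_bins, total_urls = 1274, 50, 20
    bin_size = total_files // num_bins  # 25 files per bin

    # One candidate index per bin, then thin down to the target count.
    candidates = [
        rng.randint(b * bin_size + 1, min((b + 1) * bin_size, total_files))
        for b in range(num_bins)
    ]
    selected = sorted(rng.sample(candidates, total_urls))
    print(selected)  # 20 indices spread across 0001..1250

Note that with bin_size = 25 and 50 bins, indices 1251-1274 can never be drawn (in this sketch or in the committed code); widening the last bin would include them.
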
@@ -577,92 +582,85 @@ class Pubmed(datasets.GeneratorBasedBuilder):


    def _generate_examples(self, filenames):
-         """Yields examples parsing XML files using iterparse for memory efficiency."""
-         id_ = 0  # Simple counter for yielding unique keys for examples
+         """Yields examples parsing XML files using iterparse for memory efficiency, skipping duplicate PMIDs."""
+         # id_ = 0  # Simple counter if needed for logging, but not used as key anymore
+         yielded_pmids = set()  # Keep track of PMIDs we've already yielded

        for filename in filenames:
            logger.info(f"Processing file: {filename}")
            try:
                with gzip.open(filename, "rb") as f:  # Read as bytes for ET
-                     # Use iterparse to process the XML incrementally
                    context = ET.iterparse(f, events=("end",))
-                     # Get the root element iterator (needed for clearing)
-                     event, root = next(context)
+                     event, root = next(context)  # Get root iterator

                    for event, elem in context:
-                         # Process each PubmedArticle element when its closing tag is found
                        if event == "end" and elem.tag == "PubmedArticle":
-                             article_dict_wrapper = None  # Reset for each article
+                             article_dict_wrapper = None
+                             pmid = "UNKNOWN_PMID"  # Default for logging if extraction fails early
                            try:
-                                 # Parse the completed element into our dictionary structure
                                article_dict_wrapper = self.xml_to_dictionnary(elem)
-                                 # Expected structure: {'PubmedArticle': {...actual data...}}

                                if not article_dict_wrapper or 'PubmedArticle' not in article_dict_wrapper:
                                    logger.warning(f"Parser returned empty or invalid structure for a PubmedArticle element in {filename}")
-                                     elem.clear()  # Free memory for this element
-                                     root.clear()  # Periodically clear root too? Maybe not needed per element.
+                                     elem.clear()
                                    continue

                                article = article_dict_wrapper.get('PubmedArticle')
-
-                                 # Ensure the extracted article data is not empty
                                if not article or not isinstance(article, dict):
                                    logger.warning(f"Parsed empty or invalid article data from element in {filename}")
                                    elem.clear()
                                    continue

-                                 # --- Data Cleaning and Structuring ---
-                                 # 1. Flatten the ReferenceList structure
-                                 self.update_citation(article)
-
-                                 # 2. Create the default structure expected by the schema
-                                 new_article = default_article()
-
-                                 # 3. Merge the parsed data into the default structure
-                                 deepupdate(new_article, article)
-
-                                 # --- Validation and Yielding ---
-                                 # Retrieve PMID for yielding and logging (ensure it's valid)
-                                 pmid_val = new_article.get("MedlineCitation", {}).get("PMID", 0)
-                                 pmid = 0
+                                 # --- Extract PMID early for duplicate check ---
+                                 pmid_val = article.get("MedlineCitation", {}).get("PMID", 0)
                                try:
                                    pmid = int(pmid_val)
                                    if pmid <= 0: raise ValueError("PMID must be positive")
                                except (ValueError, TypeError):
                                    logger.warning(f"Skipping article due to invalid or missing PMID: '{pmid_val}' in {filename}")
                                    elem.clear()
-                                     continue  # Skip this article
+                                     continue  # Skip this article if PMID invalid
+
+                                 # --- Duplicate check ---
+                                 if pmid in yielded_pmids:
+                                     logger.warning(f"Skipping duplicate PMID {pmid} found in {filename}.")
+                                     elem.clear()
+                                     continue  # Skip this duplicate entry
+
+                                 # --- If not a duplicate, proceed with processing ---
+                                 self.update_citation(article)
+                                 new_article = default_article()
+                                 deepupdate(new_article, article)
+
+                                 # --- Final validation before yield (PMID check redundant but safe) ---
+                                 final_pmid_check = new_article.get("MedlineCitation", {}).get("PMID", 0)
+                                 if final_pmid_check != pmid:
+                                     logger.error(f"PMID mismatch after processing! Expected {pmid}, got {final_pmid_check}. Skipping article.")
+                                     elem.clear()
+                                     continue

-                                 # Validate the final structure against the features schema
+                                 # Validate against the features schema
                                encoded_example = self.info.features.encode_example(new_article)

                                # Yield pmid as key and the validated dictionary
                                yield pmid, new_article  # Use actual PMID as the example key
-                                 id_ += 1  # Increment processed count if needed
+                                 yielded_pmids.add(pmid)  # Add to set *after* successful yield

                            except Exception as e:
-                                 # Catch errors during parsing, deepupdate, or encoding for a single article
-                                 pmid_for_log = article.get("MedlineCitation", {}).get("PMID", "UNKNOWN") if 'article' in locals() else "UNKNOWN"
-                                 logger.error(f"Failed to process article PMID {pmid_for_log} in {filename}: {e}", exc_info=True)
-                                 # Optionally log the problematic article data (can be large)
+                                 logger.error(f"Failed to process article PMID {pmid} in {filename}: {e}", exc_info=False)  # exc_info=True for full traceback
+                                 # Logging the data that caused the error can be helpful but verbose:
                                # if 'new_article' in locals(): logger.debug(f"Problematic data: {new_article}")

                            finally:
-                                 # --- Memory Management ---
-                                 # Crucial step for iterparse: clear the element and its descendants
-                                 elem.clear()
-                                 # Optionally, clear previous siblings from the root periodically
-                                 # while root and root.getprevious() is not None:
-                                 #     del root.getparent()[0]
-
-                     # Clear the root element after processing the file
+                                 elem.clear()  # Clear the element in all cases (success, skip, error)
+
                    if root is not None:
                        root.clear()

            except ET.ParseError as e:
                logger.error(f"XML ParseError in file {filename}: {e}")
-                 continue  # Skip to the next file
+                 continue
            except gzip.BadGzipFile:
                logger.error(f"Bad Gzip File error for {filename}. It might be corrupted or incomplete.")
                continue
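
The finally-block change above keeps the clear-as-you-go pattern that bounds iterparse memory. One subtlety: with events=("end",), next(context) returns the first element that closes (a leaf deep in the tree), not the document root, so the root.clear() calls may not be clearing what the comments suggest. A self-contained sketch of the pattern with the root captured reliably (the counting task is hypothetical):

    import gzip
    import xml.etree.ElementTree as ET

    def count_articles(path):
        """Count PubmedArticle elements without building the whole tree in memory."""
        count = 0
        with gzip.open(path, "rb") as f:
            # Request "start" events as well: the root opens first, so the
            # first start event reliably hands back the root element.
            context = ET.iterparse(f, events=("start", "end"))
            _, root = next(context)
            for event, elem in context:
                if event == "end" and elem.tag == "PubmedArticle":
                    count += 1
                    elem.clear()  # free this article's subtree
                    root.clear()  # drop already-processed children from the root
        return count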
 
 
65
  # Copyright Ferry Boender, released under the MIT license.
66
  # Modified by @Narsil to handle more oddities
 
582
 
583
 
584
  def _generate_examples(self, filenames):
585
+ """Yields examples parsing XML files using iterparse for memory efficiency, skipping duplicate PMIDs."""
586
+ # id_ = 0 # Simple counter if needed for logging, but not used as key anymore
587
+ yielded_pmids = set() # Keep track of PMIDs we've already yielded
588
 
589
  for filename in filenames:
590
  logger.info(f"Processing file: {filename}")
591
  try:
592
  with gzip.open(filename, "rb") as f: # Read as bytes for ET
 
593
  context = ET.iterparse(f, events=("end",))
594
+ event, root = next(context) # Get root iterator
 
595
 
596
  for event, elem in context:
 
597
  if event == "end" and elem.tag == "PubmedArticle":
598
+ article_dict_wrapper = None
599
+ pmid = "UNKNOWN_PMID" # Default for logging if extraction fails early
600
  try:
 
601
  article_dict_wrapper = self.xml_to_dictionnary(elem)
 
602
 
603
  if not article_dict_wrapper or 'PubmedArticle' not in article_dict_wrapper:
604
  logger.warning(f"Parser returned empty or invalid structure for a PubmedArticle element in {filename}")
605
+ elem.clear()
 
606
  continue
607
 
608
  article = article_dict_wrapper.get('PubmedArticle')
 
 
609
  if not article or not isinstance(article, dict):
610
  logger.warning(f"Parsed empty or invalid article data from element in {filename}")
611
  elem.clear()
612
  continue
613
 
614
+ # --- Extract PMID early for duplicate check ---
615
+ pmid_val = article.get("MedlineCitation", {}).get("PMID", 0)
 
 
 
 
 
 
 
 
 
 
 
 
616
  try:
617
  pmid = int(pmid_val)
618
  if pmid <= 0: raise ValueError("PMID must be positive")
619
  except (ValueError, TypeError):
620
  logger.warning(f"Skipping article due to invalid or missing PMID: '{pmid_val}' in {filename}")
621
  elem.clear()
622
+ continue # Skip this article if PMID invalid
623
+
624
+ # --- !!! DUPLICATE CHECK !!! ---
625
+ if pmid in yielded_pmids:
626
+ logger.warning(f"Skipping duplicate PMID {pmid} found in {filename}.")
627
+ elem.clear()
628
+ continue # Skip this duplicate entry
629
+ # --- End DUPLICATE CHECK ---
630
+
631
+ # --- If not duplicate, proceed with processing ---
632
+ self.update_citation(article)
633
+ new_article = default_article()
634
+ deepupdate(new_article, article)
635
 
636
+ # --- Final validation before yield (PMID check redundant but safe) ---
637
+ final_pmid_check = new_article.get("MedlineCitation", {}).get("PMID", 0)
638
+ if final_pmid_check != pmid:
639
+ logger.error(f"PMID mismatch after processing! Expected {pmid}, got {final_pmid_check}. Skipping article.")
640
+ elem.clear()
641
+ continue
642
+
643
+ # Validate against schema
644
  encoded_example = self.info.features.encode_example(new_article)
645
 
646
  # Yield pmid as key and the validated dictionary
647
  yield pmid, new_article # Use actual PMID as the example key
648
+ yielded_pmids.add(pmid) # Add to set *after* successful yield
649
 
650
  except Exception as e:
651
+ logger.error(f"Failed to process article PMID {pmid} in {filename}: {e}", exc_info=False) # exc_info=True for full traceback
652
+ # Logging data causing error can be helpful but verbose:
 
 
653
  # if 'new_article' in locals(): logger.debug(f"Problematic data: {new_article}")
654
 
655
  finally:
656
+ elem.clear() # Clear the element in all cases (success, skip, error)
657
+
 
 
 
 
 
 
658
  if root is not None:
659
  root.clear()
660
 
661
  except ET.ParseError as e:
662
  logger.error(f"XML ParseError in file {filename}: {e}")
663
+ continue
664
  except gzip.BadGzipFile:
665
  logger.error(f"Bad Gzip File error for {filename}. It might be corrupted or incomplete.")
666
  continue
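
For reference, loading a script-based dataset like this one typically looks as follows; recent datasets releases require trust_remote_code for scripts, and the local path is illustrative:

    import datasets

    ds = datasets.load_dataset(
        "pubmed25.py",           # local path to this loader script
        split="train",
        trust_remote_code=True,  # dataset scripts must be explicitly trusted
    )
    print(ds[0]["MedlineCitation"]["PMID"])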