# Config file by Simon Hengchen, https://hengchen.net
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """
@misc{botenanna,
title = {"Jag känner en bot, hon heter [MASK]. A BERT for older Swedish, and a more usable dataset for historical newspapers"},
author = {Simon Hengchen},
year={2023},
}
"""
_DESCRIPTION = """
This is a version of the Kubhist 2 dataset created, curated and made available by Språkbanken Text (SBX) at the University of Gothenburg (Sweden) under the CC BY 4.0 license.
This is a corpus of OCRed newspapers from Sweden spanning the 1640s to the 1900s.
The original data is available with many types of annotation in XML at https://spraakbanken.gu.se/en/resources/kubhist2.
A good description of the data is available in this blog entry by Dana Dannélls: https://spraakbanken.gu.se/blogg/index.php/2019/09/15/the-kubhist-corpus-of-swedish-newspapers/
In a nutshell, this Hugging Face dataset version offers:
- only the OCRed text
- available in decadal subsets
License is CC BY 4.0 with attribution.
"""
_URL = "https://github.com/ChangeIsKey/kubhist2"
_URLS = {
    '1640': './text/1640/1640.txt.gz',
    '1650': './text/1650/1650.txt.gz',
    '1660': './text/1660/1660.txt.gz',
    '1670': './text/1670/1670.txt.gz',
    '1680': './text/1680/1680.txt.gz',
    '1690': './text/1690/1690.txt.gz',
    '1700': './text/1700/1700.txt.gz',
    '1710': './text/1710/1710.txt.gz',
    '1720': './text/1720/1720.txt.gz',
    '1730': './text/1730/1730.txt.gz',
    '1740': './text/1740/1740.txt.gz',
    '1750': './text/1750/1750.txt.gz',
    '1760': './text/1760/1760.txt.gz',
    '1770': './text/1770/1770.txt.gz',
    '1780': './text/1780/1780.txt.gz',
    '1790': './text/1790/1790.txt.gz',
    '1800': './text/1800/1800.txt.gz',
    '1810': './text/1810/1810.txt.gz',
    '1820': './text/1820/1820.txt.gz',
    '1830': './text/1830/1830.txt.gz',
    '1840': './text/1840/1840.txt.gz',
    '1850': './text/1850/1850.txt.gz',
    '1860': './text/1860/1860.txt.gz',
    '1870': './text/1870/1870.txt.gz',
    '1880': './text/1880/1880.txt.gz',
    '1890': './text/1890/1890.txt.gz',
    '1900': './text/1900/1900.txt.gz',
}

class kubhist2Config(datasets.BuilderConfig):
    """BuilderConfig for kubhist2."""

    def __init__(self, period="all", **kwargs):
        """Constructs a kubhist2Config.

        Args:
            period: any key in _URLS; `all` (the default) takes every decade.
            **kwargs: keyword arguments forwarded to super.
        """
        if str(period) not in _URLS:
            # Unknown or missing period: fall back to the full corpus.
            self.period = "all"
        else:
            self.period = str(period)
        super(kubhist2Config, self).__init__(**kwargs)

class kubhist2(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIG_CLASS = kubhist2Config

    # One config per decade, plus an "all" config covering the full corpus.
    BUILDER_CONFIGS = []
    for key in _URLS:
        BUILDER_CONFIGS.append(
            kubhist2Config(
                name=key,
                version=datasets.Version("1.0.1", ""),
                description=f"Kubhist2: {key}",
                period=key,
            )
        )
    BUILDER_CONFIGS.append(
        kubhist2Config(
            name="all",
            version=datasets.Version("1.0.1", ""),
            description="Kubhist2: all",
            period="all",
        )
    )
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        # A single string feature: one line of OCRed text per example.
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # A single TRAIN split: one decadal file, or the concatenated "all" file.
        if self.config.period != "all":
            url = {"train": _URLS[self.config.period]}
        else:
            url = {"train": "./text/all/all.txt.gz"}
        downloaded_files = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            )
        ]

    def _generate_examples(self, filepath):
        """Yields examples: one per line of the extracted text file."""
        # download_and_extract already decompressed the .txt.gz file;
        # the text is assumed to be UTF-8 encoded.
        with open(filepath, encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, {"text": line.rstrip()}