Datasets:
Commit
·
71db6d3
0
Parent(s):
Initial HF dataset builder for S&P 500 earnings transcripts
Browse files- .gitattributes +59 -0
- README.md +163 -0
- parquet_files/part-0.parquet +3 -0
- requirements.txt +2 -0
- sp500_earnings_transcripts.py +87 -0
.gitattributes
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.mds filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
26 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
28 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
35 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
36 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
37 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
38 |
+
# Audio files - uncompressed
|
39 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
40 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
41 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
42 |
+
# Audio files - compressed
|
43 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
44 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
45 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
46 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
47 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
48 |
+
# Image files - uncompressed
|
49 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
50 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
51 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
52 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
53 |
+
# Image files - compressed
|
54 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
55 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
56 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
57 |
+
# Video files - compressed
|
58 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
59 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
license: mit
|
3 |
+
language:
|
4 |
+
- en
|
5 |
+
pretty_name: "S&P 500 Earnings Call Transcripts (2005-2025)"
|
6 |
+
tags:
|
7 |
+
- finance
|
8 |
+
- earnings
|
9 |
+
- transcripts
|
10 |
+
- sp500
|
11 |
+
- stocks
|
12 |
+
- text
|
13 |
+
datasets:
|
14 |
+
- kurry/sp500_earnings_transcripts
|
15 |
+
task_categories:
|
16 |
+
- text-generation
|
17 |
+
- text-classification
|
18 |
+
size_categories:
|
19 |
+
- 10M<n<100M
|
20 |
+
date: "2025-05-19"
|
21 |
+
---
|
22 |
+
# S&P 500 Earnings Transcripts Dataset
|
23 |
+
|
24 |
+
This comprehensive dataset contains earnings call transcripts for S&P 500 companies and US large-caps, spanning from 2005 to 2025. Earnings calls provide valuable insights into company performance, strategic initiatives, and management perspectives that are essential for financial analysis, natural language processing research, and market sentiment studies.
|
25 |
+
|
26 |
+
## Dataset Description
|
27 |
+
|
28 |
+
This collection includes:
|
29 |
+
|
30 |
+
- **Complete transcripts**: Full verbatim records of quarterly earnings calls for S&P 500 constituents and related large-cap US companies
|
31 |
+
- **Structured dialogues**: Organized speaker-by-speaker segmentation allowing for detailed conversation analysis
|
32 |
+
- **Rich metadata**: Comprehensive company information including ticker symbols, company names, unique identifiers, and temporal data
|
33 |
+
- **Extensive coverage**: Over 33,000 unique transcripts across 685 companies and more than two decades
|
34 |
+
|
35 |
+
## Data Structure
|
36 |
+
|
37 |
+
Each transcript record contains the following fields:
|
38 |
+
|
39 |
+
| Field | Type | Description |
|
40 |
+
|----------------------|---------|-----------------------------------------------------------------|
|
41 |
+
| `symbol` | string | Stock ticker symbol (e.g., "AAPL") |
|
42 |
+
| `company_name` | string | Full company name (e.g., "Apple Inc.") |
|
43 |
+
| `company_id` | float | Capital IQ Company identifier |
|
44 |
+
| `year` | integer | Fiscal year of the earnings call |
|
45 |
+
| `quarter` | integer | Fiscal quarter (1-4) |
|
46 |
+
| `date` | string | Earnings call date in YYYY-MM-DD HH:MM:SS format |
|
47 |
+
| `content` | string | Complete raw transcript text |
|
48 |
+
| `structured_content` | object | Array of `{speaker, text}` objects segmenting the dialogue |
|
49 |
+
|
50 |
+
## Coverage and Distribution
|
51 |
+
|
52 |
+
The dataset provides extensive temporal coverage with transcript counts increasing significantly after 2007:
|
53 |
+
|
54 |
+
- **Total companies:** 685
|
55 |
+
- **Total transcripts:** 33,362
|
56 |
+
- **Year range:** 2005–2025
|
57 |
+
- **Quarter range:** 1–4
|
58 |
+
- **Example company tickers:** MCHP, INFO, HCA, NEM, ERIE, ZBRA, GD, PSA, DLR, STI
|
59 |
+
|
60 |
+
### Transcript Length Statistics
|
61 |
+
|
62 |
+
- Characters per transcript: min=0 (some records have empty content), median=53,734, max=244,695
|
63 |
+
|
64 |
+
### Top Companies by Transcript Count
|
65 |
+
- Akamai Technologies, Inc.: 78 transcripts
|
66 |
+
- AutoZone, Inc.: 78 transcripts
|
67 |
+
- Biogen Inc.: 78 transcripts
|
68 |
+
- Broadcom Inc.: 78 transcripts
|
69 |
+
- Adobe Inc.: 78 transcripts
|
70 |
+
- Amazon.com, Inc.: 78 transcripts
|
71 |
+
- Amgen Inc.: 77 transcripts
|
72 |
+
- The Boeing Company: 76 transcripts
|
73 |
+
- Automatic Data Processing, Inc.: 76 transcripts
|
74 |
+
- Applied Materials, Inc.: 76 transcripts
|
75 |
+
|
76 |
+
### Transcript Count by Year
|
77 |
+
|
78 |
+
```
|
79 |
+
2005: 67 | 2010: 1322 | 2015: 1814 | 2020: 2210
|
80 |
+
2006: 358 | 2011: 1556 | 2016: 1899 | 2021: 2170
|
81 |
+
2007: 927 | 2012: 1710 | 2017: 1946 | 2022: 2110
|
82 |
+
2008: 1489 | 2013: 1765 | 2018: 1969 | 2023: 2079
|
83 |
+
2009: 1497 | 2014: 1780 | 2019: 2014 | 2024: 2033
|
84 |
+
| 2025: 647 (partial)
|
85 |
+
```
|
86 |
+
|
87 |
+
## S&P 500 Coverage
|
88 |
+
|
89 |
+
As of May 2025, this dataset includes transcripts from all major S&P 500 constituents and primary equity listings. Some companies have multiple share classes but only the primary class is typically included in earnings calls.
|
90 |
+
|
91 |
+
The dataset includes transcripts from companies across all 11 GICS sectors:
|
92 |
+
- Information Technology
|
93 |
+
- Health Care
|
94 |
+
- Financials
|
95 |
+
- Consumer Discretionary
|
96 |
+
- Industrials
|
97 |
+
- Communication Services
|
98 |
+
- Consumer Staples
|
99 |
+
- Energy
|
100 |
+
- Utilities
|
101 |
+
- Real Estate
|
102 |
+
- Materials
|
103 |
+
|
104 |
+
### Sample Companies
|
105 |
+
|
106 |
+
```
|
107 |
+
A – Agilent Technologies, Inc.
|
108 |
+
AAPL – Apple Inc.
|
109 |
+
ABBV – AbbVie Inc.
|
110 |
+
ABNB – Airbnb, Inc.
|
111 |
+
ABT – Abbott Laboratories
|
112 |
+
…
|
113 |
+
XOM – Exxon Mobil Corporation
|
114 |
+
XYL – Xylem Inc.
|
115 |
+
YUM – Yum! Brands, Inc.
|
116 |
+
ZTS – Zoetis Inc.
|
117 |
+
```
|
118 |
+
## Typical Transcript Structure
|
119 |
+
|
120 |
+
Most earnings call transcripts follow a standard format:
|
121 |
+
1. **Call introduction**: Operator remarks and standard disclaimers
|
122 |
+
2. **Management presentation**: Prepared remarks from executives (typically CEO and CFO)
|
123 |
+
3. **Q&A session**: Analyst questions and management responses
|
124 |
+
4. **Call conclusion**: Closing remarks and end of call notification
|
125 |
+
|
126 |
+
The `structured_content` field makes it possible to analyze these distinct sections separately for more nuanced research.
|
127 |
+
|
128 |
+
## Usage Examples
|
129 |
+
|
130 |
+
```python
|
131 |
+
from datasets import load_dataset
|
132 |
+
|
133 |
+
# Load the dataset
|
134 |
+
ds = load_dataset("kurry/sp500_earnings_transcripts")
|
135 |
+
|
136 |
+
# Inspect a sample record
|
137 |
+
rec = ds["train"][0]
|
138 |
+
print(rec["symbol"], rec["company_name"], rec["date"], "Q"+str(rec["quarter"]), rec["year"])
|
139 |
+
for seg in rec["structured_content"][:3]:
|
140 |
+
print(seg["speaker"], ":", seg["text"][:80], "…")
|
141 |
+
```
|
142 |
+
|
143 |
+
```python
|
144 |
+
# Filter Apple transcripts from 2022
|
145 |
+
apple_2022 = ds["train"].filter(lambda x: x["symbol"]=="AAPL" and x["year"]==2022)
|
146 |
+
print("Apple 2022 count:", len(apple_2022))
|
147 |
+
```
|
148 |
+
|
149 |
+
## Citation
|
150 |
+
|
151 |
+
```
|
152 |
+
@dataset{kurry2025sp500earnings,
|
153 |
+
author = {Kurry},
|
154 |
+
title = {S&P 500 Earnings Transcripts Dataset},
|
155 |
+
year = {2025},
|
156 |
+
publisher = {Hugging Face},
|
157 |
+
url = {https://huggingface.co/datasets/kurry/sp500_earnings_transcripts}
|
158 |
+
}
|
159 |
+
```
|
160 |
+
|
161 |
+
## License & Limitations
|
162 |
+
|
163 |
+
Licensed under the MIT license. This dataset is intended primarily for research and educational use.
|
parquet_files/part-0.parquet
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:9a305a1c4cbf6c4b0b9c8b98b242ea3de4dbe0ad43937dc54b56418629a17b31
|
3 |
+
size 1824805388
|
requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
pandas
|
2 |
+
pyarrow
datasets
|
sp500_earnings_transcripts.py
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
DatasetBuilder for S&P 500 Earnings Call Transcripts (2005-2025) dataset.
"""
import os
from pathlib import Path

import datasets
import pyarrow.parquet as pq

# BibTeX entry surfaced through DatasetInfo.citation.
_CITATION = """
@dataset{kurry2025sp500earnings,
author = {Kurry},
title = {S&P 500 Earnings Transcripts Dataset},
year = {2025},
publisher = {Hugging Face},
url = {https://huggingface.co/datasets/kurry/sp500_earnings_transcripts}
}
"""

# Short summary surfaced through DatasetInfo.description.
_DESCRIPTION = """
Full earnings call transcripts for S&P 500 companies and US large-cap companies
from 2005 to 2025, including metadata and structured speaker-by-speaker dialogue.
"""

_HOMEPAGE = "https://huggingface.co/datasets/kurry/sp500_earnings_transcripts"
_LICENSE = "mit"
class Sp500EarningsTranscripts(datasets.GeneratorBasedBuilder):
    """S&P 500 Earnings Call Transcripts Dataset"""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="S&P 500 earnings call transcripts dataset",
        )
    ]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return dataset metadata and the feature schema.

        The schema mirrors the parquet column layout: flat metadata columns
        plus a nested sequence of {speaker, text} dialogue segments.
        """
        features = datasets.Features({
            "symbol": datasets.Value("string"),        # ticker, e.g. "AAPL"
            "company_name": datasets.Value("string"),
            "company_id": datasets.Value("float64"),   # Capital IQ identifier
            "year": datasets.Value("int32"),           # fiscal year
            "quarter": datasets.Value("int32"),        # fiscal quarter (1-4)
            "date": datasets.Value("string"),          # "YYYY-MM-DD HH:MM:SS"
            "content": datasets.Value("string"),       # full raw transcript
            "structured_content": datasets.Sequence(
                datasets.Features({
                    "speaker": datasets.Value("string"),
                    "text": datasets.Value("string"),
                })
            ),
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Declare a single train split over the bundled parquet shards.

        Data ships with the repository in the local ``parquet_files``
        directory, so nothing is downloaded and ``dl_manager`` is unused.
        """
        data_dir = Path(__file__).resolve().parent / "parquet_files"
        # Sort shards for a deterministic iteration order (stable example keys).
        filepaths = sorted(data_dir.glob("*.parquet"))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": filepaths},
            )
        ]

    def _generate_examples(self, filepaths):
        """Yield ``(index, record)`` pairs from parquet files.

        Args:
            filepaths: list of pathlib.Path to parquet files

        Streams record batches via ``pq.ParquetFile(...).iter_batches()``
        rather than ``pq.read_table(path).to_pylist()``: the shards are
        large (the bundled shard is ~1.8 GB), and reading a whole table
        and converting it to a Python list would hold the entire shard's
        contents in memory at once.
        """
        idx = 0
        for path in filepaths:
            parquet_file = pq.ParquetFile(path)
            for batch in parquet_file.iter_batches():
                # to_pylist preserves the nested structured_content column
                # as a list of {speaker, text} dicts, matching _info().
                for record in batch.to_pylist():
                    yield idx, record
                    idx += 1