Update README.md
README.md
@@ -18,6 +18,7 @@ from IPython.display import clear_output, display, HTML
 
 import os, time, shutil, sys, json, io, zipfile, requests, shutil, re
 import pandas as pd
+from os import makedirs as mk, remove as rm, getcwd as cwd, listdir as ls
 from os.path import join as osj, isdir as osd, isfile as osf, basename as osb
 
 url = 'https://www.sec.gov/Archives/edgar/daily-index/xbrl/companyfacts.zip'
@@ -53,7 +54,7 @@ def process_json_file(json_data, api, token, repo_id, work):
 
     cik_folder = re.sub(r'[^a-zA-Z0-9]', '_', osj(f'{cik}_{entity_name}')).replace('__', '_').strip('_')
 
-
+    mk(cik_folder, exist_ok=True)
 
     fact_keys = data['facts'].keys()
 
@@ -89,7 +90,7 @@ def process_json_file(json_data, api, token, repo_id, work):
         [df_dei_EntityPublicFloat, 'EntityPublicFloat']
     ]:
         if df is not None:
-            filename =
+            filename = osj(cik_folder, name) + '.parquet'
             df.to_parquet(filename, index=False)
 
     def process_fact_section(section_key, section_data):
@@ -103,29 +104,29 @@ def process_json_file(json_data, api, token, repo_id, work):
 
     df_facts_invest = process_fact_section('invest', data['facts'].get('invest', {}))
     if df_facts_invest is not None:
-        filename =
+        filename = osj(cik_folder, 'Facts_Invest.parquet')
         df_facts_invest.to_parquet(filename, index=False)
 
     df_facts_srt = process_fact_section('srt', data['facts'].get('srt', {}))
     if df_facts_srt is not None:
-        filename =
+        filename = osj(cik_folder, 'Facts_Srt.parquet')
         df_facts_srt.to_parquet(filename, index=False)
 
     df_facts_usgaap = process_fact_section('us-gaap', data['facts'].get('us-gaap', {}))
     if df_facts_usgaap is not None:
-        filename =
+        filename = osj(cik_folder, 'Facts_UsGaap.parquet')
        df_facts_usgaap.to_parquet(filename, index=False)
 
     df_facts_ifrs = process_fact_section('ifrs-full', data['facts'].get('ifrs-full', {}))
     if df_facts_ifrs is not None:
-        filename =
+        filename = osj(cik_folder, 'Facts_IfrsFull.parquet')
         df_facts_ifrs.to_parquet(filename, index=False)
 
 
-
+    rm(json_data)
 
-work =
-json_files = [i for i in
+work = cwd()
+json_files = [i for i in ls(work) if i.endswith('.json')]
 
 for json_data in json_files:
     process_json_file(json_data, api, token, repo_id, work)
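Taken together, the diff aliases a handful of `os` helpers (`makedirs`, `remove`, `getcwd`, `listdir`), creates a per-company output folder from the sanitized CIK and entity name, and writes each DEI table as a parquet file inside that folder. The following is a minimal sketch of that flow; `cik`, `entity_name`, and the DataFrame contents are placeholder values, and `DataFrame.to_parquet` additionally needs `pyarrow` or `fastparquet` installed.

```python
# Minimal sketch of the per-company output layout this change introduces.
# cik, entity_name, and the DataFrame contents are placeholder values.
import re
import pandas as pd
from os import makedirs as mk
from os.path import join as osj

cik = '0000320193'
entity_name = 'Apple Inc.'

# Sanitize "<cik>_<entity name>" into a filesystem-safe folder name.
cik_folder = re.sub(r'[^a-zA-Z0-9]', '_', f'{cik}_{entity_name}').replace('__', '_').strip('_')
mk(cik_folder, exist_ok=True)  # create the folder if it does not already exist

# One parquet file per DEI table, named after the concept.
df_dei_EntityPublicFloat = pd.DataFrame({'end': ['2024-06-30'], 'val': [1000000]})
for df, name in [
    [df_dei_EntityPublicFloat, 'EntityPublicFloat'],
]:
    if df is not None:
        filename = osj(cik_folder, name) + '.parquet'
        df.to_parquet(filename, index=False)  # requires pyarrow or fastparquet
```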
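The diff only shows the calls to `process_fact_section` and the new `Facts_<Section>.parquet` filenames; the body of `process_fact_section` is not part of this change. The sketch below is therefore an assumption based on the layout of SEC companyfacts JSON (taxonomy → concept → `units` → list of fact records), with a tiny hand-made section and placeholder values.

```python
# Hypothetical process_fact_section: its real body is not shown in this diff.
# It assumes the SEC companyfacts layout, where section_data[concept]['units'][unit]
# is a list of fact records with keys such as 'end', 'val', 'form', 'filed'.
import pandas as pd
from os.path import join as osj

def process_fact_section(section_key, section_data):
    rows = []
    for concept, payload in section_data.items():
        for unit, facts in payload.get('units', {}).items():
            for fact in facts:
                rows.append({'section': section_key, 'concept': concept, 'unit': unit, **fact})
    return pd.DataFrame(rows) if rows else None

# Tiny hand-made example; the new filenames follow the Facts_<Section>.parquet
# pattern from the diff (e.g. Facts_UsGaap.parquet inside the company folder).
data = {'facts': {'us-gaap': {'Assets': {'units': {'USD': [
    {'end': '2024-12-31', 'val': 1000000, 'form': '10-K', 'filed': '2025-02-01'}
]}}}}}
cik_folder = '.'  # placeholder; the real script uses the sanitized per-company folder
df_facts_usgaap = process_fact_section('us-gaap', data['facts'].get('us-gaap', {}))
if df_facts_usgaap is not None:
    filename = osj(cik_folder, 'Facts_UsGaap.parquet')
    df_facts_usgaap.to_parquet(filename, index=False)
```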
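At the bottom of the diff, the driver loop is wired to the aliased helpers: `work = cwd()` takes the current directory, `ls(work)` filters for the extracted `.json` files, and each file is deleted with `rm(json_data)` once it has been processed. Below is a sketch with a stub `process_json_file`; `api`, `token`, and `repo_id` are placeholders for whatever client objects the full script sets up earlier.

```python
# Sketch of the rewritten driver loop; process_json_file is a stub here and
# api/token/repo_id are placeholders for objects created earlier in the script.
from os import remove as rm, getcwd as cwd, listdir as ls

api, token, repo_id = None, None, None

def process_json_file(json_data, api, token, repo_id, work):
    # The real function parses the companyfacts JSON and writes parquet files;
    # the new rm(json_data) call then deletes the source file once handled.
    print(f'processing {json_data} in {work}')
    rm(json_data)

work = cwd()                                               # working directory
json_files = [i for i in ls(work) if i.endswith('.json')]  # extracted companyfacts files

for json_data in json_files:
    process_json_file(json_data, api, token, repo_id, work)
```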