""" |
|
Extract article URLs from the RSS |
|
|
|
This script is designed to fetch and extract article URLs from the RSS feeds of various French media outlets. |
|
The sources of these RSS feeds are stored in the `base_media` table of the database. |
|
For each source, the script retrieves the RSS content, processes each entry, and stores the article URLs |
|
into the database for further processing or analysis. |
|
|
|
To install the necessary packages: |
|
pip install aiohttp mysql-connector-python |
|
|
|
Author : Guillaume Eckendoerffer |
|
Date : 03-10-23 |
|
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/ |
|
https://huggingface.co/datasets/eckendoerffer/news_fr |
|
""" |

import asyncio
import hashlib
import os
import re
import time
import xml.etree.ElementTree as ET

import aiohttp

from config import DB_CONFIG
from utils import save_to_file, create_connection

path = os.getcwd()


def update_last_fetched(connection, id_source, last):
    """Record the Unix timestamp of the latest fetch attempt for a source."""
    cursor = connection.cursor()
    cursor.execute("UPDATE `base_media` SET `last`=%s WHERE `id`=%s LIMIT 1", (last, id_source))
    # Commit explicitly: mysql-connector-python does not autocommit by default.
    connection.commit()


def fetch_rss(url):
    """Fetch the raw RSS/XML content of a feed URL, returning '' on failure."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
        "Accept-Language": "fr-FR;q=1.0",
        "Accept-Encoding": "gzip, deflate",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "DNT": "1",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1"
    }
    # asyncio.run() creates and closes a fresh event loop for each call,
    # replacing the deprecated get_event_loop()/run_until_complete() pattern.
    return asyncio.run(fetch_rss_async(url, headers))


async def fetch_rss_async(url, headers):
    """Download the feed body, returning an empty string on a non-200 status."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers) as resp:
            if resp.status == 200:
                return await resp.text()
            # Returning "" (rather than falling through to None) keeps the
            # callers' `data.strip()` checks safe.
            return ""
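
# Example with a hypothetical feed URL:
#   xml_text = fetch_rss("https://www.example.fr/rss.xml")
# returns the raw XML document as a string, or "" when the request fails.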
|
|
|
def check_url(connection, id_source, url): |
|
global nb |
|
for pattern in ['#', '?utm', '</loc>', '</url>']: |
|
url = url.split(pattern)[0] |
|
url = url.replace('<loc>', '') |
|
url = url.replace('<url>', '') |
|
key = hashlib.md5(url.encode()).hexdigest() |
|
nb += 1 |
|
|
|
cursor = connection.cursor() |
|
cursor.execute("SELECT `id` FROM `base_news` WHERE `key_media`=%s OR `url` LIKE %s LIMIT 1", (key, url + "%")) |
|
nb_base = cursor.fetchone() |
|
|
|
if url and not nb_base: |
|
cursor.execute("INSERT INTO `base_news` (`key_media`, `media`, `url`, `link`, `step`) VALUES (%s, %s, %s, '0', '0')", (key, id_source, url)) |


def process_rss_content(connection, content, id_source):
    """Extract article URLs from the fetched content and store the new ones."""
    global nb
    nb = 0
    try:
        root = ET.fromstring(content)
    except ET.ParseError:
        return

    # First pass: a standard RSS feed (<channel><item><link>...).
    channel = root.find('channel')
    if channel is not None:
        for entry in channel.findall('item'):
            link_element = entry.find('link')
            if link_element is None or link_element.text is None:
                continue
            url = link_element.text.strip()
            # Strip anchors and tracking parameters.
            for pattern in ['#', '?utm', '?xtor']:
                url = url.split(pattern)[0]
            key = hashlib.md5(url.encode()).hexdigest()
            nb += 1

            cursor = connection.cursor()
            cursor.execute("SELECT `id` FROM `base_news` WHERE `key_media`=%s OR `url` LIKE %s LIMIT 1", (key, url + "%"))
            nb_base = cursor.fetchone()

            if url and not nb_base:
                cursor.execute("INSERT INTO `base_news` (`key_media`, `media`, `url`, `link`, `step`) VALUES (%s, %s, %s, '0', '0')", (key, id_source, url))
                connection.commit()

    # Second pass: some sources expose a sitemap-like layout (<loc> or <url>
    # elements) instead of RSS items.
    if nb == 0:
        for entry in root:
            url = ''
            loc_element = entry.find('loc')
            url_element = entry.find('url')

            if loc_element is not None and loc_element.text is not None:
                url = loc_element.text.strip()
            elif url_element is not None and url_element.text is not None:
                url = url_element.text.strip()

            if url:
                check_url(connection, id_source, url)

    # Last resort: a plain regex scan over the raw document.
    if nb == 0:
        links = re.findall(r'<url>(.*?)</url>', content)
        for url in links:
            check_url(connection, id_source, url)

    # Refresh the per-source article count in `base_media`.
    if nb:
        cursor = connection.cursor()
        cursor.execute("SELECT COUNT(*) FROM `base_news` WHERE `media` = %s", (id_source,))
        nb_base_news = cursor.fetchone()[0]
        cursor.execute("UPDATE `base_media` SET `nb`=%s WHERE `id`=%s LIMIT 1", (nb_base_news, id_source))
        connection.commit()


def main():
    global nb
    nb = 0
    connection = create_connection(DB_CONFIG)
    cursor = connection.cursor(dictionary=True)

    cursor.execute("SELECT `id`, `url`, `last` FROM `base_media` ORDER BY `last`")
    all_records = cursor.fetchall()

    for record in all_records:
        id_source = record['id']
        url = record['url'].strip()
        last_update = record['last']

        # Sources are ordered by `last`, so once one source was fetched less
        # than an hour ago, every remaining source is even more recent.
        if last_update + 3600 > time.time():
            break

        update_last_fetched(connection, id_source, int(time.time()))
        try:
            data = fetch_rss(url)
        except aiohttp.ClientError:
            # Network errors should skip the source, not abort the whole run.
            data = ""
        if data.strip():
            file_path = os.path.join(path, "sources", "rss", f"{id_source}.txt")
            save_to_file(file_path, data)
            process_rss_content(connection, data, id_source)
            print(f"{id_source} # ({nb}) {url}")
    connection.close()


if __name__ == "__main__":
    main()