# -*- coding: utf-8 -*-

"""
Extract article URLs from the RSS
 
This script is designed to fetch and extract article URLs from the RSS feeds of various French media outlets.
The sources of these RSS feeds are stored in the `base_media` table of the database.
For each source, the script retrieves the RSS content, processes each entry, and stores the article URLs 
into the database for further processing or analysis.

To install the necessary packages:
pip install aiohttp mysql-connector-python

Author     : Guillaume Eckendoerffer
Date       : 03-10-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/news_fr
"""

import hashlib
import xml.etree.ElementTree as ET
import aiohttp
import asyncio
import time
import os
import re
from config import DB_CONFIG
from utils import save_to_file, create_connection
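
# `create_connection` is expected to open a mysql-connector-python connection
# from DB_CONFIG, typically a keyword dict along these lines (hypothetical values):
#
#   DB_CONFIG = {
#       "host": "localhost",
#       "user": "news",
#       "password": "********",
#       "database": "news_fr",
#   }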

path = os.getcwd()

def update_last_fetched(connection, id_source, last):
    """Store the Unix timestamp of the most recent fetch for a source."""
    cursor = connection.cursor()
    cursor.execute("UPDATE `base_media` SET `last`=%s WHERE `id`=%s LIMIT 1", (last, id_source))

def fetch_rss(url):
    """Fetch the RSS/sitemap content of `url`, returning an empty string on failure."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36",
        "Accept-Language": "fr-FR;q=1.0",
        "Accept-Encoding": "gzip, deflate",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "DNT": "1",
        "Connection": "keep-alive",
        "Upgrade-Insecure-Requests": "1"
    }

    try:
        return asyncio.run(fetch_rss_async(url, headers))
    except (aiohttp.ClientError, asyncio.TimeoutError):
        return ''

async def fetch_rss_async(url, headers):
    """Download the feed asynchronously; non-200 responses yield an empty string."""
    async with aiohttp.ClientSession() as session:
        async with session.get(url, headers=headers) as resp:
            if resp.status == 200:
                return await resp.text()
            return ''

def check_url(connection, id_source, url):
    """Normalise an article URL and insert it into `base_news` if it is not already known."""
    global nb
    # Strip fragments, tracking parameters and stray sitemap tags from the URL.
    for pattern in ['#', '?utm', '</loc>', '</url>']:
        url = url.split(pattern)[0]
    url = url.replace('<loc>', '').replace('<url>', '')
    key = hashlib.md5(url.encode()).hexdigest()
    nb += 1

    cursor = connection.cursor()
    cursor.execute("SELECT `id` FROM `base_news` WHERE `key_media`=%s OR `url` LIKE %s LIMIT 1", (key, url + "%"))
    already_known = cursor.fetchone()

    if url and not already_known:
        cursor.execute("INSERT INTO `base_news` (`key_media`, `media`, `url`, `link`, `step`) VALUES (%s, %s, %s, '0', '0')", (key, id_source, url))

def process_rss_content(connection, content, id_source):
    """Extract article URLs from RSS or sitemap XML and store the new ones."""
    global nb
    nb = 0
    try:
        root = ET.fromstring(content)
    except ET.ParseError:
        return

    # First attempt: classic RSS feed, one <item> per article inside <channel>.
    channel = root.find('channel')
    if channel is not None:
        for entry in channel.findall('item'):
            link_element = entry.find('link')
            if link_element is None or link_element.text is None:
                continue
            # check_url() strips '#' and '?utm' itself; the '?xtor' tracking suffix is removed here.
            url = link_element.text.strip().split('?xtor')[0]
            check_url(connection, id_source, url)

    # Second attempt: sitemap-style XML, URLs carried by <loc> or <url> elements.
    if nb == 0:
        for entry in root:
            url = ''
            loc_element = entry.find('loc')
            url_element = entry.find('url')

            if loc_element is not None and loc_element.text is not None:
                url = loc_element.text.strip()
            elif url_element is not None and url_element.text is not None:
                url = url_element.text.strip()

            if url:
                check_url(connection, id_source, url)

    # Third attempt: fall back to a plain regex over the raw content.
    if nb == 0:
        links = re.findall(r'<url>(.*?)</url>', content)
        for url in links:
            check_url(connection, id_source, url)

    # Refresh the running count of collected URLs for this source.
    if nb:
        cursor = connection.cursor()
        cursor.execute("SELECT COUNT(*) FROM `base_news` WHERE `media` = %s", (id_source,))
        nb_base_news = cursor.fetchone()[0]
        cursor.execute("UPDATE `base_media` SET `nb`=%s WHERE `id`=%s LIMIT 1", (nb_base_news, id_source))

def main():
    global nb
    nb = 0
    connection = create_connection(DB_CONFIG)
    cursor = connection.cursor(dictionary=True)

    # Process sources starting with the one fetched the longest time ago.
    cursor.execute("SELECT `id`, `url`, `last` FROM `base_media` ORDER BY `last`")
    all_records = cursor.fetchall()

    for record in all_records:
        id_source = record['id']
        url = record['url'].strip()
        last_update = record['last']

        # Records are sorted by `last`, so the first source fetched less than
        # an hour ago means all remaining sources are already up to date.
        if last_update + 3600 > time.time():
            break

        nb = 0
        update_last_fetched(connection, id_source, int(time.time()))
        data = fetch_rss(url)
        if data and data.strip():
            file_path = os.path.join(path, "sources", "rss", f"{id_source}.txt")
            save_to_file(file_path, data)
            process_rss_content(connection, data, id_source)
        print(f"{id_source} # ({nb}) {url}")

    connection.commit()  # harmless if create_connection already enables autocommit
    connection.close()

if __name__ == "__main__":
    main()