01-23-2024, 02:33 PM   #9
Teebob
Junior Member
 
Posts: 3
Karma: 10
Join Date: Jan 2024
Location: France
Device: Kindle Scribe
Well, it actually does that when running the default recipe (the one called "Le Monde: Édition abonnés"; I made a copy of it below). I tried again just now and it crashed again, with that odd HTTP 406 error from lemonde.fr.

However, I noticed that when I run the other Le Monde recipe (the basic one, for non-subscribers), it works. And if I enter my password I get the full articles, though I think it only covers a limited number of feeds.
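
From what I understand, HTTP 406 ("Not Acceptable") means the server refused the request because of its headers, and in practice that is often a filtered User-Agent. Assuming that is what lemonde.fr is doing, one workaround to try is pinning a common desktop UA in the recipe's get_browser(). This is only a guess: the UA string is the same one as the unused constant at the end of the copy below, and as far as I can tell get_browser() forwards keyword arguments to calibre's browser() helper, which accepts user_agent.

Code:
    def get_browser(self):
        # guess: the 406 comes from user-agent filtering on lemonde.fr,
        # so pin a common desktop Chrome UA instead of calibre's default
        ua = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
              'AppleWebKit/537.36 (KHTML, like Gecko) '
              'Chrome/80.0.3987.87 Safari/537.36')
        br = BasicNewsRecipe.get_browser(self, user_agent=ua)
        if self.username is not None and self.password is not None:
            br.open('https://secure.lemonde.fr/sfuser/connexion')
            br.select_form(nr=0)
            br['email'] = self.username
            br['password'] = self.password
            br.submit()
        return br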

Code:
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import absolute_import, division, print_function, unicode_literals

__author__ = 'S. Durand <sylvaindurand@users.noreply.github.com>'
__license__ = 'GPL v3'

'''
lemonde.fr
'''

from calibre.web.feeds.news import BasicNewsRecipe, classes
from datetime import date
import re


class LeMondeNumerique(BasicNewsRecipe):
    title = 'Le Monde: Édition abonnés'
    __author__ = 'Sylvain Durand'
    description = 'La version numérique du quotidien Le Monde'
    publisher = 'Société Editrice du Monde'
    publication_type = 'newspaper'
    needs_subscription = True
    language = 'fr'

    no_stylesheets = True
    ignore_duplicate_articles = {'title', 'url'}

    conversion_options = {
        'publisher': publisher
    }

    masthead_url = 'http://upload.wikimedia.org/wikipedia/commons/thumb/5/54/Le_monde_logo.svg/800px-Le_monde_logo.svg.png'

    lm_sections = [
        'international:International',
        'politique:Politique',
        'societe:Société',
        'economie:Éco',
        'culture:Culture',
        'idees:Idées',
        'planete:Planète',
        'sport:Sport',
        'sciences:Sciences',
        'pixels:Pixels',
        'campus:Campus'
    ]

    keep_only_tags = [
        classes('article__header'),
        dict(name='section', attrs={'class': ['article__content', 'article__heading',
                                              'article__wrapper']})
    ]

    remove_tags = [
        classes('article__status meta__date meta__reading-time meta__social multimedia-embed'),
        dict(name=['footer', 'link']),
        dict(name='img', attrs={'class': ['article__author-picture']}),
        dict(name='section', attrs={'class': ['article__reactions', 'author', 'catcher',
                                              'portfolio', 'services-inread']})
    ]

    remove_attributes = [
        'data-sizes', 'height', 'sizes', 'width'
    ]

    preprocess_regexps = [
        # insert space between author name and description
        (re.compile(r'(<span class="[^"]*author__desc[^>]*>)([^<]*</span>)',
                    re.IGNORECASE), lambda match: match.group(1) + ' ' + match.group(2)),
        # insert " | " between article type and description
        (re.compile(r'(<span class="[^"]*article__kicker[^>]*>[^<]*)(</span>)',
                    re.IGNORECASE), lambda match: match.group(1) + ' | ' + match.group(2))
    ]

    extra_css = '''
        h2 { font-size: 1em; }
        h3 { font-size: 1em; }
        .article__desc { font-weight: bold; }
        .article__fact { font-weight: bold; text-transform: uppercase; }
        .article__kicker { text-transform: uppercase; }
        .article__legend { font-size: 0.6em; margin-bottom: 1em; }
        .article__title { margin-top: 0em; }
    '''

    def get_browser(self):
        br = BasicNewsRecipe.get_browser(self)
        if self.username is not None and self.password is not None:
            try:
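                # log in via the subscriber form so the fetched pages
                # contain full article bodies instead of teasers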
                br.open('https://secure.lemonde.fr/sfuser/connexion')
                br.select_form(nr=0)
                br['email'] = self.username
                br['password'] = self.password
                br.submit()
            except Exception as e:
                self.log('Login failed with error:', str(e))
        return br

    def get_cover_url(self):
        # today's date is a reasonable guess for the ID of the cover
        cover_id = date.today().strftime('%Y%m%d')
        soup = self.index_to_soup('https://www.lemonde.fr/')
        a = soup.find('a', {'id': 'jelec_link', 'style': True})
        if a and a['style']:
            url = a['style'].split('/')
            if len(url) > 5 and url[3].isdigit():
                # overwrite guess if actual cover ID was found
                cover_id = url[3]
        return 'https://www.lemonde.fr/thumbnail/journal/' + cover_id + '/1000/1490'

    def parse_index(self):
        ans = []
        for x in self.lm_sections:
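            # each entry is '<url-slug>:<section title>' (e.g. 'economie:Éco');
            # partition(':')[::2] keeps the slug and the title, dropping the colon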
            s, section_title = x.partition(':')[::2]
            self.log('Processing section', section_title, '...')
            articles = list(self.parse_section('https://www.lemonde.fr/%s/' % s))
            if articles:
                ans.append((section_title, articles))
        return ans

    def parse_section(self, url):
        soup = self.index_to_soup(url)
        for article in soup.find_all('section', {'class': 'teaser'}):
            # extract URL
            a = article.find('a', {'class': 'teaser__link', 'href': True})
            if a is None:
                continue
            url = a['href']
            # skip articles without relevant content (e.g., videos)
            for el in 'blog chat live newsletters podcasts portfolio video visuel'.split():
                if '/' + el + '/' in url:
                    url = None
                    break
            if url is None:
                continue
            # extract title
            h3 = article.find('h3', {'class': 'teaser__title'})
            if h3 is None:
                continue
            title = self.tag_to_string(h3)
            # extract description
            desc = ''
            p = article.find('p', {'class': 'teaser__desc'})
            if p is not None:
                desc = self.tag_to_string(p)
            self.log('\tFound article', title, 'at', url)
            yield {'title': title, 'url': url, 'description': desc}

    def preprocess_html(self, soup):
        # when an image is available in multiple sizes, select the smallest one
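        # a data-srcset holds comma-separated candidates such as
        #   "https://.../img1.jpg 640w, https://.../img2.jpg 1024w"
        # so splitting on whitespace alternates URLs and width descriptors,
        # and data_srcset[-2] is the URL of the last candidate (presumably
        # listed smallest-last by lemonde.fr)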
        for img in soup.find_all('img', {'data-srcset': True}):
            data_srcset = img['data-srcset'].split()
            if len(data_srcset) > 1:
                img['src'] = data_srcset[-2]
                del img['data-srcset']
        return soup

    def postprocess_html(self, soup, first_fetch):
        # remove local hyperlinks
        for a in soup.find_all('a', {'href': True}):
            if '.lemonde.fr/' in a['href']:
                a.replace_with(self.tag_to_string(a))
        # clean up header
        for ul in soup.find_all('ul', {'class': 'breadcrumb'}):
            div = soup.new_tag('div')
            category = ''
            for li in ul.find_all('li', {'class': True}):
                category += self.tag_to_string(li).strip().upper() + ' - '
            # drop the trailing ' - ' separator
            div.string = category[:-3]
            ul.replace_with(div)
        return soup


# NB: as pasted, this module-level constant is never referenced anywhere in the
# recipe, so it has no effect; see the get_browser() override above for one way
# a custom user agent can actually be applied
calibre_most_common_ua = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
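
A quick way to see exactly where the 406 happens is to run the recipe from the command line instead of the GUI, since calibre then prints the full traceback. The filename is just whatever the copy is saved as, and --test limits the fetch to a couple of articles per feed:

Code:
ebook-convert lemonde_abonnes.recipe .epub --test -vv --username=you@example.com --password=secret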