View Single Post
Old 10-30-2010, 06:54 AM   #1
Metapioca
Member
Metapioca began at the beginning.
 
Posts: 18
Karma: 10
Join Date: Sep 2010
Device: Kindle 3 3G intl
Recipes for mediapart.fr and rue89.com (French news websites)

One new recipe for rue89.com, a free French news website:
Spoiler:

Code:
__license__   = 'GPL v3'
__copyright__ = '2010, Louis Gesbert <meta at antislash dot info>'
'''
Rue89
'''

import re, string
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag
from calibre.web.feeds.news import BasicNewsRecipe

class Rue89(BasicNewsRecipe):
    '''
    Recipe for Rue89, a free French news website: fetches the front-page
    feed and rebuilds each article page around its title and main content.
    '''
    # Bug fix: `title` was assigned twice ('Rue89' then u'Rue89'); keep the
    # effective (last) value only.
    title = u'Rue89'
    __author__ = '2010, Louis Gesbert <meta at antislash dot info>'
    description = 'Popular free french news website'
    language = 'fr'
    oldest_article = 7
    max_articles_per_feed = 50

    feeds = [(u'La Une', u'http://www.rue89.com/homepage/feed')]

    no_stylesheets = True

    preprocess_regexps = [
        # Demote existing <h2> headings to <h3> so the article title,
        # promoted to <h2> just below, stays the top heading of the page.
        (re.compile(r'<(/?)h2>', re.IGNORECASE|re.DOTALL),
         lambda match : '<'+match.group(1)+'h3>'),
        # Turn the print-title div into a proper <h2> heading.
        (re.compile(r'<div class="print-title">([^>]+)</div>', re.IGNORECASE|re.DOTALL),
         lambda match : '<h2>'+match.group(1)+'</h2>'),
        # Replace "numeros" digit images with big red styled text.
        # Bug fix: escape the dot before "gif" — it previously matched any
        # character, not just a literal '.'.
        (re.compile(r'<img[^>]+src="[^"]*/numeros/(\d+)[^0-9.">]*\.gif"[^>]*/>', re.IGNORECASE|re.DOTALL),
         lambda match : '<span style="font-family: Sans-serif; color: red; font-size:24pt; padding=2pt;">'+match.group(1)+'</span>'),
        # Typographic apostrophes everywhere.  Blunt: this also rewrites
        # apostrophes inside HTML attributes, but works for this site.
        (re.compile(r'\''), lambda match: '&rsquo;'),
        ]

    def preprocess_html(self, soup):
        '''Rebuild the page body as just the article title and content.'''
        title = soup.find('h1', {'class':'title'})
        content = soup.find('div', {'class':'content'})
        # Robustness: if a page lacks the expected structure, return it
        # untouched instead of crashing the whole fetch on a None insert.
        if title is None or content is None:
            return soup
        body = Tag(soup, 'body')
        soup.body.replaceWith(body)
        body.insert(0, title)
        body.insert(1, content)
        return soup

    remove_tags = [ #dict(name='div', attrs={'class':'print-source_url'}),
                    #dict(name='div', attrs={'class':'print-links'}),
                    #dict(name='img', attrs={'class':'print-logo'}),
                    dict(name='div', attrs={'class':'content_top'}),
                    dict(name='div', attrs={'id':'sidebar-left'}), ]

# -- print-version has poor quality on this website, better do the conversion ourselves
#    def print_version(self, url):
#        return re.sub('^.*-([0-9]+)$', 'http://www.rue89.com/print/\\1',url)


And a greatly improved one (original version by Mathieu Godlewski) for Mediapart, a well-known online-only newspaper with a paid subscription:
Spoiler:

Code:
__license__   = 'GPL v3'
__copyright__ = '2009, Mathieu Godlewski <mathieu at godlewski.fr>; 2010, Louis Gesbert <meta at antislash dot info>'
'''
Mediapart
'''

import re, string
from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag
from calibre.web.feeds.news import BasicNewsRecipe

class Mediapart(BasicNewsRecipe):
    '''
    Recipe for Mediapart, a subscription-only French online newspaper:
    logs in with the configured credentials and fetches the articles feed.
    '''
    title = 'Mediapart'
    __author__ = 'Mathieu Godlewski <mathieu at godlewski.fr>'
    description = 'Global news in french from online newspapers'
    oldest_article = 7
    language = 'fr'
    needs_subscription = True

    max_articles_per_feed = 50
    no_stylesheets = True

    cover_url = 'http://www.mediapart.fr/sites/all/themes/mediapart/mediapart/images/annonce.jpg'

    feeds = [
        ('Les articles', 'http://www.mediapart.fr/articles/feed'),
    ]

# -- The site's print version renders poorly, so the conversion is done
# -- here instead; the old print-based approach is kept below for reference.
#
#     preprocess_regexps = [ (re.compile(i[0], re.IGNORECASE|re.DOTALL), i[1]) for i in
#         [
#             (r'<div class="print-title">([^>]+)</div>', lambda match : '<h2>'+match.group(1)+'</h2>'),
#             (r'<span class=\'auteur_staff\'>[^>]+<a title=\'[^\']*\'[^>]*>([^<]*)</a>[^<]*</span>',
#              lambda match : '<i>'+match.group(1)+'</i>'),
#             (r'\'', lambda match: '&rsquo;'),
#         ]
#      ]
#
#     remove_tags    = [ dict(name='div', attrs={'class':'print-source_url'}),
#                        dict(name='div', attrs={'class':'print-links'}),
#                        dict(name='img', attrs={'src':'entete_article.png'}),
#                        dict(name='br') ]
#
#     def print_version(self, url):
#         raw = self.browser.open(url).read()
#         soup = BeautifulSoup(raw.decode('utf8', 'replace'))
#         div = soup.find('div', {'id':re.compile('node-\d+')})
#         if div is None:
#             return None
#         article_id = string.replace(div['id'], 'node-', '')
#         if article_id is None:
#             return None
#         return 'http://www.mediapart.fr/print/'+article_id

# -- Non-print version [dict(name='div', attrs={'class':'advert'})]

    keep_only_tags = [
        dict(name='h1', attrs={'class':'title'}),
        dict(name='div', attrs={'class':'page_papier_detail'}),
        ]

    def preprocess_html(self, soup):
        '''Wrap each div.titre in an <h3> so section titles render as headings.'''
        for section_title in soup.findAll('div', {'class':'titre'}):
            heading = Tag(soup, 'h3')
            section_title.replaceWith(heading)
            heading.insert(0, section_title)
        return soup

# -- Handle login

    def get_browser(self):
        '''Return a browser, logged in to mediapart.fr when credentials exist.'''
        # NOTE(review): base get_browser is called without self, as in the
        # original — presumably a classmethod in this calibre version; confirm.
        browser = BasicNewsRecipe.get_browser()
        if self.username is None or self.password is None:
            return browser
        browser.open('http://www.mediapart.fr/')
        browser.select_form(nr=1)  # the login form is the second form on the page
        browser['name'] = self.username
        browser['pass'] = self.password
        browser.submit()
        return browser


I've been testing them for a few days, but there's probably room for improvement.
Metapioca is offline   Reply With Quote