View Single Post
Old 04-03-2016, 04:56 PM   #1
Aimylios
Member
Aimylios began at the beginning.
 
Posts: 17
Karma: 10
Join Date: Apr 2016
Device: Tolino Vision 3HD
Handelsblatt recipe

Hi,

the current Handelsblatt recipe has been broken for quite some time. When I tried to fix it, I found that the article structure has changed fundamentally, so I had to write a completely new one.

It would of course be great if the old handelsblatt.recipe could be replaced by this new code.

Code:
#!/usr/bin/env python2

__license__   = 'GPL v3'
__copyright__ = '2016, Aimylios'

'''
handelsblatt.com
'''

import re
from calibre.web.feeds.news import BasicNewsRecipe

class Handelsblatt(BasicNewsRecipe):
    """Calibre news recipe for the German business newspaper Handelsblatt.

    Downloads articles from the public RSS feeds of handelsblatt.com,
    fetches the print view of each article, and cleans up the markup
    (removes media widgets, social-media embeds, and ad-like teasers).
    """

    title            = u'Handelsblatt'
    __author__       = 'Aimylios'  # based on the work of malfi and Hegi
    description      = u'RSS-Feeds von Handelsblatt.com'
    publisher        = 'Verlagsgruppe Handelsblatt GmbH'
    category         = 'news, politics, business, economy, Germany'
    publication_type = 'newspaper'
    language         = 'de'

    encoding                  = 'utf8'
    oldest_article            = 4
    max_articles_per_feed     = 30
    simultaneous_downloads    = 20
    no_stylesheets            = True
    remove_javascript         = True
    remove_empty_feeds        = True
    ignore_duplicate_articles = {'title', 'url'}

    conversion_options = {'smarten_punctuation' : True,
                          'publisher'           : publisher}

    # uncomment this to reduce file size
    #  compress_news_images = True

    # Page whose markup contains the current e-paper cover image
    # (extracted in get_cover_url below).
    cover_source = 'https://kaufhaus.handelsblatt.com/downloads/handelsblatt-epaper-p1951.html'
    masthead_url = 'http://www.handelsblatt.com/images/logo_handelsblatt/11002806/7-formatOriginal.png'
    #masthead_url = 'http://www.handelsblatt.com/images/hb_logo/6543086/1-format3.jpg'
    #masthead_url = 'http://www.handelsblatt-chemie.de/wp-content/uploads/2012/01/hb-logo.gif'

    feeds = [
              (u'Top-Themen', u'http://www.handelsblatt.com/contentexport/feed/top-themen'),
              (u'Politik', u'http://www.handelsblatt.com/contentexport/feed/politik'),
              (u'Unternehmen', u'http://www.handelsblatt.com/contentexport/feed/unternehmen'),
              (u'Finanzen', u'http://www.handelsblatt.com/contentexport/feed/finanzen'),
              (u'Technologie', u'http://www.handelsblatt.com/contentexport/feed/technologie'),
              (u'Panorama', u'http://www.handelsblatt.com/contentexport/feed/panorama'),
              (u'Sport', u'http://www.handelsblatt.com/contentexport/feed/sport')
            ]

    # Keep only the main article container; everything else is chrome.
    keep_only_tags = [ dict(name='div', attrs={'class':['vhb-article-container']}) ]

    # Strip media widgets, credits, sidebars, galleries/videos, and
    # social-media embeds (Facebook posts, tweets, Twitter buttons).
    remove_tags = [
                    dict(name='span', attrs={'class':['vhb-media', 'vhb-colon']}),
                    dict(name='small', attrs={'class':['vhb-credit']}),
                    dict(name='aside', attrs={'class':['vhb-article-element vhb-left',
                                                       'vhb-article-element vhb-left vhb-teasergallery',
                                                       'vhb-article-element vhb-left vhb-shorttexts']}),
                    dict(name='article', attrs={'class':['vhb-imagegallery vhb-teaser', 'vhb-teaser vhb-type-video']}),
                    dict(name='div', attrs={'class':['fb-post']}),
                    dict(name='blockquote', attrs={'class':['twitter-tweet']}),
                    dict(name='a', attrs={'class':['twitter-follow-button']})
                  ]

    preprocess_regexps = [
                           # Insert ". " after "Place" in <span class="hcf-location-mark">Place</span>
                           (re.compile(r'(<span class="hcf-location-mark">[^<]+)(</span>)',
                           re.DOTALL|re.IGNORECASE), lambda match: match.group(1) + '.&nbsp;' + match.group(2)),
                           # Insert ": " after "Title" in <em itemtype="text" itemprop="name" class="vhb-title">Title</em>
                           (re.compile(r'(<em itemtype="text" itemprop="name" class="vhb-title">[^<]+)(</em>)',
                           re.DOTALL|re.IGNORECASE), lambda match: match.group(1) + ':&nbsp;' + match.group(2))
                         ]

    extra_css = 'h2 {text-align: left} \
                 h3 {font-size: 1em; text-align: left} \
                 h4 {font-size: 1em; text-align: left; margin-bottom: 0em} \
                 em {font-style: normal; font-weight: bold} \
                 .vhb-subline {font-size: 0.6em; text-transform: uppercase} \
                 .vhb-article-caption {float: left; padding-right: 0.2em} \
                 .vhb-article-author-cell ul {list-style-type: none; margin: 0em} \
                 .vhb-teaser-head {margin-top: 1em; margin-bottom: 1em} \
                 .vhb-caption-wrapper {font-size: 0.6em} \
                 .hcf-location-mark {font-weight: bold} \
                 .panel-link {color: black; text-decoration: none} \
                 .panel-body p {margin-top: 0em}'

    def get_cover_url(self):
        """Extract the cover image URL from the e-paper shop page.

        The URL is embedded in the inline ``style`` attribute of an
        ``<img>`` tag, e.g. ``style="... url(http://...) ..."``.
        """
        soup = self.index_to_soup(self.cover_source)
        style = soup.find('img', alt='Handelsblatt ePaper', style=True)['style']
        self.cover_url = style.partition('(')[-1].rpartition(')')[0]
        return self.cover_url

    def print_version(self, url):
        """Map an article URL to its print-view URL by inserting the
        ``v_detail_tab_print`` path segment before the article id."""
        # 'article_id' instead of 'id' to avoid shadowing the builtin.
        main, sep, article_id = url.rpartition('/')
        return main + '/v_detail_tab_print/' + article_id

    def preprocess_html(self, soup):
        """Abort articles without relevant text content (e.g. videos),
        recognizable by a missing ``vhb-article-container`` div."""
        article_container = soup.find('div', {'class': 'vhb-article-container'})
        if article_container is None:
            self.abort_article()
        return soup

    def postprocess_html(self, soup, first_fetch):
        """Replace each figure caption with the ``alt`` text of its image.

        The alt text carries the full caption (including the source
        credit) without linebreaks, whereas the original caption div is
        oddly formatted.
        """
        for fig in soup.findAll('figure', {'class': 'vhb-image'}):
            caption = fig.find('div', {'class': 'vhb-caption'})
            img = fig.find('img')
            # Guard against figures missing a caption, an image, or alt
            # text -- the original code raised AttributeError here.
            if caption is not None and img is not None:
                alt = img.get('alt')
                if alt:
                    caption.replaceWith(alt)
        return soup
Aimylios is offline   Reply With Quote